intel: Replace sprinkled intel_batchbuffer_flush with MI_FLUSH or nothing.

Most of these were to ensure that caches got synchronized between 2d (or meta)
rendering and later use of the target as a source, such as for texture
miptree setup.  Those are replaced with intel_batchbuffer_emit_mi_flush(),
which just drops an MI_FLUSH.  Most of the remainder were to ensure that
REFERENCES_CLIPRECTS batchbuffers got flushed before the lock was dropped.
Those are now replaced by automatically flushing those when dropping the lock.
This commit is contained in:
Eric Anholt 2008-06-26 13:45:31 -07:00
parent f6abe8f0f2
commit 93f701bc36
13 changed files with 26 additions and 112 deletions

View file

@@ -272,8 +272,6 @@ do_blit_readpixels(GLcontext * ctx,
rect.x2 - rect.x1, rect.y2 - rect.y1,
GL_COPY);
}
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);

View file

@@ -33,68 +33,6 @@
#ifndef BRW_DEFINES_H
#define BRW_DEFINES_H
/*
*/
#define MI_NOOP 0x00
#define MI_USER_INTERRUPT 0x02
#define MI_WAIT_FOR_EVENT 0x03
#define MI_REPORT_HEAD 0x07
#define MI_ARB_ON_OFF 0x08
#define MI_BATCH_BUFFER_END 0x0A
#define MI_OVERLAY_FLIP 0x11
#define MI_LOAD_SCAN_LINES_INCL 0x12
#define MI_LOAD_SCAN_LINES_EXCL 0x13
#define MI_DISPLAY_BUFFER_INFO 0x14
#define MI_SET_CONTEXT 0x18
#define MI_STORE_DATA_IMM 0x20
#define MI_STORE_DATA_INDEX 0x21
#define MI_LOAD_REGISTER_IMM 0x22
#define MI_STORE_REGISTER_MEM 0x24
#define MI_BATCH_BUFFER_START 0x31
#define MI_SYNCHRONOUS_FLIP 0x0
#define MI_ASYNCHRONOUS_FLIP 0x1
#define MI_BUFFER_SECURE 0x0
#define MI_BUFFER_NONSECURE 0x1
#define MI_ARBITRATE_AT_CHAIN_POINTS 0x0
#define MI_ARBITRATE_BETWEEN_INSTS 0x1
#define MI_NO_ARBITRATION 0x3
#define MI_CONDITION_CODE_WAIT_DISABLED 0x0
#define MI_CONDITION_CODE_WAIT_0 0x1
#define MI_CONDITION_CODE_WAIT_1 0x2
#define MI_CONDITION_CODE_WAIT_2 0x3
#define MI_CONDITION_CODE_WAIT_3 0x4
#define MI_CONDITION_CODE_WAIT_4 0x5
#define MI_DISPLAY_PIPE_A 0x0
#define MI_DISPLAY_PIPE_B 0x1
#define MI_DISPLAY_PLANE_A 0x0
#define MI_DISPLAY_PLANE_B 0x1
#define MI_DISPLAY_PLANE_C 0x2
#define MI_STANDARD_FLIP 0x0
#define MI_ENQUEUE_FLIP_PERFORM_BASE_FRAME_NUMBER_LOAD 0x1
#define MI_ENQUEUE_FLIP_TARGET_FRAME_NUMBER_RELATIVE 0x2
#define MI_ENQUEUE_FLIP_ABSOLUTE_TARGET_FRAME_NUMBER 0x3
#define MI_PHYSICAL_ADDRESS 0x0
#define MI_VIRTUAL_ADDRESS 0x1
#define MI_BUFFER_MEMORY_MAIN 0x0
#define MI_BUFFER_MEMORY_GTT 0x2
#define MI_BUFFER_MEMORY_PER_PROCESS_GTT 0x3
#define MI_FLIP_CONTINUE 0x0
#define MI_FLIP_ON 0x1
#define MI_FLIP_OFF 0x2
#define MI_UNTRUSTED_REGISTER_SPACE 0x0
#define MI_TRUSTED_REGISTER_SPACE 0x1
/* 3D state:
*/
#define _3DOP_3DSTATE_PIPELINED 0x0
@@ -118,7 +56,6 @@
#define _3DSTATE_LINE_STIPPLE 0x08
#define _3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP 0x09
#define _3DCONTROL 0x00
#define _3DPRIMITIVE 0x00
#define PIPE_CONTROL_NOWRITE 0x00
#define PIPE_CONTROL_WRITEIMMEDIATE 0x01

View file

@@ -4,6 +4,7 @@
#include "mtypes.h"
#include "dri_bufmgr.h"
#include "intel_reg.h"
struct intel_context;
@@ -144,4 +145,11 @@ intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
#define ADVANCE_BATCH() do { } while(0)
/* Emit a single MI_FLUSH command into the batch buffer.
 *
 * Per the commit message above, this flushes the GPU render caches so that
 * prior 2D/meta rendering is visible to later operations that read the same
 * buffer as a source.  Unlike intel_batchbuffer_flush(), it does NOT submit
 * the batch; it only appends one command dword.
 */
static INLINE void
intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
{
/* Reserve room for one dword (4 bytes); MI_FLUSH references no cliprects. */
intel_batchbuffer_require_space(batch, 4, IGNORE_CLIPRECTS);
intel_batchbuffer_emit_dword(batch, MI_FLUSH);
}
#endif

View file

@@ -159,14 +159,10 @@ intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
ADVANCE_BATCH();
}
/* Emit a flush so that, on systems where we don't have automatic flushing
* set (such as 965), the results all land on the screen in a timely
* fashion.
/* Flush the rendering and the batch so that the results all land on the
* screen in a timely fashion.
*/
BEGIN_BATCH(1, IGNORE_CLIPRECTS);
OUT_BATCH(MI_FLUSH);
ADVANCE_BATCH();
intel_batchbuffer_emit_mi_flush(intel->batch);
intel_batchbuffer_flush(intel->batch);
}
@@ -372,10 +368,7 @@ intelEmitCopyBlit(struct intel_context *intel,
src_offset + src_y * src_pitch);
ADVANCE_BATCH();
}
BEGIN_BATCH(1, NO_LOOP_CLIPRECTS);
OUT_BATCH(MI_FLUSH);
ADVANCE_BATCH();
intel_batchbuffer_flush(intel->batch);
intel_batchbuffer_emit_mi_flush(intel->batch);
}
@@ -556,7 +549,7 @@ intelClearWithBlit(GLcontext *ctx, GLbitfield mask)
}
}
}
intel_batchbuffer_flush(intel->batch);
intel_batchbuffer_emit_mi_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
@@ -594,7 +587,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
(8 * 4) +
(3 * 4) +
dwords,
NO_LOOP_CLIPRECTS );
REFERENCES_CLIPRECTS );
opcode = XY_SETUP_BLT_CMD;
if (cpp == 4)
@@ -616,7 +609,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
if (dst_tiled)
blit_cmd |= XY_DST_TILED;
BEGIN_BATCH(8 + 3, NO_LOOP_CLIPRECTS);
BEGIN_BATCH(8 + 3, REFERENCES_CLIPRECTS);
OUT_BATCH(opcode);
OUT_BATCH(br13);
OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
@@ -636,5 +629,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
intel_batchbuffer_data( intel->batch,
src_bits,
dwords * 4,
NO_LOOP_CLIPRECTS );
REFERENCES_CLIPRECTS );
intel_batchbuffer_emit_mi_flush(intel->batch);
}

View file

@@ -32,6 +32,7 @@
#include "intel_context.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_regions.h"
#include "dri_bufmgr.h"

View file

@@ -1008,6 +1008,7 @@ void UNLOCK_HARDWARE( struct intel_context *intel )
* Nothing should be left in batch outside of LOCK/UNLOCK which references
* cliprects.
*/
assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
if (intel->batch->cliprect_mode == REFERENCES_CLIPRECTS)
intel_batchbuffer_flush(intel->batch);
}

View file

@@ -43,7 +43,7 @@
#include "intel_buffer_objects.h"
#include "intel_buffers.h"
#include "intel_pixel.h"
#include "intel_reg.h"
#define FILE_DEBUG_FLAG DEBUG_PIXEL
@@ -301,9 +301,8 @@ do_blit_bitmap( GLcontext *ctx,
}
}
}
out:
intel_batchbuffer_flush(intel->batch);
}
out:
UNLOCK_HARDWARE(intel);

View file

@@ -229,7 +229,7 @@ do_texture_copypixels(GLcontext * ctx,
out:
intel->vtbl.leave_meta_state(intel);
intel_batchbuffer_flush(intel->batch);
intel_batchbuffer_emit_mi_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
@@ -345,10 +345,8 @@ do_blit_copypixels(GLcontext * ctx,
ctx->Color.ColorLogicOpEnabled ?
ctx->Color.LogicOp : GL_COPY);
}
out:
intel_batchbuffer_flush(intel->batch);
}
out:
UNLOCK_HARDWARE(intel);
DBG("%s: success\n", __FUNCTION__);

View file

@@ -181,7 +181,7 @@ do_texture_drawpixels(GLcontext * ctx,
srcx, srcx + width, srcy + height, srcy);
out:
intel->vtbl.leave_meta_state(intel);
intel_batchbuffer_flush(intel->batch);
intel_batchbuffer_emit_mi_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
return GL_TRUE;
@@ -322,7 +322,6 @@ do_blit_drawpixels(GLcontext * ctx,
ctx->Color.ColorLogicOpEnabled ?
ctx->Color.LogicOp : GL_COPY);
}
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);

View file

@@ -376,8 +376,6 @@ intel_region_cow(struct intel_context *intel, struct intel_region *region)
/* Now blit from the texture buffer to the new buffer:
*/
intel_batchbuffer_flush(intel->batch);
was_locked = intel->locked;
if (intel->locked)
LOCK_HARDWARE(intel);
@@ -390,8 +388,6 @@ intel_region_cow(struct intel_context *intel, struct intel_region *region)
region->pitch, region->height,
GL_COPY);
intel_batchbuffer_flush(intel->batch);
if (was_locked)
UNLOCK_HARDWARE(intel);
}

View file

@@ -151,8 +151,6 @@ do_copy_texsubimage(struct intel_context *intel,
intelImage->mt->region->tiled,
x, y + height, dstx, dsty, width, height,
GL_COPY); /* ? */
intel_batchbuffer_flush(intel->batch);
}
}

View file

@@ -235,8 +235,6 @@ try_pbo_upload(struct intel_context *intel,
dst_stride, dst_buffer, dst_offset, GL_FALSE,
0, 0, 0, 0, width, height,
GL_COPY);
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);

View file

@@ -124,13 +124,10 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
struct intel_texture_object *intelObj = intel_texture_object(tObj);
int comp_byte = 0;
int cpp;
GLuint face, i;
GLuint nr_faces = 0;
struct intel_texture_image *firstImage;
GLboolean need_flush = GL_FALSE;
/* We know/require this is true by now:
*/
assert(intelObj->base._Complete);
@@ -223,21 +220,10 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
*/
if (intelObj->mt != intelImage->mt) {
copy_image_data_to_tree(intel, intelObj, intelImage);
need_flush = GL_TRUE;
}
}
}
#ifdef I915
/* XXX: what is this flush about?
* On 965, it causes a batch flush in the middle of the state relocation
* emits, which means that the eventual rendering doesn't have all of the
* required relocations in place.
*/
if (need_flush)
intel_batchbuffer_flush(intel->batch);
#endif
return GL_TRUE;
}