mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-05 00:58:05 +02:00
[intel] Add more cliprect modes to cover other meanings for batch emits.
The previous change gave us only two modes, one which looped over the batch per cliprect (3d drawing) and one that didn't (state updates). However, we really want 4: - Batch doesn't care about cliprects (state updates) - Batch needs DRAWING_RECTANGLE looping per cliprect (3d drawing) - Batch needs to be executed just once (region fills, copies, etc.) - Batch already includes cliprect handling, and must be flushed by unlock time (copybuffers, clears). All callers should now be fixed to use one of these states for any batchbuffer emits. Thanks to Keith Whitwell for pointing out the failure.
This commit is contained in:
parent
7086df5868
commit
a04b632350
20 changed files with 126 additions and 85 deletions
|
|
@ -295,7 +295,7 @@ i830_emit_invarient_state(struct intel_context *intel)
|
|||
{
|
||||
BATCH_LOCALS;
|
||||
|
||||
BEGIN_BATCH(40, 0);
|
||||
BEGIN_BATCH(40, IGNORE_CLIPRECTS);
|
||||
|
||||
OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
|
||||
OUT_BATCH(0);
|
||||
|
|
@ -372,15 +372,7 @@ i830_emit_invarient_state(struct intel_context *intel)
|
|||
|
||||
|
||||
#define emit( intel, state, size ) \
|
||||
do { \
|
||||
int k; \
|
||||
BEGIN_BATCH(size / sizeof(GLuint), 0); \
|
||||
for (k = 0 ; k < size / sizeof(GLuint) ; k++) { \
|
||||
if (0) _mesa_printf(" 0x%08x\n", state[k]); \
|
||||
OUT_BATCH(state[k]); \
|
||||
} \
|
||||
ADVANCE_BATCH(); \
|
||||
} while (0)
|
||||
intel_batchbuffer_data(intel->batch, state, size, IGNORE_CLIPRECTS )
|
||||
|
||||
static GLuint
|
||||
get_dirty(struct i830_hw_state *state)
|
||||
|
|
@ -473,13 +465,13 @@ i830_do_emit_state(struct intel_context *intel)
|
|||
|
||||
if (dirty & I830_UPLOAD_CTX) {
|
||||
DBG("I830_UPLOAD_CTX:\n");
|
||||
emit(i830, state->Ctx, sizeof(state->Ctx));
|
||||
emit(intel, state->Ctx, sizeof(state->Ctx));
|
||||
|
||||
}
|
||||
|
||||
if (dirty & I830_UPLOAD_BUFFERS) {
|
||||
DBG("I830_UPLOAD_BUFFERS:\n");
|
||||
BEGIN_BATCH(I830_DEST_SETUP_SIZE + 2, 0);
|
||||
BEGIN_BATCH(I830_DEST_SETUP_SIZE + 2, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
|
||||
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
|
||||
OUT_RELOC(state->draw_region->buffer,
|
||||
|
|
@ -505,14 +497,14 @@ i830_do_emit_state(struct intel_context *intel)
|
|||
|
||||
if (dirty & I830_UPLOAD_STIPPLE) {
|
||||
DBG("I830_UPLOAD_STIPPLE:\n");
|
||||
emit(i830, state->Stipple, sizeof(state->Stipple));
|
||||
emit(intel, state->Stipple, sizeof(state->Stipple));
|
||||
}
|
||||
|
||||
for (i = 0; i < I830_TEX_UNITS; i++) {
|
||||
if ((dirty & I830_UPLOAD_TEX(i))) {
|
||||
DBG("I830_UPLOAD_TEX(%d):\n", i);
|
||||
|
||||
BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1, 0);
|
||||
BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);
|
||||
|
||||
if (state->tex_buffer[i]) {
|
||||
|
|
@ -539,7 +531,7 @@ i830_do_emit_state(struct intel_context *intel)
|
|||
if (dirty & I830_UPLOAD_TEXBLEND(i)) {
|
||||
DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
|
||||
state->TexBlendWordsUsed[i]);
|
||||
emit(i830, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
|
||||
emit(intel, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -170,7 +170,7 @@ i915_emit_invarient_state(struct intel_context *intel)
|
|||
{
|
||||
BATCH_LOCALS;
|
||||
|
||||
BEGIN_BATCH(200, 0);
|
||||
BEGIN_BATCH(200, IGNORE_CLIPRECTS);
|
||||
|
||||
OUT_BATCH(_3DSTATE_AA_CMD |
|
||||
AA_LINE_ECAAR_WIDTH_ENABLE |
|
||||
|
|
@ -229,7 +229,7 @@ i915_emit_invarient_state(struct intel_context *intel)
|
|||
|
||||
|
||||
#define emit(intel, state, size ) \
|
||||
intel_batchbuffer_data(intel->batch, state, size, 0 )
|
||||
intel_batchbuffer_data(intel->batch, state, size, IGNORE_CLIPRECTS )
|
||||
|
||||
static GLuint
|
||||
get_dirty(struct i915_hw_state *state)
|
||||
|
|
@ -354,7 +354,7 @@ i915_do_emit_state(struct intel_context *intel)
|
|||
if (dirty & I915_UPLOAD_BUFFERS) {
|
||||
if (INTEL_DEBUG & DEBUG_STATE)
|
||||
fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
|
||||
BEGIN_BATCH(I915_DEST_SETUP_SIZE + 2, 0);
|
||||
BEGIN_BATCH(I915_DEST_SETUP_SIZE + 2, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
|
||||
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
|
||||
OUT_RELOC(state->draw_region->buffer,
|
||||
|
|
@ -400,7 +400,7 @@ i915_do_emit_state(struct intel_context *intel)
|
|||
if (dirty & I915_UPLOAD_TEX(i))
|
||||
nr++;
|
||||
|
||||
BEGIN_BATCH(2 + nr * 3, 0);
|
||||
BEGIN_BATCH(2 + nr * 3, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
|
||||
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
|
||||
for (i = 0; i < I915_TEX_UNITS; i++)
|
||||
|
|
@ -424,7 +424,7 @@ i915_do_emit_state(struct intel_context *intel)
|
|||
}
|
||||
ADVANCE_BATCH();
|
||||
|
||||
BEGIN_BATCH(2 + nr * 3, 0);
|
||||
BEGIN_BATCH(2 + nr * 3, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3 * nr));
|
||||
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
|
||||
for (i = 0; i < I915_TEX_UNITS; i++)
|
||||
|
|
|
|||
|
|
@ -832,5 +832,11 @@ void UNLOCK_HARDWARE( struct intel_context *intel )
|
|||
|
||||
if (INTEL_DEBUG & DEBUG_LOCK)
|
||||
_mesa_printf("%s - unlocked\n", __progname);
|
||||
}
|
||||
|
||||
/**
|
||||
* Nothing should be left in batch outside of LOCK/UNLOCK which references
|
||||
* cliprects.
|
||||
*/
|
||||
assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -113,7 +113,7 @@ intelDmaPrimitive(struct intel_context *intel, GLenum prim)
|
|||
fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
|
||||
INTEL_FIREVERTICES(intel);
|
||||
intel->vtbl.reduced_primitive_state(intel, reduced_prim[prim]);
|
||||
intelStartInlinePrimitive(intel, hw_prim[prim], INTEL_BATCH_CLIPRECTS);
|
||||
intelStartInlinePrimitive(intel, hw_prim[prim], LOOP_CLIPRECTS);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -103,7 +103,7 @@ intelStartInlinePrimitive(struct intel_context *intel,
|
|||
|
||||
/* _mesa_printf("%s *", __progname); */
|
||||
|
||||
intel_wait_flips(intel, batch_flags);
|
||||
intel_wait_flips(intel);
|
||||
|
||||
/* Emit a slot which will be filled with the inline primitive
|
||||
* command later.
|
||||
|
|
@ -129,11 +129,11 @@ void
|
|||
intelWrapInlinePrimitive(struct intel_context *intel)
|
||||
{
|
||||
GLuint prim = intel->prim.primitive;
|
||||
GLuint cliprects_enable = intel->batch->cliprects_enable;
|
||||
enum cliprect_mode cliprect_mode = intel->batch->cliprect_mode;
|
||||
|
||||
intel_flush_inline_primitive(intel);
|
||||
intel_batchbuffer_flush(intel->batch);
|
||||
intelStartInlinePrimitive(intel, prim, cliprects_enable); /* ??? */
|
||||
intelStartInlinePrimitive(intel, prim, cliprect_mode); /* ??? */
|
||||
}
|
||||
|
||||
GLuint *
|
||||
|
|
@ -942,7 +942,7 @@ intelRasterPrimitive(GLcontext * ctx, GLenum rprim, GLuint hwprim)
|
|||
if (hwprim != intel->prim.primitive) {
|
||||
INTEL_FIREVERTICES(intel);
|
||||
|
||||
intelStartInlinePrimitive(intel, hwprim, INTEL_BATCH_CLIPRECTS);
|
||||
intelStartInlinePrimitive(intel, hwprim, LOOP_CLIPRECTS);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1079,10 +1079,10 @@ intel_meta_draw_poly(struct intel_context *intel,
|
|||
if (!was_locked)
|
||||
LOCK_HARDWARE(intel);
|
||||
|
||||
/* All 3d primitives should be emitted with INTEL_BATCH_CLIPRECTS,
|
||||
/* All 3d primitives should be emitted with LOOP_CLIPRECTS,
|
||||
* otherwise the drawing origin (DR4) might not be set correctly.
|
||||
*/
|
||||
intelStartInlinePrimitive(intel, PRIM3D_TRIFAN, INTEL_BATCH_CLIPRECTS);
|
||||
intelStartInlinePrimitive(intel, PRIM3D_TRIFAN, LOOP_CLIPRECTS);
|
||||
vb = (union fi *) intelExtendInlinePrimitive(intel, n * 6);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
|
|
|
|||
|
|
@ -656,6 +656,7 @@ void brw_debug_batch(struct intel_context *intel);
|
|||
void brwUpdateTextureState( struct intel_context *intel );
|
||||
void brw_FrameBufferTexInit( struct brw_context *brw );
|
||||
void brw_FrameBufferTexDestroy( struct brw_context *brw );
|
||||
void brw_validate_textures( struct brw_context *brw );
|
||||
|
||||
/*======================================================================
|
||||
* brw_metaops.c
|
||||
|
|
|
|||
|
|
@ -201,7 +201,7 @@ static void upload_constant_buffer(struct brw_context *brw)
|
|||
brw->curbe.tracked_state.dirty.mesa |= fp->param_state;
|
||||
|
||||
if (sz == 0) {
|
||||
BEGIN_BATCH(2, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(2, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH((CMD_CONST_BUFFER << 16) | (2 - 2));
|
||||
OUT_BATCH(0);
|
||||
ADVANCE_BATCH();
|
||||
|
|
@ -322,7 +322,7 @@ static void upload_constant_buffer(struct brw_context *brw)
|
|||
* flushes as necessary when doublebuffering of CURBEs isn't
|
||||
* possible.
|
||||
*/
|
||||
BEGIN_BATCH(2, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(2, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH((CMD_CONST_BUFFER << 16) | (1 << 8) | (2 - 2));
|
||||
OUT_RELOC(brw->curbe.curbe_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
|
||||
(sz - 1));
|
||||
|
|
|
|||
|
|
@ -147,7 +147,7 @@ static void brw_emit_prim( struct brw_context *brw,
|
|||
|
||||
if (prim_packet.verts_per_instance) {
|
||||
intel_batchbuffer_data( brw->intel.batch, &prim_packet,
|
||||
sizeof(prim_packet), INTEL_BATCH_CLIPRECTS);
|
||||
sizeof(prim_packet), LOOP_CLIPRECTS);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -256,6 +256,8 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
|
|||
if (ctx->NewState)
|
||||
_mesa_update_state( ctx );
|
||||
|
||||
brw_validate_textures( brw );
|
||||
|
||||
/* Bind all inputs, derive varying and size information:
|
||||
*/
|
||||
brw_merge_inputs( brw, arrays );
|
||||
|
|
|
|||
|
|
@ -538,7 +538,7 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
|
|||
vbp.header.bits.length = (1 + nr_enabled * 4) - 2;
|
||||
vbp.header.bits.opcode = CMD_VERTEX_BUFFER;
|
||||
|
||||
BEGIN_BATCH(vbp.header.bits.length+2, 0);
|
||||
BEGIN_BATCH(vbp.header.bits.length+2, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH( vbp.header.dword );
|
||||
|
||||
for (i = 0; i < nr_enabled; i++) {
|
||||
|
|
@ -625,7 +625,7 @@ void brw_upload_indices( struct brw_context *brw,
|
|||
ib.header.bits.cut_index_enable = 0;
|
||||
|
||||
|
||||
BEGIN_BATCH(4, 0);
|
||||
BEGIN_BATCH(4, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH( ib.header.dword );
|
||||
OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, offset);
|
||||
OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
|
||||
|
|
|
|||
|
|
@ -82,7 +82,7 @@ static void upload_binding_table_pointers(struct brw_context *brw)
|
|||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(6, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
|
||||
OUT_BATCH(0); /* vs */
|
||||
OUT_BATCH(0); /* gs */
|
||||
|
|
@ -112,7 +112,7 @@ static void upload_pipelined_state_pointers(struct brw_context *brw )
|
|||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
|
||||
BEGIN_BATCH(7, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(7, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
|
||||
OUT_RELOC(brw->vs.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
|
||||
if (brw->gs.prog_active)
|
||||
|
|
@ -184,7 +184,7 @@ static void upload_depthbuffer(struct brw_context *brw)
|
|||
struct intel_region *region = brw->state.depth_region;
|
||||
|
||||
if (region == NULL) {
|
||||
BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(5, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
|
||||
OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
|
||||
(BRW_SURFACE_NULL << 29));
|
||||
|
|
@ -210,7 +210,7 @@ static void upload_depthbuffer(struct brw_context *brw)
|
|||
return;
|
||||
}
|
||||
|
||||
BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(5, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
|
||||
OUT_BATCH(((region->pitch * region->cpp) - 1) |
|
||||
(format << 18) |
|
||||
|
|
@ -445,7 +445,7 @@ static void upload_state_base_address( struct brw_context *brw )
|
|||
/* Output the structure (brw_state_base_address) directly to the
|
||||
* batchbuffer, so we can emit relocations inline.
|
||||
*/
|
||||
BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(6, IGNORE_CLIPRECTS);
|
||||
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
|
||||
OUT_BATCH(1); /* General state base address */
|
||||
OUT_BATCH(1); /* Surface state base address */
|
||||
|
|
|
|||
|
|
@ -122,7 +122,7 @@ void brw_destroy_cache( struct brw_context *brw );
|
|||
/***********************************************************************
|
||||
* brw_state_batch.c
|
||||
*/
|
||||
#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data( brw->intel.batch, (s), sizeof(*(s)), 0)
|
||||
#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data( brw->intel.batch, (s), sizeof(*(s)), IGNORE_CLIPRECTS)
|
||||
#define BRW_CACHED_BATCH_STRUCT(brw, s) brw_cached_batch_struct( brw, (s), sizeof(*(s)) )
|
||||
|
||||
GLboolean brw_cached_batch_struct( struct brw_context *brw,
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ GLboolean brw_cached_batch_struct( struct brw_context *brw,
|
|||
struct header *newheader = (struct header *)data;
|
||||
|
||||
if (brw->emit_state_always) {
|
||||
intel_batchbuffer_data(brw->intel.batch, data, sz, 0);
|
||||
intel_batchbuffer_data(brw->intel.batch, data, sz, IGNORE_CLIPRECTS);
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
|
|
@ -75,7 +75,7 @@ GLboolean brw_cached_batch_struct( struct brw_context *brw,
|
|||
|
||||
emit:
|
||||
memcpy(item->header, newheader, sz);
|
||||
intel_batchbuffer_data(brw->intel.batch, data, sz, 0);
|
||||
intel_batchbuffer_data(brw->intel.batch, data, sz, IGNORE_CLIPRECTS);
|
||||
return GL_TRUE;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@
|
|||
#include "intel_context.h"
|
||||
#include "intel_ioctl.h"
|
||||
#include "intel_regions.h"
|
||||
#include "intel_tex.h"
|
||||
#include "brw_context.h"
|
||||
#include "brw_defines.h"
|
||||
|
||||
|
|
@ -76,3 +77,21 @@ void brw_FrameBufferTexDestroy( struct brw_context *brw )
|
|||
brw->intel.ctx.Driver.DeleteTexture( &brw->intel.ctx,
|
||||
brw->intel.frame_buffer_texobj );
|
||||
}
|
||||
|
||||
/**
|
||||
* Finalizes all textures, completing any rendering that needs to be done
|
||||
* to prepare them.
|
||||
*/
|
||||
void brw_validate_textures( struct brw_context *brw )
|
||||
{
|
||||
struct intel_context *intel = &brw->intel;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
|
||||
struct gl_texture_unit *texUnit = &brw->attribs.Texture->Unit[i];
|
||||
|
||||
if (texUnit->_ReallyEnabled) {
|
||||
intel_finalize_mipmap_tree(intel, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -394,9 +394,7 @@ static void upload_wm_surfaces(struct brw_context *brw )
|
|||
|
||||
/* _NEW_TEXTURE, BRW_NEW_TEXDATA
|
||||
*/
|
||||
if (texUnit->_ReallyEnabled &&
|
||||
intel_finalize_mipmap_tree(intel, i))
|
||||
{
|
||||
if (texUnit->_ReallyEnabled) {
|
||||
brw_update_texture_surface(ctx, i);
|
||||
brw->wm.nr_surfaces = i+2;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -812,6 +812,12 @@ void UNLOCK_HARDWARE( struct intel_context *intel )
|
|||
|
||||
DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
|
||||
_glthread_UNLOCK_MUTEX(lockMutex);
|
||||
|
||||
/**
|
||||
* Nothing should be left in batch outside of LOCK/UNLOCK which references
|
||||
* cliprects.
|
||||
*/
|
||||
assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -87,7 +87,7 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
|
|||
batch->ptr = batch->map;
|
||||
batch->dirty_state = ~0;
|
||||
batch->id = batch->intel->batch_id++;
|
||||
batch->cliprects_enable = INTEL_BATCH_NO_CLIPRECTS;
|
||||
batch->cliprect_mode = IGNORE_CLIPRECTS;
|
||||
}
|
||||
|
||||
struct intel_batchbuffer *
|
||||
|
|
@ -143,18 +143,18 @@ do_flush_locked(struct intel_batchbuffer *batch,
|
|||
*/
|
||||
|
||||
if (!(intel->numClipRects == 0 &&
|
||||
batch->cliprects_enable == INTEL_BATCH_CLIPRECTS)) {
|
||||
batch->cliprect_mode == LOOP_CLIPRECTS)) {
|
||||
if (intel->ttm == GL_TRUE) {
|
||||
intel_exec_ioctl(batch->intel,
|
||||
used,
|
||||
batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS,
|
||||
batch->cliprect_mode != LOOP_CLIPRECTS,
|
||||
allow_unlock,
|
||||
start, count, &batch->last_fence);
|
||||
} else {
|
||||
intel_batch_ioctl(batch->intel,
|
||||
batch->buf->offset,
|
||||
used,
|
||||
batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS,
|
||||
batch->cliprect_mode != LOOP_CLIPRECTS,
|
||||
allow_unlock);
|
||||
}
|
||||
}
|
||||
|
|
@ -162,7 +162,7 @@ do_flush_locked(struct intel_batchbuffer *batch,
|
|||
dri_post_submit(batch->buf, &batch->last_fence);
|
||||
|
||||
if (intel->numClipRects == 0 &&
|
||||
batch->cliprects_enable == INTEL_BATCH_CLIPRECTS) {
|
||||
batch->cliprect_mode == LOOP_CLIPRECTS) {
|
||||
if (allow_unlock) {
|
||||
/* If we are not doing any actual user-visible rendering,
|
||||
* do a sched_yield to keep the app from pegging the cpu while
|
||||
|
|
@ -264,10 +264,10 @@ intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
|
|||
void
|
||||
intel_batchbuffer_data(struct intel_batchbuffer *batch,
|
||||
const void *data, GLuint bytes,
|
||||
enum cliprects_enable cliprects_enable)
|
||||
enum cliprect_mode cliprect_mode)
|
||||
{
|
||||
assert((bytes & 3) == 0);
|
||||
intel_batchbuffer_require_space(batch, bytes, cliprects_enable);
|
||||
intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
|
||||
__memcpy(batch->ptr, data, bytes);
|
||||
batch->ptr += bytes;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,9 +10,29 @@ struct intel_context;
|
|||
#define BATCH_SZ 16384
|
||||
#define BATCH_RESERVED 16
|
||||
|
||||
enum cliprects_enable {
|
||||
INTEL_BATCH_CLIPRECTS = 0,
|
||||
INTEL_BATCH_NO_CLIPRECTS = 1
|
||||
enum cliprect_mode {
|
||||
/**
|
||||
* Batchbuffer contents may be looped over per cliprect, but do not
|
||||
* require it.
|
||||
*/
|
||||
IGNORE_CLIPRECTS,
|
||||
/**
|
||||
* Batchbuffer contents require looping over per cliprect at batch submit
|
||||
* time.
|
||||
*/
|
||||
LOOP_CLIPRECTS,
|
||||
/**
|
||||
* Batchbuffer contents contain drawing that should not be executed multiple
|
||||
* times.
|
||||
*/
|
||||
NO_LOOP_CLIPRECTS,
|
||||
/**
|
||||
* Batchbuffer contents contain drawing that already handles cliprects, such
|
||||
* as 2D drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
|
||||
* Equivalent behavior to NO_LOOP_CLIPRECTS, but may not persist in batch
|
||||
* outside of LOCK/UNLOCK.
|
||||
*/
|
||||
REFERENCES_CLIPRECTS
|
||||
};
|
||||
|
||||
struct intel_batchbuffer
|
||||
|
|
@ -25,7 +45,7 @@ struct intel_batchbuffer
|
|||
GLubyte *map;
|
||||
GLubyte *ptr;
|
||||
|
||||
enum cliprects_enable cliprects_enable;
|
||||
enum cliprect_mode cliprect_mode;
|
||||
|
||||
GLuint size;
|
||||
|
||||
|
|
@ -52,7 +72,7 @@ void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
|
|||
*/
|
||||
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
|
||||
const void *data, GLuint bytes,
|
||||
enum cliprects_enable cliprects_enable);
|
||||
enum cliprect_mode cliprect_mode);
|
||||
|
||||
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
|
||||
GLuint bytes);
|
||||
|
|
@ -85,36 +105,35 @@ intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
|
|||
static INLINE void
|
||||
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
|
||||
GLuint sz,
|
||||
enum cliprects_enable cliprects_enable)
|
||||
enum cliprect_mode cliprect_mode)
|
||||
{
|
||||
assert(sz < batch->size - 8);
|
||||
if (intel_batchbuffer_space(batch) < sz)
|
||||
intel_batchbuffer_flush(batch);
|
||||
|
||||
/* Upgrade the buffer to being looped over per cliprect if this batch
|
||||
* emit needs it. The code used to emit a batch whenever the
|
||||
* cliprects_enable was changed, but reducing the overhead of frequent
|
||||
* batch flushing is more important than reducing state parsing,
|
||||
* particularly as we move towards private backbuffers and number
|
||||
* cliprects always being 1 except at swap.
|
||||
*/
|
||||
if (cliprects_enable == INTEL_BATCH_CLIPRECTS)
|
||||
batch->cliprects_enable = INTEL_BATCH_CLIPRECTS;
|
||||
if (cliprect_mode != IGNORE_CLIPRECTS) {
|
||||
if (batch->cliprect_mode == IGNORE_CLIPRECTS) {
|
||||
batch->cliprect_mode = cliprect_mode;
|
||||
} else {
|
||||
if (batch->cliprect_mode != cliprect_mode)
|
||||
intel_batchbuffer_flush(batch);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Here are the crusty old macros, to be removed:
|
||||
*/
|
||||
#define BATCH_LOCALS
|
||||
|
||||
#define BEGIN_BATCH(n, cliprects_enable) do { \
|
||||
intel_batchbuffer_require_space(intel->batch, (n)*4, cliprects_enable); \
|
||||
#define BEGIN_BATCH(n, cliprect_mode) do { \
|
||||
intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode); \
|
||||
} while (0)
|
||||
|
||||
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
|
||||
|
||||
#define OUT_RELOC(buf, cliprects_enable, delta) do { \
|
||||
#define OUT_RELOC(buf, cliprect_mode, delta) do { \
|
||||
assert((delta) >= 0); \
|
||||
intel_batchbuffer_emit_reloc(intel->batch, buf, cliprects_enable, delta); \
|
||||
intel_batchbuffer_emit_reloc(intel->batch, buf, cliprect_mode, delta); \
|
||||
} while (0)
|
||||
|
||||
#define ADVANCE_BATCH() do { } while(0)
|
||||
|
|
|
|||
|
|
@ -142,7 +142,7 @@ intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
|
|||
src_x = box.x1 - dPriv->x + dPriv->backX;
|
||||
src_y = box.y1 - dPriv->y + dPriv->backY;
|
||||
|
||||
BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(8, REFERENCES_CLIPRECTS);
|
||||
OUT_BATCH(CMD);
|
||||
OUT_BATCH(BR13 | dst_pitch);
|
||||
OUT_BATCH((box.y1 << 16) | box.x1);
|
||||
|
|
@ -212,7 +212,7 @@ intelEmitFillBlit(struct intel_context *intel,
|
|||
assert(w > 0);
|
||||
assert(h > 0);
|
||||
|
||||
BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(6, NO_LOOP_CLIPRECTS);
|
||||
OUT_BATCH(CMD);
|
||||
OUT_BATCH(BR13 | dst_pitch);
|
||||
OUT_BATCH((y << 16) | x);
|
||||
|
|
@ -324,7 +324,7 @@ intelEmitCopyBlit(struct intel_context *intel,
|
|||
assert(dst_x < dst_x2);
|
||||
assert(dst_y < dst_y2);
|
||||
|
||||
BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(8, NO_LOOP_CLIPRECTS);
|
||||
OUT_BATCH(CMD);
|
||||
OUT_BATCH(BR13 | dst_pitch);
|
||||
OUT_BATCH((dst_y << 16) | dst_x);
|
||||
|
|
@ -341,7 +341,7 @@ intelEmitCopyBlit(struct intel_context *intel,
|
|||
assert(dst_x < dst_x2);
|
||||
assert(h > 0);
|
||||
|
||||
BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(8, NO_LOOP_CLIPRECTS);
|
||||
OUT_BATCH(CMD);
|
||||
OUT_BATCH(BR13 | dst_pitch);
|
||||
OUT_BATCH((0 << 16) | dst_x);
|
||||
|
|
@ -515,12 +515,12 @@ intelClearWithBlit(GLcontext *ctx, GLbitfield mask)
|
|||
_mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
|
||||
buf, irb->Base.Name);
|
||||
*/
|
||||
intel_wait_flips(intel, INTEL_BATCH_NO_CLIPRECTS);
|
||||
intel_wait_flips(intel);
|
||||
|
||||
assert(b.x1 < b.x2);
|
||||
assert(b.y1 < b.y2);
|
||||
|
||||
BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(6, REFERENCES_CLIPRECTS);
|
||||
OUT_BATCH(CMD);
|
||||
OUT_BATCH(BR13);
|
||||
OUT_BATCH((b.y1 << 16) | b.x1);
|
||||
|
|
@ -574,7 +574,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
|
|||
(8 * 4) +
|
||||
(3 * 4) +
|
||||
dwords,
|
||||
INTEL_BATCH_NO_CLIPRECTS );
|
||||
NO_LOOP_CLIPRECTS );
|
||||
|
||||
opcode = XY_SETUP_BLT_CMD;
|
||||
if (cpp == 4)
|
||||
|
|
@ -592,7 +592,7 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
|
|||
if (dst_tiled)
|
||||
blit_cmd |= XY_DST_TILED;
|
||||
|
||||
BEGIN_BATCH(8 + 3, INTEL_BATCH_NO_CLIPRECTS);
|
||||
BEGIN_BATCH(8 + 3, NO_LOOP_CLIPRECTS);
|
||||
OUT_BATCH(opcode);
|
||||
OUT_BATCH(br13);
|
||||
OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
|
||||
|
|
@ -610,5 +610,5 @@ intelEmitImmediateColorExpandBlit(struct intel_context *intel,
|
|||
intel_batchbuffer_data( intel->batch,
|
||||
src_bits,
|
||||
dwords * 4,
|
||||
INTEL_BATCH_NO_CLIPRECTS );
|
||||
NO_LOOP_CLIPRECTS );
|
||||
}
|
||||
|
|
|
|||
|
|
@ -469,9 +469,6 @@ intelClearWithTris(struct intel_context *intel, GLbitfield mask)
|
|||
intel->vtbl.meta_color_mask(intel, GL_TRUE);
|
||||
intel->vtbl.meta_draw_region(intel, irbColor->region, NULL);
|
||||
|
||||
/* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
|
||||
* drawing origin may not be correctly emitted.
|
||||
*/
|
||||
intel->vtbl.meta_draw_quad(intel,
|
||||
fb->_Xmin,
|
||||
fb->_Xmax,
|
||||
|
|
@ -625,7 +622,7 @@ intelClear(GLcontext *ctx, GLbitfield mask)
|
|||
|
||||
/* Emit wait for pending flips */
|
||||
void
|
||||
intel_wait_flips(struct intel_context *intel, GLuint batch_flags)
|
||||
intel_wait_flips(struct intel_context *intel)
|
||||
{
|
||||
struct intel_framebuffer *intel_fb =
|
||||
(struct intel_framebuffer *) intel->ctx.DrawBuffer;
|
||||
|
|
@ -641,7 +638,7 @@ intel_wait_flips(struct intel_context *intel, GLuint batch_flags)
|
|||
BATCH_LOCALS;
|
||||
|
||||
/* Wait for pending flips to take effect */
|
||||
BEGIN_BATCH(2, batch_flags);
|
||||
BEGIN_BATCH(2, NO_LOOP_CLIPRECTS);
|
||||
OUT_BATCH(pf_planes & 0x1 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP)
|
||||
: 0);
|
||||
OUT_BATCH(pf_planes & 0x2 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_B_FLIP)
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
|
||||
|
|
@ -42,7 +43,7 @@ extern struct intel_region *intel_readbuf_region(struct intel_context *intel);
|
|||
|
||||
extern struct intel_region *intel_drawbuf_region(struct intel_context *intel);
|
||||
|
||||
extern void intel_wait_flips(struct intel_context *intel, GLuint batch_flags);
|
||||
extern void intel_wait_flips(struct intel_context *intel);
|
||||
|
||||
extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue