mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-09 06:48:06 +02:00
Make sure bmBufferOffset is called for all active buffers every time
we render. Currently requires that some state be re-examined after every LOCK_HARDWARE().
This commit is contained in:
parent
c5cb8e2c6f
commit
133f141680
9 changed files with 20 additions and 26 deletions
|
|
@ -135,6 +135,7 @@ struct brw_context;
|
|||
#define BRW_NEW_PSP 0x800
|
||||
#define BRW_NEW_METAOPS 0x1000
|
||||
#define BRW_NEW_FENCE 0x2000
|
||||
#define BRW_NEW_LOCK 0x4000
|
||||
|
||||
|
||||
|
||||
|
|
@ -429,8 +430,8 @@ struct brw_context
|
|||
GLuint primitive;
|
||||
|
||||
GLboolean emit_state_always;
|
||||
|
||||
GLboolean wrap;
|
||||
GLboolean tmp_fallback;
|
||||
|
||||
struct {
|
||||
struct brw_state_flags dirty;
|
||||
|
|
@ -443,9 +444,6 @@ struct brw_context
|
|||
} state;
|
||||
|
||||
struct brw_state_pointers attribs;
|
||||
|
||||
GLboolean tmp_fallback;
|
||||
|
||||
struct brw_mem_pool pool[BRW_MAX_POOL];
|
||||
struct brw_cache cache[BRW_MAX_CACHE];
|
||||
struct brw_cached_batch_item *cached_batch_items;
|
||||
|
|
|
|||
|
|
@ -276,6 +276,7 @@ static void upload_depthbuffer(struct brw_context *brw)
|
|||
bd.dword1.bits.tiled_surface = intel->depth_region->tiled;
|
||||
bd.dword1.bits.surface_type = BRW_SURFACE_2D;
|
||||
|
||||
/* BRW_NEW_LOCK */
|
||||
bd.dword2_base_addr = bmBufferOffset(intel, region->buffer);
|
||||
|
||||
bd.dword3.bits.mipmap_layout = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
|
||||
|
|
@ -292,7 +293,7 @@ static void upload_depthbuffer(struct brw_context *brw)
|
|||
const struct brw_tracked_state brw_depthbuffer = {
|
||||
.dirty = {
|
||||
.mesa = 0,
|
||||
.brw = BRW_NEW_CONTEXT | BRW_NEW_FENCE,
|
||||
.brw = BRW_NEW_CONTEXT | BRW_NEW_LOCK,
|
||||
.cache = 0
|
||||
},
|
||||
.update = upload_depthbuffer
|
||||
|
|
@ -505,9 +506,11 @@ static void upload_state_base_address( struct brw_context *brw )
|
|||
sba.header.opcode = CMD_STATE_BASE_ADDRESS;
|
||||
sba.header.length = 0x4;
|
||||
|
||||
/* BRW_NEW_LOCK */
|
||||
sba.bits0.general_state_address = bmBufferOffset(intel, brw->pool[BRW_GS_POOL].buffer) >> 5;
|
||||
sba.bits0.modify_enable = 1;
|
||||
|
||||
/* BRW_NEW_LOCK */
|
||||
sba.bits1.surface_state_address = bmBufferOffset(intel, brw->pool[BRW_SS_POOL].buffer) >> 5;
|
||||
sba.bits1.modify_enable = 1;
|
||||
|
||||
|
|
@ -522,7 +525,7 @@ static void upload_state_base_address( struct brw_context *brw )
|
|||
const struct brw_tracked_state brw_state_base_address = {
|
||||
.dirty = {
|
||||
.mesa = 0,
|
||||
.brw = BRW_NEW_CONTEXT | BRW_NEW_FENCE,
|
||||
.brw = BRW_NEW_CONTEXT | BRW_NEW_LOCK,
|
||||
.cache = 0
|
||||
},
|
||||
.update = upload_state_base_address
|
||||
|
|
|
|||
|
|
@ -123,6 +123,8 @@ static void brw_note_unlock( struct intel_context *intel )
|
|||
|
||||
brw_pool_check_wrap(brw, &brw->pool[BRW_GS_POOL]);
|
||||
brw_pool_check_wrap(brw, &brw->pool[BRW_SS_POOL]);
|
||||
|
||||
brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_LOCK;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -776,7 +776,7 @@ void brw_wm_pass_fp( struct brw_wm_compile *c )
|
|||
|
||||
if (INTEL_DEBUG & DEBUG_WM) {
|
||||
_mesa_printf("\n\n\npre-fp:\n");
|
||||
/* _mesa_print_program(&fp->program); */
|
||||
_mesa_print_program(&fp->program.Base);
|
||||
_mesa_printf("\n");
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -98,6 +98,10 @@ static void upload_wm_unit(struct brw_context *brw )
|
|||
|
||||
assert(per_thread <= 12 * 1024);
|
||||
wm.thread2.per_thread_scratch_space = (per_thread / 1024) - 1;
|
||||
|
||||
/* XXX: could make this dynamic as this is so rarely active:
|
||||
*/
|
||||
/* BRW_NEW_LOCK */
|
||||
wm.thread2.scratch_space_base_pointer =
|
||||
bmBufferOffset(intel, brw->wm.scratch_buffer) >> 10;
|
||||
}
|
||||
|
|
@ -179,7 +183,7 @@ const struct brw_tracked_state brw_wm_unit = {
|
|||
|
||||
.brw = (BRW_NEW_FRAGMENT_PROGRAM |
|
||||
BRW_NEW_CURBE_OFFSETS |
|
||||
BRW_NEW_FENCE),
|
||||
BRW_NEW_LOCK),
|
||||
|
||||
.cache = (CACHE_NEW_SURFACE |
|
||||
CACHE_NEW_WM_PROG |
|
||||
|
|
|
|||
|
|
@ -137,6 +137,7 @@ void brw_update_texture_surface( GLcontext *ctx,
|
|||
*/
|
||||
/* surf->ss0.data_return_format = BRW_SURFACERETURNFORMAT_S1; */
|
||||
|
||||
/* BRW_NEW_LOCK */
|
||||
surf->ss1.base_addr = bmBufferOffset(intel,
|
||||
intelObj->mt->region->buffer);
|
||||
|
||||
|
|
@ -198,6 +199,7 @@ static void upload_wm_surfaces(struct brw_context *brw )
|
|||
surf.ss0.writedisable_blue = !brw->attribs.Color->ColorMask[2];
|
||||
surf.ss0.writedisable_alpha = !brw->attribs.Color->ColorMask[3];
|
||||
|
||||
/* BRW_NEW_LOCK */
|
||||
surf.ss1.base_addr = bmBufferOffset(&brw->intel, region->buffer);
|
||||
|
||||
|
||||
|
|
@ -240,7 +242,7 @@ const struct brw_tracked_state brw_wm_surfaces = {
|
|||
.dirty = {
|
||||
.mesa = _NEW_COLOR | _NEW_TEXTURE | _NEW_BUFFERS,
|
||||
.brw = (BRW_NEW_CONTEXT |
|
||||
BRW_NEW_FENCE), /* required for bmBufferOffset */
|
||||
BRW_NEW_LOCK), /* required for bmBufferOffset */
|
||||
.cache = 0
|
||||
},
|
||||
.update = upload_wm_surfaces
|
||||
|
|
|
|||
|
|
@ -201,6 +201,7 @@ static const struct dri_debug_control debug_control[] =
|
|||
{ "sing", DEBUG_SINGLE_THREAD },
|
||||
{ "thre", DEBUG_SINGLE_THREAD },
|
||||
{ "wm", DEBUG_WM },
|
||||
{ "vs", DEBUG_VS },
|
||||
{ NULL, 0 }
|
||||
};
|
||||
|
||||
|
|
@ -558,7 +559,6 @@ static void intelContendedLock( struct intel_context *intel, GLuint flags )
|
|||
/* Lost context?
|
||||
*/
|
||||
if (sarea->ctxOwner != me) {
|
||||
intel->perf_boxes |= I830_BOX_LOST_CONTEXT;
|
||||
sarea->ctxOwner = me;
|
||||
|
||||
/* Should also fence the frontbuffer even if ctxOwner doesn't
|
||||
|
|
@ -572,12 +572,6 @@ static void intelContendedLock( struct intel_context *intel, GLuint flags )
|
|||
intel->vtbl.lost_hardware( intel );
|
||||
}
|
||||
|
||||
/* Because the X server issues drawing commands without properly
|
||||
* fencing them, we need to be paraniod about waiting for hardware
|
||||
* rendering to finish after a contended lock.
|
||||
*/
|
||||
intel->flushBeforeFallback = GL_TRUE;
|
||||
|
||||
/* Drawable changed?
|
||||
*/
|
||||
if (dPriv && intel->lastStamp != dPriv->lastStamp) {
|
||||
|
|
|
|||
|
|
@ -174,8 +174,6 @@ struct intel_context
|
|||
|
||||
GLboolean aub_wrap;
|
||||
|
||||
GLboolean flushBeforeFallback;
|
||||
|
||||
struct intel_batchbuffer *batch;
|
||||
|
||||
struct {
|
||||
|
|
@ -200,7 +198,6 @@ struct intel_context
|
|||
GLboolean hw_stencil;
|
||||
GLboolean hw_stipple;
|
||||
GLboolean depth_buffer_is_float;
|
||||
GLboolean perf_boxes;
|
||||
GLboolean no_hw;
|
||||
GLboolean no_rast;
|
||||
GLboolean thrashing;
|
||||
|
|
@ -385,6 +382,7 @@ extern int INTEL_DEBUG;
|
|||
#define DEBUG_SINGLE_THREAD 0x8000
|
||||
#define DEBUG_WM 0x10000
|
||||
#define DEBUG_URB 0x20000
|
||||
#define DEBUG_VS 0x40000
|
||||
|
||||
|
||||
#define PCI_CHIP_845_G 0x2562
|
||||
|
|
|
|||
|
|
@ -209,13 +209,6 @@ void intelSpanRenderStart( GLcontext *ctx )
|
|||
|
||||
LOCK_HARDWARE(intel);
|
||||
|
||||
#if 0
|
||||
if (intel->flushBeforeFallback) {
|
||||
intelFinish(&intel->ctx);
|
||||
intel->flushBeforeFallback = GL_FALSE;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Just map the framebuffer and all textures. Bufmgr code will
|
||||
* take care of waiting on the necessary fences:
|
||||
*/
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue