1) Add a new flag in the sarea (coopting the unused texAge value) to
identify context switches between members of a share group -
i.e. multiple contexts in a single application, possibly on different
threads.  In this case the contexts share a bufmgr instance and there
is no need to evict textures - so don't.

2) Use a new flag 'need_flush' to ensure hardware rendering is flushed
prior to starting a software fallback.
This commit is contained in:
Keith Whitwell 2006-09-20 14:44:40 +00:00
parent 3a5319293c
commit b35121d54d
5 changed files with 39 additions and 8 deletions

View file

@@ -328,6 +328,7 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
brw_emit_prim(brw, &prim[i]);
}
intel->need_flush = GL_TRUE;
retval = GL_TRUE;
}

View file

@@ -182,6 +182,8 @@ void bmUnmapBufferAUB( struct intel_context *,
int bmValidateBuffers( struct intel_context * );
void bmReleaseBuffers( struct intel_context * );
GLuint bmCtxId( struct intel_context *intel );
GLboolean bmError( struct intel_context * );
void bmEvictAll( struct intel_context * );

View file

@@ -117,6 +117,7 @@ struct bufmgr {
struct block fenced; /* after bmFenceBuffers (mi_flush, emit irq, write dword) */
/* then to pool->lru or free() */
unsigned ctxId;
unsigned last_fence;
unsigned free_on_hardware;
@@ -578,6 +579,12 @@ struct bufmgr *bm_fake_intel_Attach( struct intel_context *intel )
make_empty_list(&bm.referenced);
make_empty_list(&bm.fenced);
make_empty_list(&bm.on_hardware);
/* The context id of any of the share group. This won't be used
* in communication with the kernel, so it doesn't matter if
* this context is eventually deleted.
*/
bm.ctxId = intel->hHWContext;
}
nr_attach++;
@@ -1242,7 +1249,6 @@ void bmReleaseBuffers( struct intel_context *intel )
LOCK(bm);
{
struct block *block, *tmp;
assert(intel->locked);
foreach_s (block, tmp, &bm->referenced) {
@@ -1432,3 +1438,9 @@ GLboolean bmError( struct intel_context *intel )
return retval;
}
GLuint bmCtxId( struct intel_context *intel )
{
return intel->bm->ctxId;
}

View file

@@ -539,18 +539,13 @@ GLboolean intelMakeCurrent(__DRIcontextPrivate *driContextPriv,
}
/* Handle loss of the hardware context after a contended lock take:
 * notify the fake buffer manager (which presumably evicts/invalidates
 * texture data -- see the "don't evict the texture data" note at the
 * call site; confirm against bm_fake_NotifyContendedLockTake), then
 * let the driver-specific vtbl hook reset its own hardware state.
 */
static void lost_hardware( struct intel_context *intel )
{
bm_fake_NotifyContendedLockTake( intel );
intel->vtbl.lost_hardware( intel );
}
static void intelContendedLock( struct intel_context *intel, GLuint flags )
{
__DRIdrawablePrivate *dPriv = intel->driDrawable;
__DRIscreenPrivate *sPriv = intel->driScreen;
volatile drmI830Sarea * sarea = intel->sarea;
int me = intel->hHWContext;
int my_bufmgr = bmCtxId(intel);
drmGetLock(intel->driFd, intel->hHWContext, flags);
@@ -564,12 +559,23 @@ static void intelContendedLock( struct intel_context *intel, GLuint flags )
intel->locked = 1;
intel->need_flush = 1;
/* Lost context?
*/
if (sarea->ctxOwner != me) {
DBG("Lost Context: sarea->ctxOwner %x me %x\n", sarea->ctxOwner, me);
sarea->ctxOwner = me;
lost_hardware(intel);
intel->vtbl.lost_hardware( intel );
}
/* As above, but don't evict the texture data on transitions
* between contexts which all share a local buffer manager.
*/
if (sarea->texAge != my_bufmgr) {
DBG("Lost Textures: sarea->texAge %x my_bufmgr %x\n", sarea->ctxOwner, my_bufmgr);
sarea->texAge = my_bufmgr;
bm_fake_NotifyContendedLockTake( intel );
}
/* Drawable changed?

View file

@@ -207,6 +207,16 @@ void intelSpanRenderStart( GLcontext *ctx )
{
struct intel_context *intel = intel_context(ctx);
if (intel->need_flush) {
LOCK_HARDWARE(intel);
intel->vtbl.emit_flush(intel, 0);
intel_batchbuffer_flush(intel->batch);
intel->need_flush = 0;
UNLOCK_HARDWARE(intel);
intelFinish(intel);
}
LOCK_HARDWARE(intel);
/* Just map the framebuffer and all textures. Bufmgr code will