Wait for buffer idle unlocked before mapping in some cases.

Greatly improves responsiveness.
Add an MI_FLUSH after each batchbuffer and tell the kernel we're doing so with
the new DRM_I915_FENCE_FLAG_FLUSHED (Requires drm update).
This can be done on a per-batchbuffer basis.
The DRM handles all fence accounting and signals earlier fences that also need
a flush.
This commit is contained in:
Thomas Hellström 2006-09-15 14:52:31 +00:00
parent fc4bc6fc97
commit 6a33e6d221
6 changed files with 26 additions and 8 deletions

View file

@ -159,6 +159,7 @@ do_flush_locked(struct intel_batchbuffer *batch,
GLuint *ptr;
GLuint i;
struct intel_context *intel = batch->intel;
unsigned fenceFlags;
driBOValidateList(batch->intel->driFd, &batch->list);
@ -213,12 +214,13 @@ do_flush_locked(struct intel_batchbuffer *batch,
driFenceUnReference(batch->last_fence);
/*
* Kernel fencing.
* Kernel fencing. The flags tells the kernel that we've
* programmed an MI_FLUSH.
*/
fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
batch->last_fence = driFenceBuffers(batch->intel->driFd,
"Batch fence", 0);
"Batch fence", fenceFlags);
/*
* User space fencing.
@ -254,13 +256,13 @@ intel_batchbuffer_flush(struct intel_batchbuffer *batch)
* performance drain that we would like to avoid.
*/
if (used & 4) {
((int *) batch->ptr)[0] = 0; /*intel->vtbl.flush_cmd(); */
((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *) batch->ptr)[1] = 0;
((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
used += 12;
}
else {
((int *) batch->ptr)[0] = 0; /*intel->vtbl.flush_cmd(); */
((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
used += 8;
}

View file

@ -48,6 +48,13 @@
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
/*
 * Block until the region's buffer object is idle (no pending GPU access).
 *
 * Implemented by mapping the buffer for read/write and immediately
 * unmapping it: driBOMap presumably blocks until the hardware is done
 * with the buffer, so the map/unmap pair is used purely as an idle-wait
 * (matches this commit's intent: "Wait for buffer idle unlocked before
 * mapping"). The map result is only used to decide whether an unmap is
 * needed — NOTE(review): this assumes a non-zero return means the map
 * succeeded; confirm against the driBOMap contract.
 *
 * The 'intel' context parameter is currently unused.
 */
void
intel_region_idle(struct intel_context *intel, struct intel_region *region)
{
DBG("%s\n", __FUNCTION__);
if (driBOMap(region->buffer, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0))
driBOUnmap(region->buffer);
}
/* XXX: Thread safety?
*/

View file

@ -78,6 +78,9 @@ struct intel_region *intel_region_create_static(struct intel_context *intel,
GLuint cpp,
GLuint pitch, GLuint height);
void intel_region_idle(struct intel_context *intel,
struct intel_region *ib);
/* Map/unmap regions. This is refcounted also:
*/
GLubyte *intel_region_map(struct intel_context *intel,

View file

@ -313,7 +313,7 @@ intelSpanRenderStart(GLcontext * ctx)
struct intel_context *intel = intel_context(ctx);
GLuint i;
intelFlush(&intel->ctx);
intelFinish(&intel->ctx);
LOCK_HARDWARE(intel);
#if 0

View file

@ -461,6 +461,9 @@ intelTexImage(GLcontext * ctx,
return;
if (intelImage->mt)
intel_region_idle(intel, intelImage->mt->region);
LOCK_HARDWARE(intel);
if (intelImage->mt) {

View file

@ -65,12 +65,15 @@ intelTexSubimage(GLcontext * ctx,
if (!pixels)
return;
if (intelImage->mt)
intel_region_idle(intel, intelImage->mt->region);
LOCK_HARDWARE(intel);
/* Map buffer if necessary. Need to lock to prevent other contexts
* from uploading the buffer under us.
*/
if (intelImage->mt)
if (intelImage->mt)
texImage->Data = intel_miptree_image_map(intel,
intelImage->mt,
intelImage->face,