i965: Use brw_bo_wait() for brw_bo_wait_rendering()
Currently, we use set_domain() to cause a stall on rendering. But the set-domain ioctl has the side-effect of changing the kernel's cache domain underneath the struct_mutex, which may perturb state if there was no rendering to wait upon, and in general it is much heavier than the lockless wait-ioctl. Historically, libdrm used set-domain because we did not have an explicit wait-ioctl (and the patches to teach it to use wait if available were lost in the mists). Since mesa already depends upon a kernel that supports the wait-ioctl, we do not need to supply a fallback.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
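For context, here is a minimal, hedged sketch of the two kernel interfaces the message contrasts. It is not code from this patch: the helper names and the fd/gem_handle parameters are placeholders, and the real set_domain()/brw_bo_wait() implementations live outside this diff (brw_bo_wait() is expected to wrap the wait-ioctl shown here).

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>        /* drmIoctl() */
#include <drm/i915_drm.h>   /* i915 GEM ioctls */

/* Old-style stall (sketch): move the object to the GTT read/write domain.
 * The kernel waits for rendering, but it also rewrites the object's cache
 * domain under struct_mutex, even when there was nothing to wait for. */
static int
stall_via_set_domain(int fd, uint32_t gem_handle)
{
   struct drm_i915_gem_set_domain sd = {
      .handle = gem_handle,
      .read_domains = I915_GEM_DOMAIN_GTT,
      .write_domain = I915_GEM_DOMAIN_GTT,
   };

   return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) ? -errno : 0;
}

/* New-style stall (sketch): the wait-ioctl.  A negative timeout_ns means
 * "wait forever", matching the brw_bo_wait(bo, -1) call added below. */
static int
stall_via_wait(int fd, uint32_t gem_handle, int64_t timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = timeout_ns,
   };

   return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) ? -errno : 0;
}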
parent 3b28eaabf6
commit 833108ac14

5 changed files with 10 additions and 8 deletions
@@ -831,10 +831,12 @@ brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
 
 /** Waits for all GPU rendering with the object to have completed. */
 void
-brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
+brw_bo_wait_rendering(struct brw_bo *bo)
 {
-   set_domain(brw, "waiting for",
-              bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+   /* We require a kernel recent enough for WAIT_IOCTL support.
+    * See intel_init_bufmgr()
+    */
+   brw_bo_wait(bo, -1);
 }
 
 /**
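The new comment defers to intel_init_bufmgr() for the kernel requirement, and that function is not part of this diff. As a hedged sketch (hypothetical helper name, fd assumed to be the DRM file descriptor), such a requirement is typically probed at init time via the i915 getparam ioctl:

#include <stdbool.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>   /* DRM_IOCTL_I915_GETPARAM, I915_PARAM_HAS_WAIT_TIMEOUT */

/* Hypothetical init-time probe: ask the kernel whether the wait-ioctl
 * (with timeout support) is available, so no set-domain fallback is
 * ever needed at runtime. */
static bool
kernel_has_wait_timeout(int fd)
{
   int value = 0;
   struct drm_i915_getparam gp = {
      .param = I915_PARAM_HAS_WAIT_TIMEOUT,
      .value = &value,
   };

   if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
      return false;

   return value != 0;
}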
@@ -227,7 +227,7 @@ int brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
  * bo_subdata, etc. It is merely a way for the driver to implement
  * glFinish.
  */
-void brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo);
+void brw_bo_wait_rendering(struct brw_bo *bo);
 
 /**
  * Tears down the buffer manager instance.
@@ -256,7 +256,7 @@ intel_finish(struct gl_context * ctx)
    intel_glFlush(ctx);
 
    if (brw->batch.last_bo)
-      brw_bo_wait_rendering(brw, brw->batch.last_bo);
+      brw_bo_wait_rendering(brw->batch.last_bo);
 }
 
 static void
@@ -1350,7 +1350,7 @@ brw_wait_perf_query(struct gl_context *ctx, struct gl_perf_query_object *o)
    if (brw_batch_references(&brw->batch, bo))
       intel_batchbuffer_flush(brw);
 
-   brw_bo_wait_rendering(brw, bo);
+   brw_bo_wait_rendering(bo);
 
    /* Due to a race condition between the OA unit signaling report
     * availability and the report actually being written into memory,
@@ -497,7 +497,7 @@ throttle(struct brw_context *brw)
          /* Pass NULL rather than brw so we avoid perf_debug warnings;
           * stalling is common and expected here...
           */
-         brw_bo_wait_rendering(NULL, brw->throttle_batch[1]);
+         brw_bo_wait_rendering(brw->throttle_batch[1]);
       }
       brw_bo_unreference(brw->throttle_batch[1]);
    }
@@ -723,7 +723,7 @@ _intel_batchbuffer_flush_fence(struct brw_context *brw,
 
    if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
       fprintf(stderr, "waiting for idle\n");
-      brw_bo_wait_rendering(brw, brw->batch.bo);
+      brw_bo_wait_rendering(brw->batch.bo);
    }
 
    /* Start a new batch buffer. */