mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
i965: Add intel_batchbuffer_flush_fence()
A variant of intel_batchbuffer_flush() with parameters for in and out
fence fds.

Reviewed-by: Rafael Antognolli <rafael.antognolli@intel.com>
Tested-by: Rafael Antognolli <rafael.antognolli@intel.com>
Acked-by: Kenneth Graunke <kenneth@whitecape.org>
This commit is contained in:
parent 358661c794
commit d1ce499dae
2 changed files with 26 additions and 12 deletions
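
For orientation, a minimal caller-side sketch of the new entry point, based on the ownership rules documented in the diff below. Only intel_batchbuffer_flush_fence() and struct brw_context come from the i965 driver; the helper name and the surrounding logic are hypothetical and not part of this commit.

#include <unistd.h>                 /* close() */
#include "intel_batchbuffer.h"      /* macros added/changed by this commit */

/* Hypothetical caller sketch: flush the batch, waiting on an optional
 * acquire fence and asking for a release fence for the consumer. */
static int
flush_with_fences(struct brw_context *brw, int in_fence_fd)
{
   int out_fence_fd = -1;

   /* The flush takes ownership of in_fence_fd (pass -1 when there is
    * nothing to wait on) and, on success, may return a fence fd that
    * the caller owns. */
   int ret = intel_batchbuffer_flush_fence(brw, in_fence_fd, &out_fence_fd);
   if (ret != 0)
      return ret;

   if (out_fence_fd != -1) {
      /* ... hand the fence to whatever consumes the rendering ... */
      close(out_fence_fd);          /* caller must release its reference */
   }
   return 0;
}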
src/mesa/drivers/dri/i965/intel_batchbuffer.c

@@ -319,7 +319,7 @@ throttle(struct brw_context *brw)
 /* TODO: Push this whole function into bufmgr.
  */
 static int
-do_flush_locked(struct brw_context *brw)
+do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
 {
    struct intel_batchbuffer *batch = &brw->batch;
    int ret = 0;

@@ -353,11 +353,15 @@ do_flush_locked(struct brw_context *brw)
       brw_annotate_aub(brw);

       if (brw->hw_ctx == NULL || batch->ring != RENDER_RING) {
+         assert(in_fence_fd == -1);
+         assert(out_fence_fd == NULL);
          ret = drm_intel_bo_mrb_exec(batch->bo, 4 * USED_BATCH(*batch),
                                      NULL, 0, 0, flags);
       } else {
-         ret = drm_intel_gem_bo_context_exec(batch->bo, brw->hw_ctx,
-                                             4 * USED_BATCH(*batch), flags);
+         ret = drm_intel_gem_bo_fence_exec(batch->bo, brw->hw_ctx,
+                                           4 * USED_BATCH(*batch),
+                                           in_fence_fd, out_fence_fd,
+                                           flags);
       }
    }

@@ -378,9 +382,17 @@ do_flush_locked(struct brw_context *brw)
    return ret;
 }

+/**
+ * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
+ * of the fd.
+ *
+ * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
+ * of the returned fd.
+ */
 int
-_intel_batchbuffer_flush(struct brw_context *brw,
-                         const char *file, int line)
+_intel_batchbuffer_flush_fence(struct brw_context *brw,
+                               int in_fence_fd, int *out_fence_fd,
+                               const char *file, int line)
 {
    int ret;

@@ -419,7 +431,7 @@ _intel_batchbuffer_flush(struct brw_context *brw,
    /* Check that we didn't just wrap our batchbuffer at a bad time. */
    assert(!brw->no_batch_wrap);

-   ret = do_flush_locked(brw);
+   ret = do_flush_locked(brw, in_fence_fd, out_fence_fd);

    if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
       fprintf(stderr, "waiting for idle\n");
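
The doc comment added above states that the flush takes ownership of in_fence_fd. A caller that still needs the fence afterwards should therefore hand over a duplicate; a hedged sketch of that pattern follows (the helper name is hypothetical, dup()/close() are plain POSIX):

#include <unistd.h>                 /* dup(), close() */
#include "intel_batchbuffer.h"

/* Hypothetical sketch: keep our own reference to the acquire fence,
 * since the flush consumes the fd it is given. */
static int
flush_but_keep_fence(struct brw_context *brw, int acquire_fd)
{
   int fd_for_flush = -1;

   if (acquire_fd != -1) {
      fd_for_flush = dup(acquire_fd);   /* the flush will own this copy */
      if (fd_for_flush == -1)
         return -1;
   }

   /* No out-fence is requested here, so the third argument is NULL. */
   return intel_batchbuffer_flush_fence(brw, fd_for_flush, NULL);
}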

src/mesa/drivers/dri/i965/intel_batchbuffer.h

@@ -46,14 +46,16 @@ void intel_batchbuffer_save_state(struct brw_context *brw);
 void intel_batchbuffer_reset_to_saved(struct brw_context *brw);
 void intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                      enum brw_gpu_ring ring);
+int _intel_batchbuffer_flush_fence(struct brw_context *brw,
+                                   int in_fence_fd, int *out_fence_fd,
+                                   const char *file, int line);

-int _intel_batchbuffer_flush(struct brw_context *brw,
-                             const char *file, int line);
-
-#define intel_batchbuffer_flush(intel) \
-   _intel_batchbuffer_flush(intel, __FILE__, __LINE__)
-
+#define intel_batchbuffer_flush(brw) \
+   _intel_batchbuffer_flush_fence((brw), -1, NULL, __FILE__, __LINE__)
+
+#define intel_batchbuffer_flush_fence(brw, in_fence_fd, out_fence_fd) \
+   _intel_batchbuffer_flush_fence((brw), (in_fence_fd), (out_fence_fd), \
+                                  __FILE__, __LINE__)

 /* Unlike bmBufferData, this currently requires the buffer be mapped.
  * Consider it a convenience function wrapping multple
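
With the header change above, existing callers are unaffected: intel_batchbuffer_flush(brw) now simply forwards to the fence-aware entry point with no fences attached. In other words, after this commit the two calls below are equivalent (brw being any valid struct brw_context pointer):

/* Equivalent after this commit: the plain flush passes "no in-fence" (-1)
 * and "no out-fence" (NULL) down to _intel_batchbuffer_flush_fence(). */
intel_batchbuffer_flush(brw);
intel_batchbuffer_flush_fence(brw, -1, NULL);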