iris: Make an iris_foreach_batch macro that skips unsupported batches

IRIS_BATCH_BLITTER isn't supported prior to Tigerlake; in general,
batches may not be supported on all hardware.  In most cases, querying
them is harmless (if useless): they reference nothing, have no commands
to flush, and so on.  However, the fence code does need to know that
certain batches don't exist, so it can avoid adding inter-batch fences
involving them.

This patch introduces a new iris_foreach_batch() iterator macro that
walks over all batches that are actually supported on the platform,
while skipping the others.  It provides a central place to update should
we add or reorder more batches in the future.
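
For reference, the new iterator looks roughly like this (its exact definition lands in iris_batch.h further down in the diff; the line breaks here are only for readability). It walks ice->batches in enum order and stops at IRIS_BATCH_COMPUTE on pre-Tigerlake (Ver < 12) platforms, so IRIS_BATCH_BLITTER is simply never visited there. The usage sketch mirrors the resource_is_busy() change below:

   #define iris_foreach_batch(ice, batch) \
      for (struct iris_batch *batch = &ice->batches[0]; \
           batch <= &ice->batches[((struct iris_screen *)ice->ctx.screen)->devinfo.ver >= 12 ? \
                                  IRIS_BATCH_BLITTER : IRIS_BATCH_COMPUTE]; \
           ++batch)

   /* Typical conversion: replace a loop over all IRIS_BATCH_COUNT slots with
    * the new iterator, e.g. when checking whether a BO is referenced anywhere.
    */
   bool busy = iris_bo_busy(res->bo);
   iris_foreach_batch(ice, batch)
      busy |= iris_batch_references(batch, res->bo);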

Fixes various tests in the piglit.spec.ext_external_objects.* category.

Thanks to Tapani Pälli for catching this.

Fixes: a90a1f15 ("iris: Create an IRIS_BATCH_BLITTER for using the BLT command streamer")
Reviewed-by: Caio Oliveira <caio.oliveira@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14834>
Author:  Kenneth Graunke, 2022-01-28 03:23:34 -08:00 (committed by Marge Bot)
Parent:  c4b400285a
Commit:  fd0e4aedeb
Stats:   7 changed files, 66 insertions(+), 58 deletions(-)

@@ -181,13 +181,13 @@ iris_init_batch(struct iris_context *ice,
    struct iris_batch *batch = &ice->batches[name];
    struct iris_screen *screen = (void *) ice->ctx.screen;
 
-   /* Note: ctx_id, exec_flags and has_engines_context fields are initialized
-    * at an earlier phase when contexts are created.
+   /* Note: screen, ctx_id, exec_flags and has_engines_context fields are
+    * initialized at an earlier phase when contexts are created.
     *
-    * Ref: iris_init_engines_context(), iris_init_non_engine_contexts()
+    * See iris_init_batches(), which calls either iris_init_engines_context()
+    * or iris_init_non_engine_contexts().
     */
-   batch->screen = screen;
    batch->dbg = &ice->dbg;
    batch->reset = &ice->reset;
    batch->state_sizes = ice->state.sizes;
@@ -214,11 +214,12 @@ iris_init_batch(struct iris_context *ice,
    batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);
 
+   batch->num_other_batches = 0;
    memset(batch->other_batches, 0, sizeof(batch->other_batches));
-   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (i != name)
-         batch->other_batches[j++] = &ice->batches[i];
+   iris_foreach_batch(ice, other_batch) {
+      if (batch != other_batch)
+         batch->other_batches[batch->num_other_batches++] = other_batch;
    }
 
    if (INTEL_DEBUG(DEBUG_ANY)) {
@@ -250,8 +251,7 @@ iris_init_non_engine_contexts(struct iris_context *ice, int priority)
 {
    struct iris_screen *screen = (void *) ice->ctx.screen;
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      struct iris_batch *batch = &ice->batches[i];
+   iris_foreach_batch(ice, batch) {
       batch->ctx_id = iris_create_hw_context(screen->bufmgr);
       batch->exec_flags = I915_EXEC_RENDER;
       batch->has_engines_context = false;
@@ -315,8 +315,8 @@ iris_init_engines_context(struct iris_context *ice, int priority)
    struct iris_screen *screen = (void *) ice->ctx.screen;
    iris_hw_context_set_priority(screen->bufmgr, engines_ctx, priority);
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      struct iris_batch *batch = &ice->batches[i];
+   iris_foreach_batch(ice, batch) {
+      unsigned i = batch - &ice->batches[0];
       batch->ctx_id = engines_ctx;
       batch->exec_flags = i;
       batch->has_engines_context = true;
@@ -328,10 +328,14 @@ iris_init_engines_context(struct iris_context *ice, int priority)
 void
 iris_init_batches(struct iris_context *ice, int priority)
 {
+   /* We have to do this early for iris_foreach_batch() to work */
+   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
+      ice->batches[i].screen = (void *) ice->ctx.screen;
+
    if (!iris_init_engines_context(ice, priority))
       iris_init_non_engine_contexts(ice, priority);
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
-      iris_init_batch(ice, (enum iris_batch_name) i);
+   iris_foreach_batch(ice, batch)
+      iris_init_batch(ice, batch - &ice->batches[0]);
 }
 
 static int
@@ -400,7 +404,7 @@ flush_for_cross_batch_dependencies(struct iris_batch *batch,
     * it had already referenced, we may need to flush other batches in order
     * to correctly synchronize them.
     */
-   for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
+   for (int b = 0; b < batch->num_other_batches; b++) {
       struct iris_batch *other_batch = batch->other_batches[b];
       int other_index = find_exec_index(other_batch, bo);
@@ -598,8 +602,8 @@ iris_destroy_batches(struct iris_context *ice)
                                   ice->batches[0].ctx_id);
    }
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
-      iris_batch_free(&ice->batches[i]);
+   iris_foreach_batch(ice, batch)
+      iris_batch_free(batch);
 }
 
 /**
@@ -726,10 +730,10 @@ replace_kernel_ctx(struct iris_batch *batch)
       int new_ctx = iris_create_engines_context(ice, priority);
       if (new_ctx < 0)
         return false;
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         ice->batches[i].ctx_id = new_ctx;
+      iris_foreach_batch(ice, bat) {
+         bat->ctx_id = new_ctx;
          /* Notify the context that state must be re-initialized. */
-         iris_lost_context_state(&ice->batches[i]);
+         iris_lost_context_state(bat);
       }
       iris_destroy_kernel_context(bufmgr, old_ctx);
    } else {
@@ -810,6 +814,7 @@ update_bo_syncobjs(struct iris_batch *batch, struct iris_bo *bo, bool write)
 {
    struct iris_screen *screen = batch->screen;
    struct iris_bufmgr *bufmgr = screen->bufmgr;
+   struct iris_context *ice = batch->ice;
 
    /* Make sure bo->deps is big enough */
    if (screen->id >= bo->deps_size) {
@@ -838,7 +843,9 @@ update_bo_syncobjs(struct iris_batch *batch, struct iris_bo *bo, bool write)
    * have come from a different context, and apps don't like it when we don't
    * do inter-context tracking.
    */
-   for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
+   iris_foreach_batch(ice, batch_i) {
+      unsigned i = batch_i->name;
+
       /* If the bo is being written to by others, wait for them. */
       if (bo_deps->write_syncobjs[i])
         move_syncobj_to_batch(batch, &bo_deps->write_syncobjs[i],

@@ -136,6 +136,7 @@ struct iris_batch {
 
    /** List of other batches which we might need to flush to use a BO */
    struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];
+   unsigned num_other_batches;
 
    struct {
       /**
@@ -382,4 +383,9 @@ iris_batch_mark_reset_sync(struct iris_batch *batch)
 const char *
 iris_batch_name_to_string(enum iris_batch_name name);
 
+#define iris_foreach_batch(ice, batch) \
+   for (struct iris_batch *batch = &ice->batches[0]; \
+        batch <= &ice->batches[((struct iris_screen *)ice->ctx.screen)->devinfo.ver >= 12 ? IRIS_BATCH_BLITTER : IRIS_BATCH_COMPUTE]; \
+        ++batch)
+
 #endif

@@ -114,9 +114,9 @@ iris_border_color_pool_reserve(struct iris_context *ice, unsigned count)
 
    if (remaining_entries < count) {
       /* It's safe to flush because we're called outside of state upload. */
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         if (iris_batch_references(&ice->batches[i], pool->bo))
-            iris_batch_flush(&ice->batches[i]);
+      iris_foreach_batch(ice, batch) {
+         if (iris_batch_references(batch, pool->bo))
+            iris_batch_flush(batch);
       }
 
       iris_reset_border_color_pool(pool, pool->bo->bufmgr);

@@ -98,12 +98,12 @@ iris_get_device_reset_status(struct pipe_context *ctx)
    /* Check the reset status of each batch's hardware context, and take the
     * worst status (if one was guilty, proclaim guilt).
     */
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+   iris_foreach_batch(ice, batch) {
       /* This will also recreate the hardware contexts as necessary, so any
        * future queries will show no resets. We only want to report once.
        */
       enum pipe_reset_status batch_reset =
-         iris_batch_check_for_reset(&ice->batches[i]);
+         iris_batch_check_for_reset(batch);
 
       if (batch_reset == PIPE_NO_RESET)
          continue;

@@ -263,8 +263,8 @@ iris_fence_flush(struct pipe_context *ctx,
       iris_flush_dirty_dmabufs(ice);
 
    if (!deferred) {
-      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
-         iris_batch_flush(&ice->batches[i]);
+      iris_foreach_batch(ice, batch)
+         iris_batch_flush(batch);
    }
 
    if (flags & PIPE_FLUSH_END_OF_FRAME) {
@@ -286,8 +286,8 @@ iris_fence_flush(struct pipe_context *ctx,
    if (deferred)
       fence->unflushed_ctx = ctx;
 
-   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
-      struct iris_batch *batch = &ice->batches[b];
+   iris_foreach_batch(ice, batch) {
+      unsigned b = batch->name;
 
      if (deferred && iris_batch_bytes_used(batch) > 0) {
         struct iris_fine_fence *fine =
@@ -339,9 +339,7 @@ iris_fence_await(struct pipe_context *ctx,
       if (iris_fine_fence_signaled(fine))
          continue;
 
-      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
-         struct iris_batch *batch = &ice->batches[b];
-
+      iris_foreach_batch(ice, batch) {
          /* We're going to make any future work in this batch wait for our
           * fence to have gone by. But any currently queued work doesn't
           * need to wait. Flush the batch now, so it can happen sooner.
@@ -402,14 +400,14 @@ iris_fence_finish(struct pipe_screen *p_screen,
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
-      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
-         struct iris_fine_fence *fine = fence->fine[i];
+      iris_foreach_batch(ice, batch) {
+         struct iris_fine_fence *fine = fence->fine[batch->name];
 
         if (iris_fine_fence_signaled(fine))
            continue;
 
-         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
-            iris_batch_flush(&ice->batches[i]);
+         if (fine->syncobj == iris_batch_get_signal_syncobj(batch))
+            iris_batch_flush(batch);
      }
 
      /* The fence is no longer deferred. */
@@ -595,7 +593,7 @@ iris_fence_signal(struct pipe_context *ctx,
    if (ctx == fence->unflushed_ctx)
       return;
 
-   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
+   iris_foreach_batch(ice, batch) {
       for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
          struct iris_fine_fence *fine = fence->fine[i];
@@ -603,9 +601,8 @@ iris_fence_signal(struct pipe_context *ctx,
         if (iris_fine_fence_signaled(fine))
            continue;
 
-         ice->batches[b].contains_fence_signal = true;
-         iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,
-                                I915_EXEC_FENCE_SIGNAL);
+         batch->contains_fence_signal = true;
+         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_SIGNAL);
      }
   }
 }

@@ -357,11 +357,10 @@ iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
              PIPE_CONTROL_TILE_CACHE_FLUSH;
    }
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (ice->batches[i].contains_draw) {
-         iris_batch_maybe_flush(&ice->batches[i], 24);
-         iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
-                                      bits);
+   iris_foreach_batch(ice, batch) {
+      if (batch->contains_draw) {
+         iris_batch_maybe_flush(batch, 24);
+         iris_emit_pipe_control_flush(batch, "API: memory barrier", bits);
      }
    }
 }

@@ -1404,9 +1404,9 @@ iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
       * sure to get rid of any compression that a consumer wouldn't know how
       * to handle.
       */
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         if (iris_batch_references(&ice->batches[i], res->bo))
-            iris_batch_flush(&ice->batches[i]);
+      iris_foreach_batch(ice, batch) {
+         if (iris_batch_references(batch, res->bo))
+            iris_batch_flush(batch);
      }
 
      iris_resource_disable_aux(res);
@@ -1741,8 +1741,8 @@ resource_is_busy(struct iris_context *ice,
 {
    bool busy = iris_bo_busy(res->bo);
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
-      busy |= iris_batch_references(&ice->batches[i], res->bo);
+   iris_foreach_batch(ice, batch)
+      busy |= iris_batch_references(batch, res->bo);
 
    return busy;
 }
@@ -2339,9 +2339,9 @@ iris_transfer_map(struct pipe_context *ctx,
    }
 
    if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         if (iris_batch_references(&ice->batches[i], res->bo))
-            iris_batch_flush(&ice->batches[i]);
+      iris_foreach_batch(ice, batch) {
+         if (iris_batch_references(batch, res->bo))
+            iris_batch_flush(batch);
      }
    }
@@ -2384,8 +2384,7 @@ iris_transfer_flush_region(struct pipe_context *ctx,
    }
 
    if (history_flush & ~PIPE_CONTROL_CS_STALL) {
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         struct iris_batch *batch = &ice->batches[i];
+      iris_foreach_batch(ice, batch) {
         if (batch->contains_draw || batch->cache.render->entries) {
            iris_batch_maybe_flush(batch, 24);
            iris_emit_pipe_control_flush(batch,
@@ -2474,9 +2473,9 @@ iris_texture_subdata(struct pipe_context *ctx,
    iris_resource_access_raw(ice, res, level, box->z, box->depth, true);
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (iris_batch_references(&ice->batches[i], res->bo))
-         iris_batch_flush(&ice->batches[i]);
+   iris_foreach_batch(ice, batch) {
+      if (iris_batch_references(batch, res->bo))
+         iris_batch_flush(batch);
    }
 
    uint8_t *dst = iris_bo_map(&ice->dbg, res->bo, MAP_WRITE | MAP_RAW);