diff --git a/src/gallium/drivers/iris/iris_batch.c b/src/gallium/drivers/iris/iris_batch.c
index 83397812d58..1364f5fdb07 100644
--- a/src/gallium/drivers/iris/iris_batch.c
+++ b/src/gallium/drivers/iris/iris_batch.c
@@ -181,13 +181,13 @@ iris_init_batch(struct iris_context *ice,
    struct iris_batch *batch = &ice->batches[name];
    struct iris_screen *screen = (void *) ice->ctx.screen;
 
-   /* Note: ctx_id, exec_flags and has_engines_context fields are initialized
-    * at an earlier phase when contexts are created.
+   /* Note: screen, ctx_id, exec_flags and has_engines_context fields are
+    * initialized at an earlier phase when contexts are created.
     *
-    * Ref: iris_init_engines_context(), iris_init_non_engine_contexts()
+    * See iris_init_batches(), which calls either iris_init_engines_context()
+    * or iris_init_non_engine_contexts().
     */
 
-   batch->screen = screen;
    batch->dbg = &ice->dbg;
    batch->reset = &ice->reset;
    batch->state_sizes = ice->state.sizes;
@@ -214,11 +214,12 @@ iris_init_batch(struct iris_context *ice,
    batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);
 
+   batch->num_other_batches = 0;
    memset(batch->other_batches, 0, sizeof(batch->other_batches));
 
-   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (i != name)
-         batch->other_batches[j++] = &ice->batches[i];
+   iris_foreach_batch(ice, other_batch) {
+      if (batch != other_batch)
+         batch->other_batches[batch->num_other_batches++] = other_batch;
    }
 
    if (INTEL_DEBUG(DEBUG_ANY)) {
@@ -250,8 +251,7 @@ iris_init_non_engine_contexts(struct iris_context *ice, int priority)
 {
    struct iris_screen *screen = (void *) ice->ctx.screen;
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      struct iris_batch *batch = &ice->batches[i];
+   iris_foreach_batch(ice, batch) {
       batch->ctx_id = iris_create_hw_context(screen->bufmgr);
       batch->exec_flags = I915_EXEC_RENDER;
       batch->has_engines_context = false;
@@ -315,8 +315,8 @@ iris_init_engines_context(struct iris_context *ice, int priority)
    struct iris_screen *screen = (void *) ice->ctx.screen;
    iris_hw_context_set_priority(screen->bufmgr, engines_ctx, priority);
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      struct iris_batch *batch = &ice->batches[i];
+   iris_foreach_batch(ice, batch) {
+      unsigned i = batch - &ice->batches[0];
       batch->ctx_id = engines_ctx;
       batch->exec_flags = i;
       batch->has_engines_context = true;
@@ -328,10 +328,14 @@ iris_init_engines_context(struct iris_context *ice, int priority)
 void
 iris_init_batches(struct iris_context *ice, int priority)
 {
+   /* We have to do this early for iris_foreach_batch() to work */
+   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
+      ice->batches[i].screen = (void *) ice->ctx.screen;
+
    if (!iris_init_engines_context(ice, priority))
       iris_init_non_engine_contexts(ice, priority);
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
-      iris_init_batch(ice, (enum iris_batch_name) i);
+   iris_foreach_batch(ice, batch)
+      iris_init_batch(ice, batch - &ice->batches[0]);
 }
 
 static int
@@ -400,7 +404,7 @@ flush_for_cross_batch_dependencies(struct iris_batch *batch,
    * it had already referenced, we may need to flush other batches in order
    * to correctly synchronize them.
    */
-   for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
+   for (int b = 0; b < batch->num_other_batches; b++) {
       struct iris_batch *other_batch = batch->other_batches[b];
       int other_index = find_exec_index(other_batch, bo);
 
@@ -598,8 +602,8 @@ iris_destroy_batches(struct iris_context *ice)
                                    ice->batches[0].ctx_id);
    }
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
-      iris_batch_free(&ice->batches[i]);
+   iris_foreach_batch(ice, batch)
+      iris_batch_free(batch);
 }
 
 /**
@@ -726,10 +730,10 @@ replace_kernel_ctx(struct iris_batch *batch)
       int new_ctx = iris_create_engines_context(ice, priority);
       if (new_ctx < 0)
          return false;
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         ice->batches[i].ctx_id = new_ctx;
+      iris_foreach_batch(ice, bat) {
+         bat->ctx_id = new_ctx;
          /* Notify the context that state must be re-initialized. */
-         iris_lost_context_state(&ice->batches[i]);
+         iris_lost_context_state(bat);
       }
       iris_destroy_kernel_context(bufmgr, old_ctx);
    } else {
@@ -810,6 +814,7 @@ update_bo_syncobjs(struct iris_batch *batch, struct iris_bo *bo, bool write)
 {
    struct iris_screen *screen = batch->screen;
    struct iris_bufmgr *bufmgr = screen->bufmgr;
+   struct iris_context *ice = batch->ice;
 
    /* Make sure bo->deps is big enough */
    if (screen->id >= bo->deps_size) {
@@ -838,7 +843,9 @@
    * have come from a different context, and apps don't like it when we don't
    * do inter-context tracking.
    */
-   for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
+   iris_foreach_batch(ice, batch_i) {
+      unsigned i = batch_i->name;
+
       /* If the bo is being written to by others, wait for them. */
       if (bo_deps->write_syncobjs[i])
          move_syncobj_to_batch(batch, &bo_deps->write_syncobjs[i],
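The new `num_other_batches` field matters for flush_for_cross_batch_dependencies() above: previously all IRIS_BATCH_COUNT - 1 slots of other_batches[] were always populated, but once iris_foreach_batch() skips unsupported batches the tail of the array stays zeroed, and looping to ARRAY_SIZE(batch->other_batches) would hand NULL pointers to find_exec_index(). A minimal standalone sketch of the counted-array pattern (the names are illustrative, not the driver's):

    #include <stddef.h>

    #define SLOT_MAX 3   /* stands in for IRIS_BATCH_COUNT - 1 */

    struct batch { int id; };

    /* Counted array: only entries[0..count) are valid; the tail may be NULL. */
    struct other_batches {
       struct batch *entries[SLOT_MAX];
       unsigned count;
    };

    static int
    sum_ids(const struct other_batches *o)
    {
       int sum = 0;
       /* Stop at the populated prefix, as the new loop bound does. */
       for (unsigned i = 0; i < o->count; i++)
          sum += o->entries[i]->id;
       return sum;
    }
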
diff --git a/src/gallium/drivers/iris/iris_batch.h b/src/gallium/drivers/iris/iris_batch.h
index 1b5b4638753..82f576d47a4 100644
--- a/src/gallium/drivers/iris/iris_batch.h
+++ b/src/gallium/drivers/iris/iris_batch.h
@@ -136,6 +136,7 @@ struct iris_batch {
 
    /** List of other batches which we might need to flush to use a BO */
    struct iris_batch *other_batches[IRIS_BATCH_COUNT - 1];
+   unsigned num_other_batches;
 
    struct {
       /**
@@ -382,4 +383,9 @@ iris_batch_mark_reset_sync(struct iris_batch *batch)
 const char *
 iris_batch_name_to_string(enum iris_batch_name name);
 
+#define iris_foreach_batch(ice, batch)                                       \
+   for (struct iris_batch *batch = &ice->batches[0];                         \
+        batch <= &ice->batches[((struct iris_screen *)ice->ctx.screen)->devinfo.ver >= 12 ? IRIS_BATCH_BLITTER : IRIS_BATCH_COMPUTE]; \
+        ++batch)
+
 #endif
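The iterator's upper bound assumes enum iris_batch_name orders the render and compute batches before the blitter batch, so iteration covers the blitter batch only on Gfx12+ hardware, where the driver uses the BLT engine. A use of the macro expands roughly as below; do_work() is a placeholder, and the local `screen` stands in for the inline cast:

    struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;

    for (struct iris_batch *batch = &ice->batches[0];
         batch <= &ice->batches[screen->devinfo.ver >= 12 ? IRIS_BATCH_BLITTER
                                                          : IRIS_BATCH_COMPUTE];
         ++batch) {
       do_work(batch);   /* placeholder loop body */
    }

Because the macro declares the cursor in the for statement, call sites that already have a `batch` in scope pick another name, as replace_kernel_ctx() (`bat`) and update_bo_syncobjs() (`batch_i`) do above.
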
diff --git a/src/gallium/drivers/iris/iris_border_color.c b/src/gallium/drivers/iris/iris_border_color.c
index 6c794752bd5..17097a6b0f6 100644
--- a/src/gallium/drivers/iris/iris_border_color.c
+++ b/src/gallium/drivers/iris/iris_border_color.c
@@ -114,9 +114,9 @@ iris_border_color_pool_reserve(struct iris_context *ice, unsigned count)
 
    if (remaining_entries < count) {
       /* It's safe to flush because we're called outside of state upload. */
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         if (iris_batch_references(&ice->batches[i], pool->bo))
-            iris_batch_flush(&ice->batches[i]);
+      iris_foreach_batch(ice, batch) {
+         if (iris_batch_references(batch, pool->bo))
+            iris_batch_flush(batch);
       }
 
       iris_reset_border_color_pool(pool, pool->bo->bufmgr);
diff --git a/src/gallium/drivers/iris/iris_context.c b/src/gallium/drivers/iris/iris_context.c
index 1a92df1a5e6..b91f6f8ef8b 100644
--- a/src/gallium/drivers/iris/iris_context.c
+++ b/src/gallium/drivers/iris/iris_context.c
@@ -98,12 +98,12 @@ iris_get_device_reset_status(struct pipe_context *ctx)
    /* Check the reset status of each batch's hardware context, and take the
     * worst status (if one was guilty, proclaim guilt).
     */
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
+   iris_foreach_batch(ice, batch) {
       /* This will also recreate the hardware contexts as necessary, so any
        * future queries will show no resets. We only want to report once.
        */
       enum pipe_reset_status batch_reset =
-         iris_batch_check_for_reset(&ice->batches[i]);
+         iris_batch_check_for_reset(batch);
 
       if (batch_reset == PIPE_NO_RESET)
          continue;
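The two conversions above are mechanical, but they highlight a recurring idiom: flush every batch that still references a BO. The same pattern appears three more times in iris_resource.c below; if consolidation were wanted later, it could become a small helper along these lines (hypothetical, not part of this patch):

    /* Hypothetical helper, not in this patch: flush any supported batch
     * that still references bo. */
    static void
    iris_flush_batches_referencing(struct iris_context *ice, struct iris_bo *bo)
    {
       iris_foreach_batch(ice, batch) {
          if (iris_batch_references(batch, bo))
             iris_batch_flush(batch);
       }
    }
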
diff --git a/src/gallium/drivers/iris/iris_fence.c b/src/gallium/drivers/iris/iris_fence.c
index bcaeb5d6757..387adb12122 100644
--- a/src/gallium/drivers/iris/iris_fence.c
+++ b/src/gallium/drivers/iris/iris_fence.c
@@ -263,8 +263,8 @@ iris_fence_flush(struct pipe_context *ctx,
    iris_flush_dirty_dmabufs(ice);
 
    if (!deferred) {
-      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
-         iris_batch_flush(&ice->batches[i]);
+      iris_foreach_batch(ice, batch)
+         iris_batch_flush(batch);
    }
 
    if (flags & PIPE_FLUSH_END_OF_FRAME) {
@@ -286,8 +286,8 @@ iris_fence_flush(struct pipe_context *ctx,
    if (deferred)
       fence->unflushed_ctx = ctx;
 
-   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
-      struct iris_batch *batch = &ice->batches[b];
+   iris_foreach_batch(ice, batch) {
+      unsigned b = batch->name;
 
       if (deferred && iris_batch_bytes_used(batch) > 0) {
          struct iris_fine_fence *fine =
@@ -339,9 +339,7 @@ iris_fence_await(struct pipe_context *ctx,
       if (iris_fine_fence_signaled(fine))
          continue;
 
-      for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
-         struct iris_batch *batch = &ice->batches[b];
-
+      iris_foreach_batch(ice, batch) {
         /* We're going to make any future work in this batch wait for our
          * fence to have gone by. But any currently queued work doesn't
          * need to wait. Flush the batch now, so it can happen sooner.
@@ -402,14 +400,14 @@ iris_fence_finish(struct pipe_screen *p_screen,
    * that it matches first.
    */
   if (ctx && ctx == fence->unflushed_ctx) {
-      for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++) {
-         struct iris_fine_fence *fine = fence->fine[i];
+      iris_foreach_batch(ice, batch) {
+         struct iris_fine_fence *fine = fence->fine[batch->name];
 
          if (iris_fine_fence_signaled(fine))
            continue;
 
-         if (fine->syncobj == iris_batch_get_signal_syncobj(&ice->batches[i]))
-            iris_batch_flush(&ice->batches[i]);
+         if (fine->syncobj == iris_batch_get_signal_syncobj(batch))
+            iris_batch_flush(batch);
       }
 
       /* The fence is no longer deferred. */
@@ -595,7 +593,7 @@ iris_fence_signal(struct pipe_context *ctx,
    if (ctx == fence->unflushed_ctx)
       return;
 
-   for (unsigned b = 0; b < IRIS_BATCH_COUNT; b++) {
+   iris_foreach_batch(ice, batch) {
       for (unsigned i = 0; i < ARRAY_SIZE(fence->fine); i++) {
          struct iris_fine_fence *fine = fence->fine[i];
 
@@ -603,9 +601,8 @@ iris_fence_signal(struct pipe_context *ctx,
          if (iris_fine_fence_signaled(fine))
            continue;
 
-         ice->batches[b].contains_fence_signal = true;
-         iris_batch_add_syncobj(&ice->batches[b], fine->syncobj,
-                                I915_EXEC_FENCE_SIGNAL);
+         batch->contains_fence_signal = true;
+         iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_SIGNAL);
       }
    }
 }
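In the fence code, fence->fine[] stays sized by IRIS_BATCH_COUNT and indexed by batch name, so entries for batches the iterator skips are simply never touched. Where the old loop index is still needed, it is recovered from the cursor; the two forms used by this patch are interchangeable as long as ice->batches[] is indexed by enum iris_batch_name:

    iris_foreach_batch(ice, batch) {
       unsigned b = batch->name;                /* as in iris_fence_flush() */
       unsigned i = batch - &ice->batches[0];   /* as in iris_init_engines_context() */
       assert(b == i);
    }
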
diff --git a/src/gallium/drivers/iris/iris_pipe_control.c b/src/gallium/drivers/iris/iris_pipe_control.c
index df6814fd32c..5ef0acba396 100644
--- a/src/gallium/drivers/iris/iris_pipe_control.c
+++ b/src/gallium/drivers/iris/iris_pipe_control.c
@@ -357,11 +357,10 @@ iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
              PIPE_CONTROL_TILE_CACHE_FLUSH;
    }
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (ice->batches[i].contains_draw) {
-         iris_batch_maybe_flush(&ice->batches[i], 24);
-         iris_emit_pipe_control_flush(&ice->batches[i], "API: memory barrier",
-                                      bits);
+   iris_foreach_batch(ice, batch) {
+      if (batch->contains_draw) {
+         iris_batch_maybe_flush(batch, 24);
+         iris_emit_pipe_control_flush(batch, "API: memory barrier", bits);
       }
    }
 }
diff --git a/src/gallium/drivers/iris/iris_resource.c b/src/gallium/drivers/iris/iris_resource.c
index 109c6de59ff..04d64b41670 100644
--- a/src/gallium/drivers/iris/iris_resource.c
+++ b/src/gallium/drivers/iris/iris_resource.c
@@ -1404,9 +1404,9 @@ iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
    * sure to get rid of any compression that a consumer wouldn't know how
    * to handle.
    */
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (iris_batch_references(&ice->batches[i], res->bo))
-         iris_batch_flush(&ice->batches[i]);
+   iris_foreach_batch(ice, batch) {
+      if (iris_batch_references(batch, res->bo))
+         iris_batch_flush(batch);
    }
 
    iris_resource_disable_aux(res);
@@ -1741,8 +1741,8 @@ resource_is_busy(struct iris_context *ice,
 {
    bool busy = iris_bo_busy(res->bo);
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++)
-      busy |= iris_batch_references(&ice->batches[i], res->bo);
+   iris_foreach_batch(ice, batch)
+      busy |= iris_batch_references(batch, res->bo);
 
    return busy;
 }
@@ -2339,9 +2339,9 @@ iris_transfer_map(struct pipe_context *ctx,
    }
 
    if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         if (iris_batch_references(&ice->batches[i], res->bo))
-            iris_batch_flush(&ice->batches[i]);
+      iris_foreach_batch(ice, batch) {
+         if (iris_batch_references(batch, res->bo))
+            iris_batch_flush(batch);
       }
    }
 
@@ -2384,8 +2384,7 @@ iris_transfer_flush_region(struct pipe_context *ctx,
    }
 
    if (history_flush & ~PIPE_CONTROL_CS_STALL) {
-      for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-         struct iris_batch *batch = &ice->batches[i];
+      iris_foreach_batch(ice, batch) {
         if (batch->contains_draw || batch->cache.render->entries) {
            iris_batch_maybe_flush(batch, 24);
            iris_emit_pipe_control_flush(batch,
@@ -2474,9 +2473,9 @@ iris_texture_subdata(struct pipe_context *ctx,
 
    iris_resource_access_raw(ice, res, level, box->z, box->depth, true);
 
-   for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
-      if (iris_batch_references(&ice->batches[i], res->bo))
-         iris_batch_flush(&ice->batches[i]);
+   iris_foreach_batch(ice, batch) {
+      if (iris_batch_references(batch, res->bo))
+         iris_batch_flush(batch);
    }
 
    uint8_t *dst = iris_bo_map(&ice->dbg, res->bo, MAP_WRITE | MAP_RAW);
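One closing observation: the iterator's bound arithmetic silently relies on the batch enum ordering (render and compute before blitter). If that invariant ever needs documenting in code, a compile-time guard next to the macro would do it; a possible form, assuming IRIS_BATCH_RENDER is the first enumerator and using C11 _Static_assert (not part of this patch):

    _Static_assert(IRIS_BATCH_RENDER < IRIS_BATCH_COMPUTE &&
                   IRIS_BATCH_COMPUTE < IRIS_BATCH_BLITTER,
                   "iris_foreach_batch() assumes this batch ordering");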