freedreno/batch: Stop tracking cross-context deps

The app is expected to provide suitable cross-context synchronization
(fences, etc), so don't try to do its job for them.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21274>
This commit is contained in:
Rob Clark 2023-02-13 15:25:28 -08:00 committed by Marge Bot
parent a4b949fe61
commit 9a6de00e98
3 changed files with 45 additions and 14 deletions

View file

@@ -233,6 +233,7 @@ batch_flush_dependencies(struct fd_batch *batch) assert_dt
struct fd_batch *dep;
foreach_batch (dep, cache, batch->dependents_mask) {
assert(dep->ctx == batch->ctx);
fd_batch_flush(dep);
fd_batch_reference(&dep, NULL);
}
@@ -431,6 +432,8 @@ fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
fd_screen_assert_locked(batch->ctx->screen);
assert(batch->ctx == dep->ctx);
if (fd_batch_has_dep(batch, dep))
return;
@@ -459,7 +462,6 @@ flush_write_batch(struct fd_resource *rsc) assert_dt
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
if (likely(fd_batch_references_resource(batch, rsc))) {
assert(_mesa_set_search_pre_hashed(batch->resources, rsc->hash, rsc));
return;
@@ -474,6 +476,8 @@ fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
void
fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
struct fd_resource_tracking *track = rsc->track;
fd_screen_assert_locked(batch->ctx->screen);
DBG("%p: write %p", batch, rsc);
@@ -483,7 +487,7 @@ fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
*/
rsc->valid = true;
if (rsc->track->write_batch == batch)
if (track->write_batch == batch)
return;
fd_batch_write_prep(batch, rsc);
@@ -494,17 +498,29 @@ fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
/* note, invalidate write batch, to avoid further writes to rsc
* resulting in a write-after-read hazard.
*/
/* if we are pending read or write by any other batch: */
if (unlikely(rsc->track->batch_mask & ~(1 << batch->idx))) {
/* if we are pending read or write by any other batch, they need to
* be ordered before the current batch:
*/
if (unlikely(track->batch_mask & ~(1 << batch->idx))) {
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct fd_batch *dep;
if (rsc->track->write_batch)
flush_write_batch(rsc);
if (track->write_batch) {
/* Cross-context writes without flush/barrier are undefined.
* Lets simply protect ourself from crashing by avoiding cross-
* ctx dependencies and let the app have the undefined behavior
* it asked for:
*/
if (track->write_batch->ctx != batch->ctx)
return;
foreach_batch (dep, cache, rsc->track->batch_mask) {
flush_write_batch(rsc);
}
foreach_batch (dep, cache, track->batch_mask) {
struct fd_batch *b = NULL;
if (dep == batch)
if ((dep == batch) || (dep->ctx != batch->ctx))
continue;
/* note that batch_add_dep could flush and unref dep, so
* we need to hold a reference to keep it live for the
@@ -516,7 +532,7 @@ fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
fd_batch_reference_locked(&b, NULL);
}
}
fd_batch_reference_locked(&rsc->track->write_batch, batch);
fd_batch_reference_locked(&track->write_batch, batch);
fd_batch_add_resource(batch, rsc);
}
@@ -531,12 +547,24 @@ fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
DBG("%p: read %p", batch, rsc);
struct fd_resource_tracking *track = rsc->track;
/* If reading a resource pending a write, go ahead and flush the
* writer. This avoids situations where we end up having to
* flush the current batch in _resource_used()
*/
if (unlikely(rsc->track->write_batch && rsc->track->write_batch != batch))
if (unlikely(track->write_batch && track->write_batch != batch)) {
if (track->write_batch->ctx != batch->ctx) {
/* Reading results from another context without flush/barrier
* is undefined. Let's simply protect ourself from crashing
* by avoiding cross-ctx dependencies and let the app have the
* undefined behavior it asked for:
*/
return;
}
flush_write_batch(rsc);
}
fd_batch_add_resource(batch, rsc);
}

View file

@@ -196,7 +196,8 @@ fd_bc_flush(struct fd_context *ctx, bool deferred) assert_dt
}
for (unsigned i = 0; i < n; i++) {
if (batches[i] && (batches[i] != current_batch)) {
if (batches[i] && (batches[i] != current_batch) &&
(batches[i]->ctx == current_batch->ctx)) {
fd_batch_add_dep(current_batch, batches[i]);
}
}
@@ -238,7 +239,8 @@ fd_bc_flush_writer(struct fd_context *ctx, struct fd_resource *rsc) assert_dt
fd_screen_unlock(ctx->screen);
if (write_batch) {
fd_batch_flush(write_batch);
if (write_batch->ctx == ctx)
fd_batch_flush(write_batch);
fd_batch_reference(&write_batch, NULL);
}
}
@@ -263,7 +265,8 @@ fd_bc_flush_readers(struct fd_context *ctx, struct fd_resource *rsc) assert_dt
fd_screen_unlock(ctx->screen);
for (int i = 0; i < batch_count; i++) {
fd_batch_flush(batches[i]);
if (batches[i]->ctx == ctx)
fd_batch_flush(batches[i]);
fd_batch_reference(&batches[i], NULL);
}
}

View file

@@ -54,7 +54,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
*/
fd_batch_reference(&batch, ctx->batch);
DBG("%p: flush: flags=%x, fencep=%p", batch, flags, fencep);
DBG("%p: %p: flush: flags=%x, fencep=%p", ctx, batch, flags, fencep);
if (fencep && !batch) {
batch = fd_context_batch(ctx);