panfrost: Replace writers pointer with hash table

This ensures each context can have a separate batch writing a resource
and we don't race trying to flush each other's batches. Unfortunately
the extra hash table operations regress draw-overhead numbers by about
8% but I'd rather eat the overhead and have an obviously correct
implementation than leave known buggy code in tree.

Signed-off-by: Alyssa Rosenzweig <alyssa@collabora.com>
Cc: mesa-stable
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12528>
(cherry picked from commit 8503cab2e0)

Conflicts:
	src/gallium/drivers/panfrost/pan_job.c
This commit is contained in:
Alyssa Rosenzweig 2021-08-17 17:16:54 +00:00 committed by Dylan Baker
parent ee6739d4d0
commit 4489684269
6 changed files with 29 additions and 11 deletions

View file

@@ -1129,7 +1129,7 @@
"description": "panfrost: Replace writers pointer with hash table",
"nominated": true,
"nomination_type": 0,
"resolution": 0,
"resolution": 1,
"main_sha": null,
"because_sha": null
},

View file

@@ -794,6 +794,8 @@ panfrost_destroy(struct pipe_context *pipe)
{
struct panfrost_context *panfrost = pan_context(pipe);
_mesa_hash_table_destroy(panfrost->writers, NULL);
if (panfrost->blitter)
util_blitter_destroy(panfrost->blitter);
@@ -1126,6 +1128,9 @@ panfrost_create_context(struct pipe_screen *screen, void *priv, unsigned flags)
ctx->blitter = util_blitter_create(gallium);
ctx->writers = _mesa_hash_table_create(gallium, _mesa_hash_pointer,
_mesa_key_pointer_equal);
assert(ctx->blitter);
/* Prepare for render! */

View file

@@ -146,6 +146,9 @@ struct panfrost_context {
BITSET_DECLARE(active, PAN_MAX_BATCHES);
} batches;
/* Map from resources to panfrost_batches */
struct hash_table *writers;
/* Bound job batch */
struct panfrost_batch *batch;

View file

@@ -132,8 +132,10 @@ panfrost_batch_cleanup(struct panfrost_batch *batch)
set_foreach_remove(batch->resources, entry) {
struct panfrost_resource *rsrc = (void *) entry->key;
if (rsrc->track.writer == batch)
rsrc->track.writer = NULL;
if (_mesa_hash_table_search(ctx->writers, rsrc)) {
_mesa_hash_table_remove_key(ctx->writers, rsrc);
rsrc->track.nr_writers--;
}
rsrc->track.nr_users--;
@@ -272,7 +274,8 @@ panfrost_batch_update_access(struct panfrost_batch *batch,
{
struct panfrost_context *ctx = batch->ctx;
uint32_t batch_idx = panfrost_batch_idx(batch);
struct panfrost_batch *writer = rsrc->track.writer;
struct hash_entry *entry = _mesa_hash_table_search(ctx->writers, rsrc);
struct panfrost_batch *writer = entry ? entry->data : NULL;
bool found = false;
_mesa_set_search_or_add(batch->resources, rsrc, &found);
@@ -301,8 +304,10 @@ panfrost_batch_update_access(struct panfrost_batch *batch,
}
}
if (writes)
rsrc->track.writer = batch;
if (writes) {
_mesa_hash_table_insert(ctx->writers, rsrc, batch);
rsrc->track.nr_writers++;
}
}
static void
@@ -933,8 +938,10 @@ void
panfrost_flush_writer(struct panfrost_context *ctx,
struct panfrost_resource *rsrc)
{
if (rsrc->track.writer) {
panfrost_batch_submit(rsrc->track.writer, ctx->syncobj, ctx->syncobj);
struct hash_entry *entry = _mesa_hash_table_search(ctx->writers, rsrc);
if (entry) {
panfrost_batch_submit(entry->data, ctx->syncobj, ctx->syncobj);
}
}

View file

@@ -870,7 +870,7 @@ panfrost_ptr_map(struct pipe_context *pctx,
bool valid = BITSET_TEST(rsrc->valid.data, level);
if ((usage & PIPE_MAP_READ) && (valid || rsrc->track.writer)) {
if ((usage & PIPE_MAP_READ) && (valid || rsrc->track.nr_writers > 0)) {
pan_blit_to_staging(pctx, transfer);
panfrost_flush_writer(ctx, staging);
panfrost_bo_wait(staging->image.data.bo, INT64_MAX, false);

View file

@@ -48,11 +48,14 @@ struct panfrost_resource {
} damage;
struct {
struct panfrost_batch *writer;
/** Number of batches accessing this resource. Used to check if
* a resource is in use. */
_Atomic unsigned nr_users;
/** Number of batches writing this resource. Note that only one
* batch per context may write a resource, so this is the
* number of contexts that have an active writer. */
_Atomic unsigned nr_writers;
} track;
struct renderonly_scanout *scanout;