freedreno: hold batch references when flushing

It is possible for a batch to be freed under our feet when flushing, so
it is best to hold a reference to all of them up-front.

Signed-off-by: Rob Clark <robdclark@gmail.com>
This commit is contained in:
Rob Clark 2018-07-17 09:40:23 -04:00
parent 71add09e79
commit f2570409f9

View file

@@ -124,33 +124,52 @@ fd_bc_fini(struct fd_batch_cache *cache)
_mesa_hash_table_destroy(cache->ht, NULL);
}
/* Flush every batch in the cache that belongs to ctx.
 *
 * When 'deferred' is true nothing is flushed immediately; instead each of
 * ctx's other batches is recorded as a dependency of ctx->batch, so they
 * get flushed when that batch does.  When 'deferred' is false, each batch
 * is flushed directly (with the context lock dropped, since the flush can
 * itself take locks / free batches).
 */
static void
bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
{
/* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
 * can cause batches to be unref'd and freed under our feet, so grab
 * a reference to all the batches we need up-front.
 */
struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
struct fd_batch *batch;
unsigned n = 0;
fd_context_lock(ctx);
/* Collect a reference to every batch owned by this context: */
foreach_batch(batch, cache, cache->batch_mask) {
if (batch->ctx == ctx) {
fd_batch_reference_locked(&batches[n++], batch);
}
}
if (deferred) {
struct fd_batch *current_batch = ctx->batch;
/* NOTE(review): assumes ctx->batch is non-NULL on the deferred
 * path — confirm against callers of fd_bc_flush_deferred().
 */
for (unsigned i = 0; i < n; i++) {
if (batches[i] != current_batch) {
fd_batch_add_dep(current_batch, batches[i]);
}
}
fd_context_unlock(ctx);
} else {
/* Drop the lock before flushing; the references taken above keep
 * each batch alive even if it is unref'd during the flush.
 */
fd_context_unlock(ctx);
for (unsigned i = 0; i < n; i++) {
fd_batch_flush(batches[i], false, false);
}
}
/* Release the up-front references: */
for (unsigned i = 0; i < n; i++) {
fd_batch_reference(&batches[i], NULL);
}
}
/* Flush all of ctx's batches in the cache.
 *
 * NOTE(review): this diff span shows both the PRE-change body (the
 * hash_table_foreach loop below, which juggled screen->lock around each
 * flush) and its replacement — the single bc_flush(cache, ctx, false)
 * call at the end.  In the post-commit file only the latter remains.
 */
void
fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
{
struct hash_entry *entry;
struct fd_batch *last_batch = NULL;
mtx_lock(&ctx->screen->lock);
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = NULL;
/* hold a reference since we can drop screen->lock: */
fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
if (batch->ctx == ctx) {
/* drop the lock across the flush; the reference above keeps
 * 'batch' alive meanwhile:
 */
mtx_unlock(&ctx->screen->lock);
fd_batch_reference(&last_batch, batch);
fd_batch_flush(batch, false, false);
mtx_lock(&ctx->screen->lock);
}
fd_batch_reference_locked(&batch, NULL);
}
mtx_unlock(&ctx->screen->lock);
if (last_batch) {
/* presumably waits for the last flushed batch to complete —
 * TODO confirm fd_batch_sync() semantics:
 */
fd_batch_sync(last_batch);
fd_batch_reference(&last_batch, NULL);
}
bc_flush(cache, ctx, false);
}
/* deferred flush doesn't actually flush, but it marks every other
@@ -161,20 +180,7 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
/* Mark all of ctx's other batches as dependencies of the current batch
 * (deferred "flush" — nothing is submitted here).
 *
 * NOTE(review): this diff span shows both the PRE-change body (the
 * hand-rolled dependency loop under screen->lock below) and its
 * replacement — the bc_flush(cache, ctx, true) call.  In the
 * post-commit file only the latter remains; the new helper also takes
 * batch references, which the old loop did not.
 */
void
fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
{
struct fd_batch *current_batch = ctx->batch;
struct hash_entry *entry;
mtx_lock(&ctx->screen->lock);
hash_table_foreach(cache->ht, entry) {
struct fd_batch *batch = entry->data;
/* skip the batch everything else becomes a dependency of: */
if (batch == current_batch)
continue;
if (batch->ctx == ctx)
fd_batch_add_dep(current_batch, batch);
}
mtx_unlock(&ctx->screen->lock);
bc_flush(cache, ctx, true);
}
void