freedreno: deferred flush support

Signed-off-by: Rob Clark <robdclark@gmail.com>
Author: Rob Clark <robdclark@gmail.com>
Date:   2017-11-19 11:42:25 -05:00
parent 15ebf387fc
commit 2fcf6faa06
5 changed files with 32 additions and 4 deletions

src/gallium/drivers/freedreno/freedreno_batch.c

@@ -350,8 +350,8 @@ batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
 	return false;
 }
 
-static void
-batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
+void
+fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 {
 	if (batch->dependents_mask & (1 << dep->idx))
 		return;
@@ -398,7 +398,7 @@ fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
 			 * fd_bc_invalidate_batch()
 			 */
 			fd_batch_reference(&b, dep);
-			batch_add_dep(batch, b);
+			fd_batch_add_dep(batch, b);
 			fd_bc_invalidate_batch(b, false);
 			fd_batch_reference_locked(&b, NULL);
 		}
@@ -406,7 +406,7 @@ fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
 		fd_batch_reference_locked(&rsc->write_batch, batch);
 	} else {
 		if (rsc->write_batch) {
-			batch_add_dep(batch, rsc->write_batch);
+			fd_batch_add_dep(batch, rsc->write_batch);
 			fd_bc_invalidate_batch(rsc->write_batch, false);
 		}
 	}

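A standalone sketch (not from this commit; the toy_* names are made up) of the bitmask bookkeeping that fd_batch_add_dep() performs: each batch occupies a small-integer slot in the batch cache, so "batch depends on dep" costs a single bit.

#include <stdint.h>
#include <stdio.h>

struct toy_batch {
	unsigned idx;             /* slot in the batch cache, < 32 */
	uint32_t dependents_mask; /* bit n set => depends on the batch in slot n */
};

static void
toy_add_dep(struct toy_batch *batch, struct toy_batch *dep)
{
	/* already recorded?  nothing to do (same early-out as the hunk above) */
	if (batch->dependents_mask & (1u << dep->idx))
		return;

	/* the real code must also avoid cycles in the dependency graph;
	 * that is what batch_depends_on() in the hunk context above is for */
	batch->dependents_mask |= (1u << dep->idx);
}

int main(void)
{
	struct toy_batch a = { .idx = 0 }, b = { .idx = 1 };

	toy_add_dep(&a, &b);
	toy_add_dep(&a, &b); /* idempotent */
	printf("a's dependency mask: 0x%x\n", a.dependents_mask); /* prints 0x2 */
	return 0;
}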
src/gallium/drivers/freedreno/freedreno_batch.h

@@ -207,6 +207,7 @@
 struct fd_batch * fd_batch_create(struct fd_context *ctx);
 void fd_batch_reset(struct fd_batch *batch);
 void fd_batch_sync(struct fd_batch *batch);
 void fd_batch_flush(struct fd_batch *batch, bool sync, bool force);
+void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
 void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
 void fd_batch_check_size(struct fd_batch *batch);

src/gallium/drivers/freedreno/freedreno_batch_cache.c

@@ -153,6 +153,30 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
 	}
 }
 
+/* deferred flush doesn't actually flush, but it marks every other
+ * batch associated with the context as dependent on the current
+ * batch.  So when the current batch gets flushed, all other batches
+ * that came before also get flushed.
+ */
+void
+fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+	struct fd_batch *current_batch = ctx->batch;
+	struct hash_entry *entry;
+
+	mtx_lock(&ctx->screen->lock);
+
+	hash_table_foreach(cache->ht, entry) {
+		struct fd_batch *batch = entry->data;
+		if (batch == current_batch)
+			continue;
+		if (batch->ctx == ctx)
+			fd_batch_add_dep(current_batch, batch);
+	}
+
+	mtx_unlock(&ctx->screen->lock);
+}
+
 void
 fd_bc_invalidate_context(struct fd_context *ctx)
 {

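Why recording edges is enough: flushing a batch drains its dependents_mask first, so fd_bc_flush_deferred() only has to mark the graph, and the eventual real flush submits older batches before the current one. A toy sketch of that ordering (not the actual fd_batch_flush(); the toy_* names are made up):

#include <stdint.h>
#include <stdio.h>

#define MAX_BATCHES 32

struct toy_batch {
	unsigned idx;
	uint32_t dependents_mask;
	int flushed;
};

static struct toy_batch *cache[MAX_BATCHES]; /* stand-in for the batch cache */

static void
toy_flush(struct toy_batch *batch)
{
	if (!batch || batch->flushed)
		return;

	/* flush everything this batch depends on first */
	uint32_t mask = batch->dependents_mask;
	while (mask) {
		unsigned i = __builtin_ctz(mask); /* index of lowest set bit */
		mask &= ~(1u << i);
		toy_flush(cache[i]);
	}

	batch->flushed = 1;
	printf("flushed batch %u\n", batch->idx);
}

int main(void)
{
	struct toy_batch old = { .idx = 0 }, cur = { .idx = 1 };

	cache[0] = &old;
	cache[1] = &cur;

	/* what a deferred flush records instead of submitting */
	cur.dependents_mask |= 1u << old.idx;

	toy_flush(&cur); /* prints batch 0 first, then batch 1 */
	return 0;
}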
src/gallium/drivers/freedreno/freedreno_batch_cache.h

@@ -63,6 +63,7 @@
 void fd_bc_init(struct fd_batch_cache *cache);
 void fd_bc_fini(struct fd_batch_cache *cache);
 
 void fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx);
+void fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx);
 void fd_bc_invalidate_context(struct fd_context *ctx);
 void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);

src/gallium/drivers/freedreno/freedreno_context.c

@@ -54,6 +54,8 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep, unsigned flags)
 
 	if (!ctx->screen->reorder) {
 		fd_batch_flush(ctx->batch, true, false);
+	} else if (flags & PIPE_FLUSH_DEFERRED) {
+		fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
 	} else {
 		fd_bc_flush(&ctx->screen->batch_cache, ctx);
 	}
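
Caller-side view (not part of this commit; example_deferred_flush is a made-up name, and the sketch assumes a gallium source tree for the headers): a state tracker asks for a fence without forcing an immediate submit, and a later wait on the fence (or a later regular flush) makes the batches actually go out, ordered by the dependencies recorded above.

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"

static void
example_deferred_flush(struct pipe_context *pctx)
{
	struct pipe_fence_handle *fence = NULL;

	/* with reorder enabled this now reaches fd_bc_flush_deferred() */
	pctx->flush(pctx, &fence, PIPE_FLUSH_DEFERRED);

	/* ... record more work; the deferred batches stay resident ... */

	/* waiting on the fence forces the real flush to happen */
	pctx->screen->fence_finish(pctx->screen, pctx, fence,
	                           PIPE_TIMEOUT_INFINITE);
	pctx->screen->fence_reference(pctx->screen, &fence, NULL);
}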