freedreno: Make a bunch of the batch cache take ctx as the arg.

As we move the batch cache from screen to context, it cleans up the diff a
ton to separate this mechanical change out, and makes the API generally
more consistent (since you usually have to pass the ctx anyway).

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11368>
This commit is contained in:
Emma Anholt 2021-06-14 11:48:14 -07:00 committed by Marge Bot
parent a960b0e477
commit 32bed95e0b
6 changed files with 33 additions and 35 deletions

View file

@@ -441,7 +441,7 @@ fd5_blitter_blit(struct fd_context *ctx,
       return false;
    }
-   batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+   batch = fd_bc_alloc_batch(ctx, true);
    fd_batch_update_queries(batch);

View file

@@ -912,7 +912,7 @@ handle_rgba_blit(struct fd_context *ctx,
    fd6_validate_format(ctx, src, info->src.format);
    fd6_validate_format(ctx, dst, info->dst.format);
-   batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+   batch = fd_bc_alloc_batch(ctx, true);
    fd_screen_lock(ctx->screen);

View file

@@ -142,9 +142,10 @@ fd_bc_fini(struct fd_batch_cache *cache)
 }
 static void
-bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx,
-         bool deferred) assert_dt
+bc_flush(struct fd_context *ctx, bool deferred) assert_dt
 {
+   struct fd_batch_cache *cache = &ctx->screen->batch_cache;
    /* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
     * can cause batches to be unref'd and freed under our feet, so grab
     * a reference to all the batches we need up-front.
@@ -188,9 +189,9 @@ bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx,
 }
 void
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_flush(struct fd_context *ctx)
 {
-   bc_flush(cache, ctx, false);
+   bc_flush(ctx, false);
 }
/* deferred flush doesn't actually flush, but it marks every other
@@ -199,20 +200,20 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
  * that came before also get flushed.
  */
 void
-fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_flush_deferred(struct fd_context *ctx)
 {
-   bc_flush(cache, ctx, true);
+   bc_flush(ctx, true);
 }
 void
-fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
+fd_bc_dump(struct fd_context *ctx, const char *fmt, ...)
 {
-   struct fd_batch_cache *cache = &screen->batch_cache;
+   struct fd_batch_cache *cache = &ctx->screen->batch_cache;
    if (!FD_DBG(MSGS))
       return;
-   fd_screen_lock(screen);
+   fd_screen_lock(ctx->screen);
    va_list ap;
    va_start(ap, fmt);
@@ -229,7 +230,7 @@ fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
    printf("----\n");
-   fd_screen_unlock(screen);
+   fd_screen_unlock(ctx->screen);
 }
void
@@ -393,9 +394,9 @@ alloc_batch_locked(struct fd_batch_cache *cache, struct fd_context *ctx,
 }
 struct fd_batch *
-fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx,
-                  bool nondraw)
+fd_bc_alloc_batch(struct fd_context *ctx, bool nondraw)
 {
+   struct fd_batch_cache *cache = &ctx->screen->batch_cache;
    struct fd_batch *batch;
    /* For normal draw batches, pctx->set_framebuffer_state() handles
@@ -416,9 +417,9 @@ fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx,
 }
 static struct fd_batch *
-batch_from_key(struct fd_batch_cache *cache, struct fd_batch_key *key,
-               struct fd_context *ctx) assert_dt
+batch_from_key(struct fd_context *ctx, struct fd_batch_key *key) assert_dt
 {
+   struct fd_batch_cache *cache = &ctx->screen->batch_cache;
    struct fd_batch *batch = NULL;
    uint32_t hash = fd_batch_key_hash(key);
    struct hash_entry *entry =
@@ -479,7 +480,7 @@ key_surf(struct fd_batch_key *key, unsigned idx, unsigned pos,
 }
 struct fd_batch *
-fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
+fd_batch_from_fb(struct fd_context *ctx,
                  const struct pipe_framebuffer_state *pfb)
 {
    unsigned idx = 0, n = pfb->nr_cbufs + (pfb->zsbuf ? 1 : 0);
@@ -501,7 +502,7 @@ fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
    key->num_surfs = idx;
    fd_screen_lock(ctx->screen);
-   struct fd_batch *batch = batch_from_key(cache, key, ctx);
+   struct fd_batch *batch = batch_from_key(ctx, key);
    fd_screen_unlock(ctx->screen);
    return batch;

View file

@@ -66,22 +66,19 @@ struct fd_batch_cache {
 void fd_bc_init(struct fd_batch_cache *cache);
 void fd_bc_fini(struct fd_batch_cache *cache);
-void fd_bc_flush(struct fd_batch_cache *cache,
-                 struct fd_context *ctx) assert_dt;
-void fd_bc_flush_deferred(struct fd_batch_cache *cache,
-                          struct fd_context *ctx) assert_dt;
-void fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
+void fd_bc_flush(struct fd_context *ctx) assert_dt;
+void fd_bc_flush_deferred(struct fd_context *ctx) assert_dt;
+void fd_bc_dump(struct fd_context *ctx, const char *fmt, ...)
    _util_printf_format(2, 3);
 void fd_bc_invalidate_context(struct fd_context *ctx);
 void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);
 void fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy);
-struct fd_batch *fd_bc_alloc_batch(struct fd_batch_cache *cache,
-                                   struct fd_context *ctx,
+struct fd_batch *fd_bc_alloc_batch(struct fd_context *ctx,
                                    bool nondraw) assert_dt;
 struct fd_batch *
-fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
+fd_batch_from_fb(struct fd_context *ctx,
                  const struct pipe_framebuffer_state *pfb) assert_dt;
 #endif /* FREEDRENO_BATCH_CACHE_H_ */

View file

@@ -57,7 +57,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
    if (fencep && !batch) {
       batch = fd_context_batch(ctx);
    } else if (!batch) {
-      fd_bc_dump(ctx->screen, "%p: NULL batch, remaining:\n", ctx);
+      fd_bc_dump(ctx, "%p: NULL batch, remaining:\n", ctx);
       return;
    }
@@ -83,7 +83,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
    if (ctx->last_fence) {
       fd_fence_repopulate(*fencep, ctx->last_fence);
       fd_fence_ref(&fence, *fencep);
-      fd_bc_dump(ctx->screen, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
+      fd_bc_dump(ctx, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
       goto out;
    }
@@ -109,7 +109,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
     */
    if (ctx->last_fence) {
       fd_fence_ref(&fence, ctx->last_fence);
-      fd_bc_dump(ctx->screen, "%p: reuse last_fence, remaining:\n", ctx);
+      fd_bc_dump(ctx, "%p: reuse last_fence, remaining:\n", ctx);
       goto out;
    }
@@ -119,7 +119,7 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
    if (flags & PIPE_FLUSH_FENCE_FD)
       fence->submit_fence.use_fence_fd = true;
-   fd_bc_dump(ctx->screen, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
+   fd_bc_dump(ctx, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
               batch, batch->seqno, flags);
/* If we get here, we need to flush for a fence, even if there is
@@ -130,12 +130,12 @@ fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
    if (!ctx->screen->reorder) {
       fd_batch_flush(batch);
    } else if (flags & PIPE_FLUSH_DEFERRED) {
-      fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
+      fd_bc_flush_deferred(ctx);
    } else {
-      fd_bc_flush(&ctx->screen->batch_cache, ctx);
+      fd_bc_flush(ctx);
    }
-   fd_bc_dump(ctx->screen, "%p: remaining:\n", ctx);
+   fd_bc_dump(ctx, "%p: remaining:\n", ctx);
 out:
    if (fencep)
@@ -297,7 +297,7 @@ fd_context_batch(struct fd_context *ctx)
    if (unlikely(!batch)) {
       batch =
-         fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
+         fd_batch_from_fb(ctx, &ctx->framebuffer);
       util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
       fd_batch_reference(&ctx->batch, batch);
       fd_context_all_dirty(ctx);

View file

@@ -536,7 +536,7 @@ fd_launch_grid(struct pipe_context *pctx,
       &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
    struct fd_batch *batch, *save_batch = NULL;
-   batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+   batch = fd_bc_alloc_batch(ctx, true);
    fd_batch_reference(&save_batch, ctx->batch);
    fd_batch_reference(&ctx->batch, batch);
    fd_context_all_dirty(ctx);