mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-04 20:38:06 +02:00
freedreno: drop ring arg from _set_stage()
It is always the draw ring, except for a5xx queries like time-elapsed, where we will eventually want to emit cmds into both binning and draw rings. Signed-off-by: Rob Clark <robdclark@gmail.com>
This commit is contained in:
parent
5923780b2a
commit
52d2fa37f5
7 changed files with 13 additions and 17 deletions
|
|
@ -262,7 +262,7 @@ batch_flush(struct fd_batch *batch)
|
|||
/* close out the draw cmds by making sure any active queries are
|
||||
* paused:
|
||||
*/
|
||||
fd_batch_set_stage(batch, batch->draw, FD_STAGE_NULL);
|
||||
fd_batch_set_stage(batch, FD_STAGE_NULL);
|
||||
|
||||
fd_context_all_dirty(batch->ctx);
|
||||
batch_flush_reset_dependencies(batch, true);
|
||||
|
|
|
|||
|
|
@ -307,8 +307,7 @@ struct fd_context {
|
|||
void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
|
||||
void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
|
||||
struct fd_ringbuffer *ring);
|
||||
void (*query_set_stage)(struct fd_batch *batch,
|
||||
struct fd_ringbuffer *ring, enum fd_render_stage stage);
|
||||
void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
|
||||
|
||||
/*
|
||||
* Common pre-cooked VBO state (used for a3xx and later):
|
||||
|
|
@ -388,8 +387,7 @@ fd_supported_prim(struct fd_context *ctx, unsigned prim)
|
|||
}
|
||||
|
||||
static inline void
|
||||
fd_batch_set_stage(struct fd_batch *batch,
|
||||
struct fd_ringbuffer *ring, enum fd_render_stage stage)
|
||||
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
|
||||
{
|
||||
struct fd_context *ctx = batch->ctx;
|
||||
|
||||
|
|
@ -404,7 +402,7 @@ fd_batch_set_stage(struct fd_batch *batch,
|
|||
return;
|
||||
|
||||
if (ctx->query_set_stage)
|
||||
ctx->query_set_stage(batch, ring, stage);
|
||||
ctx->query_set_stage(batch, stage);
|
||||
|
||||
batch->stage = stage;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -110,7 +110,7 @@ fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
|
|||
/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
|
||||
* query_buf may not be created yet.
|
||||
*/
|
||||
fd_batch_set_stage(batch, batch->draw, FD_STAGE_DRAW);
|
||||
fd_batch_set_stage(batch, FD_STAGE_DRAW);
|
||||
|
||||
/*
|
||||
* Figure out the buffers/features we need:
|
||||
|
|
@ -375,7 +375,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
|
|||
return;
|
||||
}
|
||||
|
||||
fd_batch_set_stage(batch, batch->draw, FD_STAGE_CLEAR);
|
||||
fd_batch_set_stage(batch, FD_STAGE_CLEAR);
|
||||
|
||||
ctx->clear(ctx, buffers, color, depth, stencil);
|
||||
|
||||
|
|
|
|||
|
|
@ -380,8 +380,7 @@ fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
|
|||
}
|
||||
|
||||
void
|
||||
fd_hw_query_set_stage(struct fd_batch *batch, struct fd_ringbuffer *ring,
|
||||
enum fd_render_stage stage)
|
||||
fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
|
||||
{
|
||||
if (stage != batch->stage) {
|
||||
struct fd_hw_query *hq;
|
||||
|
|
@ -390,9 +389,9 @@ fd_hw_query_set_stage(struct fd_batch *batch, struct fd_ringbuffer *ring,
|
|||
bool now_active = is_active(hq, stage);
|
||||
|
||||
if (now_active && !was_active)
|
||||
resume_query(batch, hq, ring);
|
||||
resume_query(batch, hq, batch->draw);
|
||||
else if (was_active && !now_active)
|
||||
pause_query(batch, hq, ring);
|
||||
pause_query(batch, hq, batch->draw);
|
||||
}
|
||||
}
|
||||
clear_sample_cache(batch);
|
||||
|
|
|
|||
|
|
@ -146,8 +146,7 @@ void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
|
|||
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
|
||||
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
|
||||
struct fd_ringbuffer *ring);
|
||||
void fd_hw_query_set_stage(struct fd_batch *batch,
|
||||
struct fd_ringbuffer *ring, enum fd_render_stage stage);
|
||||
void fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
|
||||
void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
|
||||
void fd_hw_query_register_provider(struct pipe_context *pctx,
|
||||
const struct fd_hw_sample_provider *provider);
|
||||
|
|
|
|||
|
|
@ -1091,7 +1091,7 @@ fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
|
|||
ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
|
||||
|
||||
if (ctx->batch)
|
||||
fd_batch_set_stage(ctx->batch, ctx->batch->draw, stage);
|
||||
fd_batch_set_stage(ctx->batch, stage);
|
||||
|
||||
ctx->in_blit = discard;
|
||||
}
|
||||
|
|
@ -1100,7 +1100,7 @@ void
|
|||
fd_blitter_pipe_end(struct fd_context *ctx)
|
||||
{
|
||||
if (ctx->batch)
|
||||
fd_batch_set_stage(ctx->batch, ctx->batch->draw, FD_STAGE_NULL);
|
||||
fd_batch_set_stage(ctx->batch, FD_STAGE_NULL);
|
||||
ctx->in_blit = false;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -126,7 +126,7 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
|
|||
fd_batch_reference(&old_batch, ctx->batch);
|
||||
|
||||
if (likely(old_batch))
|
||||
fd_batch_set_stage(old_batch, old_batch->draw, FD_STAGE_NULL);
|
||||
fd_batch_set_stage(old_batch, FD_STAGE_NULL);
|
||||
|
||||
batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
|
||||
fd_batch_reference(&ctx->batch, NULL);
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue