mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-08 09:08:10 +02:00
radeonsi: remove SI_OP_SKIP_CACHE_INV_BEFORE
The only remaining use had no effect because the caller never invokes si_barrier_before_internal_op and instead implements its own barrier. Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31193>
This commit is contained in:
parent
67593f0c51
commit
d225fb3025
4 changed files with 6 additions and 12 deletions
|
|
@ -75,8 +75,7 @@ void si_execute_clears(struct si_context *sctx, struct si_clear_info *info,
|
|||
|
||||
si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
|
||||
|
||||
unsigned flags = SI_OP_SKIP_CACHE_INV_BEFORE |
|
||||
(render_condition_enable ? SI_OP_CS_RENDER_COND_ENABLE : 0);
|
||||
unsigned flags = render_condition_enable ? SI_OP_CS_RENDER_COND_ENABLE : 0;
|
||||
|
||||
/* Execute clears. */
|
||||
for (unsigned i = 0; i < num_clears; i++) {
|
||||
|
|
|
|||
|
|
@ -101,11 +101,8 @@ void si_barrier_before_internal_op(struct si_context *sctx, unsigned flags,
|
|||
sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
|
||||
|
||||
/* Invalidate the VMEM cache only. The SMEM cache isn't used by shader buffers. */
|
||||
if (!(flags & SI_OP_SKIP_CACHE_INV_BEFORE))
|
||||
sctx->flags |= SI_CONTEXT_INV_VCACHE;
|
||||
|
||||
if (sctx->flags)
|
||||
si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
|
||||
sctx->flags |= SI_CONTEXT_INV_VCACHE;
|
||||
si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
|
||||
}
|
||||
|
||||
void si_barrier_after_internal_op(struct si_context *sctx, unsigned flags,
|
||||
|
|
|
|||
|
|
@ -151,7 +151,7 @@ void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
|
|||
assert(!sctx->screen->info.cp_sdma_ge_use_system_memory_scope);
|
||||
assert(size && size % 4 == 0);
|
||||
|
||||
if (!(user_flags & SI_OP_SKIP_CACHE_INV_BEFORE) && !cp_dma_use_L2(sctx)) {
|
||||
if (!cp_dma_use_L2(sctx)) {
|
||||
sctx->flags |= SI_CONTEXT_INV_L2;
|
||||
si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
|
||||
}
|
||||
|
|
@ -237,7 +237,7 @@ void si_cp_dma_copy_buffer(struct si_context *sctx, struct pipe_resource *dst,
|
|||
assert(size);
|
||||
assert(dst && src);
|
||||
|
||||
if (!(user_flags & SI_OP_SKIP_CACHE_INV_BEFORE) && !cp_dma_use_L2(sctx)) {
|
||||
if (!cp_dma_use_L2(sctx)) {
|
||||
sctx->flags |= SI_CONTEXT_INV_L2;
|
||||
si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1464,9 +1464,7 @@ void si_destroy_compute(struct si_compute *program);
|
|||
#define SI_OP_SYNC_BEFORE (SI_OP_SYNC_CS_BEFORE | SI_OP_SYNC_PS_BEFORE)
|
||||
#define SI_OP_SYNC_AFTER (1 << 3)
|
||||
#define SI_OP_SYNC_BEFORE_AFTER (SI_OP_SYNC_BEFORE | SI_OP_SYNC_AFTER)
|
||||
#define SI_OP_SKIP_CACHE_INV_BEFORE (1 << 4) /* don't invalidate caches */
|
||||
/* gap */
|
||||
#define SI_OP_CS_RENDER_COND_ENABLE (1 << 6)
|
||||
#define SI_OP_CS_RENDER_COND_ENABLE (1 << 4)
|
||||
/* gap */
|
||||
/* Only for si_compute_blit: */
|
||||
#define SI_OP_FAIL_IF_SLOW (1 << 9)
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue