radeonsi: remove SI_OP_SYNC_AFTER

All places that call si_barrier_after_internal_op also set SI_OP_SYNC_AFTER,
so we can do the sync unconditionally.

If we want to skip the "after" sync in the future, we just won't call
si_barrier_after_internal_op.

CP DMA is the only path that still syncs even without si_barrier_after_internal_op,
but CP DMA ops are usually small and almost never used on GFX10+.

Reviewed-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/31193>
commit eb6c098cd1 (parent d225fb3025)
Author: Marek Olšák
Date:   2024-08-22 16:57:06 -04:00
Committed by: Marge Bot

14 changed files with 50 additions and 64 deletions
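
The whole change reduces to one pattern, sketched below as a self-contained model
for illustration only. The struct and flag values here are stand-ins, not the real
radeonsi definitions; the actual code is in the diff that follows.

   #include <stdio.h>

   /* Stand-ins for the real radeonsi context and flag bits. */
   struct ctx { unsigned flags; };
   #define OP_SYNC_AFTER        (1u << 3)
   #define CTX_CS_PARTIAL_FLUSH (1u << 0)

   /* Before: the "after" sync was gated on a flag that every caller set anyway. */
   static void barrier_after_old(struct ctx *c, unsigned op_flags)
   {
      if (op_flags & OP_SYNC_AFTER)
         c->flags |= CTX_CS_PARTIAL_FLUSH;
   }

   /* After: the sync is unconditional. A caller that doesn't want it simply
    * doesn't call the function, so the flag bit can be deleted (and the bits
    * above it renumbered, as in the si_pipe.h hunk below). */
   static void barrier_after_new(struct ctx *c)
   {
      c->flags |= CTX_CS_PARTIAL_FLUSH;
   }

   int main(void)
   {
      struct ctx a = {0}, b = {0};
      barrier_after_old(&a, OP_SYNC_AFTER); /* every call site looked like this */
      barrier_after_new(&b);                /* equivalent without the flag */
      printf("%u %u\n", a.flags, b.flags);  /* prints: 1 1 */
      return 0;
   }

Every call site in the diff follows suit: SI_OP_SYNC_AFTER disappears from op_flags,
and 0 is passed where only the removed bit remained.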

@@ -393,7 +393,7 @@ static void gfx11_sh_query_get_result_resource(struct si_context *sctx, struct s
    /* ssbo[2] is either tmp_buffer or resource */
    assert(ssbo[2].buffer);
-   unsigned op_flags = SI_OP_SYNC_PS_BEFORE | SI_OP_SYNC_AFTER;
+   unsigned op_flags = SI_OP_SYNC_PS_BEFORE;
    unsigned writable_bitmask = (1 << 2) | (ssbo[1].buffer ? 1 << 1 : 0);
    si_barrier_before_internal_op(sctx, op_flags, 3, ssbo, writable_bitmask, 0, NULL);

@@ -965,7 +965,7 @@ void si_resource_copy_region(struct pipe_context *ctx, struct pipe_resource *dst
    /* Handle buffers first. */
    if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
-      unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+      unsigned flags = SI_OP_SYNC_BEFORE;
       si_barrier_before_simple_buffer_op(sctx, flags, dst, src);
       si_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width, flags);

@@ -1271,7 +1271,7 @@ static void si_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
    if (unlikely(sctx->sqtt_enabled))
       sctx->sqtt_next_event = EventCmdCopyImage;
-   if (si_compute_blit(sctx, info, NULL, 0, 0, SI_OP_SYNC_BEFORE_AFTER | SI_OP_FAIL_IF_SLOW))
+   if (si_compute_blit(sctx, info, NULL, 0, 0, SI_OP_SYNC_BEFORE | SI_OP_FAIL_IF_SLOW))
       return;
    si_gfx_blit(ctx, info);

@@ -194,10 +194,9 @@ bool si_alloc_resource(struct si_screen *sscreen, struct si_resource *res)
       struct si_context *ctx = si_get_aux_context(&sscreen->aux_context.general);
       uint32_t value = 0;
-      unsigned flags = SI_OP_SYNC_AFTER;
-      si_clear_buffer(ctx, &res->b.b, 0, res->bo_size, &value, 4, flags,
+      si_clear_buffer(ctx, &res->b.b, 0, res->bo_size, &value, 4, 0,
                      SI_AUTO_SELECT_CLEAR_METHOD);
-      si_barrier_after_simple_buffer_op(ctx, flags, &res->b.b, NULL);
+      si_barrier_after_simple_buffer_op(ctx, 0, &res->b.b, NULL);
       si_put_aux_context_flush(&sscreen->aux_context.general);
    }

@@ -445,7 +444,7 @@ static void *si_buffer_transfer_map(struct pipe_context *ctx, struct pipe_resour
                                box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT), 256);
       if (staging) {
          /* Copy the VRAM buffer to the staging buffer. */
-         unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+         unsigned flags = SI_OP_SYNC_BEFORE;
          si_barrier_before_simple_buffer_op(sctx, flags, &staging->b.b, resource);
          si_copy_buffer(sctx, &staging->b.b, resource, box->x % SI_MAP_BUFFER_ALIGNMENT,

@@ -484,7 +483,7 @@ static void si_buffer_do_flush_region(struct pipe_context *ctx, struct pipe_tran
    if (stransfer->staging) {
       unsigned src_offset =
          stransfer->b.b.offset + transfer->box.x % SI_MAP_BUFFER_ALIGNMENT + (box->x - transfer->box.x);
-      unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+      unsigned flags = SI_OP_SYNC_BEFORE;
       /* Copy the staging buffer into the original one. */
       si_barrier_before_simple_buffer_op(sctx, flags, transfer->resource, &stransfer->staging->b.b);

@@ -112,23 +112,19 @@ void si_barrier_after_internal_op(struct si_context *sctx, unsigned flags,
                                   unsigned num_images,
                                   const struct pipe_image_view *images)
 {
-   if (flags & SI_OP_SYNC_AFTER) {
-      sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
-
-      if (num_images) {
-         /* Make sure image stores are visible to CB, which doesn't use L2 on GFX6-8. */
-         sctx->flags |= sctx->gfx_level <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
-         /* Make sure image stores are visible to all CUs. */
-         sctx->flags |= SI_CONTEXT_INV_VCACHE;
-      }
-
-      /* Make sure buffer stores are visible to all CUs and also as index/indirect buffers. */
-      if (num_buffers)
-         sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE | SI_CONTEXT_PFP_SYNC_ME;
-
-      si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
-   }
+   sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+
+   if (num_images) {
+      /* Make sure image stores are visible to CB, which doesn't use L2 on GFX6-8. */
+      sctx->flags |= sctx->gfx_level <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
+      /* Make sure image stores are visible to all CUs. */
+      sctx->flags |= SI_CONTEXT_INV_VCACHE;
+   }
+
+   /* Make sure buffer stores are visible to all CUs and also as index/indirect buffers. */
+   if (num_buffers)
+      sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE | SI_CONTEXT_PFP_SYNC_ME;
+
    /* We must set TC_L2_dirty for buffers because:
     * - GFX6,12: CP DMA doesn't use L2.
     * - GFX6-7,12: Index buffer reads don't use L2.

@@ -141,19 +137,19 @@ void si_barrier_after_internal_op(struct si_context *sctx, unsigned flags,
       si_resource(buffers[u_bit_scan(&writable_buffers_mask)].buffer)->TC_L2_dirty = true;

    /* Make sure RBs see our DCC image stores if RBs and TCCs (L2 instances) are non-coherent. */
-   if (flags & SI_OP_SYNC_AFTER && sctx->gfx_level >= GFX10 &&
-       sctx->screen->info.tcc_rb_non_coherent) {
+   if (sctx->gfx_level >= GFX10 && sctx->screen->info.tcc_rb_non_coherent) {
       for (unsigned i = 0; i < num_images; i++) {
          if (vi_dcc_enabled((struct si_texture*)images[i].resource, images[i].u.tex.level) &&
              images[i].access & PIPE_IMAGE_ACCESS_WRITE &&
              (sctx->screen->always_allow_dcc_stores ||
               images[i].access & SI_IMAGE_ACCESS_ALLOW_DCC_STORE)) {
             sctx->flags |= SI_CONTEXT_INV_L2;
-            si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
             break;
          }
       }
    }
+
+   si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
 }

 static void si_set_dst_src_barrier_buffers(struct pipe_shader_buffer *buffers,

@@ -441,7 +437,7 @@ static void si_pipe_clear_buffer(struct pipe_context *ctx, struct pipe_resource
                                  int clear_value_size)
 {
    struct si_context *sctx = (struct si_context *)ctx;
-   unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+   unsigned flags = SI_OP_SYNC_BEFORE;
    si_barrier_before_simple_buffer_op(sctx, flags, dst, NULL);
    si_clear_buffer(sctx, dst, offset, size, (uint32_t *)clear_value, clear_value_size, flags,

@@ -663,7 +659,7 @@ void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex
    struct pipe_grid_info info = {0};
    set_work_size(&info, 8, 8, 1, tex->width0, tex->height0, is_array ? tex->array_size : 1);

-   unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+   unsigned flags = SI_OP_SYNC_BEFORE;
    si_barrier_before_internal_op(sctx, flags, 0, NULL, 0, 1, &image);
    si_compute_begin_internal(sctx, flags);
    si_launch_grid_internal(sctx, &info, *shader);

@@ -687,13 +683,12 @@ void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex
    /* Clear FMASK to identity. */
    struct si_texture *stex = (struct si_texture *)tex;
-   unsigned op_flags = SI_OP_SYNC_AFTER;
    si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size,
                    (uint32_t *)&fmask_expand_values[log_fragments][log_samples - 1],
-                   log_fragments >= 2 && log_samples == 4 ? 8 : 4, op_flags,
+                   log_fragments >= 2 && log_samples == 4 ? 8 : 4, 0,
                    SI_AUTO_SELECT_CLEAR_METHOD);
-   si_barrier_after_simple_buffer_op(sctx, op_flags, tex, NULL);
+   si_barrier_after_simple_buffer_op(sctx, 0, tex, NULL);
 }

 void si_compute_clear_image_dcc_single(struct si_context *sctx, struct si_texture *tex,

@@ -777,7 +772,7 @@ bool si_compute_clear_image(struct si_context *sctx, struct pipe_resource *tex,
    }

    return si_compute_blit(sctx, &info, color, access, 0,
-                          SI_OP_SYNC_BEFORE_AFTER | (fail_if_slow ? SI_OP_FAIL_IF_SLOW : 0));
+                          SI_OP_SYNC_BEFORE | (fail_if_slow ? SI_OP_FAIL_IF_SLOW : 0));
 }

 bool si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, unsigned dst_level,

@@ -889,7 +884,7 @@ bool si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, u
    fail_if_slow &= !dst_access && !src_access;

    bool success = si_compute_blit(sctx, &info, NULL, dst_access, src_access,
-                                  SI_OP_SYNC_BEFORE_AFTER | (fail_if_slow ? SI_OP_FAIL_IF_SLOW : 0));
+                                  SI_OP_SYNC_BEFORE | (fail_if_slow ? SI_OP_FAIL_IF_SLOW : 0));
    assert((!dst_access && !src_access) || success);
    return success;
 }

@@ -136,7 +136,7 @@ static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst
    /* Do the synchronization after the last dma, so that all data
     * is written to memory.
     */
-   if (user_flags & SI_OP_SYNC_AFTER && byte_count == remaining_size)
+   if (byte_count == remaining_size)
       *packet_flags |= CP_DMA_SYNC;
 }

@@ -57,10 +57,9 @@ void si_init_cp_reg_shadowing(struct si_context *sctx)
    if (sctx->shadowing.registers) {
       /* We need to clear the shadowed reg buffer. */
-      unsigned flags = SI_OP_SYNC_AFTER;
       si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, &sctx->shadowing.registers->b.b,
-                             0, sctx->shadowing.registers->bo_size, 0, flags);
-      si_barrier_after_simple_buffer_op(sctx, flags, &sctx->shadowing.registers->b.b, NULL);
+                             0, sctx->shadowing.registers->bo_size, 0, 0);
+      si_barrier_after_simple_buffer_op(sctx, 0, &sctx->shadowing.registers->b.b, NULL);

       /* Create the shadowing preamble. (allocate enough dwords because the preamble is large) */
       struct si_pm4_state *shadowing_preamble = si_pm4_create_sized(sctx->screen, 256, false);

@@ -829,11 +829,9 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
        * for some reason when the compute codepath is used.
        */
       uint32_t clear_value = 0;
-      unsigned op_flags = SI_OP_SYNC_AFTER;
-
       si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0, sctx->null_const_buf.buffer->width0,
-                      &clear_value, 4, op_flags, SI_CP_DMA_CLEAR_METHOD);
-      si_barrier_after_simple_buffer_op(sctx, op_flags, sctx->null_const_buf.buffer, NULL);
+                      &clear_value, 4, 0, SI_CP_DMA_CLEAR_METHOD);
+      si_barrier_after_simple_buffer_op(sctx, 0, sctx->null_const_buf.buffer, NULL);
    }

    if (!(flags & SI_CONTEXT_FLAG_AUX)) {

@@ -1090,7 +1088,7 @@ static void si_test_vmfault(struct si_screen *sscreen, uint64_t test_flags)
    si_resource(buf)->gpu_address = 0; /* cause a VM fault */

    if (test_flags & DBG(TEST_VMFAULT_CP)) {
-      si_cp_dma_copy_buffer(sctx, buf, buf, 0, 4, 4, SI_OP_SYNC_BEFORE_AFTER);
+      si_cp_dma_copy_buffer(sctx, buf, buf, 0, 4, 4, SI_OP_SYNC_BEFORE);
       ctx->flush(ctx, NULL, 0);
       puts("VM fault test: CP - done.");
    }

@@ -1462,9 +1462,7 @@ void si_destroy_compute(struct si_compute *program);
 #define SI_OP_SYNC_PS_BEFORE        (1 << 1)
 #define SI_OP_SYNC_GE_BEFORE        (1 << 2) /* only sync VS, TCS, TES, GS */
 #define SI_OP_SYNC_BEFORE           (SI_OP_SYNC_CS_BEFORE | SI_OP_SYNC_PS_BEFORE)
-#define SI_OP_SYNC_AFTER            (1 << 3)
-#define SI_OP_SYNC_BEFORE_AFTER     (SI_OP_SYNC_BEFORE | SI_OP_SYNC_AFTER)
-#define SI_OP_CS_RENDER_COND_ENABLE (1 << 4)
+#define SI_OP_CS_RENDER_COND_ENABLE (1 << 3)
 /* gap */
 /* Only for si_compute_blit: */
 #define SI_OP_FAIL_IF_SLOW          (1 << 9)

@@ -1645,13 +1645,12 @@ static void si_query_hw_get_result_resource(struct si_context *sctx, struct si_q
          si_cp_wait_mem(sctx, &sctx->gfx_cs, va, 0x80000000, 0x80000000, WAIT_REG_MEM_EQUAL);
       }

-      unsigned op_flags = SI_OP_SYNC_AFTER;
       unsigned writable_bitmask = 0x4;

-      si_barrier_before_internal_op(sctx, op_flags, 3, ssbo, writable_bitmask, 0, NULL);
+      si_barrier_before_internal_op(sctx, 0, 3, ssbo, writable_bitmask, 0, NULL);
       si_launch_grid_internal_ssbos(sctx, &grid, sctx->query_result_shader,
-                                    op_flags, 3, ssbo, writable_bitmask);
-      si_barrier_after_internal_op(sctx, op_flags, 3, ssbo, writable_bitmask, 0, NULL);
+                                    0, 3, ssbo, writable_bitmask);
+      si_barrier_after_internal_op(sctx, 0, 3, ssbo, writable_bitmask, 0, NULL);
    }

    si_restore_qbo_state(sctx, &saved_state);

@@ -985,10 +985,9 @@ static void post_upload_binary(struct si_screen *sscreen, struct si_shader *shad
     * a compute shader, and we can't use shaders in the code that is responsible for making
     * them available.
     */
-   unsigned flags = SI_OP_SYNC_AFTER;
    si_cp_dma_copy_buffer(upload_ctx, &shader->bo->b.b, staging, 0, staging_offset,
-                         binary_size, flags);
-   si_barrier_after_simple_buffer_op(upload_ctx, flags, &shader->bo->b.b, staging);
+                         binary_size, 0);
+   si_barrier_after_simple_buffer_op(upload_ctx, 0, &shader->bo->b.b, staging);
    upload_ctx->flags |= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_L2;

 #if 0 /* debug: validate whether the copy was successful */

@@ -2113,10 +2113,9 @@ static void si_draw(struct pipe_context *ctx,
          if (unlikely(!indexbuf))
             return;

-         unsigned flags = SI_OP_SYNC_AFTER;
          si_compute_shorten_ubyte_buffer(sctx, indexbuf, info->index.resource, start_offset,
-                                         index_offset + start, count, flags);
-         si_barrier_after_simple_buffer_op(sctx, flags, indexbuf, info->index.resource);
+                                         index_offset + start, count, 0);
+         si_barrier_after_simple_buffer_op(sctx, 0, indexbuf, info->index.resource);

          index_offset = 0;
          index_size = 2;

@@ -621,7 +621,7 @@ void si_test_blit_perf(struct si_screen *sscreen)
             break;
          case METHOD_COMPUTE:
             success &= si_compute_blit(sctx, &info, NULL, 0, 0,
-                                       SI_OP_SYNC_BEFORE_AFTER);
+                                       SI_OP_SYNC_BEFORE);
             break;
          case METHOD_SPECIAL:
             if (test_flavor == TEST_BLIT && !yflip) {

@@ -207,7 +207,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
          if (method == METHOD_DEFAULT) {
             if (is_copy) {
-               unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+               unsigned flags = SI_OP_SYNC_BEFORE;
                si_barrier_before_simple_buffer_op(sctx, flags, dst, src);
                si_copy_buffer(sctx, dst, src, dst_offset, src_offset, size, flags);

@@ -231,7 +231,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
                continue;
             }
-            unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+            unsigned flags = SI_OP_SYNC_BEFORE;
             si_barrier_before_simple_buffer_op(sctx, flags, dst, src);
             si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size, flags);
             si_barrier_after_simple_buffer_op(sctx, flags, dst, src);

@@ -245,7 +245,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
             }
             assert(clear_value_size == 4);
-            unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+            unsigned flags = SI_OP_SYNC_BEFORE;
             si_barrier_before_simple_buffer_op(sctx, flags, dst, src);
             si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, dst, dst_offset, size,

@@ -254,7 +254,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
            }
         } else {
            /* Compute */
-           unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+           unsigned flags = SI_OP_SYNC_BEFORE;
            si_barrier_before_simple_buffer_op(sctx, flags, dst, src);
            success &=

@@ -489,7 +489,7 @@ void si_test_clear_buffer(struct si_screen *sscreen)
       printf("%s, ", COLOR_RESET);
       fflush(stdout);

-      unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+      unsigned flags = SI_OP_SYNC_BEFORE;
       si_barrier_before_simple_buffer_op(sctx, flags, dst, NULL);
       bool done = si_compute_clear_copy_buffer(sctx, dst, dst_offset, NULL, 0, op_size,
                                                (uint32_t*)clear_value, clear_value_size,

@@ -597,7 +597,7 @@ void si_test_copy_buffer(struct si_screen *sscreen)
       }
       fflush(stdout);

-      unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+      unsigned flags = SI_OP_SYNC_BEFORE;
       si_barrier_before_simple_buffer_op(sctx, flags, dst, src);
       bool done = si_compute_clear_copy_buffer(sctx, dst, dst_offset, src, src_offset, op_size,
                                                NULL, 0, flags, dwords_per_thread, false);

@@ -537,7 +537,7 @@ void si_test_image_copy_region(struct si_screen *sscreen)
          /* clear dst pixels */
          uint32_t zero = 0;
-         unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+         unsigned flags = SI_OP_SYNC_BEFORE;
          si_barrier_before_simple_buffer_op(sctx, flags, dst, NULL);
          si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4, flags,

@@ -719,7 +719,7 @@ void si_test_blit(struct si_screen *sscreen, unsigned test_flags)
       /* clear dst pixels */
       uint32_t zero = 0;
-      unsigned flags = SI_OP_SYNC_BEFORE_AFTER;
+      unsigned flags = SI_OP_SYNC_BEFORE;

       /* Using 2 consecutive barriers calls results in a single merged barrier for both resources. */
       si_barrier_before_simple_buffer_op(sctx, flags, gfx_dst, NULL);

@@ -940,7 +940,7 @@ void si_test_blit(struct si_screen *sscreen, unsigned test_flags)
       if (only_cb_resolve)
          success = si_msaa_resolve_blit_via_CB(ctx, &info, false);
       else
-         success = si_compute_blit(sctx, &info, NULL, 0, 0, SI_OP_SYNC_BEFORE_AFTER);
+         success = si_compute_blit(sctx, &info, NULL, 0, 0, SI_OP_SYNC_BEFORE);

       if (success) {
          printf(" %-7s", only_cb_resolve ? "resolve" : "comp");