ir3: Drop non-scoped barrier handling

Now unreachable.
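
Earlier commits in this series make every frontend and lowering pass emit
scoped barriers, so the legacy barrier intrinsics no longer reach the ir3
backend. As a rough sketch (assuming the generated nir_builder helpers of
this era; this is illustrative, not code from this commit), a workgroup
barrier() now arrives as a single scoped barrier carrying both the
execution scope and the memory semantics that the backend reads back out:

   /* Sketch: a workgroup-level barrier() expressed as one scoped
    * barrier (b is a nir_builder). ir3 recovers these indices via
    * nir_intrinsic_execution_scope() / nir_intrinsic_memory_modes().
    */
   nir_scoped_barrier(b,
                      .execution_scope = NIR_SCOPE_WORKGROUP,
                      .memory_scope = NIR_SCOPE_WORKGROUP,
                      .memory_semantics = NIR_MEMORY_ACQ_REL,
                      .memory_modes = nir_var_mem_shared);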

Signed-off-by: Alyssa Rosenzweig <alyssa@collabora.com>
Reviewed-by: Emma Anholt <emma@anholt.net>
Reviewed-by: Caio Oliveira <caio.oliveira@intel.com>
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21634>
Author: Alyssa Rosenzweig <alyssa@collabora.com>
Date: 2023-03-01 14:50:27 -05:00 (committed by Marge Bot)
Parent: fc93e8e537
Commit: c8147e69d3


@@ -1580,134 +1580,77 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
    * between a5xx and a6xx,
    */
-   switch (intr->intrinsic) {
-   case nir_intrinsic_control_barrier:
-      emit_control_barrier(ctx);
-      return;
-   case nir_intrinsic_scoped_barrier: {
-      nir_scope exec_scope = nir_intrinsic_execution_scope(intr);
-      nir_variable_mode modes = nir_intrinsic_memory_modes(intr);
-      /* loads/stores are always cache-coherent so we can filter out
-       * available/visible.
-       */
-      nir_memory_semantics semantics =
-         nir_intrinsic_memory_semantics(intr) & (NIR_MEMORY_ACQUIRE |
-                                                 NIR_MEMORY_RELEASE);
-      if (ctx->so->type == MESA_SHADER_TESS_CTRL) {
-         /* Remove mode corresponding to nir_intrinsic_memory_barrier_tcs_patch,
-          * because hull shaders dispatch 32 wide so an entire patch will
-          * always fit in a single warp and execute in lock-step.
-          *
-          * TODO: memory barrier also tells us not to reorder stores, this
-          * information is lost here (backend doesn't reorder stores so we
-          * are safe for now).
-          */
-         modes &= ~nir_var_shader_out;
-      }
-      assert(!(modes & nir_var_shader_out));
-      if ((modes &
-           (nir_var_mem_shared | nir_var_mem_ssbo | nir_var_mem_global |
-            nir_var_image)) && semantics) {
-         barrier = ir3_FENCE(b);
-         barrier->cat7.r = true;
-         barrier->cat7.w = true;
-         if (modes & (nir_var_mem_ssbo | nir_var_image | nir_var_mem_global)) {
-            barrier->cat7.g = true;
-         }
-         if (ctx->compiler->gen >= 6) {
-            if (modes & (nir_var_mem_ssbo | nir_var_image)) {
-               barrier->cat7.l = true;
-            }
-         } else {
-            if (modes & (nir_var_mem_shared | nir_var_mem_ssbo | nir_var_image)) {
-               barrier->cat7.l = true;
-            }
-         }
-         barrier->barrier_class = 0;
-         barrier->barrier_conflict = 0;
-         if (modes & nir_var_mem_shared) {
-            barrier->barrier_class |= IR3_BARRIER_SHARED_W;
-            barrier->barrier_conflict |=
-               IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
-         }
-         if (modes & (nir_var_mem_ssbo | nir_var_mem_global)) {
-            barrier->barrier_class |= IR3_BARRIER_BUFFER_W;
-            barrier->barrier_conflict |=
-               IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
-         }
-         if (modes & nir_var_image) {
-            barrier->barrier_class |= IR3_BARRIER_IMAGE_W;
-            barrier->barrier_conflict |=
-               IR3_BARRIER_IMAGE_W | IR3_BARRIER_IMAGE_R;
-         }
-         array_insert(b, b->keeps, barrier);
-      }
-      if (exec_scope >= NIR_SCOPE_WORKGROUP) {
-         emit_control_barrier(ctx);
-      }
-      return;
-   }
-   case nir_intrinsic_memory_barrier_tcs_patch:
-      /* Not applicable, see explanation for scoped_barrier + shader_out */
-      return;
-   case nir_intrinsic_memory_barrier_buffer:
-      barrier = ir3_FENCE(b);
-      barrier->cat7.g = true;
-      if (ctx->compiler->gen >= 6)
-         barrier->cat7.l = true;
-      barrier->cat7.r = true;
-      barrier->cat7.w = true;
-      barrier->barrier_class = IR3_BARRIER_BUFFER_W;
-      barrier->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
-      break;
-   case nir_intrinsic_memory_barrier_image:
-      barrier = ir3_FENCE(b);
-      barrier->cat7.g = true;
-      barrier->cat7.l = true;
-      barrier->cat7.r = true;
-      barrier->cat7.w = true;
-      barrier->barrier_class = IR3_BARRIER_IMAGE_W;
-      barrier->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
-      break;
-   case nir_intrinsic_memory_barrier_shared:
-      barrier = ir3_FENCE(b);
-      if (ctx->compiler->gen < 6)
-         barrier->cat7.l = true;
-      barrier->cat7.r = true;
-      barrier->cat7.w = true;
-      barrier->barrier_class = IR3_BARRIER_SHARED_W;
-      barrier->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
-      break;
-   case nir_intrinsic_memory_barrier:
-   case nir_intrinsic_group_memory_barrier:
-      barrier = ir3_FENCE(b);
-      barrier->cat7.g = true;
-      barrier->cat7.l = true;
-      barrier->cat7.r = true;
-      barrier->cat7.w = true;
-      barrier->barrier_class =
-         IR3_BARRIER_SHARED_W | IR3_BARRIER_IMAGE_W | IR3_BARRIER_BUFFER_W;
-      barrier->barrier_conflict = IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W |
-                                  IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W |
-                                  IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
-      break;
-   default:
-      unreachable("boo");
-   }
-   /* make sure barrier doesn't get DCE'd */
-   array_insert(b, b->keeps, barrier);
+   nir_scope exec_scope = nir_intrinsic_execution_scope(intr);
+   nir_variable_mode modes = nir_intrinsic_memory_modes(intr);
+   /* loads/stores are always cache-coherent so we can filter out
+    * available/visible.
+    */
+   nir_memory_semantics semantics =
+      nir_intrinsic_memory_semantics(intr) & (NIR_MEMORY_ACQUIRE |
+                                              NIR_MEMORY_RELEASE);
+   if (ctx->so->type == MESA_SHADER_TESS_CTRL) {
+      /* Remove mode corresponding to nir_intrinsic_memory_barrier_tcs_patch,
+       * because hull shaders dispatch 32 wide so an entire patch will
+       * always fit in a single warp and execute in lock-step.
+       *
+       * TODO: memory barrier also tells us not to reorder stores, this
+       * information is lost here (backend doesn't reorder stores so we
+       * are safe for now).
+       */
+      modes &= ~nir_var_shader_out;
+   }
+   assert(!(modes & nir_var_shader_out));
+   if ((modes & (nir_var_mem_shared | nir_var_mem_ssbo | nir_var_mem_global |
+                 nir_var_image)) && semantics) {
+      barrier = ir3_FENCE(b);
+      barrier->cat7.r = true;
+      barrier->cat7.w = true;
+      if (modes & (nir_var_mem_ssbo | nir_var_image | nir_var_mem_global)) {
+         barrier->cat7.g = true;
+      }
+      if (ctx->compiler->gen >= 6) {
+         if (modes & (nir_var_mem_ssbo | nir_var_image)) {
+            barrier->cat7.l = true;
+         }
+      } else {
+         if (modes & (nir_var_mem_shared | nir_var_mem_ssbo | nir_var_image)) {
+            barrier->cat7.l = true;
+         }
+      }
+      barrier->barrier_class = 0;
+      barrier->barrier_conflict = 0;
+      if (modes & nir_var_mem_shared) {
+         barrier->barrier_class |= IR3_BARRIER_SHARED_W;
+         barrier->barrier_conflict |=
+            IR3_BARRIER_SHARED_R | IR3_BARRIER_SHARED_W;
+      }
+      if (modes & (nir_var_mem_ssbo | nir_var_mem_global)) {
+         barrier->barrier_class |= IR3_BARRIER_BUFFER_W;
+         barrier->barrier_conflict |=
+            IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
+      }
+      if (modes & nir_var_image) {
+         barrier->barrier_class |= IR3_BARRIER_IMAGE_W;
+         barrier->barrier_conflict |=
+            IR3_BARRIER_IMAGE_W | IR3_BARRIER_IMAGE_R;
+      }
+      /* make sure barrier doesn't get DCE'd */
+      array_insert(b, b->keeps, barrier);
+   }
+   if (exec_scope >= NIR_SCOPE_WORKGROUP) {
+      emit_control_barrier(ctx);
+   }
 }
 
 static void
@@ -2267,13 +2210,6 @@ emit_intrinsic(struct ir3_context *ctx, nir_intrinsic_instr *intr)
       dst[0] = ctx->funcs->emit_intrinsic_atomic_image(ctx, intr);
       break;
    case nir_intrinsic_scoped_barrier:
-   case nir_intrinsic_control_barrier:
-   case nir_intrinsic_memory_barrier:
-   case nir_intrinsic_group_memory_barrier:
-   case nir_intrinsic_memory_barrier_buffer:
-   case nir_intrinsic_memory_barrier_image:
-   case nir_intrinsic_memory_barrier_shared:
-   case nir_intrinsic_memory_barrier_tcs_patch:
       emit_intrinsic_barrier(ctx, intr);
       /* note that blk ptr no longer valid, make that obvious: */
       b = NULL;
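
Each dropped case has a direct scoped equivalent, so no information is
lost by keeping only the scoped path. A sketch under the same assumptions
as above (illustrative NIR-producer code, not part of this commit): the
old nir_intrinsic_memory_barrier_buffer corresponds to a scoped barrier
carrying only SSBO modes, from which emit_intrinsic_barrier derives the
IR3_BARRIER_BUFFER_R/W class and conflict sets and the cat7 fence bits.

   /* Sketch (b is a nir_builder here, not the ir3_block above):
    * a buffer memory barrier as a scoped barrier with no execution
    * scope and device-wide SSBO acquire/release semantics.
    */
   nir_scoped_barrier(b,
                      .execution_scope = NIR_SCOPE_NONE,
                      .memory_scope = NIR_SCOPE_DEVICE,
                      .memory_semantics = NIR_MEMORY_ACQ_REL,
                      .memory_modes = nir_var_mem_ssbo);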