compiler: Move from nir_scope to mesa_scope

Just moving the enum and performing renames, no behavior change.
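For downstream code that must build against Mesa trees on both sides of this rename, a small compatibility shim is enough. The sketch below is hypothetical and not part of the commit; because enumerators are invisible to the preprocessor, it is keyed off an invented macro (HAVE_MESA_SCOPE) that the downstream build would have to define itself:

    #ifndef HAVE_MESA_SCOPE
    /* Old tree: only nir_scope exists; map the new spellings onto it. */
    typedef nir_scope mesa_scope;
    #define SCOPE_NONE         NIR_SCOPE_NONE
    #define SCOPE_INVOCATION   NIR_SCOPE_INVOCATION
    #define SCOPE_SUBGROUP     NIR_SCOPE_SUBGROUP
    #define SCOPE_SHADER_CALL  NIR_SCOPE_SHADER_CALL
    #define SCOPE_WORKGROUP    NIR_SCOPE_WORKGROUP
    #define SCOPE_QUEUE_FAMILY NIR_SCOPE_QUEUE_FAMILY
    #define SCOPE_DEVICE       NIR_SCOPE_DEVICE
    #endif

The value-for-value mapping works because the new mesa_scope enum (added below, next to gl_texture_index) keeps exactly the old nir_scope member order.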

Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Yonggang Luo <luoyonggang@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/23328>
Author: Caio Oliveira, 2023-05-30 12:05:30 -07:00; committed by Marge Bot
parent 4d26b38caf
commit 59cc77f0fa
52 changed files with 236 additions and 235 deletions

View file

@ -264,7 +264,7 @@ ac_nir_export_position(nir_builder *b,
if (gfx_level >= GFX10 && no_param_export && b->shader->info.writes_memory) {
nir_cursor cursor = b->cursor;
b->cursor = nir_before_instr(&final_exp->instr);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_ssbo | nir_var_mem_global | nir_var_image);
b->cursor = cursor;
}
@ -1098,8 +1098,8 @@ ac_nir_lower_legacy_gs(nir_shader *nir,
s.primitive_count);
/* Wait for all stores to finish. */
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_INVOCATION,
.memory_scope = NIR_SCOPE_DEVICE,
nir_scoped_barrier(b, .execution_scope = SCOPE_INVOCATION,
.memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_RELEASE,
.memory_modes = nir_var_shader_out | nir_var_mem_ssbo |
nir_var_mem_global | nir_var_image);

View file

@ -358,7 +358,7 @@ repack_invocations_in_workgroup(nir_builder *b, nir_ssa_def *input_bool,
nir_store_shared(b, nir_u2u8(b, surviving_invocations_in_current_wave), lds_offset);
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_ssa_def *packed_counts =
@ -564,8 +564,8 @@ emit_ngg_nogs_prim_export(nir_builder *b, lower_ngg_nogs_state *s, nir_ssa_def *
/* pack user edge flag info into arg */
if (s->has_user_edgeflags) {
/* Workgroup barrier: wait for ES threads to store user edge flags to LDS */
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
@ -993,7 +993,7 @@ compact_vertices_after_culling(nir_builder *b,
* Waves that have no vertices and primitives left can s_endpgm right here.
*/
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_ssa_def *es_survived = nir_ilt(b, invocation_index, num_live_vertices_in_workgroup);
@ -1517,7 +1517,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
}
nir_pop_if(b, if_es_thread);
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_store_var(b, s->gs_accepted_var, nir_imm_false(b), 0x1u);
@ -1570,7 +1570,7 @@ add_deferred_attribute_culling(nir_builder *b, nir_cf_list *original_extracted_c
}
nir_pop_if(b, if_gs_thread);
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
nir_store_var(b, s->es_accepted_var, nir_imm_false(b), 0x1u);
@ -1910,8 +1910,8 @@ ngg_build_streamout_buffer_info(nir_builder *b,
}
nir_pop_if(b, if_invocation_0);
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
@ -2071,7 +2071,7 @@ ngg_nogs_build_streamout(nir_builder *b, lower_ngg_nogs_state *s)
* TODO: not sure if we need this barrier when using late prim export, as I
* can't observe any test failing without this barrier.
*/
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE, nir_var_mem_ssbo);
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE, nir_var_mem_ssbo);
}
static unsigned
@ -2389,8 +2389,8 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
/* Wait for culling to finish using LDS. */
if (need_prim_id_store_shared || has_user_edgeflags) {
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
}
@ -2408,7 +2408,7 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
emit_ngg_nogs_prim_id_store_shared(b, &state);
/* Wait for GS threads to store primitive ID in LDS. */
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP, .memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);
}
@ -3162,8 +3162,8 @@ ngg_gs_cull_primitive(nir_builder *b, nir_ssa_def *tid_in_tg, nir_ssa_def *max_v
nir_pop_if(b, if_prim_enable);
/* Wait for LDS primflag accesses to finish. */
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
@ -3256,8 +3256,8 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
}
/* Workgroup barrier: wait for LDS scratch reads to finish. */
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
@ -3371,7 +3371,7 @@ ngg_gs_finale(nir_builder *b, lower_ngg_gs_state *s)
ngg_gs_setup_vertex_compaction(b, vertex_live, tid_in_tg, exporter_tid_in_tg, s);
/* Workgroup barrier: wait for all LDS stores to finish. */
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
ngg_gs_export_primitives(b, max_prmcnt, tid_in_tg, exporter_tid_in_tg, out_vtx_primflag_0, s);
@ -3420,7 +3420,7 @@ ac_nir_lower_ngg_gs(nir_shader *shader, const ac_nir_lower_ngg_options *options)
b->cursor = nir_before_cf_list(&impl->body);
/* Workgroup barrier: wait for ES threads */
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
state.lds_addr_gs_out_vtx = nir_load_lds_ngg_gs_out_vertex_base_amd(b);
@ -3434,7 +3434,7 @@ ac_nir_lower_ngg_gs(nir_shader *shader, const ac_nir_lower_ngg_options *options)
nir_pop_if(b, if_gs_thread);
/* Workgroup barrier: wait for all GS threads to finish */
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_mem_shared);
if (state.streamout_enabled)
@ -4090,15 +4090,15 @@ ms_emit_legacy_workgroup_index(nir_builder *b, lower_ngg_ms_state *s)
nir_if *if_wave_0 = nir_push_if(b, nir_ieq_imm(b, wave_id, 0));
{
nir_store_shared(b, workgroup_index, zero, .base = workgroup_index_lds_addr);
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
}
nir_push_else(b, if_wave_0);
{
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
loaded_workgroup_index = nir_load_shared(b, 1, 32, zero, .base = workgroup_index_lds_addr);
@ -4156,8 +4156,8 @@ set_ms_final_output_counts(nir_builder *b,
}
nir_pop_if(b, if_elected);
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
@ -4165,8 +4165,8 @@ set_ms_final_output_counts(nir_builder *b,
}
nir_push_else(b, if_wave_0);
{
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
@ -4287,7 +4287,7 @@ emit_ms_finale(nir_builder *b, lower_ngg_ms_state *s)
nir_block *last_block = nir_impl_last_block(b->impl);
b->cursor = nir_after_block(last_block);
nir_scoped_barrier(b, .execution_scope=NIR_SCOPE_WORKGROUP, .memory_scope=NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope=SCOPE_WORKGROUP, .memory_scope=SCOPE_WORKGROUP,
.memory_semantics=NIR_MEMORY_ACQ_REL, .memory_modes=nir_var_shader_out|nir_var_mem_shared);
nir_ssa_def *num_prm;
@ -4444,7 +4444,7 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
bool is_workgroup_barrier =
intrin->intrinsic == nir_intrinsic_scoped_barrier &&
nir_intrinsic_execution_scope(intrin) == NIR_SCOPE_WORKGROUP;
nir_intrinsic_execution_scope(intrin) == SCOPE_WORKGROUP;
if (!is_workgroup_barrier)
continue;
@ -4454,8 +4454,8 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
* In this case, we can change the barriers to subgroup scope
* and avoid adding additional barriers.
*/
nir_intrinsic_set_memory_scope(intrin, NIR_SCOPE_SUBGROUP);
nir_intrinsic_set_execution_scope(intrin, NIR_SCOPE_SUBGROUP);
nir_intrinsic_set_memory_scope(intrin, SCOPE_SUBGROUP);
nir_intrinsic_set_execution_scope(intrin, SCOPE_SUBGROUP);
} else {
has_any_workgroup_barriers = true;
}
@ -4482,8 +4482,8 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
}
nir_pop_if(b, if_first_in_workgroup);
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_shader_out | nir_var_mem_shared);
}
@ -4504,8 +4504,8 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
}
nir_pop_if(b, if_elected_again);
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_shader_out | nir_var_mem_shared);
}
@ -4527,8 +4527,8 @@ handle_smaller_ms_api_workgroup(nir_builder *b,
{
nir_loop *loop = nir_push_loop(b);
{
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_shader_out | nir_var_mem_shared);

View file

@ -153,8 +153,8 @@ lower_task_launch_mesh_workgroups(nir_builder *b,
* always a waitcnt_vscnt instruction in order to avoid a race condition
* between payload stores and their loads after mesh shaders launch.
*/
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_DEVICE,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_task_payload | nir_var_shader_out |
nir_var_mem_ssbo | nir_var_mem_global);
@ -176,7 +176,7 @@ lower_task_launch_mesh_workgroups(nir_builder *b,
/* Dispatch dimensions of mesh shader workgroups. */
task_write_draw_ring(b, nir_vec3(b, x, y, z), 0, s);
/* Prevent the two stores from being reordered. */
nir_scoped_memory_barrier(b, NIR_SCOPE_INVOCATION, NIR_MEMORY_RELEASE, nir_var_shader_out);
nir_scoped_memory_barrier(b, SCOPE_INVOCATION, NIR_MEMORY_RELEASE, nir_var_shader_out);
/* Ready bit, only write the low 8 bits. */
task_write_draw_ring(b, task_draw_ready_bit(b, s), 12, s);
}

View file

@ -496,13 +496,13 @@ update_hs_scoped_barrier(nir_intrinsic_instr *intrin, lower_tess_io_state *st)
}
nir_intrinsic_set_memory_modes(intrin, mem_modes);
nir_scope exec_scope = nir_intrinsic_execution_scope(intrin);
if (exec_scope == NIR_SCOPE_WORKGROUP && st->tcs_out_patch_fits_subgroup)
nir_intrinsic_set_execution_scope(intrin, NIR_SCOPE_SUBGROUP);
mesa_scope exec_scope = nir_intrinsic_execution_scope(intrin);
if (exec_scope == SCOPE_WORKGROUP && st->tcs_out_patch_fits_subgroup)
nir_intrinsic_set_execution_scope(intrin, SCOPE_SUBGROUP);
nir_scope mem_scope = nir_intrinsic_memory_scope(intrin);
if (mem_scope == NIR_SCOPE_WORKGROUP && st->tcs_out_patch_fits_subgroup)
nir_intrinsic_set_memory_scope(intrin, NIR_SCOPE_SUBGROUP);
mesa_scope mem_scope = nir_intrinsic_memory_scope(intrin);
if (mem_scope == SCOPE_WORKGROUP && st->tcs_out_patch_fits_subgroup)
nir_intrinsic_set_memory_scope(intrin, SCOPE_SUBGROUP);
}
static nir_ssa_def *
@ -566,8 +566,8 @@ hs_emit_write_tess_factors(nir_shader *shader,
/* If tess factors are loaded from LDS, wait for previous LDS stores to finish. */
if (!st->tcs_pass_tessfactors_by_reg) {
nir_scope scope = st->tcs_out_patch_fits_subgroup ?
NIR_SCOPE_SUBGROUP : NIR_SCOPE_WORKGROUP;
mesa_scope scope = st->tcs_out_patch_fits_subgroup ?
SCOPE_SUBGROUP : SCOPE_WORKGROUP;
nir_scoped_barrier(b, .execution_scope = scope, .memory_scope = scope,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_shared);

View file

@ -7209,16 +7209,16 @@ visit_load_smem(isel_context* ctx, nir_intrinsic_instr* instr)
}
sync_scope
translate_nir_scope(nir_scope scope)
translate_nir_scope(mesa_scope scope)
{
switch (scope) {
case NIR_SCOPE_NONE:
case NIR_SCOPE_INVOCATION: return scope_invocation;
case NIR_SCOPE_SUBGROUP: return scope_subgroup;
case NIR_SCOPE_WORKGROUP: return scope_workgroup;
case NIR_SCOPE_QUEUE_FAMILY: return scope_queuefamily;
case NIR_SCOPE_DEVICE: return scope_device;
case NIR_SCOPE_SHADER_CALL: return scope_invocation;
case SCOPE_NONE:
case SCOPE_INVOCATION: return scope_invocation;
case SCOPE_SUBGROUP: return scope_subgroup;
case SCOPE_WORKGROUP: return scope_workgroup;
case SCOPE_QUEUE_FAMILY: return scope_queuefamily;
case SCOPE_DEVICE: return scope_device;
case SCOPE_SHADER_CALL: return scope_invocation;
}
unreachable("invalid scope");
}
@ -8847,17 +8847,17 @@ visit_intrinsic(isel_context* ctx, nir_intrinsic_instr* instr)
}
case nir_intrinsic_shader_clock: {
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
if (nir_intrinsic_memory_scope(instr) == NIR_SCOPE_SUBGROUP &&
if (nir_intrinsic_memory_scope(instr) == SCOPE_SUBGROUP &&
ctx->options->gfx_level >= GFX10_3) {
/* "((size - 1) << 11) | register" (SHADER_CYCLES is encoded as register 29) */
Temp clock = bld.sopk(aco_opcode::s_getreg_b32, bld.def(s1), ((20 - 1) << 11) | 29);
bld.pseudo(aco_opcode::p_create_vector, Definition(dst), clock, Operand::zero());
} else if (nir_intrinsic_memory_scope(instr) == NIR_SCOPE_DEVICE &&
} else if (nir_intrinsic_memory_scope(instr) == SCOPE_DEVICE &&
ctx->options->gfx_level >= GFX11) {
bld.sop1(aco_opcode::s_sendmsg_rtn_b64, Definition(dst),
Operand::c32(sendmsg_rtn_get_realtime));
} else {
aco_opcode opcode = nir_intrinsic_memory_scope(instr) == NIR_SCOPE_DEVICE
aco_opcode opcode = nir_intrinsic_memory_scope(instr) == SCOPE_DEVICE
? aco_opcode::s_memrealtime
: aco_opcode::s_memtime;
bld.smem(opcode, Definition(dst), memory_sync_info(0, semantic_volatile));

View file

@ -458,9 +458,9 @@ void ac_build_optimization_barrier(struct ac_llvm_context *ctx, LLVMValueRef *pg
}
}
LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, nir_scope scope)
LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, mesa_scope scope)
{
if (ctx->gfx_level >= GFX11 && scope == NIR_SCOPE_DEVICE) {
if (ctx->gfx_level >= GFX11 && scope == SCOPE_DEVICE) {
const char *name = "llvm.amdgcn.s.sendmsg.rtn.i64";
LLVMValueRef arg = LLVMConstInt(ctx->i32, 0x83 /* realtime */, 0);
LLVMValueRef tmp = ac_build_intrinsic(ctx, name, ctx->i64, &arg, 1, 0);
@ -468,7 +468,7 @@ LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, nir_scope scope)
}
const char *subgroup = "llvm.readcyclecounter";
const char *name = scope == NIR_SCOPE_DEVICE ? "llvm.amdgcn.s.memrealtime" : subgroup;
const char *name = scope == SCOPE_DEVICE ? "llvm.amdgcn.s.memrealtime" : subgroup;
LLVMValueRef tmp = ac_build_intrinsic(ctx, name, ctx->i64, NULL, 0, 0);
return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");

View file

@ -173,7 +173,7 @@ LLVMValueRef ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type, unsigne
void ac_build_s_barrier(struct ac_llvm_context *ctx, gl_shader_stage stage);
void ac_build_optimization_barrier(struct ac_llvm_context *ctx, LLVMValueRef *pgpr, bool sgpr);
LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, nir_scope scope);
LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, mesa_scope scope);
LLVMValueRef ac_build_ballot(struct ac_llvm_context *ctx, LLVMValueRef value);
LLVMValueRef ac_get_i1_sgpr_mask(struct ac_llvm_context *ctx, LLVMValueRef value);

View file

@ -3297,7 +3297,7 @@ static bool visit_intrinsic(struct ac_nir_context *ctx, nir_intrinsic_instr *ins
if (wait_flags)
ac_build_waitcnt(&ctx->ac, wait_flags);
if (nir_intrinsic_execution_scope(instr) == NIR_SCOPE_WORKGROUP)
if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP)
ac_build_s_barrier(&ctx->ac, ctx->stage);
break;
}

View file

@ -62,11 +62,11 @@ build_expand_depth_stencil_compute_shader(struct radv_device *dev)
nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, global_id,
nir_ssa_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
/* We need a NIR_SCOPE_DEVICE memory_scope because ACO will avoid
/* We need a SCOPE_DEVICE memory_scope because ACO will avoid
* creating a vmcnt(0), since it expects the L1 cache to keep memory
* operations in order for the same workgroup. The vmcnt(0) seems
* necessary, however. */
nir_scoped_barrier(&b, .execution_scope = NIR_SCOPE_WORKGROUP, .memory_scope = NIR_SCOPE_DEVICE,
nir_scoped_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, global_id, nir_ssa_undef(&b, 1, 32), data,

View file

@ -60,11 +60,11 @@ build_dcc_decompress_compute_shader(struct radv_device *dev)
nir_image_deref_load(&b, 4, 32, &nir_build_deref_var(&b, input_img)->dest.ssa, img_coord,
nir_ssa_undef(&b, 1, 32), nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_2D);
/* We need a NIR_SCOPE_DEVICE memory_scope because ACO will avoid
/* We need a SCOPE_DEVICE memory_scope because ACO will avoid
* creating a vmcnt(0), since it expects the L1 cache to keep memory
* operations in order for the same workgroup. The vmcnt(0) seems
* necessary, however. */
nir_scoped_barrier(&b, .execution_scope = NIR_SCOPE_WORKGROUP, .memory_scope = NIR_SCOPE_DEVICE,
nir_scoped_barrier(&b, .execution_scope = SCOPE_WORKGROUP, .memory_scope = SCOPE_DEVICE,
.memory_semantics = NIR_MEMORY_ACQ_REL, .memory_modes = nir_var_mem_ssbo);
nir_image_deref_store(&b, &nir_build_deref_var(&b, output_img)->dest.ssa, img_coord, nir_ssa_undef(&b, 1, 32), data,

View file

@ -147,7 +147,7 @@ build_occlusion_query_shader(struct radv_device *device)
const uint32_t rb_avail_offset = 16 * util_last_bit64(enabled_rb_mask) - 4;
/* Prevent the SSBO load from being moved out of the loop. */
nir_scoped_memory_barrier(&b, NIR_SCOPE_INVOCATION, NIR_MEMORY_ACQUIRE, nir_var_mem_ssbo);
nir_scoped_memory_barrier(&b, SCOPE_INVOCATION, NIR_MEMORY_ACQUIRE, nir_var_mem_ssbo);
nir_ssa_def *load_offset = nir_iadd_imm(&b, input_base, rb_avail_offset);
nir_ssa_def *load = nir_load_ssbo(&b, 1, 32, src_buf, load_offset, .align_mul = 4, .access = ACCESS_COHERENT);

View file

@ -907,14 +907,14 @@ agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
case nir_intrinsic_scoped_barrier: {
bool needs_threadgroup_barrier = false;
if (nir_intrinsic_execution_scope(instr) != NIR_SCOPE_NONE) {
assert(nir_intrinsic_execution_scope(instr) > NIR_SCOPE_SUBGROUP &&
if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
assert(nir_intrinsic_execution_scope(instr) > SCOPE_SUBGROUP &&
"todo: subgroup barriers");
needs_threadgroup_barrier = true;
}
if (nir_intrinsic_memory_scope(instr) != NIR_SCOPE_NONE) {
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE) {
nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
if (modes & nir_var_mem_global)
@ -923,7 +923,7 @@ agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
if (modes & nir_var_mem_shared)
needs_threadgroup_barrier = true;
if (nir_intrinsic_memory_scope(instr) >= NIR_SCOPE_WORKGROUP)
if (nir_intrinsic_memory_scope(instr) >= SCOPE_WORKGROUP)
needs_threadgroup_barrier = true;
}

View file

@ -3511,7 +3511,7 @@ ntq_emit_intrinsic(struct v3d_compile *c, nir_intrinsic_instr *instr)
*/
ntq_flush_tmu(c);
if (nir_intrinsic_execution_scope(instr) != NIR_SCOPE_NONE) {
if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
/* Ensure we flag the use of the control barrier. NIR's
* gather info pass usually takes care of this, but that
* requires that we call that pass after any other pass

View file

@ -1389,23 +1389,23 @@ nir_visitor::visit(ir_call *ir)
*
* https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_gl_spirv.txt
*/
nir_scope scope;
mesa_scope scope;
unsigned modes;
switch (ir->callee->intrinsic_id) {
case ir_intrinsic_memory_barrier:
scope = NIR_SCOPE_DEVICE;
scope = SCOPE_DEVICE;
modes = nir_var_image |
nir_var_mem_ssbo |
nir_var_mem_shared |
nir_var_mem_global;
break;
case ir_intrinsic_memory_barrier_buffer:
scope = NIR_SCOPE_DEVICE;
scope = SCOPE_DEVICE;
modes = nir_var_mem_ssbo |
nir_var_mem_global;
break;
case ir_intrinsic_memory_barrier_image:
scope = NIR_SCOPE_DEVICE;
scope = SCOPE_DEVICE;
modes = nir_var_image;
break;
case ir_intrinsic_memory_barrier_shared:
@ -1413,11 +1413,11 @@ nir_visitor::visit(ir_call *ir)
* follow their lead. Note GL_KHR_vulkan_glsl also does
* something similar.
*/
scope = NIR_SCOPE_DEVICE;
scope = SCOPE_DEVICE;
modes = nir_var_mem_shared;
break;
case ir_intrinsic_group_memory_barrier:
scope = NIR_SCOPE_WORKGROUP;
scope = SCOPE_WORKGROUP;
modes = nir_var_image |
nir_var_mem_ssbo |
nir_var_mem_shared |
@ -1427,7 +1427,7 @@ nir_visitor::visit(ir_call *ir)
/* There's no nir_var_atomic_counter, but since atomic counters are lowered
* to SSBOs, we use nir_var_mem_ssbo instead.
*/
scope = NIR_SCOPE_DEVICE;
scope = SCOPE_DEVICE;
modes = nir_var_mem_ssbo;
break;
default:
@ -1440,7 +1440,7 @@ nir_visitor::visit(ir_call *ir)
}
case nir_intrinsic_shader_clock:
nir_ssa_dest_init(&instr->instr, &instr->dest, 2, 32);
nir_intrinsic_set_memory_scope(instr, NIR_SCOPE_SUBGROUP);
nir_intrinsic_set_memory_scope(instr, SCOPE_SUBGROUP);
nir_builder_instr_insert(&b, &instr->instr);
break;
case nir_intrinsic_begin_invocation_interlock:
@ -2652,10 +2652,10 @@ void
nir_visitor::visit(ir_barrier *)
{
if (shader->info.stage == MESA_SHADER_COMPUTE) {
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
} else if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
NIR_MEMORY_ACQ_REL, nir_var_shader_out);
}
}

View file

@ -1899,16 +1899,6 @@ typedef enum {
NIR_MEMORY_MAKE_VISIBLE = 1 << 3,
} nir_memory_semantics;
typedef enum {
NIR_SCOPE_NONE,
NIR_SCOPE_INVOCATION,
NIR_SCOPE_SUBGROUP,
NIR_SCOPE_SHADER_CALL,
NIR_SCOPE_WORKGROUP,
NIR_SCOPE_QUEUE_FAMILY,
NIR_SCOPE_DEVICE,
} nir_scope;
/**
* \name NIR intrinsics semantic flags
*

View file

@ -1835,11 +1835,11 @@ nir_compare_func(nir_builder *b, enum compare_func func,
static inline void
nir_scoped_memory_barrier(nir_builder *b,
nir_scope scope,
mesa_scope scope,
nir_memory_semantics semantics,
nir_variable_mode modes)
{
nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
nir_scoped_barrier(b, SCOPE_NONE, scope, semantics, modes);
}
nir_ssa_def *
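Editorial note on the helper above: it emits a memory-only barrier by pinning the execution scope to SCOPE_NONE, matching SPIR-V OpMemoryBarrier. A short usage sketch (assuming a valid nir_builder *b), mirroring call sites elsewhere in this commit:

    /* Release prior SSBO stores to the whole device... */
    nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE, nir_var_mem_ssbo);

    /* ...which is exactly the scoped barrier below, with no execution part. */
    nir_scoped_barrier(b, SCOPE_NONE, SCOPE_DEVICE, NIR_MEMORY_RELEASE, nir_var_mem_ssbo);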

View file

@ -765,10 +765,10 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
case nir_intrinsic_scoped_barrier:
shader->info.uses_control_barrier |=
nir_intrinsic_execution_scope(instr) != NIR_SCOPE_NONE;
nir_intrinsic_execution_scope(instr) != SCOPE_NONE;
shader->info.uses_memory_barrier |=
nir_intrinsic_memory_scope(instr) != NIR_SCOPE_NONE;
nir_intrinsic_memory_scope(instr) != SCOPE_NONE;
break;
case nir_intrinsic_store_zs_agx:

View file

@ -246,10 +246,10 @@ index("nir_memory_semantics", "memory_semantics")
index("nir_variable_mode", "memory_modes")
# Scope of a memory operation
index("nir_scope", "memory_scope")
index("mesa_scope", "memory_scope")
# Scope of a control barrier
index("nir_scope", "execution_scope")
index("mesa_scope", "execution_scope")
# Semantics of an IO instruction
index("struct nir_io_semantics", "io_semantics")
@ -366,7 +366,7 @@ barrier("terminate")
# Control/Memory barrier with explicit scope. Follows the semantics of SPIR-V
# OpMemoryBarrier and OpControlBarrier, used to implement Vulkan Memory Model.
# Storage that the barrier applies to is represented using NIR variable modes.
# For an OpMemoryBarrier, set EXECUTION_SCOPE to NIR_SCOPE_NONE.
# For an OpMemoryBarrier, set EXECUTION_SCOPE to SCOPE_NONE.
intrinsic("scoped_barrier",
indices=[EXECUTION_SCOPE, MEMORY_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES])

View file

@ -856,7 +856,7 @@ lower_subgroups_instr(nir_builder *b, nir_instr *instr, void *_options)
break;
case nir_intrinsic_rotate:
if (nir_intrinsic_execution_scope(intrin) == NIR_SCOPE_SUBGROUP) {
if (nir_intrinsic_execution_scope(intrin) == SCOPE_SUBGROUP) {
if (options->lower_rotate_to_shuffle)
return lower_to_shuffle(b, intrin, options);
else if (options->lower_to_scalar && intrin->num_components > 1)

View file

@ -90,8 +90,8 @@ append_launch_mesh_workgroups_to_nv_task(nir_builder *b,
nir_store_shared(b, zero, zero, .base = s->task_count_shared_addr);
nir_scoped_barrier(b,
.execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
.execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_RELEASE,
.memory_modes = nir_var_mem_shared);
@ -101,8 +101,8 @@ append_launch_mesh_workgroups_to_nv_task(nir_builder *b,
b->cursor = nir_after_cf_list(&b->impl->body);
nir_scoped_barrier(b,
.execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
.execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQUIRE,
.memory_modes = nir_var_mem_shared);
@ -233,8 +233,8 @@ emit_shared_to_payload_copy(nir_builder *b,
/* Wait for all previous shared stores to finish.
* This is necessary because we placed the payload in shared memory.
*/
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);

View file

@ -191,7 +191,7 @@ nir_zero_initialize_shared_memory(nir_shader *shader,
}
nir_pop_loop(&b, loop);
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);

View file

@ -1323,7 +1323,7 @@ handle_barrier(struct vectorize_ctx *ctx, bool *progress, nir_function_impl *imp
modes = nir_var_all;
break;
case nir_intrinsic_scoped_barrier:
if (nir_intrinsic_memory_scope(intrin) == NIR_SCOPE_NONE)
if (nir_intrinsic_memory_scope(intrin) == SCOPE_NONE)
break;
modes = nir_intrinsic_memory_modes(intrin) & (nir_var_mem_ssbo |
@ -1333,7 +1333,7 @@ handle_barrier(struct vectorize_ctx *ctx, bool *progress, nir_function_impl *imp
acquire = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE;
release = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE;
switch (nir_intrinsic_memory_scope(intrin)) {
case NIR_SCOPE_INVOCATION:
case SCOPE_INVOCATION:
/* a barrier should never be required for correctness with these scopes */
modes = 0;
break;

View file

@ -1074,17 +1074,17 @@ print_intrinsic_instr(nir_intrinsic_instr *instr, print_state *state)
case NIR_INTRINSIC_EXECUTION_SCOPE:
case NIR_INTRINSIC_MEMORY_SCOPE: {
fprintf(fp, "%s=", nir_intrinsic_index_names[idx]);
nir_scope scope =
mesa_scope scope =
idx == NIR_INTRINSIC_MEMORY_SCOPE ? nir_intrinsic_memory_scope(instr)
: nir_intrinsic_execution_scope(instr);
switch (scope) {
case NIR_SCOPE_NONE: fprintf(fp, "NONE"); break;
case NIR_SCOPE_DEVICE: fprintf(fp, "DEVICE"); break;
case NIR_SCOPE_QUEUE_FAMILY: fprintf(fp, "QUEUE_FAMILY"); break;
case NIR_SCOPE_WORKGROUP: fprintf(fp, "WORKGROUP"); break;
case NIR_SCOPE_SHADER_CALL: fprintf(fp, "SHADER_CALL"); break;
case NIR_SCOPE_SUBGROUP: fprintf(fp, "SUBGROUP"); break;
case NIR_SCOPE_INVOCATION: fprintf(fp, "INVOCATION"); break;
case SCOPE_NONE: fprintf(fp, "NONE"); break;
case SCOPE_DEVICE: fprintf(fp, "DEVICE"); break;
case SCOPE_QUEUE_FAMILY: fprintf(fp, "QUEUE_FAMILY"); break;
case SCOPE_WORKGROUP: fprintf(fp, "WORKGROUP"); break;
case SCOPE_SHADER_CALL: fprintf(fp, "SHADER_CALL"); break;
case SCOPE_SUBGROUP: fprintf(fp, "SUBGROUP"); break;
case SCOPE_INVOCATION: fprintf(fp, "INVOCATION"); break;
}
break;
}

View file

@ -806,7 +806,7 @@ TEST_F(nir_load_store_vectorize_test, ubo_load_adjacent_memory_barrier)
{
create_load(nir_var_mem_ubo, 0, 0, 0x1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_var_mem_ssbo);
create_load(nir_var_mem_ubo, 0, 4, 0x2);
@ -823,7 +823,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_memory_barrier)
{
create_load(nir_var_mem_ssbo, 0, 0, 0x1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_var_mem_ssbo);
create_load(nir_var_mem_ssbo, 0, 4, 0x2);
@ -842,7 +842,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_memory_barrier)
TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_barrier)
{
create_load(nir_var_mem_ssbo, 0, 0, 0x1);
nir_scoped_barrier(b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_NONE,
nir_scoped_barrier(b, SCOPE_WORKGROUP, SCOPE_NONE,
(nir_memory_semantics)0, (nir_variable_mode)0);
create_load(nir_var_mem_ssbo, 0, 4, 0x2);
@ -858,7 +858,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_memory_barrier_shared)
{
create_load(nir_var_mem_ssbo, 0, 0, 0x1);
nir_scoped_memory_barrier(b, NIR_SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
nir_scoped_memory_barrier(b, SCOPE_WORKGROUP, NIR_MEMORY_ACQ_REL,
nir_var_mem_shared);
create_load(nir_var_mem_ssbo, 0, 4, 0x2);

View file

@ -624,7 +624,7 @@ TEST_F(nir_copy_prop_vars_test, memory_barrier_in_two_blocks)
nir_store_var(b, v[2], nir_load_var(b, v[0]), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_var_mem_global);
nir_store_var(b, v[3], nir_load_var(b, v[1]), 1);
@ -644,7 +644,7 @@ TEST_F(nir_redundant_load_vars_test, acquire_barrier_prevents_load_removal)
nir_load_var(b, x[0]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -662,7 +662,7 @@ TEST_F(nir_redundant_load_vars_test, acquire_barrier_prevents_same_mode_load_rem
nir_load_var(b, x[0]);
nir_load_var(b, x[1]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -684,7 +684,7 @@ TEST_F(nir_redundant_load_vars_test, acquire_barrier_allows_different_mode_load_
nir_load_var(b, y[0]);
nir_load_var(b, y[1]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -721,7 +721,7 @@ TEST_F(nir_redundant_load_vars_test, release_barrier_allows_load_removal)
nir_load_var(b, x[0]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -739,7 +739,7 @@ TEST_F(nir_redundant_load_vars_test, release_barrier_allows_same_mode_load_remov
nir_load_var(b, x[0]);
nir_load_var(b, x[1]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -761,7 +761,7 @@ TEST_F(nir_redundant_load_vars_test, release_barrier_allows_different_mode_load_
nir_load_var(b, y[0]);
nir_load_var(b, y[1]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -793,7 +793,7 @@ TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation)
nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -812,7 +812,7 @@ TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_same_mode_propagation)
nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
nir_store_var(b, x[1], nir_imm_int(b, 20), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -835,7 +835,7 @@ TEST_F(nir_copy_prop_vars_test, acquire_barrier_allows_different_mode_propagatio
nir_store_var(b, y[0], nir_imm_int(b, 30), 1);
nir_store_var(b, y[1], nir_imm_int(b, 40), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -875,7 +875,7 @@ TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation)
nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -893,7 +893,7 @@ TEST_F(nir_copy_prop_vars_test, release_barrier_allows_same_mode_propagation)
nir_store_var(b, x[0], nir_imm_int(b, 10), 1);
nir_store_var(b, x[1], nir_imm_int(b, 20), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -916,7 +916,7 @@ TEST_F(nir_copy_prop_vars_test, release_barrier_allows_different_mode_propagatio
nir_store_var(b, y[0], nir_imm_int(b, 30), 1);
nir_store_var(b, y[1], nir_imm_int(b, 40), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_load_var(b, x[0]);
@ -949,7 +949,7 @@ TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation_from_copy)
nir_copy_var(b, x[1], x[0]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_copy_var(b, x[2], x[1]);
@ -975,7 +975,7 @@ TEST_F(nir_copy_prop_vars_test, acquire_barrier_prevents_propagation_from_copy_t
nir_copy_var(b, y[0], x[0]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQUIRE,
nir_var_mem_global);
nir_copy_var(b, x[1], y[0]);
@ -1000,7 +1000,7 @@ TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation_from_copy)
nir_copy_var(b, x[1], x[0]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_copy_var(b, x[2], x[1]);
@ -1026,7 +1026,7 @@ TEST_F(nir_copy_prop_vars_test, release_barrier_allows_propagation_from_copy_to_
nir_copy_var(b, y[0], x[0]);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_RELEASE,
nir_var_mem_global);
nir_copy_var(b, x[1], y[0]);
@ -1784,7 +1784,7 @@ TEST_F(nir_dead_write_vars_test, DISABLED_memory_barrier_in_two_blocks)
/* Because it is before the barrier, this will kill the previous store to that target. */
nir_store_var(b, v[0], nir_imm_int(b, 3), 1);
nir_scoped_memory_barrier(b, NIR_SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_scoped_memory_barrier(b, SCOPE_DEVICE, NIR_MEMORY_ACQ_REL,
nir_var_mem_global);
nir_store_var(b, v[1], nir_imm_int(b, 4), 1);

View file

@ -1376,6 +1376,17 @@ typedef enum
NUM_TEXTURE_TARGETS
} gl_texture_index;
/* Ordered from narrower to wider scope. */
typedef enum {
SCOPE_NONE,
SCOPE_INVOCATION,
SCOPE_SUBGROUP,
SCOPE_SHADER_CALL,
SCOPE_WORKGROUP,
SCOPE_QUEUE_FAMILY,
SCOPE_DEVICE,
} mesa_scope;
#ifdef __cplusplus
} /* extern "C" */
#endif
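The "narrower to wider" ordering above is load-bearing: several backends in this commit compare scopes relationally (exec_scope >= SCOPE_WORKGROUP in ir3, mem_scope > SCOPE_WORKGROUP in the DXIL backend, mem_scope >= SCOPE_QUEUE_FAMILY in nouveau), and mesa_scope keeps the exact member order of the old nir_scope. A hypothetical compile-time check of that contract, not part of the commit (the header name is assumed from the gl_texture_index context):

    #include "shader_enums.h"

    /* Sketch: document the ordering that relational scope comparisons rely on. */
    _Static_assert(SCOPE_NONE < SCOPE_INVOCATION &&
                   SCOPE_INVOCATION < SCOPE_SUBGROUP &&
                   SCOPE_SUBGROUP < SCOPE_SHADER_CALL &&
                   SCOPE_SHADER_CALL < SCOPE_WORKGROUP &&
                   SCOPE_WORKGROUP < SCOPE_QUEUE_FAMILY &&
                   SCOPE_QUEUE_FAMILY < SCOPE_DEVICE,
                   "mesa_scope must stay ordered from narrower to wider");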

View file

@ -2579,7 +2579,7 @@ vtn_mem_semantics_to_nir_var_modes(struct vtn_builder *b,
return modes;
}
nir_scope
mesa_scope
vtn_translate_scope(struct vtn_builder *b, SpvScope scope)
{
switch (scope) {
@ -2589,25 +2589,25 @@ vtn_translate_scope(struct vtn_builder *b, SpvScope scope)
"If the Vulkan memory model is declared and any instruction "
"uses Device scope, the VulkanMemoryModelDeviceScope "
"capability must be declared.");
return NIR_SCOPE_DEVICE;
return SCOPE_DEVICE;
case SpvScopeQueueFamily:
vtn_fail_if(!b->options->caps.vk_memory_model,
"To use Queue Family scope, the VulkanMemoryModel capability "
"must be declared.");
return NIR_SCOPE_QUEUE_FAMILY;
return SCOPE_QUEUE_FAMILY;
case SpvScopeWorkgroup:
return NIR_SCOPE_WORKGROUP;
return SCOPE_WORKGROUP;
case SpvScopeSubgroup:
return NIR_SCOPE_SUBGROUP;
return SCOPE_SUBGROUP;
case SpvScopeInvocation:
return NIR_SCOPE_INVOCATION;
return SCOPE_INVOCATION;
case SpvScopeShaderCallKHR:
return NIR_SCOPE_SHADER_CALL;
return SCOPE_SHADER_CALL;
default:
vtn_fail("Invalid memory scope");
@ -2622,12 +2622,12 @@ vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
nir_memory_semantics nir_semantics =
vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
nir_variable_mode modes = vtn_mem_semantics_to_nir_var_modes(b, semantics);
nir_scope nir_exec_scope = vtn_translate_scope(b, exec_scope);
mesa_scope nir_exec_scope = vtn_translate_scope(b, exec_scope);
/* Memory semantics is optional for OpControlBarrier. */
nir_scope nir_mem_scope;
mesa_scope nir_mem_scope;
if (nir_semantics == 0 || modes == 0)
nir_mem_scope = NIR_SCOPE_NONE;
nir_mem_scope = SCOPE_NONE;
else
nir_mem_scope = vtn_translate_scope(b, mem_scope);

View file

@ -84,8 +84,8 @@ TEST_F(AvailabilityVisibility, opload_vis)
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
EXPECT_NE(nir_intrinsic_memory_modes(intrinsic) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), NIR_SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), SCOPE_NONE);
}
TEST_F(AvailabilityVisibility, opstore_avail)
@ -148,8 +148,8 @@ TEST_F(AvailabilityVisibility, opstore_avail)
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
EXPECT_NE(nir_intrinsic_memory_modes(intrinsic) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), NIR_SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), SCOPE_NONE);
}
TEST_F(AvailabilityVisibility, opcopymemory_visavail_both_combined)
@ -214,13 +214,13 @@ TEST_F(AvailabilityVisibility, opcopymemory_visavail_both_combined)
EXPECT_EQ(nir_intrinsic_memory_semantics(first), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
EXPECT_NE(nir_intrinsic_memory_modes(first) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(first), NIR_SCOPE_WORKGROUP);
EXPECT_EQ(nir_intrinsic_execution_scope(first), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(first), SCOPE_WORKGROUP);
EXPECT_EQ(nir_intrinsic_execution_scope(first), SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_semantics(second), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
EXPECT_NE(nir_intrinsic_memory_modes(second) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(second), NIR_SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(first), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(second), SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(first), SCOPE_NONE);
}
TEST_F(AvailabilityVisibility, opcopymemory_visavail_both_separate)
@ -286,13 +286,13 @@ TEST_F(AvailabilityVisibility, opcopymemory_visavail_both_separate)
EXPECT_EQ(nir_intrinsic_memory_semantics(first), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
EXPECT_NE(nir_intrinsic_memory_modes(first) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(first), NIR_SCOPE_WORKGROUP);
EXPECT_EQ(nir_intrinsic_execution_scope(first), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(first), SCOPE_WORKGROUP);
EXPECT_EQ(nir_intrinsic_execution_scope(first), SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_semantics(second), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
EXPECT_NE(nir_intrinsic_memory_modes(second) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(second), NIR_SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(second), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(second), SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(second), SCOPE_NONE);
}
TEST_F(AvailabilityVisibility, opcopymemory_avail)
@ -354,8 +354,8 @@ TEST_F(AvailabilityVisibility, opcopymemory_avail)
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_AVAILABLE | NIR_MEMORY_RELEASE);
EXPECT_NE(nir_intrinsic_memory_modes(intrinsic) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), NIR_SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), SCOPE_DEVICE);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), SCOPE_NONE);
}
TEST_F(AvailabilityVisibility, opcopymemory_vis)
@ -417,6 +417,6 @@ TEST_F(AvailabilityVisibility, opcopymemory_vis)
EXPECT_EQ(nir_intrinsic_memory_semantics(intrinsic), NIR_MEMORY_MAKE_VISIBLE | NIR_MEMORY_ACQUIRE);
EXPECT_NE(nir_intrinsic_memory_modes(intrinsic) & nir_var_mem_ssbo, 0);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), NIR_SCOPE_WORKGROUP);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), NIR_SCOPE_NONE);
EXPECT_EQ(nir_intrinsic_memory_scope(intrinsic), SCOPE_WORKGROUP);
EXPECT_EQ(nir_intrinsic_execution_scope(intrinsic), SCOPE_NONE);
}

View file

@ -43,7 +43,7 @@ vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
break;
}
case TimeAMD: {
def = nir_pack_64_2x32(&b->nb, nir_shader_clock(&b->nb, NIR_SCOPE_SUBGROUP));
def = nir_pack_64_2x32(&b->nb, nir_shader_clock(&b->nb, SCOPE_SUBGROUP));
break;
}
default:

View file

@ -598,8 +598,8 @@ handle_core(struct vtn_builder *b, uint32_t opcode,
* The libclc we have uses a __local pointer but clang gives us generic
* pointers. Fortunately, the whole function is just a barrier.
*/
nir_scoped_barrier(&b->nb, .execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(&b->nb, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQUIRE |
NIR_MEMORY_RELEASE,
.memory_modes = nir_var_mem_shared |

View file

@ -531,7 +531,7 @@ const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
enum vtn_variable_mode mode);
nir_scope
mesa_scope
vtn_translate_scope(struct vtn_builder *b, SpvScope scope);
struct vtn_image_pointer {

View file

@ -341,7 +341,7 @@ vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
}
case SpvOpGroupNonUniformRotateKHR: {
const nir_scope scope = vtn_translate_scope(b, vtn_constant_uint(b, w[3]));
const mesa_scope scope = vtn_translate_scope(b, vtn_constant_uint(b, w[3]));
const uint32_t cluster_size = count > 6 ? vtn_constant_uint(b, w[6]) : 0;
vtn_fail_if(cluster_size && !IS_POT(cluster_size),
"Behavior is undefined unless ClusterSize is at least 1 and a power of 2.");

View file

@ -1591,7 +1591,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
* between a5xx and a6xx,
*/
nir_scope exec_scope = nir_intrinsic_execution_scope(intr);
mesa_scope exec_scope = nir_intrinsic_execution_scope(intr);
nir_variable_mode modes = nir_intrinsic_memory_modes(intr);
/* loads/stores are always cache-coherent so we can filter out
* available/visible.
@ -1659,7 +1659,7 @@ emit_intrinsic_barrier(struct ir3_context *ctx, nir_intrinsic_instr *intr)
array_insert(b, b->keeps, barrier);
}
if (exec_scope >= NIR_SCOPE_WORKGROUP) {
if (exec_scope >= SCOPE_WORKGROUP) {
emit_control_barrier(ctx);
}
}

View file

@ -1872,14 +1872,14 @@ visit_barrier(struct lp_build_nir_context *bld_base,
nir_intrinsic_instr *instr)
{
LLVMBuilderRef builder = bld_base->base.gallivm->builder;
nir_scope exec_scope = nir_intrinsic_execution_scope(instr);
mesa_scope exec_scope = nir_intrinsic_execution_scope(instr);
unsigned nir_semantics = nir_intrinsic_memory_semantics(instr);
if (nir_semantics) {
LLVMAtomicOrdering ordering = LLVMAtomicOrderingSequentiallyConsistent;
LLVMBuildFence(builder, ordering, false, "");
}
if (exec_scope != NIR_SCOPE_NONE)
if (exec_scope != SCOPE_NONE)
bld_base->barrier(bld_base);
}

View file

@ -2345,7 +2345,7 @@ ntt_emit_barrier(struct ntt_compile *c, nir_intrinsic_instr *intr)
{
bool compute = gl_shader_stage_is_compute(c->s->info.stage);
if (nir_intrinsic_memory_scope(intr) != NIR_SCOPE_NONE) {
if (nir_intrinsic_memory_scope(intr) != SCOPE_NONE) {
nir_variable_mode modes = nir_intrinsic_memory_modes(intr);
unsigned membar = 0;
@ -2368,7 +2368,7 @@ ntt_emit_barrier(struct ntt_compile *c, nir_intrinsic_instr *intr)
* optimize a bit.
*/
if (membar && compute &&
nir_intrinsic_memory_scope(intr) == NIR_SCOPE_WORKGROUP) {
nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP) {
membar |= TGSI_MEMBAR_THREAD_GROUP;
}
@ -2378,7 +2378,7 @@ ntt_emit_barrier(struct ntt_compile *c, nir_intrinsic_instr *intr)
ntt_MEMBAR(c, ureg_imm1u(c->ureg, membar));
}
if (nir_intrinsic_execution_scope(intr) != NIR_SCOPE_NONE) {
if (nir_intrinsic_execution_scope(intr) != SCOPE_NONE) {
assert(compute || c->s->info.stage == MESA_SHADER_TESS_CTRL);
ntt_BARRIER(c);
}

View file

@ -1127,7 +1127,7 @@ ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
static void
ttn_barrier(nir_builder *b)
{
nir_scoped_barrier(b, .execution_scope = NIR_SCOPE_WORKGROUP);
nir_scoped_barrier(b, .execution_scope = SCOPE_WORKGROUP);
}
static void

View file

@ -680,7 +680,7 @@ Shader::scan_instruction(nir_instr *instr)
m_chain_instr.prepare_mem_barrier |=
(nir_intrinsic_memory_modes(intr) &
(nir_var_mem_ssbo | nir_var_mem_global | nir_var_image) &&
nir_intrinsic_memory_scope(intr) != NIR_SCOPE_NONE);
nir_intrinsic_memory_scope(intr) != SCOPE_NONE);
break;
default:;
}
@ -1333,7 +1333,7 @@ Shader::emit_group_barrier(nir_intrinsic_instr *intr)
bool Shader::emit_scoped_barrier(nir_intrinsic_instr *intr)
{
if ((nir_intrinsic_execution_scope(intr) == NIR_SCOPE_WORKGROUP)) {
if ((nir_intrinsic_execution_scope(intr) == SCOPE_WORKGROUP)) {
if (!emit_group_barrier(intr))
return false;
}
@ -1347,7 +1347,7 @@ bool Shader::emit_scoped_barrier(nir_intrinsic_instr *intr)
* shader instance). */
auto full_barrier_mem_modes = nir_var_mem_ssbo | nir_var_image | nir_var_mem_global;
if ((nir_intrinsic_memory_scope(intr) != NIR_SCOPE_NONE) &&
if ((nir_intrinsic_memory_scope(intr) != SCOPE_NONE) &&
(nir_intrinsic_memory_modes(intr) & full_barrier_mem_modes)) {
return emit_wait_ack();
}

View file

@ -70,7 +70,7 @@ static void scan_tess_ctrl(nir_cf_node *cf_node, unsigned *upper_block_tf_writem
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic == nir_intrinsic_scoped_barrier &&
nir_intrinsic_execution_scope(intrin) >= NIR_SCOPE_WORKGROUP) {
nir_intrinsic_execution_scope(intrin) >= SCOPE_WORKGROUP) {
/* If we find a barrier in nested control flow, put this in the
* too-hard basket. In GLSL this is not possible but it is in

View file

@ -307,16 +307,16 @@ find_image_type(struct ntv_context *ctx, nir_variable *var)
}
static SpvScope
get_scope(nir_scope scope)
get_scope(mesa_scope scope)
{
SpvScope conv[] = {
[NIR_SCOPE_NONE] = 0,
[NIR_SCOPE_INVOCATION] = SpvScopeInvocation,
[NIR_SCOPE_SUBGROUP] = SpvScopeSubgroup,
[NIR_SCOPE_SHADER_CALL] = SpvScopeShaderCallKHR,
[NIR_SCOPE_WORKGROUP] = SpvScopeWorkgroup,
[NIR_SCOPE_QUEUE_FAMILY] = SpvScopeQueueFamily,
[NIR_SCOPE_DEVICE] = SpvScopeDevice,
[SCOPE_NONE] = 0,
[SCOPE_INVOCATION] = SpvScopeInvocation,
[SCOPE_SUBGROUP] = SpvScopeSubgroup,
[SCOPE_SHADER_CALL] = SpvScopeShaderCallKHR,
[SCOPE_WORKGROUP] = SpvScopeWorkgroup,
[SCOPE_QUEUE_FAMILY] = SpvScopeQueueFamily,
[SCOPE_DEVICE] = SpvScopeDevice,
};
return conv[scope];
}
@ -3536,7 +3536,7 @@ emit_barrier(struct ntv_context *ctx, nir_intrinsic_instr *intr)
SpvScope mem_scope = get_scope(nir_intrinsic_memory_scope(intr));
SpvMemorySemanticsMask semantics = 0;
if (nir_intrinsic_memory_scope(intr) != NIR_SCOPE_NONE) {
if (nir_intrinsic_memory_scope(intr) != SCOPE_NONE) {
nir_variable_mode modes = nir_intrinsic_memory_modes(intr);
if (modes & nir_var_image)
@ -3557,7 +3557,7 @@ emit_barrier(struct ntv_context *ctx, nir_intrinsic_instr *intr)
semantics |= SpvMemorySemanticsAcquireReleaseMask;
}
if (nir_intrinsic_execution_scope(intr) != NIR_SCOPE_NONE)
if (nir_intrinsic_execution_scope(intr) != SCOPE_NONE)
spirv_builder_emit_control_barrier(&ctx->builder, scope, mem_scope, semantics);
else
spirv_builder_emit_memory_barrier(&ctx->builder, mem_scope, semantics);

View file

@ -228,9 +228,9 @@ remove_scoped_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
if (intr->intrinsic != nir_intrinsic_scoped_barrier)
return false;
if (data) {
if (nir_intrinsic_memory_scope(intr) == NIR_SCOPE_WORKGROUP ||
nir_intrinsic_memory_scope(intr) == NIR_SCOPE_DEVICE ||
nir_intrinsic_memory_scope(intr) == NIR_SCOPE_QUEUE_FAMILY)
if (nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP ||
nir_intrinsic_memory_scope(intr) == SCOPE_DEVICE ||
nir_intrinsic_memory_scope(intr) == SCOPE_QUEUE_FAMILY)
return false;
}
nir_instr_remove(instr);

View file

@ -2756,9 +2756,9 @@ fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
break;
case nir_intrinsic_scoped_barrier:
if (nir_intrinsic_memory_scope(instr) != NIR_SCOPE_NONE)
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
nir_emit_intrinsic(bld, instr);
if (nir_intrinsic_execution_scope(instr) == NIR_SCOPE_WORKGROUP) {
if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
if (tcs_prog_data->instances != 1)
emit_tcs_barrier();
}
@ -3684,9 +3684,9 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
switch (instr->intrinsic) {
case nir_intrinsic_scoped_barrier:
if (nir_intrinsic_memory_scope(instr) != NIR_SCOPE_NONE)
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
nir_emit_intrinsic(bld, instr);
if (nir_intrinsic_execution_scope(instr) == NIR_SCOPE_WORKGROUP) {
if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
/* The whole workgroup fits in a single HW thread, so all the
* invocations are already executed lock-step. Instead of an actual
* barrier just emit a scheduling fence, that will generate no code.
@ -4297,19 +4297,19 @@ lsc_fence_descriptor_for_intrinsic(const struct intel_device_info *devinfo,
if (nir_intrinsic_has_memory_scope(instr)) {
switch (nir_intrinsic_memory_scope(instr)) {
case NIR_SCOPE_DEVICE:
case NIR_SCOPE_QUEUE_FAMILY:
case SCOPE_DEVICE:
case SCOPE_QUEUE_FAMILY:
scope = LSC_FENCE_TILE;
flush_type = LSC_FLUSH_TYPE_EVICT;
break;
case NIR_SCOPE_WORKGROUP:
case SCOPE_WORKGROUP:
scope = LSC_FENCE_THREADGROUP;
flush_type = LSC_FLUSH_TYPE_EVICT;
break;
case NIR_SCOPE_SHADER_CALL:
case NIR_SCOPE_INVOCATION:
case NIR_SCOPE_SUBGROUP:
case NIR_SCOPE_NONE:
case SCOPE_SHADER_CALL:
case SCOPE_INVOCATION:
case SCOPE_SUBGROUP:
case SCOPE_NONE:
break;
}
} else {
@ -4523,7 +4523,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
slm_fence = modes & nir_var_mem_shared;
tgm_fence = modes & nir_var_image;
urb_fence = modes & (nir_var_shader_out | nir_var_mem_task_payload);
if (nir_intrinsic_memory_scope(instr) != NIR_SCOPE_NONE)
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
opcode = SHADER_OPCODE_MEMORY_FENCE;
break;
}

View file

@ -704,7 +704,7 @@ brw_nir_initialize_mue(nir_shader *nir,
* may start filling MUE before others have finished initializing.
*/
if (workgroup_size > dispatch_width) {
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
nir_scoped_barrier(&b, SCOPE_WORKGROUP, SCOPE_WORKGROUP,
NIR_MEMORY_ACQ_REL, nir_var_shader_out);
}

View file

@ -1356,8 +1356,8 @@ bool combine_all_memory_barriers(nir_intrinsic_instr *a,
void *data)
{
/* Only combine pure memory barriers */
if ((nir_intrinsic_execution_scope(a) != NIR_SCOPE_NONE) ||
(nir_intrinsic_execution_scope(b) != NIR_SCOPE_NONE))
if ((nir_intrinsic_execution_scope(a) != SCOPE_NONE) ||
(nir_intrinsic_execution_scope(b) != SCOPE_NONE))
return false;
/* Translation to backend IR will get rid of modes we don't care about, so

View file

@ -707,7 +707,7 @@ vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
}
case nir_intrinsic_scoped_barrier: {
if (nir_intrinsic_memory_scope(instr) == NIR_SCOPE_NONE)
if (nir_intrinsic_memory_scope(instr) == SCOPE_NONE)
break;
const vec4_builder bld =
vec4_builder(this).at_end().annotate(current_annotation, base_ir);

View file

@ -305,9 +305,9 @@ vec4_tcs_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
}
case nir_intrinsic_scoped_barrier:
if (nir_intrinsic_memory_scope(instr) != NIR_SCOPE_NONE)
if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE)
vec4_visitor::nir_emit_intrinsic(instr);
if (nir_intrinsic_execution_scope(instr) == NIR_SCOPE_WORKGROUP) {
if (nir_intrinsic_execution_scope(instr) == SCOPE_WORKGROUP) {
dst_reg header = dst_reg(this, glsl_type::uvec4_type);
emit(TCS_OPCODE_CREATE_BARRIER_HEADER, header);
emit(SHADER_OPCODE_BARRIER, dst_null_ud(), src_reg(header));

View file

@ -158,7 +158,7 @@ anv_mesh_convert_attrs_prim_to_vert(struct nir_shader *nir,
b.cursor = nir_after_cf_list(&impl->body);
/* wait for all subgroups to finish */
nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP);
nir_scoped_barrier(&b, SCOPE_WORKGROUP);
nir_ssa_def *zero = nir_imm_int(&b, 0);

View file

@ -1960,8 +1960,8 @@ lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data)
nir_pop_if(b, nif);
nir_scoped_barrier(b,
.execution_scope = NIR_SCOPE_WORKGROUP,
.memory_scope = NIR_SCOPE_WORKGROUP,
.execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);

View file

@ -33,7 +33,7 @@ is_memory_barrier_tcs_patch(const nir_intrinsic_instr *intr)
if (intr->intrinsic == nir_intrinsic_scoped_barrier &&
nir_intrinsic_memory_modes(intr) & nir_var_shader_out) {
assert(nir_intrinsic_memory_modes(intr) == nir_var_shader_out);
assert(nir_intrinsic_memory_scope(intr) == NIR_SCOPE_WORKGROUP);
assert(nir_intrinsic_memory_scope(intr) == SCOPE_WORKGROUP);
return true;
} else {
return false;

View file

@ -3079,19 +3079,19 @@ load_ubo(struct ntd_context *ctx, const struct dxil_value *handle,
}
static bool
emit_barrier_impl(struct ntd_context *ctx, nir_variable_mode modes, nir_scope execution_scope, nir_scope mem_scope)
emit_barrier_impl(struct ntd_context *ctx, nir_variable_mode modes, mesa_scope execution_scope, mesa_scope mem_scope)
{
const struct dxil_value *opcode, *mode;
const struct dxil_func *func;
uint32_t flags = 0;
if (execution_scope == NIR_SCOPE_WORKGROUP)
if (execution_scope == SCOPE_WORKGROUP)
flags |= DXIL_BARRIER_MODE_SYNC_THREAD_GROUP;
bool is_compute = ctx->mod.shader_kind == DXIL_COMPUTE_SHADER;
if (modes & (nir_var_mem_ssbo | nir_var_mem_global | nir_var_image)) {
if (mem_scope > NIR_SCOPE_WORKGROUP || !is_compute)
if (mem_scope > SCOPE_WORKGROUP || !is_compute)
flags |= DXIL_BARRIER_MODE_UAV_FENCE_GLOBAL;
else
flags |= DXIL_BARRIER_MODE_UAV_FENCE_THREAD_GROUP;

View file

@ -2257,25 +2257,25 @@ Converter::visit(nir_intrinsic_instr *insn)
break;
}
case nir_intrinsic_scoped_barrier: {
nir_scope exec_scope = nir_intrinsic_execution_scope(insn);
nir_scope mem_scope = nir_intrinsic_memory_scope(insn);
mesa_scope exec_scope = nir_intrinsic_execution_scope(insn);
mesa_scope mem_scope = nir_intrinsic_memory_scope(insn);
nir_variable_mode modes = nir_intrinsic_memory_modes(insn);
nir_variable_mode valid_modes =
nir_var_mem_global | nir_var_image | nir_var_mem_ssbo | nir_var_mem_shared;
if (mem_scope != NIR_SCOPE_NONE && (modes & valid_modes)) {
if (mem_scope != SCOPE_NONE && (modes & valid_modes)) {
Instruction *bar = mkOp(OP_MEMBAR, TYPE_NONE, NULL);
bar->fixed = 1;
if (mem_scope >= NIR_SCOPE_QUEUE_FAMILY)
if (mem_scope >= SCOPE_QUEUE_FAMILY)
bar->subOp = NV50_IR_SUBOP_MEMBAR(M, GL);
else
bar->subOp = NV50_IR_SUBOP_MEMBAR(M, CTA);
}
if (exec_scope != NIR_SCOPE_NONE &&
!(exec_scope == NIR_SCOPE_WORKGROUP && nir->info.stage == MESA_SHADER_TESS_CTRL)) {
if (exec_scope != SCOPE_NONE &&
!(exec_scope == SCOPE_WORKGROUP && nir->info.stage == MESA_SHADER_TESS_CTRL)) {
Instruction *bar = mkOp2(OP_BAR, TYPE_U32, NULL, mkImm(0), mkImm(0));
bar->fixed = 1;
bar->subOp = NV50_IR_SUBOP_BAR_SYNC;

View file

@ -1578,9 +1578,9 @@ bi_emit_intrinsic(bi_builder *b, nir_intrinsic_instr *instr)
break;
case nir_intrinsic_scoped_barrier:
if (nir_intrinsic_execution_scope(instr) != NIR_SCOPE_NONE) {
if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
assert(b->shader->stage != MESA_SHADER_FRAGMENT);
assert(nir_intrinsic_execution_scope(instr) > NIR_SCOPE_SUBGROUP &&
assert(nir_intrinsic_execution_scope(instr) > SCOPE_SUBGROUP &&
"todo: subgroup barriers (different divergence rules)");
bi_barrier(b);
}

View file

@ -2044,11 +2044,11 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
break;
case nir_intrinsic_scoped_barrier:
if (nir_intrinsic_execution_scope(instr) != NIR_SCOPE_NONE) {
if (nir_intrinsic_execution_scope(instr) != SCOPE_NONE) {
schedule_barrier(ctx);
emit_control_barrier(ctx);
schedule_barrier(ctx);
} else if (nir_intrinsic_memory_scope(instr) != NIR_SCOPE_NONE) {
} else if (nir_intrinsic_memory_scope(instr) != SCOPE_NONE) {
/* Midgard doesn't seem to want special handling, though we do need to
* take care when scheduling to avoid incorrect reordering.
*