nir: rename nir_lower_indirect_derefs -> nir_lower_indirect_derefs_to_if_else_trees

This better describes what the pass does.

Reviewed-by: Timothy Arceri <tarceri@itsqueeze.com>
Acked-by: Iago Toral Quiroga <itoral@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38471>
This commit is contained in:
Marek Olšák 2025-11-16 11:55:21 -05:00 committed by Marge Bot
parent 22871fb8bd
commit 9e339f4b32
30 changed files with 73 additions and 55 deletions

View file

@ -228,7 +228,8 @@ ac_nir_lower_indirect_derefs(nir_shader *shader,
glsl_get_natural_size_align_bytes, glsl_get_natural_size_align_bytes);
/* This lowers indirect indexing to if-else ladders. */
NIR_PASS(progress, shader, nir_lower_indirect_derefs, nir_var_function_temp, UINT32_MAX);
NIR_PASS(progress, shader, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, UINT32_MAX);
return progress;
}

View file

@ -127,7 +127,7 @@ radv_nir_lower_rt_vars(nir_shader *shader, nir_variable_mode mode, uint32_t base
{
bool progress = false;
progress |= nir_lower_indirect_derefs(shader, mode, UINT32_MAX);
progress |= nir_lower_indirect_derefs_to_if_else_trees(shader, mode, UINT32_MAX);
progress |= nir_lower_vars_to_explicit_types(shader, mode, glsl_get_natural_size_align_bytes);

View file

@ -3689,7 +3689,8 @@ agx_preprocess_nir(nir_shader *nir)
/* Lower large arrays to scratch and small arrays to csel */
NIR_PASS(_, nir, nir_lower_vars_to_scratch, nir_var_function_temp, 256,
glsl_get_natural_size_align_bytes, glsl_get_word_size_align_bytes);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, ~0);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, ~0);
NIR_PASS(_, nir, nir_split_var_copies);
NIR_PASS(_, nir, nir_lower_global_vars_to_local);
NIR_PASS(_, nir, nir_lower_var_copies);
@ -3805,7 +3806,8 @@ agx_compile_shader_nir(nir_shader *nir, struct agx_shader_key *key,
NIR_PASS(_, nir, nir_lower_vars_to_scratch, nir_var_function_temp, 256,
glsl_get_natural_size_align_bytes,
glsl_get_natural_size_align_bytes);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, ~0);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, ~0);
}
/* Cleanup 8-bit math before lowering */

View file

@ -907,8 +907,8 @@ hk_lower_nir(struct hk_device *dev, nir_shader *nir,
else if (nir->info.stage == MESA_SHADER_VERTEX)
lower_indirect_modes |= nir_var_shader_in | nir_var_shader_out;
NIR_PASS(_, nir, nir_lower_indirect_derefs, lower_indirect_modes,
UINT32_MAX);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
lower_indirect_modes, UINT32_MAX);
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
glsl_type_size,

View file

@ -347,9 +347,10 @@ preprocess_nir(nir_shader *nir)
/* Lower a bunch of stuff */
NIR_PASS(_, nir, nir_lower_var_copies);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_shader_in, UINT32_MAX);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in, UINT32_MAX);
NIR_PASS(_, nir, nir_lower_indirect_derefs,
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, 2);
NIR_PASS(_, nir, nir_lower_array_deref_of_vec,

View file

@ -184,7 +184,7 @@ else
'nir_lower_idiv.c',
'nir_lower_image.c',
'nir_lower_image_atomics_to_global.c',
'nir_lower_indirect_derefs.c',
'nir_lower_indirect_derefs_to_if_else_trees.c',
'nir_lower_input_attachments.c',
'nir_lower_int64.c',
'nir_lower_interpolation.c',

View file

@ -5161,11 +5161,12 @@ bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
bool (*filter)(nir_variable *),
nir_lower_array_deref_of_vec_options options);
bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes,
uint32_t max_lower_array_len);
bool nir_lower_indirect_derefs_to_if_else_trees(nir_shader *shader,
nir_variable_mode modes,
uint32_t max_lower_array_len);
bool nir_lower_indirect_var_derefs(nir_shader *shader,
const struct set *vars);
bool nir_lower_indirect_var_derefs_to_if_else_trees(nir_shader *shader,
const struct set *vars);
bool nir_lower_locals_to_regs(nir_shader *shader, uint8_t bool_bitsize);

View file

@ -228,8 +228,9 @@ lower_indirects_impl(nir_function_impl *impl, nir_variable_mode modes,
* that does a binary search on the array index.
*/
bool
nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes,
uint32_t max_lower_array_len)
nir_lower_indirect_derefs_to_if_else_trees(nir_shader *shader,
nir_variable_mode modes,
uint32_t max_lower_array_len)
{
bool progress = false;
@ -243,7 +244,8 @@ nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes,
/** Lowers indirects on any variables in the given set */
bool
nir_lower_indirect_var_derefs(nir_shader *shader, const struct set *vars)
nir_lower_indirect_var_derefs_to_if_else_trees(nir_shader *shader,
const struct set *vars)
{
bool progress = false;

View file

@ -1261,8 +1261,8 @@ nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
* The problem is that nir_lower_io_vars_to_temporaries doesn't handle TCS.
*/
if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_shader_out,
UINT32_MAX);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_out, UINT32_MAX);
}
}

View file

@ -557,9 +557,9 @@ wrapper_unroll(nir_loop *loop)
* statements that are converted to a loop to take advantage of
* exiting jump instruction handling. In this case we could make
* use of a binary search pattern like we do in
* nir_lower_indirect_derefs(), this should allow us to unroll the
* loops in an optimal way and should also avoid some of the
* register pressure that comes from simply nesting the
* nir_lower_indirect_derefs_to_if_else_trees(), this should allow us
* to unroll the loops in an optimal way and should also avoid some of
* the register pressure that comes from simply nesting the
* terminators one after the other.
*/
if (list_length(&loop->info->loop_terminator_list) > 3)

View file

@ -237,7 +237,8 @@ compile(void *memctx, const uint32_t *spirv, size_t spirv_size)
bool scratch_lowered = false;
NIR_PASS(scratch_lowered, nir, nir_lower_scratch_to_var);
if (scratch_lowered) {
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, ~0);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, ~0);
}
/* Prune derefs/variables late, since scratch lowering leaves dead

View file

@ -514,14 +514,14 @@ ir3_nir_lower_io_vars_to_temporaries(nir_shader *s)
/* Regardless of the above, we need to lower indirect references to
* compact variables such as clip/cull distances because due to how
* TCS<->TES IO works we cannot handle indirect accesses that "straddle"
* vec4 components. nir_lower_indirect_derefs has a special case for
* compact variables, so it will actually lower them even though we pass
* in 0 modes.
* vec4 components. nir_lower_indirect_derefs_to_if_else_trees has a special
* case for compact variables, so it will actually lower them even though we
* pass in 0 modes.
*
* Using temporaries would be slightly better but
* nir_lower_io_vars_to_temporaries currently doesn't support TCS i/o.
*/
NIR_PASS(_, s, nir_lower_indirect_derefs, 0, UINT32_MAX);
NIR_PASS(_, s, nir_lower_indirect_derefs_to_if_else_trees, 0, UINT32_MAX);
}
/**

View file

@ -3889,7 +3889,8 @@ const void *nir_to_tgsi_options(struct nir_shader *s,
* having matching declarations.
*/
if (s->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS(_, s, nir_lower_indirect_derefs, nir_var_shader_in, UINT32_MAX);
NIR_PASS(_, s, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in, UINT32_MAX);
NIR_PASS(_, s, nir_remove_dead_variables, nir_var_shader_in, NULL);
}
@ -3899,7 +3900,7 @@ const void *nir_to_tgsi_options(struct nir_shader *s,
*/
if (s->info.stage == MESA_SHADER_TESS_CTRL ||
s->info.stage == MESA_SHADER_TESS_EVAL) {
NIR_PASS(_, s, nir_lower_indirect_derefs, 0 , UINT32_MAX);
NIR_PASS(_, s, nir_lower_indirect_derefs_to_if_else_trees, 0, UINT32_MAX);
}
NIR_PASS(_, s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
@ -3936,7 +3937,8 @@ const void *nir_to_tgsi_options(struct nir_shader *s,
ntt_optimize_nir(s, screen, options);
NIR_PASS(_, s, nir_lower_indirect_derefs, no_indirects_mask, UINT32_MAX);
NIR_PASS(_, s, nir_lower_indirect_derefs_to_if_else_trees,
no_indirects_mask, UINT32_MAX);
/* Lower demote_if to if (cond) { demote } because TGSI doesn't have a DEMOTE_IF. */
NIR_PASS(_, s, nir_lower_discard_if, nir_lower_demote_if_to_cf);

View file

@ -170,7 +170,8 @@ crocus_init_shader_caps(struct crocus_screen *screen)
/* Lie about these to avoid st/mesa's GLSL IR lowering of indirects,
* which we don't want. Our compiler backend will check elk_compiler's
* options and call nir_lower_indirect_derefs appropriately anyway.
* options and call nir_lower_indirect_derefs_to_if_else_trees
* appropriately anyway.
*/
caps->indirect_temp_addr = true;
caps->indirect_const_addr = true;

View file

@ -1273,7 +1273,8 @@ etna_compile_shader(struct etna_shader_variant *v)
(nir_lower_io_options)0);
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(_, s, nir_lower_indirect_derefs, nir_var_all, UINT32_MAX);
NIR_PASS(_, s, nir_lower_indirect_derefs_to_if_else_trees, nir_var_all,
UINT32_MAX);
NIR_PASS(_, s, etna_nir_lower_texture, &v->key, v->shader->info);
NIR_PASS(_, s, nir_lower_alu_width, alu_width_cb, NULL);

View file

@ -110,8 +110,8 @@ ir2_optimize_nir(nir_shader *s, bool lower)
}
OPT_V(s, nir_lower_vars_to_ssa);
OPT_V(s, nir_lower_indirect_derefs, nir_var_shader_in | nir_var_shader_out,
UINT32_MAX);
OPT_V(s, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in | nir_var_shader_out, UINT32_MAX);
if (lower) {
OPT_V(s, ir3_nir_apply_trig_workarounds);

View file

@ -215,7 +215,8 @@ iris_init_shader_caps(struct iris_screen *screen)
/* Lie about these to avoid st/mesa's GLSL IR lowering of indirects,
* which we don't want. Our compiler backend will check brw_compiler's
* options and call nir_lower_indirect_derefs appropriately anyway.
* options and call nir_lower_indirect_derefs_to_if_else_trees
* appropriately anyway.
*/
caps->indirect_temp_addr = true;
caps->indirect_const_addr = true;

View file

@ -1991,7 +1991,8 @@ nir_to_rc(struct nir_shader *s, struct pipe_screen *screen,
* having matching declarations.
*/
if (s->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS(_, s, nir_lower_indirect_derefs, nir_var_shader_in, UINT32_MAX);
NIR_PASS(_, s, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in, UINT32_MAX);
NIR_PASS(_, s, nir_remove_dead_variables, nir_var_shader_in, NULL);
}

View file

@ -792,7 +792,8 @@ r600_lower_and_optimize_nir(nir_shader *sh,
/**/
if (lower_64bit)
NIR_PASS(_, sh, nir_lower_indirect_derefs, nir_var_function_temp, 10);
NIR_PASS(_, sh, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, 10);
NIR_PASS(_, sh, nir_opt_constant_folding);
NIR_PASS(_, sh, nir_io_add_const_offset_to_base, io_modes);

View file

@ -586,7 +586,7 @@ void pco_preprocess_nir(pco_ctx *ctx, nir_shader *nir)
NIR_PASS(_,
nir,
nir_lower_indirect_derefs,
nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in | nir_var_shader_out,
UINT32_MAX);
@ -605,7 +605,7 @@ void pco_preprocess_nir(pco_ctx *ctx, nir_shader *nir)
NIR_PASS(_,
nir,
nir_lower_indirect_derefs,
nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp,
UINT32_MAX);
@ -703,12 +703,12 @@ void pco_link_nir(pco_ctx *ctx,
NIR_PASS(_,
producer,
nir_lower_indirect_derefs,
nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in | nir_var_shader_out,
UINT32_MAX);
NIR_PASS(_,
consumer,
nir_lower_indirect_derefs,
nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in | nir_var_shader_out,
UINT32_MAX);

View file

@ -1412,7 +1412,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
nir_variable_mode indirect_mask =
brw_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
OPT(nir_lower_indirect_derefs_to_if_else_trees, indirect_mask, UINT32_MAX);
/* Even in cases where we can handle indirect temporaries via scratch,
* it can still be expensive. Lower indirects on small arrays to
@ -1428,7 +1428,7 @@ brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
* that one kerbal space program shader.
*/
if (!(indirect_mask & nir_var_function_temp))
OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
OPT(nir_lower_indirect_derefs_to_if_else_trees, nir_var_function_temp, 16);
/* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
* SSBOs, our back-end is capable of loading an entire vec4 at a time and

View file

@ -1095,7 +1095,7 @@ elk_preprocess_nir(const struct elk_compiler *compiler, nir_shader *nir,
nir_variable_mode indirect_mask =
elk_nir_no_indirect_mask(compiler, nir->info.stage);
OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
OPT(nir_lower_indirect_derefs_to_if_else_trees, indirect_mask, UINT32_MAX);
/* Even in cases where we can handle indirect temporaries via scratch,
* it can still be expensive. Lower indirects on small arrays to
@ -1111,7 +1111,7 @@ elk_preprocess_nir(const struct elk_compiler *compiler, nir_shader *nir,
* that one kerbal space program shader.
*/
if (is_scalar && !(indirect_mask & nir_var_function_temp))
OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
OPT(nir_lower_indirect_derefs_to_if_else_trees, nir_var_function_temp, 16);
/* Lower array derefs of vectors for SSBO and UBO loads. For both UBOs and
* SSBOs, our back-end is capable of loading an entire vec4 at a time and
@ -1208,10 +1208,10 @@ elk_nir_link_shaders(const struct elk_compiler *compiler,
* temporaries so we need to lower indirects on any of the
* varyings we have demoted here.
*/
NIR_PASS(_, producer, nir_lower_indirect_derefs,
NIR_PASS(_, producer, nir_lower_indirect_derefs_to_if_else_trees,
elk_nir_no_indirect_mask(compiler, producer->info.stage),
UINT32_MAX);
NIR_PASS(_, consumer, nir_lower_indirect_derefs,
NIR_PASS(_, consumer, nir_lower_indirect_derefs_to_if_else_trees,
elk_nir_no_indirect_mask(compiler, consumer->info.stage),
UINT32_MAX);

View file

@ -1334,7 +1334,8 @@ anv_shader_lower_nir(struct anv_device *device,
nir->info.cs.has_cooperative_matrix) {
anv_fixup_subgroup_size(device, &nir->info);
NIR_PASS(_, nir, brw_nir_lower_cmat, nir->info.api_subgroup_size);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, 16);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, 16);
}
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

View file

@ -1875,7 +1875,7 @@ msl_preprocess_nir(struct nir_shader *nir)
nir_var_function_temp | nir_var_shader_in | nir_var_shader_out);
NIR_PASS(_, nir, nir_lower_alu_to_scalar, kk_scalarize_filter, NULL);
NIR_PASS(_, nir, nir_lower_indirect_derefs,
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in | nir_var_shader_out, UINT32_MAX);
NIR_PASS(_, nir, nir_lower_vars_to_scratch, nir_var_function_temp, 0,
glsl_get_natural_size_align_bytes,

View file

@ -539,7 +539,7 @@ st_link_glsl_to_nir(struct gl_context *ctx,
(nir_variable_mode)0;
if (mode)
nir_lower_indirect_derefs(nir, mode, UINT32_MAX);
nir_lower_indirect_derefs_to_if_else_trees(nir, mode, UINT32_MAX);
}
/* This needs to run after the initial pass of nir_lower_vars_to_ssa, so

View file

@ -246,7 +246,7 @@ st_nir_lower_builtin(nir_shader *shader)
* be eliminated beforehand to avoid trying to lower one of those
* builtins
*/
progress |= nir_lower_indirect_var_derefs(shader, vars);
progress |= nir_lower_indirect_var_derefs_to_if_else_trees(shader, vars);
if (nir_shader_intrinsics_pass(shader, lower_builtin_instr,
nir_metadata_control_flow, NULL)) {

View file

@ -6314,7 +6314,8 @@ optimize_nir(struct nir_shader *s, const struct nir_to_dxil_options *opts)
do {
progress = false;
NIR_PASS(progress, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_lower_indirect_derefs, nir_var_function_temp, 4);
NIR_PASS(progress, s, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, 4);
NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop_vars);

View file

@ -1048,7 +1048,7 @@ nak_postprocess_nir(nir_shader *nir,
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
OPT(nir, nir_lower_indirect_derefs, 0, UINT32_MAX);
OPT(nir, nir_lower_indirect_derefs_to_if_else_trees, 0, UINT32_MAX);
if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
OPT(nir, nir_lower_tess_coord_z,
@ -1083,7 +1083,7 @@ nak_postprocess_nir(nir_shader *nir,
break;
case MESA_SHADER_FRAGMENT:
OPT(nir, nir_lower_indirect_derefs,
OPT(nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in | nir_var_shader_out, UINT32_MAX);
OPT(nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
type_size_vec4, nir_lower_io_lower_64bit_to_32_new |

View file

@ -6219,7 +6219,8 @@ bifrost_preprocess_nir(nir_shader *nir, unsigned gpu_id)
NIR_PASS(_, nir, nir_lower_scratch_to_var);
NIR_PASS(_, nir, nir_lower_vars_to_scratch, nir_var_function_temp, 256,
vars_to_scratch_size_align_func, vars_to_scratch_size_align_func);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, ~0);
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, ~0);
NIR_PASS(_, nir, nir_split_var_copies);
NIR_PASS(_, nir, nir_lower_var_copies);

View file

@ -914,7 +914,7 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
/* Postprocess can add copies back in and lower_io can't handle them */
NIR_PASS(_, nir, nir_lower_var_copies);
NIR_PASS(_, nir, nir_lower_indirect_derefs,
NIR_PASS(_, nir, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_shader_in | nir_var_shader_out, UINT32_MAX);
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
glsl_type_size, nir_lower_io_use_interpolated_input_intrinsics);