nir: rename nir_lower_io_to_temporaries -> nir_lower_io_vars_to_temporaries

Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35760>
This commit is contained in:
Marek Olšák 2025-06-25 19:05:19 -04:00 committed by Marge Bot
parent 1e03827c77
commit 1754507d49
38 changed files with 67 additions and 67 deletions

View file

@ -25,7 +25,7 @@ lower_legacy_gs_store_output(nir_builder *b, nir_intrinsic_instr *intrin,
lower_legacy_gs_state *s)
{
/* Assume:
* - the shader used nir_lower_io_to_temporaries
* - the shader used nir_lower_io_vars_to_temporaries
* - 64-bit outputs are lowered
* - no indirect indexing is present
*/

View file

@ -13,7 +13,7 @@ static void
gather_outputs(nir_builder *b, nir_function_impl *impl, ac_nir_prerast_out *out)
{
/* Assume:
* - the shader used nir_lower_io_to_temporaries
* - the shader used nir_lower_io_vars_to_temporaries
* - 64-bit outputs are lowered
* - no indirect indexing is present
*/

View file

@ -1524,7 +1524,7 @@ static void
ngg_nogs_gather_outputs(nir_builder *b, struct exec_list *cf_list, lower_ngg_nogs_state *s)
{
/* Assume:
* - the shader used nir_lower_io_to_temporaries
* - the shader used nir_lower_io_vars_to_temporaries
* - 64-bit outputs are lowered
* - no indirect indexing is present
*/

View file

@ -82,7 +82,7 @@ ac_nir_calc_io_off(nir_builder *b, unsigned component, nir_def *io_offset, nir_d
* Meant to be used for VS/TES/GS when they are the last pre-rasterization stage.
*
* Assumptions:
* - We called nir_lower_io_to_temporaries on the shader
* - We called nir_lower_io_vars_to_temporaries on the shader
* - 64-bit outputs are lowered
* - no indirect indexing is present
*/

View file

@ -565,9 +565,9 @@ radv_shader_spirv_to_nir(struct radv_device *device, const struct radv_shader_st
if (nir->info.stage == MESA_SHADER_VERTEX || nir->info.stage == MESA_SHADER_GEOMETRY ||
nir->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS(_, nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
} else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
NIR_PASS(_, nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, false);
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir), true, false);
}
NIR_PASS(_, nir, nir_split_var_copies);

View file

@ -132,7 +132,7 @@ hk_preprocess_nir_internal(struct vk_physical_device *vk_pdev, nir_shader *nir)
NIR_PASS(_, nir, nir_lower_returns);
}
/* Unroll loops before lowering indirects via nir_lower_io_to_temporaries */
/* Unroll loops before lowering indirects via nir_lower_io_vars_to_temporaries */
UNUSED bool progress = false;
NIR_PASS(_, nir, nir_lower_global_vars_to_local);
@ -161,7 +161,7 @@ hk_preprocess_nir_internal(struct vk_physical_device *vk_pdev, nir_shader *nir)
*/
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
NIR_PASS(_, nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir),
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir),
true, false);
NIR_PASS(_, nir, nir_lower_global_vars_to_local);

View file

@ -316,7 +316,7 @@ preprocess_nir(nir_shader *nir)
NIR_PASS(_, nir, lower_intrinsics);
}
NIR_PASS(_, nir, nir_lower_io_to_temporaries,
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
NIR_PASS(_, nir, nir_lower_system_values);

View file

@ -1326,7 +1326,7 @@ preprocess_shader(const struct gl_constants *consts,
(nir->info.outputs_written & (VARYING_BIT_CLIP_DIST0 | VARYING_BIT_CLIP_DIST1)))
NIR_PASS(_, nir, gl_nir_zero_initialize_clip_distance);
NIR_PASS(_, nir, nir_lower_io_to_temporaries,
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true,
options->lower_all_io_to_temps ||
nir->info.stage == MESA_SHADER_VERTEX ||

View file

@ -188,7 +188,7 @@ else
'nir_lower_int_to_float.c',
'nir_lower_io.c',
'nir_lower_io_array_vars_to_elements.c',
'nir_lower_io_to_temporaries.c',
'nir_lower_io_vars_to_temporaries.c',
'nir_lower_io_to_scalar.c',
'nir_lower_io_vars_to_scalar.c',
'nir_lower_is_helper_invocation.c',

View file

@ -480,7 +480,7 @@ typedef struct nir_variable {
/**
* Can this variable be coalesced with another?
*
* This is set by nir_lower_io_to_temporaries to say that any
* This is set by nir_lower_io_vars_to_temporaries to say that any
* copies involving this variable should stay put. Propagating it can
* duplicate the resulting load/store, which is not wanted, and may
* result in a load/store of the variable with an indirect offset which
@ -4934,9 +4934,9 @@ bool nir_lower_indirect_var_derefs(nir_shader *shader,
bool nir_lower_locals_to_regs(nir_shader *shader, uint8_t bool_bitsize);
bool nir_lower_io_to_temporaries(nir_shader *shader,
nir_function_impl *entrypoint,
bool outputs, bool inputs);
bool nir_lower_io_vars_to_temporaries(nir_shader *shader,
nir_function_impl *entrypoint,
bool outputs, bool inputs);
bool nir_lower_vars_to_scratch(nir_shader *shader,
nir_variable_mode modes,

View file

@ -26,7 +26,7 @@
* intrinsics, and resets the offset source to 0. Non-constant offsets remain
* unchanged - since we don't know what part of a compound variable is
* accessed, we allocate storage for the entire thing. For drivers that use
* nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
* nir_lower_io_vars_to_temporaries() before nir_lower_io(), this guarantees that
* the offset source will be 0, so that they don't have to add it in manually.
*/

View file

@ -630,7 +630,7 @@ lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
static nir_def *
uncompact_view_index(nir_builder *b, nir_src compact_index_src)
{
/* We require nir_lower_io_to_temporaries when using absolute view indices,
/* We require nir_lower_io_vars_to_temporaries when using absolute view indices,
* which ensures index is constant */
assert(nir_src_is_const(compact_index_src));
unsigned compact_index = nir_src_as_uint(compact_index_src);
@ -1039,7 +1039,7 @@ nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
nir->xfb_info == NULL;
/* TODO: Sorting variables by location is required due to some bug
* in nir_lower_io_to_temporaries. If variables are not sorted,
* in nir_lower_io_vars_to_temporaries. If variables are not sorted,
* dEQP-GLES31.functional.separate_shader.random.0 fails.
*
* This isn't needed if nir_assign_io_var_locations is called because it
@ -1052,7 +1052,7 @@ nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
nir_sort_variables_by_location(nir, varying_var_mask);
if (!has_indirect_inputs || !has_indirect_outputs) {
NIR_PASS(_, nir, nir_lower_io_to_temporaries,
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), !has_indirect_outputs,
!has_indirect_inputs);
@ -1063,8 +1063,8 @@ nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
NIR_PASS(_, nir, nir_lower_var_copies);
NIR_PASS(_, nir, nir_lower_global_vars_to_local);
/* This is partially redundant with nir_lower_io_to_temporaries.
* The problem is that nir_lower_io_to_temporaries doesn't handle TCS.
/* This is partially redundant with nir_lower_io_vars_to_temporaries.
* The problem is that nir_lower_io_vars_to_temporaries doesn't handle TCS.
*/
if (nir->info.stage == MESA_SHADER_TESS_CTRL) {
NIR_PASS(_, nir, nir_lower_indirect_derefs,

View file

@ -331,8 +331,8 @@ move_variables_to_list(nir_shader *shader, nir_variable_mode mode,
}
bool
nir_lower_io_to_temporaries(nir_shader *shader, nir_function_impl *entrypoint,
bool outputs, bool inputs)
nir_lower_io_vars_to_temporaries(nir_shader *shader, nir_function_impl *entrypoint,
bool outputs, bool inputs)
{
struct lower_io_state state;

View file

@ -11,7 +11,7 @@
* we know that it will never cause clipping/culling.
* Remove the sysval_output in that case.
*
* Assumes that nir_lower_io_to_temporaries was run,
* Assumes that nir_lower_io_vars_to_temporaries was run,
* and works best with scalar store_outputs.
*/

View file

@ -30,7 +30,7 @@
*
* Merges compatible input/output variables residing in different components
* of the same location. It's expected that further passes such as
* nir_lower_io_to_temporaries will combine loads and stores of the merged
* nir_lower_io_vars_to_temporaries will combine loads and stores of the merged
* variables, producing vector nir_load_input/nir_store_output instructions
* when all is said and done.
*/
@ -436,7 +436,7 @@ nir_opt_vectorize_io_vars_impl(nir_function_impl *impl, nir_variable_mode modes)
/* Actually lower all the IO load/store intrinsics. Load instructions are
* lowered to a vector load and an ALU instruction to grab the channels we
* want. Outputs are lowered to a write-masked store of the vector output.
* For non-TCS outputs, we then run nir_lower_io_to_temporaries at the end
* For non-TCS outputs, we then run nir_lower_io_vars_to_temporaries at the end
* to clean up the partial writes.
*/
nir_foreach_block(block, impl) {
@ -567,7 +567,7 @@ nir_opt_vectorize_io_vars_impl(nir_function_impl *impl, nir_variable_mode modes)
}
/* Demote the old var to a global, so that things like
* nir_lower_io_to_temporaries() don't trigger on it.
* nir_lower_io_vars_to_temporaries() don't trigger on it.
*/
util_dynarray_foreach(&demote_vars, nir_variable *, varp) {
(*varp)->data.mode = nir_var_shader_temp;

View file

@ -580,7 +580,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
* good to validate here that no new copy derefs were added. Right now
* we can't as there are some specific cases where copies are added even
* after the lowering. One example is the Intel compiler, that calls
* nir_lower_io_to_temporaries when linking some shader stages.
* nir_lower_io_vars_to_temporaries when linking some shader stages.
*/
break;
}

View file

@ -465,7 +465,7 @@ ir3_nir_lower_ssbo_size(nir_shader *s, uint8_t ssbo_size_to_bytes_shift)
}
void
ir3_nir_lower_io_to_temporaries(nir_shader *s)
ir3_nir_lower_io_vars_to_temporaries(nir_shader *s)
{
/* Outputs consumed by the VPC, VS inputs, and FS outputs are all handled
* by the hardware pre-loading registers at the beginning and then reading
@ -474,7 +474,7 @@ ir3_nir_lower_io_to_temporaries(nir_shader *s)
* indirect accesses on those. Other i/o is lowered in ir3_nir_lower_tess,
* and indirects work just fine for those. GS outputs may be consumed by
* VPC, but have their own lowering in ir3_nir_lower_gs() which does
* something similar to nir_lower_io_to_temporaries so we shouldn't need
* something similar to nir_lower_io_vars_to_temporaries so we shouldn't need
* to lower them.
*
* Note: this might be a little inefficient for VS or TES outputs which are
@ -492,10 +492,10 @@ ir3_nir_lower_io_to_temporaries(nir_shader *s)
bool lower_output = s->info.stage != MESA_SHADER_TESS_CTRL &&
s->info.stage != MESA_SHADER_GEOMETRY;
if (lower_input || lower_output) {
NIR_PASS(_, s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
NIR_PASS(_, s, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(s),
lower_output, lower_input);
/* nir_lower_io_to_temporaries() creates global variables and copy
/* nir_lower_io_vars_to_temporaries() creates global variables and copy
* instructions which need to be cleaned up.
*/
NIR_PASS(_, s, nir_split_var_copies);
@ -511,7 +511,7 @@ ir3_nir_lower_io_to_temporaries(nir_shader *s)
* in 0 modes.
*
* Using temporaries would be slightly better but
* nir_lower_io_to_temporaries currently doesn't support TCS i/o.
* nir_lower_io_vars_to_temporaries currently doesn't support TCS i/o.
*/
NIR_PASS(_, s, nir_lower_indirect_derefs, 0, UINT32_MAX);
}

View file

@ -66,7 +66,7 @@ bool ir3_nir_opt_triops_bitwise(nir_shader *nir);
bool ir3_optimize_loop(struct ir3_compiler *compiler,
const struct ir3_shader_nir_options *options,
nir_shader *s);
void ir3_nir_lower_io_to_temporaries(nir_shader *s);
void ir3_nir_lower_io_vars_to_temporaries(nir_shader *s);
void ir3_finalize_nir(struct ir3_compiler *compiler,
const struct ir3_shader_nir_options *options,
nir_shader *s);

View file

@ -254,7 +254,7 @@ lower_block_to_explicit_output(nir_block *block, nir_builder *b,
case nir_intrinsic_store_output: {
// src[] = { value, offset }.
/* nir_lower_io_to_temporaries replaces all access to output
/* nir_lower_io_vars_to_temporaries replaces all access to output
* variables with temp variables and then emits a nir_copy_var at
* the end of the shader. Thus, we should always get a full wrmask
* here.

View file

@ -2584,7 +2584,7 @@ tu_shader_create(struct tu_device *dev,
* store at the end instead of having to rewrite every store specified by
* the user.
*/
ir3_nir_lower_io_to_temporaries(nir);
ir3_nir_lower_io_vars_to_temporaries(nir);
if (nir->info.stage == MESA_SHADER_VERTEX && key->multiview_mask) {
tu_nir_lower_multiview(nir, key->multiview_mask, dev);

View file

@ -1172,7 +1172,7 @@ crocus_compile_vs(struct crocus_context *ice,
/* Check if variables were found. */
if (nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1,
true, false, NULL)) {
nir_lower_io_to_temporaries(nir, impl, true, false);
nir_lower_io_vars_to_temporaries(nir, impl, true, false);
nir_lower_global_vars_to_local(nir);
nir_lower_vars_to_ssa(nir);
nir_shader_gather_info(nir, impl);
@ -1532,7 +1532,7 @@ crocus_compile_tes(struct crocus_context *ice,
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true,
false, NULL);
nir_lower_io_to_temporaries(nir, impl, true, false);
nir_lower_io_vars_to_temporaries(nir, impl, true, false);
nir_lower_global_vars_to_local(nir);
nir_lower_vars_to_ssa(nir);
nir_shader_gather_info(nir, impl);
@ -1675,7 +1675,7 @@ crocus_compile_gs(struct crocus_context *ice,
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
nir_lower_clip_gs(nir, (1 << key->nr_userclip_plane_consts) - 1, false,
NULL);
nir_lower_io_to_temporaries(nir, impl, true, false);
nir_lower_io_vars_to_temporaries(nir, impl, true, false);
nir_lower_global_vars_to_local(nir);
nir_lower_vars_to_ssa(nir);
nir_shader_gather_info(nir, impl);

View file

@ -127,11 +127,11 @@ load_glsl(unsigned num_files, char *const *files, gl_shader_stage stage)
if (nir_options->lower_all_io_to_temps ||
nir->info.stage == MESA_SHADER_VERTEX ||
nir->info.stage == MESA_SHADER_GEOMETRY) {
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, true);
} else if (nir->info.stage == MESA_SHADER_TESS_EVAL ||
nir->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
}
@ -419,7 +419,7 @@ main(int argc, char **argv)
const struct ir3_shader_nir_options options = {};
ir3_nir_lower_io_to_temporaries(nir);
ir3_nir_lower_io_vars_to_temporaries(nir);
ir3_finalize_nir(compiler, &options, nir);
struct ir3_shader *shader = rzalloc_size(NULL, sizeof(*shader));

View file

@ -489,7 +489,7 @@ ir3_screen_finalize_nir(struct pipe_screen *pscreen, struct nir_shader *nir)
MESA_TRACE_FUNC();
ir3_nir_lower_io_to_temporaries(nir);
ir3_nir_lower_io_vars_to_temporaries(nir);
ir3_finalize_nir(screen->compiler, &options, nir);
return NULL;

View file

@ -1872,7 +1872,7 @@ iris_compile_vs(struct iris_screen *screen,
/* Check if variables were found. */
if (nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
true, false, NULL)) {
nir_lower_io_to_temporaries(nir, impl, true, false);
nir_lower_io_vars_to_temporaries(nir, impl, true, false);
nir_lower_global_vars_to_local(nir);
nir_lower_vars_to_ssa(nir);
nir_shader_gather_info(nir, impl);
@ -2311,7 +2311,7 @@ iris_compile_tes(struct iris_screen *screen,
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
true, false, NULL);
nir_lower_io_to_temporaries(nir, impl, true, false);
nir_lower_io_vars_to_temporaries(nir, impl, true, false);
nir_lower_global_vars_to_local(nir);
nir_lower_vars_to_ssa(nir);
nir_shader_gather_info(nir, impl);
@ -2500,7 +2500,7 @@ iris_compile_gs(struct iris_screen *screen,
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
false, NULL);
nir_lower_io_to_temporaries(nir, impl, true, false);
nir_lower_io_vars_to_temporaries(nir, impl, true, false);
nir_lower_global_vars_to_local(nir);
nir_lower_vars_to_ssa(nir);
nir_shader_gather_info(nir, impl);

View file

@ -3418,7 +3418,7 @@ Converter::run()
if (lowered) {
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
NIR_PASS(_, nir, nir_lower_io_to_temporaries, impl, true, false);
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries, impl, true, false);
NIR_PASS(_, nir, nir_lower_global_vars_to_local);
NIR_PASS(_, nir, nv50_nir_lower_load_user_clip_plane, info);
} else {

View file

@ -381,7 +381,7 @@ lvp_shader_lower(struct lvp_device *pdevice, nir_shader *nir, struct lvp_pipelin
optimize(nir);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);

View file

@ -1536,7 +1536,7 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
producer->info.stage != MESA_SHADER_TASK) {
/* Calling lower_io_to_vector creates output variable writes with
* write-masks. On non-TCS outputs, the back-end can't handle it and we
* need to call nir_lower_io_to_temporaries to get rid of them. This,
* need to call nir_lower_io_vars_to_temporaries to get rid of them. This,
* in turn, creates temporary variables and extra copy_deref intrinsics
* that we need to clean up.
*
@ -1544,7 +1544,7 @@ brw_nir_link_shaders(const struct brw_compiler *compiler,
* between whole workgroup, possibly using multiple HW threads). For
* those write-mask in output is handled by I/O lowering.
*/
NIR_PASS_V(producer, nir_lower_io_to_temporaries,
NIR_PASS_V(producer, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(producer), true, false);
NIR_PASS(_, producer, nir_lower_global_vars_to_local);
NIR_PASS(_, producer, nir_split_var_copies);

View file

@ -102,7 +102,7 @@ brw_nir_lower_alpha_to_coverage(nir_shader *shader)
if (intrin->intrinsic != nir_intrinsic_store_output)
continue;
/* We call nir_lower_io_to_temporaries to lower FS outputs to
/* We call nir_lower_io_vars_to_temporaries to lower FS outputs to
* temporaries with a copy at the end so this should be the last
* block in the shader.
*/

View file

@ -1204,11 +1204,11 @@ elk_nir_link_shaders(const struct elk_compiler *compiler,
if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
/* Calling lower_io_to_vector creates output variable writes with
* write-masks. On non-TCS outputs, the back-end can't handle it and we
* need to call nir_lower_io_to_temporaries to get rid of them. This,
* need to call nir_lower_io_vars_to_temporaries to get rid of them. This,
* in turn, creates temporary variables and extra copy_deref intrinsics
* that we need to clean up.
*/
NIR_PASS_V(producer, nir_lower_io_to_temporaries,
NIR_PASS_V(producer, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(producer), true, false);
NIR_PASS(_, producer, nir_lower_global_vars_to_local);
NIR_PASS(_, producer, nir_split_var_copies);

View file

@ -107,7 +107,7 @@ elk_nir_lower_alpha_to_coverage(nir_shader *shader,
if (intrin->intrinsic != nir_intrinsic_store_output)
continue;
/* We call nir_lower_io_to_temporaries to lower FS outputs to
/* We call nir_lower_io_vars_to_temporaries to lower FS outputs to
* temporaries with a copy at the end so this should be the last
* block in the shader.
*/

View file

@ -98,7 +98,7 @@ anv_shader_stage_to_nir(struct anv_device *device,
}
}
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
return nir;

View file

@ -89,7 +89,7 @@ anv_shader_stage_to_nir(struct anv_device *device,
nir_print_shader(nir, stderr);
}
NIR_PASS_V(nir, nir_lower_io_to_temporaries,
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {

View file

@ -1021,7 +1021,7 @@ dxil_spirv_nir_passes(nir_shader *nir,
NIR_PASS_V(nir, dxil_nir_lower_int_cubemaps, false);
NIR_PASS_V(nir, nir_lower_clip_cull_distance_array_vars);
NIR_PASS_V(nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_lower_var_copies);

View file

@ -299,7 +299,7 @@ nak_preprocess_nir(nir_shader *nir, const struct nak_compiler *nak)
nir_validate_ssa_dominance(nir, "before nak_preprocess_nir");
OPT(nir, nir_lower_io_to_temporaries,
OPT(nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir),
true /* outputs */, false /* inputs */);

View file

@ -1278,7 +1278,7 @@ bi_emit_store_vary(bi_builder *b, nir_intrinsic_instr *instr)
/* Only look at the total components needed. In effect, we fill in all
* the intermediate "holes" in the write mask, since we can't mask off
* stores. Since nir_lower_io_to_temporaries ensures each varying is
* stores. Since nir_lower_io_vars_to_temporaries ensures each varying is
* written at most once, anything that's masked out is undefined, so it
* doesn't matter what we write there. So we may as well do the
* simplest thing possible. */
@ -5083,7 +5083,7 @@ glsl_type_size(const struct glsl_type *type, bool bindless)
}
/* Split stores to memory. We don't split stores to vertex outputs, since
* nir_lower_io_to_temporaries will ensure there's only a single write.
* nir_lower_io_vars_to_temporaries will ensure there's only a single write.
*/
static bool

View file

@ -19,7 +19,7 @@
* known at compile-time (for example, with monolithic pipelines in vulkan),
* this may be lowered to a constant.
*
* This pass is expected to run after nir_lower_io_to_temporaries and
* This pass is expected to run after nir_lower_io_vars_to_temporaries and
* nir_lower_io, so each IO location must have at most one read or write.
* These properties are preserved.
*
@ -31,7 +31,7 @@
static nir_intrinsic_instr *
find_pos_store(nir_function_impl *impl)
{
/* nir_lower_io_to_temporaries ensures all stores are in the exit block */
/* nir_lower_io_vars_to_temporaries ensures all stores are in the exit block */
nir_block *block = nir_impl_last_block(impl);
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
@ -64,7 +64,7 @@ is_noperspective_load(nir_intrinsic_instr* intrin)
static bool
has_noperspective_load(nir_function_impl *impl)
{
/* nir_lower_io_to_temporaries ensures all loads are in the first block */
/* nir_lower_io_vars_to_temporaries ensures all loads are in the first block */
nir_block *block = nir_start_block(impl);
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
@ -93,7 +93,7 @@ get_maybe_noperspective_outputs(nir_function_impl *impl)
uint32_t used_outputs = 0;
uint32_t integer_outputs = 0;
/* nir_lower_io_to_temporaries ensures all stores are in the exit block */
/* nir_lower_io_vars_to_temporaries ensures all stores are in the exit block */
nir_block *block = nir_impl_last_block(impl);
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)

View file

@ -34,7 +34,7 @@
* Midgard and Bifrost is slot-based, writing out an entire vec4 slot at a time.
*
* NOTE: this expects all stores to be outside of control flow, and with
* constant offsets. It should be run after nir_lower_io_to_temporaries.
* constant offsets. It should be run after nir_lower_io_vars_to_temporaries.
*/
static bool
lower_store_component(nir_builder *b, nir_intrinsic_instr *intr, void *data)

View file

@ -393,7 +393,7 @@ panvk_preprocess_nir(UNUSED struct vk_physical_device *vk_pdev,
if (nir->info.stage == MESA_SHADER_FRAGMENT)
NIR_PASS(_, nir, nir_opt_vectorize_io_vars, nir_var_shader_out);
NIR_PASS(_, nir, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(nir),
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir),
true, true);
#if PAN_ARCH < 9
@ -782,7 +782,7 @@ panvk_lower_nir(struct panvk_device *dev, nir_shader *nir,
NIR_PASS(_, nir, nir_lower_multiview, options);
/* Pull output writes out of the loop and give them constant offsets for
* pan_lower_store_components */
NIR_PASS(_, nir, nir_lower_io_to_temporaries,
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
}
#endif