diff --git a/src/amd/common/nir/ac_nir_lower_tess_io_to_mem.c b/src/amd/common/nir/ac_nir_lower_tess_io_to_mem.c
index d8a32efaac5..17360ccf644 100644
--- a/src/amd/common/nir/ac_nir_lower_tess_io_to_mem.c
+++ b/src/amd/common/nir/ac_nir_lower_tess_io_to_mem.c
@@ -1600,7 +1600,8 @@ ac_nir_lower_hs_outputs_to_mem(nir_shader *shader, const nir_tcs_info *info,
 {
    assert(shader->info.stage == MESA_SHADER_TESS_CTRL);
 
-   NIR_PASS(_, shader, nir_io_add_const_offset_to_base, nir_var_shader_out);
+   /* Fold constant offset srcs for IO. */
+   NIR_PASS(_, shader, nir_opt_constant_folding);
 
    lower_tess_io_state state = {
       .gfx_level = gfx_level,
diff --git a/src/amd/vulkan/nir/radv_nir_lower_io.c b/src/amd/vulkan/nir/radv_nir_lower_io.c
index da598acfca3..665c73b08df 100644
--- a/src/amd/vulkan/nir/radv_nir_lower_io.c
+++ b/src/amd/vulkan/nir/radv_nir_lower_io.c
@@ -157,11 +157,9 @@ radv_nir_lower_io(struct radv_device *device, nir_shader *nir)
                  nir_lower_io_lower_64bit_to_32 | nir_lower_io_use_interpolated_input_intrinsics);
    }
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    NIR_PASS(_, nir, nir_opt_constant_folding);
 
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in | nir_var_shader_out);
-
    if (nir->xfb_info) {
       NIR_PASS(_, nir, nir_io_add_intrinsic_xfb_info);
diff --git a/src/asahi/vulkan/hk_shader.c b/src/asahi/vulkan/hk_shader.c
index ad939fdfb57..8cec01f403f 100644
--- a/src/asahi/vulkan/hk_shader.c
+++ b/src/asahi/vulkan/hk_shader.c
@@ -1405,9 +1405,8 @@ hk_compile_shader(struct hk_device *dev, struct vk_shader_compile_info *info,
       NIR_PASS(_, nir, nir_recompute_io_bases, nir_var_shader_in);
    }
 
-   /* the shader_out portion of this is load-bearing even for tess eval */
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base,
-            nir_var_shader_in | nir_var_shader_out);
+   /* Fold constant offset srcs for IO. */
+   NIR_PASS(_, nir, nir_opt_constant_folding);
 
    for (enum hk_vs_variant v = 0; v < HK_VS_VARIANTS; ++v) {
       /* Only compile the software variant if we might use this shader with
@@ -1523,8 +1522,6 @@ nir_opts(nir_shader *nir)
       NIR_PASS(progress, nir, nir_opt_phi_precision);
       NIR_PASS(progress, nir, nir_opt_algebraic);
       NIR_PASS(progress, nir, nir_opt_constant_folding);
-      NIR_PASS(progress, nir, nir_io_add_const_offset_to_base,
-               nir_var_shader_in | nir_var_shader_out);
 
       NIR_PASS(progress, nir, nir_opt_undef);
       NIR_PASS(progress, nir, nir_opt_loop_unroll);
@@ -1556,8 +1553,8 @@ hk_compile_shaders(struct vk_device *vk_dev, uint32_t shader_count,
                            info->set_layout_count, info->set_layouts,
                            hk_features);
 
       if (nir->xfb_info) {
-         nir_io_add_const_offset_to_base(
-            nir, nir_var_shader_in | nir_var_shader_out);
+         /* Fold constant offset srcs for IO. */
+         NIR_PASS(_, nir, nir_opt_constant_folding);
          nir_io_add_intrinsic_xfb_info(nir);
       }
diff --git a/src/compiler/glsl/gl_nir_linker.c b/src/compiler/glsl/gl_nir_linker.c
index 891d707e9f5..d5ed4b10e44 100644
--- a/src/compiler/glsl/gl_nir_linker.c
+++ b/src/compiler/glsl/gl_nir_linker.c
@@ -101,8 +101,6 @@ gl_nir_opts(nir_shader *nir)
       NIR_PASS(progress, nir, nir_opt_phi_precision);
       NIR_PASS(progress, nir, nir_opt_algebraic);
       NIR_PASS(progress, nir, nir_opt_constant_folding);
-      NIR_PASS(progress, nir, nir_io_add_const_offset_to_base,
-               nir_var_shader_in | nir_var_shader_out);
 
       if (!nir->info.flrp_lowered) {
          unsigned lower_flrp =
diff --git a/src/compiler/nir/meson.build b/src/compiler/nir/meson.build
index 7fd3f552bfa..49c28177fe7 100644
--- a/src/compiler/nir/meson.build
+++ b/src/compiler/nir/meson.build
@@ -131,7 +131,6 @@ else
   'nir_inline_uniforms.c',
   'nir_instr_set.c',
   'nir_instr_set.h',
-  'nir_io_add_const_offset_to_base.c',
   'nir_io_add_xfb_info.c',
   'nir_legacy.c',
   'nir_legacy.h',
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index c9b4abb97a8..aa877000fde 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -5299,7 +5299,6 @@ bool nir_lower_io(nir_shader *shader,
                   int (*type_size)(const struct glsl_type *, bool),
                   nir_lower_io_options);
 
-bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode modes);
 void nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs);
 bool nir_io_add_intrinsic_xfb_info(nir_shader *nir);
 bool nir_lower_io_indirect_loads(nir_shader *nir, nir_variable_mode modes);
diff --git a/src/compiler/nir/nir_io_add_const_offset_to_base.c b/src/compiler/nir/nir_io_add_const_offset_to_base.c
deleted file mode 100644
index e72f1798068..00000000000
--- a/src/compiler/nir/nir_io_add_const_offset_to_base.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-/**
- * This pass adds constant offsets to instr->const_index[0] for input/output
- * intrinsics, and resets the offset source to 0. Non-constant offsets remain
- * unchanged - since we don't know what part of a compound variable is
- * accessed, we allocate storage for the entire thing. For drivers that use
- * nir_lower_io_vars_to_temporaries() before nir_lower_io(), this guarantees that
- * the offset source will be 0, so that they don't have to add it in manually.
- */
-
-#include "nir.h"
-#include "nir_builder.h"
-
-static bool
-is_input(nir_intrinsic_instr *intrin)
-{
-   return intrin->intrinsic == nir_intrinsic_load_input ||
-          intrin->intrinsic == nir_intrinsic_load_per_primitive_input ||
-          intrin->intrinsic == nir_intrinsic_load_input_vertex ||
-          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
-          intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
-          intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
-}
-
-static bool
-is_output(nir_intrinsic_instr *intrin)
-{
-   return intrin->intrinsic == nir_intrinsic_load_output ||
-          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
-          intrin->intrinsic == nir_intrinsic_load_per_view_output ||
-          intrin->intrinsic == nir_intrinsic_load_per_primitive_output ||
-          intrin->intrinsic == nir_intrinsic_store_output ||
-          intrin->intrinsic == nir_intrinsic_store_per_vertex_output ||
-          intrin->intrinsic == nir_intrinsic_store_per_view_output ||
-          intrin->intrinsic == nir_intrinsic_store_per_primitive_output;
-}
-
-static bool
-is_dual_slot(nir_intrinsic_instr *intrin)
-{
-   if (intrin->intrinsic == nir_intrinsic_store_output ||
-       intrin->intrinsic == nir_intrinsic_store_per_vertex_output ||
-       intrin->intrinsic == nir_intrinsic_store_per_view_output ||
-       intrin->intrinsic == nir_intrinsic_store_per_primitive_output) {
-      return nir_src_bit_size(intrin->src[0]) == 64 &&
-             nir_src_num_components(intrin->src[0]) >= 3;
-   }
-
-   return intrin->def.bit_size == 64 &&
-          intrin->def.num_components >= 3;
-}
-
-static bool
-add_const_offset_to_base_block(nir_block *block, nir_builder *b,
-                               nir_variable_mode modes)
-{
-   bool progress = false;
-   nir_foreach_instr_safe(instr, block) {
-      if (instr->type != nir_instr_type_intrinsic)
-         continue;
-
-      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
-
-      if (((modes & nir_var_shader_in) && is_input(intrin)) ||
-          ((modes & nir_var_shader_out) && is_output(intrin))) {
-         nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
-
-         /* NV_mesh_shader: ignore MS primitive indices. */
-         if (b->shader->info.stage == MESA_SHADER_MESH &&
-             sem.location == VARYING_SLOT_PRIMITIVE_INDICES &&
-             !(b->shader->info.per_primitive_outputs &
-               VARYING_BIT_PRIMITIVE_INDICES))
-            continue;
-
-         nir_src *offset = nir_get_io_offset_src(intrin);
-
-         /* TODO: Better handling of per-view variables here */
-         if (nir_src_is_const(*offset) &&
-             !nir_intrinsic_io_semantics(intrin).per_view) {
-            unsigned off = nir_src_as_uint(*offset);
-
-            if (off) {
-               nir_intrinsic_set_base(intrin, nir_intrinsic_base(intrin) + off);
-
-               sem.location += off;
-               b->cursor = nir_before_instr(&intrin->instr);
-               nir_src_rewrite(offset, nir_imm_int(b, 0));
-            }
-            /* non-indirect indexing should reduce num_slots */
-            sem.num_slots = is_dual_slot(intrin) ? 2 : 1;
-
-            nir_io_semantics original = nir_intrinsic_io_semantics(intrin);
-            progress |= memcmp(&original, &sem, sizeof(sem));
-            nir_intrinsic_set_io_semantics(intrin, sem);
-         }
-      }
-   }
-
-   return progress;
-}
-
-bool
-nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode modes)
-{
-   bool progress = false;
-
-   nir_foreach_function_impl(impl, nir) {
-      bool impl_progress = false;
-      nir_builder b = nir_builder_create(impl);
-      nir_foreach_block(block, impl) {
-         impl_progress |= add_const_offset_to_base_block(block, &b, modes);
-      }
-      progress |= impl_progress;
-      nir_progress(impl_progress, impl, nir_metadata_control_flow);
-   }
-
-   return progress;
-}
diff --git a/src/compiler/nir/nir_lower_io.c b/src/compiler/nir/nir_lower_io.c
index 343323413d2..87044cd3db0 100644
--- a/src/compiler/nir/nir_lower_io.c
+++ b/src/compiler/nir/nir_lower_io.c
@@ -1261,11 +1261,10 @@ nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
               (renumber_vs_inputs ? nir_lower_io_lower_64bit_to_32_new
                                   : nir_lower_io_lower_64bit_to_32) |
               nir_lower_io_use_interpolated_input_intrinsics);
 
-   /* nir_io_add_const_offset_to_base needs actual constants. */
+   /* Fold constant offset srcs for IO. */
    NIR_PASS(_, nir, nir_opt_constant_folding);
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in | nir_var_shader_out);
 
-   /* This must be called after nir_io_add_const_offset_to_base. */
+   /* This must be called after folding constant offset srcs. */
    if (nir->info.stage != MESA_SHADER_MESH &&
       !(nir->options->support_indirect_inputs & BITFIELD_BIT(nir->info.stage)))
       NIR_PASS(_, nir, nir_lower_io_indirect_loads, nir_var_shader_in);
diff --git a/src/compiler/nir/nir_opt_varyings.c b/src/compiler/nir/nir_opt_varyings.c
index b7fd27a315d..55797507488 100644
--- a/src/compiler/nir/nir_opt_varyings.c
+++ b/src/compiler/nir/nir_opt_varyings.c
@@ -1273,7 +1273,7 @@ gather_inputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_d
    /* nir_lower_io_to_scalar is required before this */
    assert(intr->def.num_components == 1);
    /* Non-zero constant offsets should have been folded by
-    * nir_io_add_const_offset_to_base.
+    * nir_opt_constant_folding.
     */
    nir_src offset = *nir_get_io_offset_src(intr);
    assert(!nir_src_is_const(offset) || nir_src_as_uint(offset) == 0);
@@ -1515,7 +1515,7 @@ gather_outputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_
    }
 
    /* Non-zero constant offsets should have been folded by
-    * nir_io_add_const_offset_to_base.
+    * nir_opt_constant_folding.
     */
    nir_src offset = *nir_get_io_offset_src(intr);
    assert(!nir_src_is_const(offset) || nir_src_as_uint(offset) == 0);
diff --git a/src/gallium/drivers/panfrost/pan_shader.c b/src/gallium/drivers/panfrost/pan_shader.c
index b3fbf8511b4..59732bc2dc7 100644
--- a/src/gallium/drivers/panfrost/pan_shader.c
+++ b/src/gallium/drivers/panfrost/pan_shader.c
@@ -159,8 +159,7 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
          pan_get_fixed_varying_mask(s->info.outputs_written);
 
    if (s->info.has_transform_feedback_varyings) {
-      NIR_PASS(_, s, nir_io_add_const_offset_to_base,
-               nir_var_shader_in | nir_var_shader_out);
+      NIR_PASS(_, s, nir_opt_constant_folding);
       NIR_PASS(_, s, nir_io_add_intrinsic_xfb_info);
       NIR_PASS(_, s, pan_lower_xfb);
    }
diff --git a/src/gallium/drivers/r600/sfn/sfn_nir.cpp b/src/gallium/drivers/r600/sfn/sfn_nir.cpp
index 4cac384653e..a5fc37bf548 100644
--- a/src/gallium/drivers/r600/sfn/sfn_nir.cpp
+++ b/src/gallium/drivers/r600/sfn/sfn_nir.cpp
@@ -795,8 +795,8 @@ r600_lower_and_optimize_nir(nir_shader *sh,
    NIR_PASS(_, sh, nir_lower_indirect_derefs_to_if_else_trees, nir_var_function_temp, 10);
 
+   /* Fold constant offset srcs for IO. */
    NIR_PASS(_, sh, nir_opt_constant_folding);
-   NIR_PASS(_, sh, nir_io_add_const_offset_to_base, io_modes);
    NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
    NIR_PASS(_, sh, nir_lower_phis_to_scalar, NULL, NULL);
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index ec7304f8de2..1b68e4b2828 100644
--- a/src/gallium/drivers/radeonsi/si_shader_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shader_nir.c
@@ -136,14 +136,6 @@ void si_nir_late_opts(nir_shader *nir)
       more_late_algebraic = false;
       NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
       NIR_PASS(_, nir, nir_opt_constant_folding);
-
-      /* We should run this after constant folding for stages that support indirect
-       * inputs/outputs.
-       */
-      if (nir->options->support_indirect_inputs & BITFIELD_BIT(nir->info.stage) ||
-          nir->options->support_indirect_outputs & BITFIELD_BIT(nir->info.stage))
-         NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in | nir_var_shader_out);
-
      NIR_PASS(_, nir, nir_opt_copy_prop);
      NIR_PASS(_, nir, nir_opt_dce);
      NIR_PASS(_, nir, nir_opt_cse);
diff --git a/src/gallium/drivers/zink/zink_compiler.c b/src/gallium/drivers/zink/zink_compiler.c
index b322e6f3f16..9882ec7aaaf 100644
--- a/src/gallium/drivers/zink/zink_compiler.c
+++ b/src/gallium/drivers/zink/zink_compiler.c
@@ -4067,10 +4067,6 @@ zink_shader_compile(struct zink_screen *screen, bool can_shobj, struct zink_shad
    if (inlined_uniforms) {
       optimize_nir(nir, zs, true);
 
-      /* This must be done again. */
-      NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in |
-                                                        nir_var_shader_out);
-
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      if (impl->ssa_alloc > ZINK_ALWAYS_INLINE_LIMIT)
         zs->can_inline = false;
diff --git a/src/imagination/pco/pco_nir.c b/src/imagination/pco/pco_nir.c
index 0f7a4abcab1..3b9ce60196b 100644
--- a/src/imagination/pco/pco_nir.c
+++ b/src/imagination/pco/pco_nir.c
@@ -633,10 +633,10 @@ void pco_preprocess_nir(pco_ctx *ctx, nir_shader *nir)
             nir_var_function_temp | nir_var_shader_temp,
             NULL);
 
+   /* Fold constant offset srcs for IO. */
    NIR_PASS(_,
            nir,
-           nir_io_add_const_offset_to_base,
-           nir_var_shader_in | nir_var_shader_out);
+           nir_opt_constant_folding);
 
    NIR_PASS(_,
            nir,
@@ -888,10 +888,6 @@ void pco_lower_nir(pco_ctx *ctx, nir_shader *nir, pco_data *data)
 
    NIR_PASS(_, nir, nir_opt_dce);
    NIR_PASS(_, nir, nir_opt_constant_folding);
-   NIR_PASS(_,
-            nir,
-            nir_io_add_const_offset_to_base,
-            nir_var_shader_in | nir_var_shader_out);
 
    /* Internal shaders will be using invalid32 types at this stage. */
    if (!nir->info.internal)
diff --git a/src/intel/compiler/brw/brw_nir.c b/src/intel/compiler/brw/brw_nir.c
index 492103f44f4..ac977e8b999 100644
--- a/src/intel/compiler/brw/brw_nir.c
+++ b/src/intel/compiler/brw/brw_nir.c
@@ -643,11 +643,9 @@ brw_nir_lower_vs_inputs(nir_shader *nir)
    NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in, type_size_vec4,
             nir_lower_io_lower_64bit_to_32_new);
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    NIR_PASS(_, nir, nir_opt_constant_folding);
 
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
-
    /* Update shader_info::dual_slot_inputs */
    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
 
@@ -784,11 +782,9 @@ brw_nir_lower_vue_inputs(nir_shader *nir,
    NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in, type_size_vec4,
             nir_lower_io_lower_64bit_to_32);
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    NIR_PASS(_, nir, nir_opt_constant_folding);
 
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
-
    nir_foreach_function_impl(impl, nir) {
       nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
@@ -837,11 +833,10 @@ brw_nir_lower_tes_inputs(nir_shader *nir,
    NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in, type_size_vec4,
             nir_lower_io_lower_64bit_to_32);
 
-   /* Run add_const_offset_to_base to allow update base/io_semantic::location
+   /* Run nir_opt_constant_folding to allow updating base/io_semantics::location
     * for the remapping pass to look into the VUE mapping.
     */
    NIR_PASS(_, nir, nir_opt_constant_folding);
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
 
    NIR_PASS(_, nir, remap_tess_levels, devinfo, nir->info.tess._primitive_mode);
 
@@ -853,8 +848,6 @@ brw_nir_lower_tes_inputs(nir_shader *nir,
    NIR_PASS(_, nir, nir_opt_algebraic);
    NIR_PASS(_, nir, nir_opt_constant_folding);
 
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
-
    NIR_PASS(_, nir, lower_inputs_to_urb_intrinsics, devinfo);
 }
 
@@ -1093,10 +1086,8 @@ brw_nir_lower_fs_inputs(nir_shader *nir,
                indirect_primitive_id);
    }
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    NIR_PASS(_, nir, nir_opt_constant_folding);
-
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
 }
 
 void
@@ -1137,11 +1128,10 @@ brw_nir_lower_tcs_outputs(nir_shader *nir,
    NIR_PASS(_, nir, nir_lower_io, nir_var_shader_out, type_size_vec4,
             nir_lower_io_lower_64bit_to_32);
 
-   /* Run add_const_offset_to_base to allow update base/io_semantic::location
+   /* Run nir_opt_constant_folding to allow updating base/io_semantics::location
    * for the remapping pass to look into the VUE mapping.
    */
    NIR_PASS(_, nir, nir_opt_constant_folding);
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
 
    NIR_PASS(_, nir, remap_tess_levels, devinfo, tes_primitive_mode);
    NIR_PASS(_, nir, remap_patch_urb_offsets, vue_map);
@@ -1150,7 +1140,6 @@ brw_nir_lower_tcs_outputs(nir_shader *nir,
    * just fold it for the backend.
    */
   NIR_PASS(_, nir, nir_opt_constant_folding);
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
 
   NIR_PASS(_, nir, lower_outputs_to_urb_intrinsics, devinfo);
 }
diff --git a/src/intel/compiler/elk/elk_fs_nir.cpp b/src/intel/compiler/elk/elk_fs_nir.cpp
index 8fb9121db1c..01bb43fbc33 100644
--- a/src/intel/compiler/elk/elk_fs_nir.cpp
+++ b/src/intel/compiler/elk/elk_fs_nir.cpp
@@ -2502,8 +2502,8 @@ get_indirect_offset(nir_to_elk_state &ntb, nir_intrinsic_instr *instr)
    nir_src *offset_src = nir_get_io_offset_src(instr);
 
    if (nir_src_is_const(*offset_src)) {
-      /* The only constant offset we should find is 0.  elk_nir.c's
-       * add_const_offset_to_base() will fold other constant offsets
+      /* The only constant offset we should find is 0.
+       * nir_opt_constant_folding will fold other constant offsets
        * into the "base" index.
        */
       assert(nir_src_as_uint(*offset_src) == 0);
diff --git a/src/intel/compiler/elk/elk_nir.c b/src/intel/compiler/elk/elk_nir.c
index 35b3f954440..d0d8b33b2d8 100644
--- a/src/intel/compiler/elk/elk_nir.c
+++ b/src/intel/compiler/elk/elk_nir.c
@@ -257,11 +257,9 @@ elk_nir_lower_vs_inputs(nir_shader *nir,
    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
                 nir_lower_io_lower_64bit_to_32);
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    nir_opt_constant_folding(nir);
 
-   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
-
    elk_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
 
    /* The last step is to remap VERT_ATTRIB_* to actual registers */
@@ -381,11 +379,9 @@ elk_nir_lower_vue_inputs(nir_shader *nir,
    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
                 nir_lower_io_lower_64bit_to_32);
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    nir_opt_constant_folding(nir);
 
-   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
-
    nir_foreach_function_impl(impl, nir) {
       nir_foreach_block(block, impl) {
          nir_foreach_instr(instr, block) {
@@ -429,11 +425,9 @@ elk_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue_map)
    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
                 nir_lower_io_lower_64bit_to_32);
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    nir_opt_constant_folding(nir);
 
-   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
-
    nir_foreach_function_impl(impl, nir) {
       nir_builder b = nir_builder_create(impl);
       nir_foreach_block(block, impl) {
@@ -611,10 +605,8 @@ elk_nir_lower_fs_inputs(nir_shader *nir,
                                 nir_metadata_control_flow, NULL);
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    nir_opt_constant_folding(nir);
-
-   nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
 }
 
 void
@@ -639,11 +631,9 @@ elk_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue_map,
    nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
                 nir_lower_io_lower_64bit_to_32);
 
-   /* This pass needs actual constants */
+   /* Fold constant offset srcs for IO. */
    nir_opt_constant_folding(nir);
 
-   nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
-
    nir_foreach_function_impl(impl, nir) {
       nir_builder b = nir_builder_create(impl);
       nir_foreach_block(block, impl) {
diff --git a/src/intel/compiler/elk/elk_vec4_nir.cpp b/src/intel/compiler/elk/elk_vec4_nir.cpp
index 1ec2443366d..a651c372347 100644
--- a/src/intel/compiler/elk/elk_vec4_nir.cpp
+++ b/src/intel/compiler/elk/elk_vec4_nir.cpp
@@ -275,8 +275,8 @@ vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
    nir_src *offset_src = nir_get_io_offset_src(instr);
 
    if (nir_src_is_const(*offset_src)) {
-      /* The only constant offset we should find is 0.  elk_nir.c's
-       * add_const_offset_to_base() will fold other constant offsets
+      /* The only constant offset we should find is 0.
+       * nir_opt_constant_folding will fold other constant offsets
        * into the base index.
        */
      assert(nir_src_as_uint(*offset_src) == 0);
diff --git a/src/kosmickrisp/vulkan/kk_shader.c b/src/kosmickrisp/vulkan/kk_shader.c
index 6c70bc9f3f6..9d7b009d07c 100644
--- a/src/kosmickrisp/vulkan/kk_shader.c
+++ b/src/kosmickrisp/vulkan/kk_shader.c
@@ -286,8 +286,7 @@ kk_lower_vs_vbo(nir_shader *nir, const struct vk_graphics_pipeline_state *state)
                 "Fixed-function attributes not used in Vulkan");
    NIR_PASS(_, nir, nir_recompute_io_bases, nir_var_shader_in);
-   /* the shader_out portion of this is load-bearing even for tess eval */
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base,
-            nir_var_shader_in | nir_var_shader_out);
+   /* Fold constant offset srcs for IO. */
+   NIR_PASS(_, nir, nir_opt_constant_folding);
 
    struct kk_attribute attributes[KK_MAX_ATTRIBS] = {};
    uint64_t attribs_read = nir->info.inputs_read >> VERT_ATTRIB_GENERIC0;
@@ -368,7 +368,8 @@ kk_lower_fs_blend(nir_shader *nir,
          };
       }
    }
-   NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
+   /* Fold constant offset srcs for IO. */
+   NIR_PASS(_, nir, nir_opt_constant_folding);
 
    NIR_PASS(_, nir, nir_lower_blend, &opts);
 }
@@ -730,8 +731,6 @@ nir_opts(nir_shader *nir)
       NIR_PASS(progress, nir, nir_opt_phi_precision);
       NIR_PASS(progress, nir, nir_opt_algebraic);
       NIR_PASS(progress, nir, nir_opt_constant_folding);
-      NIR_PASS(progress, nir, nir_io_add_const_offset_to_base,
-               nir_var_shader_in | nir_var_shader_out);
 
       NIR_PASS(progress, nir, nir_opt_undef);
       NIR_PASS(progress, nir, nir_opt_loop_unroll);
diff --git a/src/poly/nir/poly_nir_lower_gs.c b/src/poly/nir/poly_nir_lower_gs.c
index 01cc0e04e6c..60435176a98 100644
--- a/src/poly/nir/poly_nir_lower_gs.c
+++ b/src/poly/nir/poly_nir_lower_gs.c
@@ -1188,8 +1188,8 @@ poly_nir_lower_gs(nir_shader *gs, nir_shader **gs_count, nir_shader **gs_copy,
 {
    /* Lower I/O as assumed by the rest of GS lowering */
    if (gs->xfb_info != NULL) {
-      NIR_PASS(_, gs, nir_io_add_const_offset_to_base,
-               nir_var_shader_in | nir_var_shader_out);
+      /* Fold constant offset srcs for IO. */
+      NIR_PASS(_, gs, nir_opt_constant_folding);
       NIR_PASS(_, gs, nir_io_add_intrinsic_xfb_info);
    }
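
Migration sketch for out-of-tree callers of the removed pass (illustrative,
not a hunk of this patch; assumes the xfb lowering order shown in the
hk_shader.c, pan_shader.c, and poly_nir_lower_gs.c hunks above):

   /* Before: constant IO offsets were folded into the base index by a
    * dedicated pass, run ahead of passes that assert zero constant offsets. */
   NIR_PASS(_, nir, nir_io_add_const_offset_to_base,
            nir_var_shader_in | nir_var_shader_out);
   NIR_PASS(_, nir, nir_io_add_intrinsic_xfb_info);

   /* After: nir_opt_constant_folding performs the same folding for IO
    * intrinsics, so a plain constant-folding run before passes such as
    * nir_io_add_intrinsic_xfb_info or nir_opt_varyings is sufficient. */
   NIR_PASS(_, nir, nir_opt_constant_folding);
   NIR_PASS(_, nir, nir_io_add_intrinsic_xfb_info);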