nir: remove nir_io_add_const_offset_to_base

nir_opt_constant_folding does it now.
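
As a quick sketch of the mechanical change repeated across the backends below (pass and
variable-mode names are taken from the hunks in this commit; exact call sites vary per driver),
the old two-pass sequence collapses into a single constant-folding call:

    /* Before: constant folding only ran to give the dedicated pass
     * actual constants to fold into the intrinsic base.
     */
    NIR_PASS(_, nir, nir_opt_constant_folding);
    NIR_PASS(_, nir, nir_io_add_const_offset_to_base,
             nir_var_shader_in | nir_var_shader_out);

    /* After: nir_opt_constant_folding folds constant IO offset srcs into
     * the base itself, so the extra pass invocation is dropped.
     */
    /* Fold constant offset srcs for IO. */
    NIR_PASS(_, nir, nir_opt_constant_folding);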

Acked-by: Emma Anholt <emma@anholt.net>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38277>
Marek Olšák 2025-11-05 20:17:30 -05:00 committed by Marge Bot
parent 726bbb352e
commit fa0bea5ff8
20 changed files with 35 additions and 223 deletions

View file

@@ -1600,7 +1600,8 @@ ac_nir_lower_hs_outputs_to_mem(nir_shader *shader, const nir_tcs_info *info,
{
assert(shader->info.stage == MESA_SHADER_TESS_CTRL);
NIR_PASS(_, shader, nir_io_add_const_offset_to_base, nir_var_shader_out);
/* Fold constant offset srcs for IO. */
NIR_PASS(_, shader, nir_opt_constant_folding);
lower_tess_io_state state = {
.gfx_level = gfx_level,

View file

@@ -157,11 +157,9 @@ radv_nir_lower_io(struct radv_device *device, nir_shader *nir)
nir_lower_io_lower_64bit_to_32 | nir_lower_io_use_interpolated_input_intrinsics);
}
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in | nir_var_shader_out);
if (nir->xfb_info) {
NIR_PASS(_, nir, nir_io_add_intrinsic_xfb_info);

View file

@@ -1405,9 +1405,8 @@ hk_compile_shader(struct hk_device *dev, struct vk_shader_compile_info *info,
NIR_PASS(_, nir, nir_recompute_io_bases, nir_var_shader_in);
}
/* the shader_out portion of this is load-bearing even for tess eval */
NIR_PASS(_, nir, nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
for (enum hk_vs_variant v = 0; v < HK_VS_VARIANTS; ++v) {
/* Only compile the software variant if we might use this shader with
@@ -1523,8 +1522,6 @@ nir_opts(nir_shader *nir)
NIR_PASS(progress, nir, nir_opt_phi_precision);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_loop_unroll);
@@ -1556,8 +1553,8 @@ hk_compile_shaders(struct vk_device *vk_dev, uint32_t shader_count,
info->set_layout_count, info->set_layouts, hk_features);
if (nir->xfb_info) {
nir_io_add_const_offset_to_base(
nir, nir_var_shader_in | nir_var_shader_out);
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
nir_io_add_intrinsic_xfb_info(nir);
}

View file

@@ -101,8 +101,6 @@ gl_nir_opts(nir_shader *nir)
NIR_PASS(progress, nir, nir_opt_phi_precision);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
if (!nir->info.flrp_lowered) {
unsigned lower_flrp =

View file

@@ -131,7 +131,6 @@ else
'nir_inline_uniforms.c',
'nir_instr_set.c',
'nir_instr_set.h',
'nir_io_add_const_offset_to_base.c',
'nir_io_add_xfb_info.c',
'nir_legacy.c',
'nir_legacy.h',

View file

@@ -5299,7 +5299,6 @@ bool nir_lower_io(nir_shader *shader,
int (*type_size)(const struct glsl_type *, bool),
nir_lower_io_options);
bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode modes);
void nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs);
bool nir_io_add_intrinsic_xfb_info(nir_shader *nir);
bool nir_lower_io_indirect_loads(nir_shader *nir, nir_variable_mode modes);

View file

@@ -1,140 +0,0 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
* This pass adds constant offsets to instr->const_index[0] for input/output
* intrinsics, and resets the offset source to 0. Non-constant offsets remain
* unchanged - since we don't know what part of a compound variable is
* accessed, we allocate storage for the entire thing. For drivers that use
* nir_lower_io_vars_to_temporaries() before nir_lower_io(), this guarantees that
* the offset source will be 0, so that they don't have to add it in manually.
*/
#include "nir.h"
#include "nir_builder.h"
static bool
is_input(nir_intrinsic_instr *intrin)
{
return intrin->intrinsic == nir_intrinsic_load_input ||
intrin->intrinsic == nir_intrinsic_load_per_primitive_input ||
intrin->intrinsic == nir_intrinsic_load_input_vertex ||
intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
}
static bool
is_output(nir_intrinsic_instr *intrin)
{
return intrin->intrinsic == nir_intrinsic_load_output ||
intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
intrin->intrinsic == nir_intrinsic_load_per_view_output ||
intrin->intrinsic == nir_intrinsic_load_per_primitive_output ||
intrin->intrinsic == nir_intrinsic_store_output ||
intrin->intrinsic == nir_intrinsic_store_per_vertex_output ||
intrin->intrinsic == nir_intrinsic_store_per_view_output ||
intrin->intrinsic == nir_intrinsic_store_per_primitive_output;
}
static bool
is_dual_slot(nir_intrinsic_instr *intrin)
{
if (intrin->intrinsic == nir_intrinsic_store_output ||
intrin->intrinsic == nir_intrinsic_store_per_vertex_output ||
intrin->intrinsic == nir_intrinsic_store_per_view_output ||
intrin->intrinsic == nir_intrinsic_store_per_primitive_output) {
return nir_src_bit_size(intrin->src[0]) == 64 &&
nir_src_num_components(intrin->src[0]) >= 3;
}
return intrin->def.bit_size == 64 &&
intrin->def.num_components >= 3;
}
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
nir_variable_mode modes)
{
bool progress = false;
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (((modes & nir_var_shader_in) && is_input(intrin)) ||
((modes & nir_var_shader_out) && is_output(intrin))) {
nir_io_semantics sem = nir_intrinsic_io_semantics(intrin);
/* NV_mesh_shader: ignore MS primitive indices. */
if (b->shader->info.stage == MESA_SHADER_MESH &&
sem.location == VARYING_SLOT_PRIMITIVE_INDICES &&
!(b->shader->info.per_primitive_outputs &
VARYING_BIT_PRIMITIVE_INDICES))
continue;
nir_src *offset = nir_get_io_offset_src(intrin);
/* TODO: Better handling of per-view variables here */
if (nir_src_is_const(*offset) &&
!nir_intrinsic_io_semantics(intrin).per_view) {
unsigned off = nir_src_as_uint(*offset);
if (off) {
nir_intrinsic_set_base(intrin, nir_intrinsic_base(intrin) + off);
sem.location += off;
b->cursor = nir_before_instr(&intrin->instr);
nir_src_rewrite(offset, nir_imm_int(b, 0));
}
/* non-indirect indexing should reduce num_slots */
sem.num_slots = is_dual_slot(intrin) ? 2 : 1;
nir_io_semantics original = nir_intrinsic_io_semantics(intrin);
progress |= memcmp(&original, &sem, sizeof(sem));
nir_intrinsic_set_io_semantics(intrin, sem);
}
}
}
return progress;
}
bool
nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode modes)
{
bool progress = false;
nir_foreach_function_impl(impl, nir) {
bool impl_progress = false;
nir_builder b = nir_builder_create(impl);
nir_foreach_block(block, impl) {
impl_progress |= add_const_offset_to_base_block(block, &b, modes);
}
progress |= impl_progress;
nir_progress(impl_progress, impl, nir_metadata_control_flow);
}
return progress;
}

View file

@@ -1261,11 +1261,10 @@ nir_lower_io_passes(nir_shader *nir, bool renumber_vs_inputs)
(renumber_vs_inputs ? nir_lower_io_lower_64bit_to_32_new : nir_lower_io_lower_64bit_to_32) |
nir_lower_io_use_interpolated_input_intrinsics);
/* nir_io_add_const_offset_to_base needs actual constants. */
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in | nir_var_shader_out);
/* This must be called after nir_io_add_const_offset_to_base. */
/* This must be called after folding constant offset srcs. */
if (nir->info.stage != MESA_SHADER_MESH &&
!(nir->options->support_indirect_inputs & BITFIELD_BIT(nir->info.stage)))
NIR_PASS(_, nir, nir_lower_io_indirect_loads, nir_var_shader_in);

View file

@@ -1273,7 +1273,7 @@ gather_inputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_d
/* nir_lower_io_to_scalar is required before this */
assert(intr->def.num_components == 1);
/* Non-zero constant offsets should have been folded by
* nir_io_add_const_offset_to_base.
* nir_opt_constant_folding.
*/
nir_src offset = *nir_get_io_offset_src(intr);
assert(!nir_src_is_const(offset) || nir_src_as_uint(offset) == 0);
@@ -1515,7 +1515,7 @@ gather_outputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_
}
/* Non-zero constant offsets should have been folded by
* nir_io_add_const_offset_to_base.
* nir_opt_constant_folding.
*/
nir_src offset = *nir_get_io_offset_src(intr);
assert(!nir_src_is_const(offset) || nir_src_as_uint(offset) == 0);

View file

@@ -159,8 +159,7 @@ panfrost_shader_compile(struct panfrost_screen *screen, const nir_shader *ir,
pan_get_fixed_varying_mask(s->info.outputs_written);
if (s->info.has_transform_feedback_varyings) {
NIR_PASS(_, s, nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
NIR_PASS(_, s, nir_opt_constant_folding);
NIR_PASS(_, s, nir_io_add_intrinsic_xfb_info);
NIR_PASS(_, s, pan_lower_xfb);
}

View file

@@ -795,8 +795,8 @@ r600_lower_and_optimize_nir(nir_shader *sh,
NIR_PASS(_, sh, nir_lower_indirect_derefs_to_if_else_trees,
nir_var_function_temp, 10);
/* Fold constant offset srcs for IO. */
NIR_PASS(_, sh, nir_opt_constant_folding);
NIR_PASS(_, sh, nir_io_add_const_offset_to_base, io_modes);
NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
NIR_PASS(_, sh, nir_lower_phis_to_scalar, NULL, NULL);

View file

@@ -136,14 +136,6 @@ void si_nir_late_opts(nir_shader *nir)
more_late_algebraic = false;
NIR_PASS(more_late_algebraic, nir, nir_opt_algebraic_late);
NIR_PASS(_, nir, nir_opt_constant_folding);
/* We should run this after constant folding for stages that support indirect
* inputs/outputs.
*/
if (nir->options->support_indirect_inputs & BITFIELD_BIT(nir->info.stage) ||
nir->options->support_indirect_outputs & BITFIELD_BIT(nir->info.stage))
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in | nir_var_shader_out);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);

View file

@@ -4067,10 +4067,6 @@ zink_shader_compile(struct zink_screen *screen, bool can_shobj, struct zink_shad
if (inlined_uniforms) {
optimize_nir(nir, zs, true);
/* This must be done again. */
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in |
nir_var_shader_out);
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
if (impl->ssa_alloc > ZINK_ALWAYS_INLINE_LIMIT)
zs->can_inline = false;

View file

@@ -633,10 +633,10 @@ void pco_preprocess_nir(pco_ctx *ctx, nir_shader *nir)
nir_var_function_temp | nir_var_shader_temp,
NULL);
/* Fold constant offset srcs for IO. */
NIR_PASS(_,
nir,
nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
nir_opt_constant_folding);
NIR_PASS(_,
nir,
@@ -888,10 +888,6 @@ void pco_lower_nir(pco_ctx *ctx, nir_shader *nir, pco_data *data)
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_,
nir,
nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
/* Internal shaders will be using invalid32 types at this stage. */
if (!nir->info.internal)

View file

@@ -643,11 +643,9 @@ brw_nir_lower_vs_inputs(nir_shader *nir)
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in, type_size_vec4,
nir_lower_io_lower_64bit_to_32_new);
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
/* Update shader_info::dual_slot_inputs */
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
@@ -784,11 +782,9 @@ brw_nir_lower_vue_inputs(nir_shader *nir,
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in, type_size_vec4,
nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
nir_foreach_function_impl(impl, nir) {
nir_foreach_block(block, impl) {
nir_foreach_instr(instr, block) {
@@ -837,11 +833,10 @@ brw_nir_lower_tes_inputs(nir_shader *nir,
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_in, type_size_vec4,
nir_lower_io_lower_64bit_to_32);
/* Run add_const_offset_to_base to allow update base/io_semantic::location
/* Run nir_opt_constant_folding to allow update base/io_semantic::location
* for the remapping pass to look into the VUE mapping.
*/
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
NIR_PASS(_, nir, remap_tess_levels, devinfo,
nir->info.tess._primitive_mode);
@@ -853,8 +848,6 @@ brw_nir_lower_tes_inputs(nir_shader *nir,
NIR_PASS(_, nir, nir_opt_algebraic);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
NIR_PASS(_, nir, lower_inputs_to_urb_intrinsics, devinfo);
}
@@ -1093,10 +1086,8 @@ brw_nir_lower_fs_inputs(nir_shader *nir,
indirect_primitive_id);
}
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
}
void
@@ -1137,11 +1128,10 @@ brw_nir_lower_tcs_outputs(nir_shader *nir,
NIR_PASS(_, nir, nir_lower_io, nir_var_shader_out, type_size_vec4,
nir_lower_io_lower_64bit_to_32);
/* Run add_const_offset_to_base to allow update base/io_semantic::location
/* Run nir_opt_constant_folding to allow update base/io_semantic::location
* for the remapping pass to look into the VUE mapping.
*/
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
NIR_PASS(_, nir, remap_tess_levels, devinfo, tes_primitive_mode);
NIR_PASS(_, nir, remap_patch_urb_offsets, vue_map);
@@ -1150,7 +1140,6 @@ brw_nir_lower_tcs_outputs(nir_shader *nir,
* just fold it for the backend.
*/
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
NIR_PASS(_, nir, lower_outputs_to_urb_intrinsics, devinfo);
}

View file

@@ -2502,8 +2502,8 @@ get_indirect_offset(nir_to_elk_state &ntb, nir_intrinsic_instr *instr)
nir_src *offset_src = nir_get_io_offset_src(instr);
if (nir_src_is_const(*offset_src)) {
/* The only constant offset we should find is 0. elk_nir.c's
* add_const_offset_to_base() will fold other constant offsets
/* The only constant offset we should find is 0.
* nir_opt_constant_folding will fold other constant offsets
* into the "base" index.
*/
assert(nir_src_as_uint(*offset_src) == 0);

View file

@@ -257,11 +257,9 @@ elk_nir_lower_vs_inputs(nir_shader *nir,
nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
elk_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
/* The last step is to remap VERT_ATTRIB_* to actual registers */
@@ -381,11 +379,9 @@ elk_nir_lower_vue_inputs(nir_shader *nir,
nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function_impl(impl, nir) {
nir_foreach_block(block, impl) {
nir_foreach_instr(instr, block) {
@@ -429,11 +425,9 @@ elk_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue_map)
nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
nir_foreach_function_impl(impl, nir) {
nir_builder b = nir_builder_create(impl);
nir_foreach_block(block, impl) {
@@ -611,10 +605,8 @@ elk_nir_lower_fs_inputs(nir_shader *nir,
nir_metadata_control_flow,
NULL);
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
}
void
@@ -639,11 +631,9 @@ elk_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue_map,
nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
nir_lower_io_lower_64bit_to_32);
/* This pass needs actual constants */
/* Fold constant offset srcs for IO. */
nir_opt_constant_folding(nir);
nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
nir_foreach_function_impl(impl, nir) {
nir_builder b = nir_builder_create(impl);
nir_foreach_block(block, impl) {

View file

@@ -275,8 +275,8 @@ vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
nir_src *offset_src = nir_get_io_offset_src(instr);
if (nir_src_is_const(*offset_src)) {
/* The only constant offset we should find is 0. elk_nir.c's
* add_const_offset_to_base() will fold other constant offsets
/* The only constant offset we should find is 0.
* nir_opt_constant_folding will fold other constant offsets
* into the base index.
*/
assert(nir_src_as_uint(*offset_src) == 0);

View file

@@ -286,8 +286,8 @@ kk_lower_vs_vbo(nir_shader *nir, const struct vk_graphics_pipeline_state *state)
"Fixed-function attributes not used in Vulkan");
NIR_PASS(_, nir, nir_recompute_io_bases, nir_var_shader_in);
/* the shader_out portion of this is load-bearing even for tess eval */
NIR_PASS(_, nir, nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
struct kk_attribute attributes[KK_MAX_ATTRIBS] = {};
uint64_t attribs_read = nir->info.inputs_read >> VERT_ATTRIB_GENERIC0;
@@ -368,7 +368,8 @@ kk_lower_fs_blend(nir_shader *nir,
};
}
}
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_out);
/* Fold constant offset srcs for IO. */
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_lower_blend, &opts);
}
@@ -730,8 +731,6 @@ nir_opts(nir_shader *nir)
NIR_PASS(progress, nir, nir_opt_phi_precision);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_loop_unroll);

View file

@@ -1188,8 +1188,8 @@ poly_nir_lower_gs(nir_shader *gs, nir_shader **gs_count, nir_shader **gs_copy,
{
/* Lower I/O as assumed by the rest of GS lowering */
if (gs->xfb_info != NULL) {
NIR_PASS(_, gs, nir_io_add_const_offset_to_base,
nir_var_shader_in | nir_var_shader_out);
/* Fold constant offset srcs for IO. */
NIR_PASS(_, gs, nir_opt_constant_folding);
NIR_PASS(_, gs, nir_io_add_intrinsic_xfb_info);
}