nir: add nir_vectorize_cb callback parameter to nir_lower_phis_to_scalar()

Similar to nir_lower_alu_width(), the callback can return the
desired number of components for a phi, or 0 for no lowering.
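
For illustration, a callback that keeps 16-bit phis in vec2 pairs while
fully scalarizing everything else could look as follows (a hypothetical
sketch; the callback name and the vec2-for-16-bit policy are made up and
not part of this change):

   static uint8_t
   split_phi_cb(const nir_instr *instr, const void *data)
   {
      /* The pass only invokes the callback for phis with more than one
       * component; returning a width >= num_components keeps the phi as-is.
       */
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      if (phi->def.bit_size == 16)
         return 2; /* lower to vec2 phis instead of scalars */
      return 1;    /* fully scalarize everything else */
   }

   nir_lower_phis_to_scalar(shader, split_phi_cb, NULL);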

The previous behavior of nir_lower_phis_to_scalar() with lower_all=true
is now available as nir_lower_all_phis_to_scalar(), while the previous
behavior with lower_all=false corresponds to nir_lower_phis_to_scalar()
with a NULL callback.
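
For existing callers, the two old call forms map to:

   nir_lower_phis_to_scalar(shader, true);       /* before */
   nir_lower_all_phis_to_scalar(shader);         /* after  */

   nir_lower_phis_to_scalar(shader, false);      /* before */
   nir_lower_phis_to_scalar(shader, NULL, NULL); /* after  */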

Reviewed-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Mel Henning <mhenning@darkrefraction.com>
Reviewed-by: Georg Lehmann <dadschoorse@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35783>
Authored by Daniel Schürmann on 2025-06-26 17:01:36 +02:00, committed by Marge Bot
parent 23b7b3b919
commit 2c51a8870d
31 changed files with 88 additions and 62 deletions


@@ -1797,7 +1797,7 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
    nir_lower_vars_to_ssa(shader);
    nir_remove_dead_variables(shader, nir_var_function_temp, NULL);
    nir_lower_alu_to_scalar(shader, NULL, NULL);
-   nir_lower_phis_to_scalar(shader, true);
+   nir_lower_all_phis_to_scalar(shader);
 
    if (options->can_cull) {
       /* It's beneficial to redo these opts after splitting the shader. */


@@ -1429,7 +1429,7 @@ ac_nir_lower_ngg_mesh(nir_shader *shader,
    nir_lower_vars_to_ssa(shader);
    nir_remove_dead_variables(shader, nir_var_function_temp, NULL);
    nir_lower_alu_to_scalar(shader, NULL, NULL);
-   nir_lower_phis_to_scalar(shader, true);
+   nir_lower_all_phis_to_scalar(shader);
 
    /* Optimize load_local_invocation_index. When the API workgroup is smaller than the HW workgroup,
     * local_invocation_id isn't initialized for all lanes and we can't perform this optimization for


@@ -1624,7 +1624,7 @@ ac_nir_lower_hs_outputs_to_mem(nir_shader *shader, const nir_tcs_info *info,
    NIR_PASS(_, shader, nir_lower_vars_to_ssa);
    NIR_PASS(_, shader, nir_remove_dead_variables, nir_var_function_temp, NULL);
    NIR_PASS(_, shader, nir_lower_alu_to_scalar, NULL, NULL);
-   NIR_PASS(_, shader, nir_lower_phis_to_scalar, true);
+   NIR_PASS(_, shader, nir_lower_all_phis_to_scalar);
 
    return true;
 }


@@ -249,7 +249,7 @@ void
 setup_nir(isel_context* ctx, nir_shader* nir)
 {
    nir_convert_to_lcssa(nir, true, false);
-   if (nir_lower_phis_to_scalar(nir, true)) {
+   if (nir_lower_all_phis_to_scalar(nir)) {
       nir_copy_prop(nir);
       nir_opt_dce(nir);
    }


@@ -181,7 +181,7 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively)
       NIR_LOOP_PASS(_, skip, shader, nir_lower_vars_to_ssa);
 
       NIR_LOOP_PASS(_, skip, shader, nir_lower_alu_width, vectorize_vec2_16bit, NULL);
-      NIR_LOOP_PASS(_, skip, shader, nir_lower_phis_to_scalar, true);
+      NIR_LOOP_PASS(_, skip, shader, nir_lower_all_phis_to_scalar);
 
       NIR_LOOP_PASS(progress, skip, shader, nir_copy_prop);
       NIR_LOOP_PASS(progress, skip, shader, nir_opt_remove_phis);


@@ -55,7 +55,7 @@ optimize(nir_shader *nir)
       NIR_PASS(progress, nir, nir_copy_prop);
       NIR_PASS(progress, nir, nir_opt_remove_phis);
-      NIR_PASS(progress, nir, nir_lower_phis_to_scalar, true);
+      NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
       NIR_PASS(progress, nir, nir_opt_dce);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
       NIR_PASS(progress, nir, nir_opt_cse);


@@ -3205,7 +3205,7 @@ agx_optimize_nir(nir_shader *nir, bool soft_fault, uint16_t *preamble_size)
       NIR_PASS(_, nir, nir_opt_sink, move_all);
       NIR_PASS(_, nir, nir_opt_move, move_all);
-      NIR_PASS(_, nir, nir_lower_phis_to_scalar, true);
+      NIR_PASS(_, nir, nir_lower_all_phis_to_scalar);
    }
 
    /*
@@ -3782,7 +3782,7 @@ agx_preprocess_nir(nir_shader *nir)
    NIR_PASS(_, nir, nir_shader_intrinsics_pass, agx_lower_front_face,
             nir_metadata_control_flow, NULL);
    NIR_PASS(_, nir, agx_nir_lower_subgroups);
-   NIR_PASS(_, nir, nir_lower_phis_to_scalar, true);
+   NIR_PASS(_, nir, nir_lower_all_phis_to_scalar);
    NIR_PASS(_, nir, nir_shader_alu_pass, agx_nir_lower_fdiv,
             nir_metadata_control_flow, NULL);


@@ -2142,7 +2142,7 @@ v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s)
                          NULL);
                 NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
-                NIR_PASS(progress, s, nir_lower_phis_to_scalar, false);
+                NIR_PASS(progress, s, nir_lower_phis_to_scalar, NULL, NULL);
                 NIR_PASS(progress, s, nir_copy_prop);
                 NIR_PASS(progress, s, nir_opt_remove_phis);
                 NIR_PASS(progress, s, nir_opt_dce);


@@ -69,7 +69,7 @@ gl_nir_opts(nir_shader *nir)
    if (nir->options->lower_to_scalar) {
       NIR_PASS(_, nir, nir_lower_alu_to_scalar,
               nir->options->lower_to_scalar_filter, NULL);
-      NIR_PASS(_, nir, nir_lower_phis_to_scalar, false);
+      NIR_PASS(_, nir, nir_lower_phis_to_scalar, NULL, NULL);
    }
 
    NIR_PASS(_, nir, nir_lower_alu);


@@ -5303,7 +5303,8 @@ bool nir_lower_alu_conversion_to_intrinsic(nir_shader *shader);
 bool nir_lower_int_to_float(nir_shader *shader);
 bool nir_lower_load_const_to_scalar(nir_shader *shader);
 bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
-bool nir_lower_phis_to_scalar(nir_shader *shader, bool lower_all);
+bool nir_lower_phis_to_scalar(nir_shader *shader, nir_vectorize_cb cb, const void *data);
+bool nir_lower_all_phis_to_scalar(nir_shader *shader);
 void nir_lower_io_array_vars_to_elements(nir_shader *producer, nir_shader *consumer);
 bool nir_lower_io_array_vars_to_elements_no_indirects(nir_shader *shader,
                                                       bool outputs_only);


@@ -33,7 +33,8 @@ struct lower_phis_to_scalar_state {
    nir_shader *shader;
    nir_builder builder;
-   bool lower_all;
+   nir_vectorize_cb cb;
+   const void *data;
 };
 
 static bool
@@ -143,15 +144,10 @@ is_phi_src_scalarizable(nir_phi_src *src)
  * given vector component; this move can almost certainly be coalesced
  * away.
  */
-static bool
-should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
+static uint8_t
+should_lower_phi(const nir_instr *instr, const void *data)
 {
-   /* Already scalar */
-   if (phi->def.num_components == 1)
-      return false;
-
-   if (state->lower_all)
-      return true;
+   nir_phi_instr *phi = nir_instr_as_phi(instr);
 
    nir_foreach_phi_src(src, phi) {
       /* This loop ignores srcs that are not scalarizable because its likely
@@ -160,10 +156,10 @@ should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
        * Deus Ex: MD.
        */
       if (is_phi_src_scalarizable(src))
-         return true;
+         return 1;
    }
 
-   return false;
+   return 0;
 }
 
 static bool
@@ -177,45 +173,56 @@ lower_phis_to_scalar_block(nir_block *block,
     * we're modifying the linked list of instructions.
     */
    nir_foreach_phi_safe(phi, block) {
-      if (!should_lower_phi(phi, state))
+      /* Already scalar */
+      if (phi->def.num_components == 1)
          continue;
 
+      unsigned target_width = 0;
+      unsigned num_components = phi->def.num_components;
+      target_width = state->cb(&phi->instr, state->data);
+      if (target_width == 0 || num_components <= target_width)
+         continue;
+
       /* Create a vecN operation to combine the results.  Most of these
       * will be redundant, but copy propagation should clean them up for
       * us.  No need to add the complexity here.
       */
-      nir_def *vec_srcs[NIR_MAX_VEC_COMPONENTS];
+      nir_scalar vec_srcs[NIR_MAX_VEC_COMPONENTS];
 
-      for (unsigned i = 0; i < phi->def.num_components; i++) {
+      for (unsigned chan = 0; chan < num_components; chan += target_width) {
+         unsigned components = MIN2(target_width, num_components - chan);
+
         nir_phi_instr *new_phi = nir_phi_instr_create(state->shader);
-         nir_def_init(&new_phi->instr, &new_phi->def, 1,
+         nir_def_init(&new_phi->instr, &new_phi->def, components,
                      phi->def.bit_size);
-         vec_srcs[i] = &new_phi->def;
 
         nir_foreach_phi_src(src, phi) {
            nir_def *def;
            state->builder.cursor = nir_after_block_before_jump(src->pred);
 
           if (nir_src_is_undef(src->src)) {
-               /* Just create a 1-component undef instead of moving out of the
+               /* Just create an undef instead of moving out of the
                * original one. This makes it easier for other passes to
               * detect undefs without having to chase moves.
                */
-               def = nir_undef(&state->builder, 1, phi->def.bit_size);
+               def = nir_undef(&state->builder, components, phi->def.bit_size);
            } else {
-               /* We need to insert a mov to grab the i'th component of src */
-               def = nir_channel(&state->builder, src->src.ssa, i);
+               /* We need to insert a mov to grab the correct components of src. */
+               def = nir_channels(&state->builder, src->src.ssa,
+                                  nir_component_mask(components) << chan);
            }
 
           nir_phi_instr_add_src(new_phi, src->pred, def);
        }
 
        nir_instr_insert_before(&phi->instr, &new_phi->instr);
+
+         for (unsigned i = 0; i < components; i++)
+            vec_srcs[chan + i] = nir_get_scalar(&new_phi->def, i);
      }
 
      state->builder.cursor = nir_after_phis(block);
-      nir_def *vec = nir_vec(&state->builder, vec_srcs, phi->def.num_components);
+      nir_def *vec = nir_vec_scalars(&state->builder, vec_srcs, phi->def.num_components);
 
      nir_def_replace(&phi->def, vec);
@@ -235,14 +242,20 @@ lower_phis_to_scalar_block(nir_block *block,
 }
 
 static bool
-lower_phis_to_scalar_impl(nir_function_impl *impl, bool lower_all)
+lower_phis_to_scalar_impl(nir_function_impl *impl, nir_vectorize_cb cb, const void *data)
 {
    struct lower_phis_to_scalar_state state;
    bool progress = false;
 
    state.shader = impl->function->shader;
    state.builder = nir_builder_create(impl);
-   state.lower_all = lower_all;
+   if (cb) {
+      state.cb = cb;
+      state.data = data;
+   } else {
+      state.cb = should_lower_phi;
+      state.data = NULL;
+   }
 
    nir_foreach_block(block, impl) {
       progress = lower_phis_to_scalar_block(block, &state) || progress;
@@ -261,13 +274,25 @@ lower_phis_to_scalar_impl(nir_function_impl *impl, bool lower_all)
  * don't bother lowering because that would generate hard-to-coalesce movs.
  */
 bool
-nir_lower_phis_to_scalar(nir_shader *shader, bool lower_all)
+nir_lower_phis_to_scalar(nir_shader *shader, nir_vectorize_cb cb, const void *data)
 {
    bool progress = false;
 
    nir_foreach_function_impl(impl, shader) {
-      progress = lower_phis_to_scalar_impl(impl, lower_all) || progress;
+      progress = lower_phis_to_scalar_impl(impl, cb, data) || progress;
    }
 
    return progress;
 }
+
+static uint8_t
+lower_all_phis(const nir_instr *phi, const void *_)
+{
+   return 1;
+}
+
+bool
+nir_lower_all_phis_to_scalar(nir_shader *shader)
+{
+   return nir_lower_phis_to_scalar(shader, lower_all_phis, NULL);
+}


@@ -92,7 +92,7 @@ optimize(nir_shader *nir)
       NIR_PASS(progress, nir, nir_copy_prop);
       NIR_PASS(progress, nir, nir_opt_remove_phis);
-      NIR_PASS(progress, nir, nir_lower_phis_to_scalar, true);
+      NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
       NIR_PASS(progress, nir, nir_opt_dce);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
       NIR_PASS(progress, nir, nir_opt_cse);


@@ -324,7 +324,7 @@ ir3_optimize_loop(struct ir3_compiler *compiler,
       OPT(s, nir_lower_vars_to_ssa);
       progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
-      progress |= OPT(s, nir_lower_phis_to_scalar, false);
+      progress |= OPT(s, nir_lower_phis_to_scalar, NULL, NULL);
       progress |= OPT(s, nir_copy_prop);
       progress |= OPT(s, nir_opt_deref);


@@ -6126,7 +6126,7 @@ lp_build_nir_soa_prepasses(struct nir_shader *nir)
    NIR_PASS(_, nir, nir_lower_load_const_to_scalar);
    NIR_PASS(_, nir, nir_convert_to_lcssa, false, false);
-   NIR_PASS(_, nir, nir_lower_phis_to_scalar, true);
+   NIR_PASS(_, nir, nir_lower_all_phis_to_scalar);
 
    bool progress;
    do {


@@ -2400,7 +2400,7 @@ ttn_optimize_nir(nir_shader *nir)
      if (nir->options->lower_to_scalar) {
         NIR_PASS(progress, nir, nir_lower_alu_to_scalar,
                  nir->options->lower_to_scalar_filter, NULL);
-         NIR_PASS(progress, nir, nir_lower_phis_to_scalar, false);
+         NIR_PASS(progress, nir, nir_lower_phis_to_scalar, NULL, NULL);
      }
 
      NIR_PASS(progress, nir, nir_lower_alu);


@@ -128,7 +128,7 @@ lima_program_optimize_vs_nir(struct nir_shader *s)
    NIR_PASS_V(s, nir_lower_vars_to_ssa);
    NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
-   NIR_PASS(progress, s, nir_lower_phis_to_scalar, false);
+   NIR_PASS(progress, s, nir_lower_phis_to_scalar, NULL, NULL);
    NIR_PASS(progress, s, nir_copy_prop);
    NIR_PASS(progress, s, nir_opt_remove_phis);
    NIR_PASS(progress, s, nir_opt_dce);


@@ -3443,7 +3443,7 @@ Converter::run()
    NIR_PASS(_, nir, nir_lower_load_const_to_scalar);
    NIR_PASS(_, nir, nir_lower_alu_to_scalar, NULL, NULL);
-   NIR_PASS(_, nir, nir_lower_phis_to_scalar, false);
+   NIR_PASS(_, nir, nir_lower_phis_to_scalar, NULL, NULL);
 
    NIR_PASS(_, nir, nir_lower_frexp);


@@ -861,7 +861,7 @@ r600_finalize_nir_common(nir_shader *nir, enum amd_gfx_level gfx_level)
    NIR_PASS(_, nir, nir_lower_idiv, &idiv_options);
    NIR_PASS(_, nir, r600_nir_lower_trigen, gfx_level);
-   NIR_PASS(_, nir, nir_lower_phis_to_scalar, false);
+   NIR_PASS(_, nir, nir_lower_phis_to_scalar, NULL, NULL);
    NIR_PASS(_, nir, nir_lower_undef_to_zero);
 
    struct nir_lower_tex_options lower_tex_options = {0};
@@ -957,11 +957,11 @@ r600_lower_and_optimize_nir(nir_shader *sh,
    NIR_PASS(_, sh, nir_io_add_const_offset_to_base, io_modes);
    NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
-   NIR_PASS(_, sh, nir_lower_phis_to_scalar, false);
+   NIR_PASS(_, sh, nir_lower_phis_to_scalar, NULL, NULL);
    if (lower_64bit)
       NIR_PASS(_, sh, r600::r600_nir_split_64bit_io);
    NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
-   NIR_PASS(_, sh, nir_lower_phis_to_scalar, false);
+   NIR_PASS(_, sh, nir_lower_phis_to_scalar, NULL, NULL);
    NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
    NIR_PASS(_, sh, nir_copy_prop);
    NIR_PASS(_, sh, nir_opt_dce);
@@ -989,7 +989,7 @@ r600_lower_and_optimize_nir(nir_shader *sh,
    }
 
    NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
-   NIR_PASS(_, sh, nir_lower_phis_to_scalar, false);
+   NIR_PASS(_, sh, nir_lower_phis_to_scalar, NULL, NULL);
    NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
    NIR_PASS(_, sh, r600_nir_lower_int_tg4);
    NIR_PASS(_, sh, r600::r600_nir_lower_tex_to_backend, gfx_level);


@@ -53,7 +53,7 @@ void si_nir_opts(struct si_screen *sscreen, struct nir_shader *nir, bool has_arr
       NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
       NIR_PASS(progress, nir, nir_lower_alu_to_scalar, nir->options->lower_to_scalar_filter, NULL);
-      NIR_PASS(progress, nir, nir_lower_phis_to_scalar, false);
+      NIR_PASS(progress, nir, nir_lower_phis_to_scalar, NULL, NULL);
 
       if (has_array_temps) {
          NIR_PASS(progress, nir, nir_split_array_vars, nir_var_function_temp);
@@ -77,7 +77,7 @@ void si_nir_opts(struct si_screen *sscreen, struct nir_shader *nir, bool has_arr
          NIR_PASS_V(nir, nir_lower_alu_to_scalar, nir->options->lower_to_scalar_filter, NULL);
       }
       if (lower_phis_to_scalar)
-         NIR_PASS_V(nir, nir_lower_phis_to_scalar, false);
+         NIR_PASS_V(nir, nir_lower_phis_to_scalar, NULL, NULL);
       progress |= lower_alu_to_scalar | lower_phis_to_scalar;
 
       NIR_PASS(progress, nir, nir_opt_cse);


@@ -1496,7 +1496,7 @@ vc4_optimize_nir(struct nir_shader *s)
                 NIR_PASS(_, s, nir_lower_vars_to_ssa);
                 NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
-                NIR_PASS(progress, s, nir_lower_phis_to_scalar, false);
+                NIR_PASS(progress, s, nir_lower_phis_to_scalar, NULL, NULL);
                 NIR_PASS(progress, s, nir_copy_prop);
                 NIR_PASS(progress, s, nir_opt_remove_phis);
                 NIR_PASS(progress, s, nir_opt_dce);


@@ -1568,7 +1568,7 @@ optimize_nir(struct nir_shader *s, struct zink_shader *zs, bool can_shrink)
       }
       NIR_PASS(progress, s, nir_opt_dce);
       NIR_PASS(progress, s, nir_opt_dead_cf);
-      NIR_PASS(progress, s, nir_lower_phis_to_scalar, false);
+      NIR_PASS(progress, s, nir_lower_phis_to_scalar, NULL, NULL);
       NIR_PASS(progress, s, nir_opt_cse);
 
       nir_opt_peephole_select_options peephole_select_options = {
@@ -3284,7 +3284,7 @@ lower_64bit_vars(nir_shader *shader, bool doubles_only)
    ralloc_free(derefs);
    if (progress) {
       nir_lower_alu_to_scalar(shader, filter_64_bit_instr, NULL);
-      nir_lower_phis_to_scalar(shader, false);
+      nir_lower_phis_to_scalar(shader, NULL, NULL);
       optimize_nir(shader, NULL, true);
    }
    return progress;


@@ -590,7 +590,7 @@ fn opt_nir(nir: &mut NirShader, dev: &Device, has_explicit_types: bool) {
             nir_options.lower_to_scalar_filter,
             ptr::null(),
         );
-        nir_pass!(nir, nir_lower_phis_to_scalar, false);
+        nir_pass!(nir, nir_lower_phis_to_scalar, None, ptr::null());
     }
 
     progress |= nir_pass!(nir, nir_opt_deref);


@@ -1007,7 +1007,7 @@ brw_nir_optimize(nir_shader *nir,
       LOOP_OPT(nir_copy_prop);
 
-      LOOP_OPT(nir_lower_phis_to_scalar, false);
+      LOOP_OPT(nir_lower_phis_to_scalar, NULL, NULL);
 
       LOOP_OPT(nir_copy_prop);
       LOOP_OPT(nir_opt_dce);


@@ -27,7 +27,7 @@ optimize(nir_shader *nir)
       NIR_PASS(progress, nir, nir_copy_prop);
       NIR_PASS(progress, nir, nir_opt_remove_phis);
-      NIR_PASS(progress, nir, nir_lower_phis_to_scalar, true);
+      NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
       NIR_PASS(progress, nir, nir_opt_dce);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
       NIR_PASS(progress, nir, nir_opt_cse);


@@ -715,7 +715,7 @@ elk_nir_optimize(nir_shader *nir, bool is_scalar,
       OPT(nir_copy_prop);
 
       if (is_scalar) {
-         OPT(nir_lower_phis_to_scalar, false);
+         OPT(nir_lower_phis_to_scalar, NULL, NULL);
       }
 
       OPT(nir_copy_prop);


@@ -27,7 +27,7 @@ optimize(nir_shader *nir)
       NIR_PASS(progress, nir, nir_copy_prop);
       NIR_PASS(progress, nir, nir_opt_remove_phis);
-      NIR_PASS(progress, nir, nir_lower_phis_to_scalar, true);
+      NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
       NIR_PASS(progress, nir, nir_opt_dce);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
       NIR_PASS(progress, nir, nir_opt_cse);


@@ -275,7 +275,7 @@ st_glsl_to_nir_post_opts(struct st_context *st, struct gl_program *prog,
     * vectorize them afterwards again */
    if (!nir->options->lower_to_scalar) {
       NIR_PASS(revectorize, nir, nir_lower_alu_to_scalar, filter_64_bit_instr, nullptr);
-      NIR_PASS(revectorize, nir, nir_lower_phis_to_scalar, false);
+      NIR_PASS(revectorize, nir, nir_lower_phis_to_scalar, NULL, NULL);
    }
 
    /* doubles lowering requires frexp to be lowered first if it will be,
    * since the pass generates other 64-bit ops. Most backends lower


@@ -6349,7 +6349,7 @@ optimize_nir(struct nir_shader *s, const struct nir_to_dxil_options *opts)
       NIR_PASS(progress, s, nir_opt_deref);
       NIR_PASS(progress, s, dxil_nir_lower_upcast_phis, opts->lower_int16 ? 32 : 16);
       NIR_PASS(progress, s, nir_lower_64bit_phis);
-      NIR_PASS(progress, s, nir_lower_phis_to_scalar, true);
+      NIR_PASS(progress, s, nir_lower_all_phis_to_scalar);
       NIR_PASS(progress, s, nir_opt_loop_unroll);
       NIR_PASS(progress, s, nir_lower_pack);
       NIR_PASS(progress, s, dxil_nir_remove_oob_array_accesses);


@@ -130,7 +130,7 @@ optimize_nir(nir_shader *nir, const struct nak_compiler *nak, bool allow_copies)
       OPT(nir, nir_lower_alu_width, vectorize_filter_cb, NULL);
       OPT(nir, nir_opt_vectorize, vectorize_filter_cb, NULL);
-      OPT(nir, nir_lower_phis_to_scalar, false);
+      OPT(nir, nir_lower_phis_to_scalar, NULL, NULL);
       OPT(nir, nir_lower_frexp);
       OPT(nir, nir_copy_prop);
       OPT(nir, nir_opt_dce);


@@ -66,7 +66,7 @@ optimize(nir_shader *nir)
       NIR_PASS(progress, nir, nir_copy_prop);
       NIR_PASS(progress, nir, nir_opt_remove_phis);
-      NIR_PASS(progress, nir, nir_lower_phis_to_scalar, true);
+      NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
       NIR_PASS(progress, nir, nir_opt_dce);
       NIR_PASS(progress, nir, nir_opt_dead_cf);
       NIR_PASS(progress, nir, nir_opt_cse);


@@ -5985,7 +5985,7 @@ bifrost_preprocess_nir(nir_shader *nir, unsigned gpu_id)
    NIR_PASS(_, nir, nir_lower_alu_to_scalar, bi_scalarize_filter, NULL);
    NIR_PASS(_, nir, nir_lower_load_const_to_scalar);
-   NIR_PASS(_, nir, nir_lower_phis_to_scalar, true);
+   NIR_PASS(_, nir, nir_lower_all_phis_to_scalar);
    NIR_PASS(_, nir, nir_lower_flrp, 16 | 32 | 64, false /* always_precise */);
    NIR_PASS(_, nir, nir_lower_var_copies);
    NIR_PASS(_, nir, nir_lower_alu);