nir: rename nir_copy_prop -> nir_opt_copy_prop

Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@intel.com>
Reviewed-by: Daniel Schürmann <daniel@schuermann.dev>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38411>
Author: Marek Olšák, 2025-11-12 15:40:23 -05:00 (committed by Marge Bot)
parent 296839f489
commit e372365cf4
70 changed files with 159 additions and 159 deletions
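
For illustration only (this summarizes the mechanical change applied across the 70 files; it is not a quote from any single one of them), a typical call site switches from the old name to the new one:

/* before this commit */
progress |= nir_copy_prop(shader);

/* after this commit */
progress |= nir_opt_copy_prop(shader);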


@ -959,7 +959,7 @@ prepare_shader_for_culling(nir_shader *shader, nir_function_impl *impl,
do {
/* These can't use NIR_PASS because NIR_DEBUG=serialize,clone invalidates pointers. */
progress = nir_opt_undef(shader);
progress |= nir_copy_prop(shader);
progress |= nir_opt_copy_prop(shader);
progress |= nir_opt_dce(shader);
progress |= nir_opt_dead_cf(shader);
} while (progress);
@ -1780,7 +1780,7 @@ ac_nir_lower_ngg_nogs(nir_shader *shader, const ac_nir_lower_ngg_options *option
do {
/* These can't use NIR_PASS because NIR_DEBUG=serialize,clone invalidates pointers. */
progress = nir_opt_undef(shader);
progress |= nir_copy_prop(shader);
progress |= nir_opt_copy_prop(shader);
progress |= nir_opt_dce(shader);
progress |= nir_opt_dead_cf(shader);
} while (progress);
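
As the comment in these loops notes, NIR_DEBUG=serialize,clone invalidates pointers held by the surrounding code when NIR_PASS is used, so the passes are called directly here. A minimal sketch of the two calling styles, mirroring the loop above (illustrative only):

bool progress;
do {
   progress = false;

   /* Direct calls: each pass returns true if it made progress. */
   progress |= nir_opt_copy_prop(shader);
   progress |= nir_opt_dce(shader);

   /* Equivalent macro form used elsewhere in the tree; with
    * NIR_DEBUG=serialize,clone it invalidates pointers the caller still
    * holds, which is why this code avoids it:
    * NIR_PASS(progress, shader, nir_opt_copy_prop); */
} while (progress);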


@ -234,7 +234,7 @@ setup_nir(isel_context* ctx, nir_shader* nir)
{
nir_convert_to_lcssa(nir, true, false);
if (nir_lower_phis_to_scalar(nir, ac_nir_lower_phis_to_scalar_cb, NULL)) {
nir_copy_prop(nir);
nir_opt_copy_prop(nir);
nir_opt_dce(nir);
}


@ -29,7 +29,7 @@ radv_nir_lower_io_vars_to_scalar(nir_shader *nir, nir_variable_mode mask)
NIR_PASS(progress, nir, nir_lower_io_vars_to_scalar, mask);
if (progress) {
/* Optimize the new vector code and then remove dead vars */
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_shrink_vectors, true);
if (mask & nir_var_shader_out) {


@ -379,7 +379,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_graphics_stat
progress = false;
NIR_PASS(progress, stage->nir, nir_opt_load_store_vectorize, &vectorize_opts);
if (progress) {
NIR_PASS(_, stage->nir, nir_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_shrink_stores, !instance->drirc.debug.disable_shrink_image_store);
constant_fold_for_push_const = true;
@ -569,7 +569,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_graphics_stat
if (!stage->key.optimisations_disabled) {
NIR_PASS(_, stage->nir, nir_opt_dce);
NIR_PASS(_, stage->nir, nir_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_constant_folding);
NIR_PASS(_, stage->nir, nir_opt_cse);
NIR_PASS(_, stage->nir, nir_opt_shrink_vectors, true);
@ -640,7 +640,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_graphics_stat
* nir_opt_vectorize from vectorzing the alu uses of them.
*/
if (run_copy_prop) {
NIR_PASS(_, stage->nir, nir_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_dce);
}
@ -660,7 +660,7 @@ radv_postprocess_nir(struct radv_device *device, const struct radv_graphics_stat
NIR_PASS(_, stage->nir, ac_nir_opt_pack_half, gfx_level);
NIR_PASS(_, stage->nir, nir_lower_load_const_to_scalar);
NIR_PASS(_, stage->nir, nir_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_copy_prop);
NIR_PASS(_, stage->nir, nir_opt_dce);
if (!stage->key.optimisations_disabled) {


@ -1724,7 +1724,7 @@ radv_graphics_shaders_link_varyings(struct radv_shader_stage *stages, enum amd_g
NIR_PASS(_, consumer, nir_lower_io_to_scalar, nir_var_shader_in, NULL, NULL);
/* Eliminate useless vec->mov copies resulting from scalarization. */
NIR_PASS(_, producer, nir_copy_prop);
NIR_PASS(_, producer, nir_opt_copy_prop);
NIR_PASS(_, producer, nir_opt_constant_folding);
}


@ -174,7 +174,7 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively)
NIR_LOOP_PASS(progress, skip, shader, nir_shrink_vec_array_vars, nir_var_function_temp | nir_var_mem_shared);
NIR_LOOP_PASS(_, skip, shader, nir_lower_phis_to_scalar, ac_nir_lower_phis_to_scalar_cb, NULL);
NIR_LOOP_PASS(progress, skip, shader, nir_copy_prop);
NIR_LOOP_PASS(progress, skip, shader, nir_opt_copy_prop);
NIR_LOOP_PASS(progress, skip, shader, nir_opt_remove_phis);
NIR_LOOP_PASS(progress, skip, shader, nir_opt_dce);
NIR_LOOP_PASS(progress, skip, shader, nir_opt_dead_cf);
@ -182,7 +182,7 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively)
NIR_LOOP_PASS_NOT_IDEMPOTENT(opt_loop_progress, skip, shader, nir_opt_loop);
if (opt_loop_progress) {
progress = true;
NIR_LOOP_PASS(progress, skip, shader, nir_copy_prop);
NIR_LOOP_PASS(progress, skip, shader, nir_opt_copy_prop);
NIR_LOOP_PASS(progress, skip, shader, nir_opt_remove_phis);
NIR_LOOP_PASS(progress, skip, shader, nir_opt_dce);
}
@ -227,7 +227,7 @@ radv_optimize_nir_algebraic_early(nir_shader *nir)
bool more_algebraic = true;
while (more_algebraic) {
more_algebraic = false;
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_opt_cse);
@ -263,7 +263,7 @@ radv_optimize_nir_algebraic_late(nir_shader *nir)
more_late_algebraic = false;
NIR_LOOP_PASS_NOT_IDEMPOTENT(more_late_algebraic, skip, nir, nir_opt_algebraic_late);
NIR_LOOP_PASS(_, skip, nir, nir_opt_constant_folding);
NIR_LOOP_PASS(_, skip, nir, nir_copy_prop);
NIR_LOOP_PASS(_, skip, nir, nir_opt_copy_prop);
NIR_LOOP_PASS(_, skip, nir, nir_opt_dce);
NIR_LOOP_PASS(_, skip, nir, nir_opt_cse);
}
@ -532,7 +532,7 @@ radv_shader_spirv_to_nir(struct radv_device *device, const struct radv_shader_st
NIR_PASS(progress, nir, nir_inline_functions);
if (progress) {
NIR_PASS(_, nir, nir_opt_copy_prop_vars);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
}
NIR_PASS(_, nir, nir_opt_deref);


@ -53,7 +53,7 @@ optimize(nir_shader *nir)
NIR_PASS(progress, nir, nir_lower_var_copies);
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
NIR_PASS(progress, nir, nir_opt_dce);
@ -114,7 +114,7 @@ compile(void *memctx, const uint32_t *spirv, size_t spirv_size)
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_exported(nir);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_deref);
/* We can't deal with constant data, get rid of it */


@ -2890,7 +2890,7 @@ agx_optimize_loop_nir(nir_shader *nir)
do {
progress = false;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_dead_cf);
@ -3118,7 +3118,7 @@ agx_optimize_nir(nir_shader *nir, bool soft_fault, uint16_t *preamble_size,
/* Before optimizing bounds checks, we need to clean up and index defs so
* optimize_bounds does the right thing.
*/
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
nir_foreach_function_impl(impl, nir) {
@ -3132,7 +3132,7 @@ agx_optimize_nir(nir_shader *nir, bool soft_fault, uint16_t *preamble_size,
NIR_PASS(_, nir, agx_nir_fuse_selects);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_opt_combine_barriers, NULL, NULL);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
NIR_PASS(_, nir, nir_lower_alu_to_scalar, NULL, NULL);
@ -3152,7 +3152,7 @@ agx_optimize_nir(nir_shader *nir, bool soft_fault, uint16_t *preamble_size,
* can't deal with dead phis.
*/
do {
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
progress = false;
} while (progress);


@ -143,7 +143,7 @@ hk_preprocess_nir_internal(struct vk_physical_device *vk_pdev, nir_shader *nir)
do {
progress = false;
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_opt_loop);
@ -867,7 +867,7 @@ hk_lower_nir(struct hk_device *dev, nir_shader *nir,
progress = false;
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
} while (progress);
@ -1505,7 +1505,7 @@ nir_opts(nir_shader *nir)
progress = false;
NIR_PASS(progress, nir, nir_opt_loop);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);


@ -2155,7 +2155,7 @@ v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s)
NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_lower_phis_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
NIR_PASS(progress, s, nir_opt_dce);
NIR_PASS(progress, s, nir_opt_dead_cf);
@ -2183,7 +2183,7 @@ v3d_optimize_nir(struct v3d_compile *c, struct nir_shader *s)
if (nir_opt_loop(s)) {
progress = true;
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_dce);
}


@ -1842,7 +1842,7 @@ v3d_attempt_compile(struct v3d_compile *c)
* this. We also want to run the lowering before v3d_optimize to
* clean-up redundant get_buffer_size calls produced in the pass.
*/
NIR_PASS(_, c->s, nir_copy_prop);
NIR_PASS(_, c->s, nir_opt_copy_prop);
NIR_PASS(_, c->s, nir_opt_constant_folding);
NIR_PASS(_, c->s, nir_lower_robust_access,
@ -1890,7 +1890,7 @@ v3d_attempt_compile(struct v3d_compile *c)
more_late_algebraic = false;
NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
NIR_PASS(_, c->s, nir_opt_constant_folding);
NIR_PASS(_, c->s, nir_copy_prop);
NIR_PASS(_, c->s, nir_opt_copy_prop);
NIR_PASS(_, c->s, nir_opt_dce);
NIR_PASS(_, c->s, nir_opt_cse);
}


@ -404,7 +404,7 @@ nir_load_libclc_shader(unsigned ptr_bit_size,
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
NIR_PASS(progress, nir, nir_lower_var_copies);
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_if, false);


@ -76,7 +76,7 @@ gl_nir_opts(nir_shader *nir)
NIR_PASS(_, nir, nir_lower_alu);
NIR_PASS(_, nir, nir_lower_pack);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
@ -84,7 +84,7 @@ gl_nir_opts(nir_shader *nir)
NIR_PASS(opt_loop_progress, nir, nir_opt_loop);
if (opt_loop_progress) {
progress = true;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
}
NIR_PASS(progress, nir, nir_opt_if, 0);


@ -2927,7 +2927,7 @@ glsl_float64_funcs_to_nir(struct gl_context *ctx,
*/
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
NIR_PASS(_, nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
NIR_PASS(_, nir, nir_opt_gcm, true);


@ -6294,8 +6294,8 @@ bool nir_minimize_call_live_states(nir_shader *shader);
bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);
bool nir_copy_prop_impl(nir_function_impl *impl);
bool nir_copy_prop(nir_shader *shader);
bool nir_opt_copy_prop_impl(nir_function_impl *impl);
bool nir_opt_copy_prop(nir_shader *shader);
bool nir_opt_copy_prop_vars(nir_shader *shader);
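
The header now declares only the new names. For out-of-tree code that still spells the pass the old way, a temporary shim along these lines could keep it compiling while it migrates; this is purely hypothetical and not part of this commit:

/* Hypothetical transition shim, not part of this commit. */
#define nir_copy_prop(shader)    nir_opt_copy_prop(shader)
#define nir_copy_prop_impl(impl) nir_opt_copy_prop_impl(impl)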


@ -122,7 +122,7 @@ nir_lower_scratch_to_var(nir_shader *nir)
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_cse);
NIR_PASS(progress, nir, nir_opt_dce);
} while (progress);


@ -167,7 +167,7 @@ copy_prop_instr(nir_instr *instr)
}
bool
nir_copy_prop_impl(nir_function_impl *impl)
nir_opt_copy_prop_impl(nir_function_impl *impl)
{
bool progress = false;
@ -181,12 +181,12 @@ nir_copy_prop_impl(nir_function_impl *impl)
}
bool
nir_copy_prop(nir_shader *shader)
nir_opt_copy_prop(nir_shader *shader)
{
bool progress = false;
nir_foreach_function_impl(impl, shader) {
if (nir_copy_prop_impl(impl))
if (nir_opt_copy_prop_impl(impl))
progress = true;
}
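
Callers that operate on a single nir_function_impl use the impl-level entry point directly; the SPIR-V front end does so further down in this commit. A minimal sketch (illustrative only; assumes "shader" has an entry point):

nir_function_impl *impl = nir_shader_get_entrypoint(shader);
if (nir_opt_copy_prop_impl(impl))
   nir_opt_dce(shader);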


@ -590,7 +590,7 @@ nir_opt_reassociate_loop(nir_shader *nir, nir_reassociate_options in_opts)
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_cse);
NIR_PASS(progress, nir, nir_opt_dce);
any_progress |= progress;


@ -231,7 +231,7 @@ TEST_F(nir_opt_mqsad_test, mqsad)
}
ASSERT_TRUE(nir_opt_mqsad(b->shader));
nir_copy_prop(b->shader);
nir_opt_copy_prop(b->shader);
nir_opt_dce(b->shader);
require_one_alu(nir_op_mqsad_4x8);


@ -152,7 +152,7 @@ nir_load_store_vectorize_test::run_vectorizer(nir_variable_mode modes,
nir_validate_shader(b->shader, NULL);
if (cse)
nir_opt_cse(b->shader);
nir_copy_prop(b->shader);
nir_opt_copy_prop(b->shader);
nir_opt_algebraic(b->shader);
nir_opt_constant_folding(b->shader);
}


@ -268,11 +268,11 @@ protected:
void optimize()
{
NIR_PASS(_, b1->shader, nir_copy_prop);
NIR_PASS(_, b1->shader, nir_opt_copy_prop);
NIR_PASS(_, b1->shader, nir_opt_dce);
NIR_PASS(_, b1->shader, nir_opt_cse);
NIR_PASS(_, b2->shader, nir_copy_prop);
NIR_PASS(_, b2->shader, nir_opt_copy_prop);
NIR_PASS(_, b2->shader, nir_opt_dce);
NIR_PASS(_, b2->shader, nir_opt_cse);
}


@ -1799,7 +1799,7 @@ TEST_F(nir_combine_stores_test, non_overlapping_stores)
nir_validate_shader(b->shader, NULL);
/* Clean up to verify from where the values in combined store are coming. */
nir_copy_prop(b->shader);
nir_opt_copy_prop(b->shader);
nir_opt_dce(b->shader);
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
@ -1838,7 +1838,7 @@ TEST_F(nir_combine_stores_test, overlapping_stores)
nir_validate_shader(b->shader, NULL);
/* Clean up to verify from where the values in combined store are coming. */
nir_copy_prop(b->shader);
nir_opt_copy_prop(b->shader);
nir_opt_dce(b->shader);
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);
@ -1904,7 +1904,7 @@ TEST_F(nir_combine_stores_test, direct_array_derefs)
nir_validate_shader(b->shader, NULL);
/* Clean up to verify from where the values in combined store are coming. */
nir_copy_prop(b->shader);
nir_opt_copy_prop(b->shader);
nir_opt_dce(b->shader);
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 1);


@ -310,7 +310,7 @@ int main(int argc, char **argv)
OPT(nir_opt_cse);
OPT(nir_opt_dead_cf);
OPT(nir_lower_vars_to_ssa);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_deref);
OPT(nir_opt_constant_folding);
OPT(nir_opt_copy_prop_vars);


@ -90,7 +90,7 @@ optimize(nir_shader *nir)
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
NIR_PASS(progress, nir, nir_opt_dce);
@ -158,7 +158,7 @@ compile(void *memctx, const uint32_t *spirv, size_t spirv_size)
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_exported(nir);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_deref);
/* We can't deal with constant data, get rid of it */
@ -248,7 +248,7 @@ compile(void *memctx, const uint32_t *spirv, size_t spirv_size)
nir_var_function_temp | nir_var_shader_temp, NULL);
/* Do a last round of clean up after the extra lowering */
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_opt_algebraic);
NIR_PASS(_, nir, nir_opt_cse);


@ -788,7 +788,7 @@ vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
vtn_handle_phi_second_pass);
if (func->nir_func->impl->structured)
nir_copy_prop_impl(impl);
nir_opt_copy_prop_impl(impl);
nir_rematerialize_derefs_in_use_blocks_impl(impl);
/*


@ -119,7 +119,7 @@ ir3_context_init(struct ir3_compiler *compiler, struct ir3_shader *shader,
if (vectorized) {
NIR_PASS(_, ctx->s, nir_opt_undef);
NIR_PASS(_, ctx->s, nir_copy_prop);
NIR_PASS(_, ctx->s, nir_opt_copy_prop);
NIR_PASS(_, ctx->s, nir_opt_dce);
/* nir_opt_vectorize could replace swizzled movs with vectorized movs in a


@ -328,7 +328,7 @@ ir3_optimize_loop(struct ir3_compiler *compiler,
progress |= OPT(s, nir_lower_alu_to_scalar, NULL, NULL);
progress |= OPT(s, nir_lower_phis_to_scalar, NULL, NULL);
progress |= OPT(s, nir_copy_prop);
progress |= OPT(s, nir_opt_copy_prop);
progress |= OPT(s, nir_opt_deref);
progress |= OPT(s, nir_opt_dce);
progress |= OPT(s, nir_opt_cse);
@ -412,7 +412,7 @@ ir3_optimize_loop(struct ir3_compiler *compiler,
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
OPT(s, nir_copy_prop);
OPT(s, nir_opt_copy_prop);
OPT(s, nir_opt_dce);
}
progress |= OPT(s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
@ -1376,7 +1376,7 @@ ir3_nir_lower_variant(struct ir3_shader_variant *so,
OPT(s, nir_opt_16bit_tex_image, &opt_16bit_options);
}
OPT(s, nir_opt_constant_folding);
OPT(s, nir_copy_prop);
OPT(s, nir_opt_copy_prop);
OPT(s, nir_opt_dce);
OPT(s, nir_opt_cse);
}


@ -185,7 +185,7 @@ tu_spirv_to_nir_library(struct tu_device *dev,
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_exported(nir);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_deref);
/* We can't deal with constant data, get rid of it */
@ -215,7 +215,7 @@ tu_spirv_to_nir_library(struct tu_device *dev,
*/
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
NIR_PASS(_, nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
NIR_PASS(_, nir, nir_opt_gcm, true);
@ -2999,7 +2999,7 @@ lower_io_to_scalar_early(nir_shader *nir, nir_variable_mode mask)
if (progress) {
/* Optimize the new vector code and then remove dead vars. */
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
if (mask & nir_var_shader_out) {
/* Optimize swizzled movs of load_const for nir_link_opt_varyings's


@ -85,7 +85,7 @@ lp_build_opt_nir(struct nir_shader *nir)
progress = false;
NIR_PASS(progress, nir, nir_opt_algebraic_late);
if (progress) {
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
}


@ -6210,7 +6210,7 @@ lp_build_nir_soa_prepasses(struct nir_shader *nir)
do {
progress = false;
NIR_PASS(progress, nir, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
} while (progress);


@ -3314,7 +3314,7 @@ ntt_optimize_nir(struct nir_shader *s, struct pipe_screen *screen,
NIR_PASS(progress, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_split_64bit_vec3_and_vec4);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
NIR_PASS(progress, s, nir_opt_remove_phis);
@ -3948,7 +3948,7 @@ const void *nir_to_tgsi_options(struct nir_shader *s,
progress = false;
NIR_PASS(progress, s, nir_opt_algebraic_late);
if (progress) {
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
NIR_PASS(_, s, nir_opt_dce);
NIR_PASS(_, s, nir_opt_cse);
}
@ -3963,7 +3963,7 @@ const void *nir_to_tgsi_options(struct nir_shader *s,
NIR_PASS(_, s, nir_lower_bool_to_float,
!options->lower_cmp && !options->lower_fabs);
/* bool_to_float generates MOVs for b2f32 that we want to clean up. */
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
NIR_PASS(_, s, nir_opt_dce);
}


@ -2410,12 +2410,12 @@ ttn_optimize_nir(nir_shader *nir)
NIR_PASS(progress, nir, nir_lower_alu);
NIR_PASS(progress, nir, nir_lower_pack);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
if (nir_opt_loop(nir)) {
progress = true;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
}
NIR_PASS(progress, nir, nir_opt_if, nir_opt_if_optimize_phi_true_false);


@ -1835,11 +1835,11 @@ agx_shader_initialize(struct agx_device *dev, struct agx_uncompiled_shader *so,
NIR_PASS(_, nir, nir_lower_load_const_to_scalar);
NIR_PASS(_, nir, agx_nir_cleanup_amul);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_cse);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_shrink_vectors, true);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(
_, nir, nir_opt_load_store_vectorize,


@ -157,7 +157,7 @@ etna_optimize_loop(nir_shader *s)
progress |= OPT(s, nir_opt_copy_prop_vars);
progress |= OPT(s, nir_opt_shrink_stores, true);
progress |= OPT(s, nir_opt_shrink_vectors, false);
progress |= OPT(s, nir_copy_prop);
progress |= OPT(s, nir_opt_copy_prop);
progress |= OPT(s, nir_opt_dce);
progress |= OPT(s, nir_opt_cse);
@ -177,7 +177,7 @@ etna_optimize_loop(nir_shader *s)
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
OPT(s, nir_copy_prop);
OPT(s, nir_opt_copy_prop);
OPT(s, nir_opt_dce);
}
progress |= OPT(s, nir_opt_loop_unroll);
@ -1318,7 +1318,7 @@ etna_compile_shader(struct etna_shader_variant *v)
NIR_PASS(_, s, nir_opt_algebraic_late);
NIR_PASS(_, s, nir_move_vec_src_uses_to_dest, false);
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
/* need copy prop after uses_to_dest, and before src mods: see
* dEQP-GLES2.functional.shaders.random.all_features.fragment.95
*/


@ -60,7 +60,7 @@ ir2_optimize_loop(nir_shader *s)
OPT_V(s, nir_lower_vars_to_ssa);
progress |= OPT(s, nir_opt_copy_prop_vars);
progress |= OPT(s, nir_copy_prop);
progress |= OPT(s, nir_opt_copy_prop);
progress |= OPT(s, nir_opt_dce);
progress |= OPT(s, nir_opt_cse);
/* progress |= OPT(s, nir_opt_gcm, true); */
@ -80,7 +80,7 @@ ir2_optimize_loop(nir_shader *s)
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
OPT(s, nir_copy_prop);
OPT(s, nir_opt_copy_prop);
OPT(s, nir_opt_dce);
}
progress |= OPT(s, nir_opt_loop_unroll);
@ -1124,7 +1124,7 @@ ir2_nir_compile(struct ir2_context *ctx, bool binning)
if (binning)
cleanup_binning(ctx);
OPT_V(ctx->nir, nir_copy_prop);
OPT_V(ctx->nir, nir_opt_copy_prop);
OPT_V(ctx->nir, nir_opt_dce);
OPT_V(ctx->nir, nir_opt_move, nir_move_comparisons);


@ -183,7 +183,7 @@ i915_optimize_nir(struct nir_shader *s)
progress = false;
NIR_PASS(progress, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_algebraic);
NIR_PASS(progress, s, nir_opt_constant_folding);
NIR_PASS(progress, s, nir_opt_remove_phis);


@ -419,7 +419,7 @@ iris_ensure_indirect_generation_shader(struct iris_batch *batch)
nir->info.shared_size = 0;
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_opt_dce);


@ -129,7 +129,7 @@ lima_program_optimize_vs_nir(struct nir_shader *s)
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_lower_phis_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
NIR_PASS(progress, s, nir_opt_dce);
NIR_PASS(progress, s, nir_opt_dead_cf);
@ -155,7 +155,7 @@ lima_program_optimize_vs_nir(struct nir_shader *s)
NIR_PASS(progress, s, lima_nir_lower_ftrunc);
NIR_PASS(_, s, nir_lower_bool_to_float, true);
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
NIR_PASS(_, s, nir_opt_dce);
NIR_PASS(_, s, lima_nir_split_loads);
NIR_PASS(_, s, nir_convert_from_ssa, true, false);
@ -245,7 +245,7 @@ lima_program_optimize_fs_nir(struct nir_shader *s,
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_lower_alu_to_scalar, lima_alu_to_scalar_filter_cb, NULL);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
NIR_PASS(progress, s, nir_opt_dce);
NIR_PASS(progress, s, nir_opt_dead_cf);
@ -280,7 +280,7 @@ lima_program_optimize_fs_nir(struct nir_shader *s,
NIR_PASS(_, s, nir_opt_algebraic_late);
NIR_PASS(_, s, lima_nir_ppir_algebraic_late);
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
NIR_PASS(_, s, nir_opt_dce);
NIR_PASS(_, s, nir_convert_from_ssa, true, false);


@ -3373,13 +3373,13 @@ Converter::runOptLoop()
bool progress;
do {
progress = false;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_loop);
NIR_PASS(progress, nir, nir_opt_cse);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_dead_cf);
NIR_PASS(progress, nir, nir_lower_64bit_phis);


@ -246,7 +246,7 @@ panfrost_nir_lower_sysvals(nir_shader *shader, unsigned arch,
do {
progress = false;
NIR_PASS(progress, shader, nir_copy_prop);
NIR_PASS(progress, shader, nir_opt_copy_prop);
NIR_PASS(progress, shader, nir_opt_constant_folding);
NIR_PASS(progress, shader, nir_opt_dce);
} while (progress);


@ -2031,7 +2031,7 @@ nir_to_rc(struct nir_shader *s, struct pipe_screen *screen,
progress = false;
NIR_PASS(progress, s, nir_opt_algebraic_late);
if (progress) {
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
NIR_PASS(_, s, nir_opt_dce);
NIR_PASS(_, s, nir_opt_cse);
}
@ -2042,12 +2042,12 @@ nir_to_rc(struct nir_shader *s, struct pipe_screen *screen,
}
NIR_PASS(_, s, nir_lower_int_to_float);
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
NIR_PASS(_, s, r300_nir_post_integer_lowering);
NIR_PASS(_, s, nir_lower_bool_to_float,
is_r500 || s->info.stage == MESA_SHADER_FRAGMENT);
/* bool_to_float generates MOVs for b2f32 that we want to clean up. */
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
/* CSE cleanup after late ftrunc lowering. */
NIR_PASS(_, s, nir_opt_cse);
/* At this point we need to clean;


@ -168,7 +168,7 @@ r300_optimize_nir(struct nir_shader *s, struct r300_screen *screen)
progress = false;
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, r300_nir_lower_flrp);
NIR_PASS(progress, s, nir_opt_algebraic);
if (s->info.stage == MESA_SHADER_VERTEX) {


@ -619,7 +619,7 @@ optimize_once(nir_shader *shader)
bool progress = false;
NIR_PASS(progress, shader, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
NIR_PASS(progress, shader, nir_lower_vars_to_ssa);
NIR_PASS(progress, shader, nir_copy_prop);
NIR_PASS(progress, shader, nir_opt_copy_prop);
NIR_PASS(progress, shader, nir_opt_dce);
NIR_PASS(progress, shader, nir_opt_algebraic);
if (shader->options->has_bitfield_select)
@ -630,7 +630,7 @@ optimize_once(nir_shader *shader)
if (nir_opt_loop(shader)) {
progress = true;
NIR_PASS(progress, shader, nir_copy_prop);
NIR_PASS(progress, shader, nir_opt_copy_prop);
NIR_PASS(progress, shader, nir_opt_dce);
}
@ -804,7 +804,7 @@ r600_lower_and_optimize_nir(nir_shader *sh,
NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
NIR_PASS(_, sh, nir_lower_phis_to_scalar, NULL, NULL);
NIR_PASS(_, sh, nir_lower_alu_to_scalar, r600_lower_to_scalar_instr_filter, NULL);
NIR_PASS(_, sh, nir_copy_prop);
NIR_PASS(_, sh, nir_opt_copy_prop);
NIR_PASS(_, sh, nir_opt_dce);
if (r600_is_last_vertex_stage(sh, *key))
@ -885,7 +885,7 @@ r600_lower_and_optimize_nir(nir_shader *sh,
NIR_PASS(late_algebraic_progress, sh, r600_sfn_lower_alu);
NIR_PASS(late_algebraic_progress, sh, nir_opt_algebraic_late);
NIR_PASS(late_algebraic_progress, sh, nir_opt_constant_folding);
NIR_PASS(late_algebraic_progress, sh, nir_copy_prop);
NIR_PASS(late_algebraic_progress, sh, nir_opt_copy_prop);
NIR_PASS(late_algebraic_progress, sh, nir_opt_dce);
NIR_PASS(late_algebraic_progress, sh, nir_opt_cse);
} while (late_algebraic_progress);


@ -180,7 +180,7 @@ si_nir_is_output_const_if_tex_is_const(nir_shader *shader, float *in, float *out
bool progress;
do {
progress = false;
NIR_PASS(progress, shader, nir_copy_prop);
NIR_PASS(progress, shader, nir_opt_copy_prop);
NIR_PASS(progress, shader, nir_opt_remove_phis);
NIR_PASS(progress, shader, nir_opt_dce);
NIR_PASS(progress, shader, nir_opt_dead_cf);


@ -65,7 +65,7 @@ void si_nir_opts(struct si_screen *sscreen, struct nir_shader *nir, bool has_arr
NIR_PASS(lower_alu_to_scalar, nir, nir_opt_loop);
/* (Constant) copy propagation is needed for txf with offsets. */
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
/* nir_opt_if_optimize_phi_true_false is disabled on LLVM14 (#6976) */
@ -144,7 +144,7 @@ void si_nir_late_opts(nir_shader *nir)
nir->options->support_indirect_outputs & BITFIELD_BIT(nir->info.stage))
NIR_PASS(_, nir, nir_io_add_const_offset_to_base, nir_var_shader_in | nir_var_shader_out);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
}


@ -1497,7 +1497,7 @@ vc4_optimize_nir(struct nir_shader *s)
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_lower_phis_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
NIR_PASS(progress, s, nir_opt_dce);
NIR_PASS(progress, s, nir_opt_dead_cf);
@ -2305,7 +2305,7 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
more_late_algebraic = false;
NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
NIR_PASS(_, c->s, nir_opt_constant_folding);
NIR_PASS(_, c->s, nir_copy_prop);
NIR_PASS(_, c->s, nir_opt_copy_prop);
NIR_PASS(_, c->s, nir_opt_dce);
NIR_PASS(_, c->s, nir_opt_cse);
}


@ -1548,7 +1548,7 @@ optimize_nir(struct nir_shader *s, struct zink_shader *zs, bool can_shrink)
NIR_PASS(_, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_lower_alu_to_scalar, filter_pack_instr, NULL);
NIR_PASS(progress, s, nir_opt_copy_prop_vars);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_remove_phis);
if (s->options->lower_int64_options) {
NIR_PASS(progress, s, nir_lower_64bit_phis);
@ -1579,7 +1579,7 @@ optimize_nir(struct nir_shader *s, struct zink_shader *zs, bool can_shrink)
progress = false;
NIR_PASS(progress, s, nir_opt_algebraic_late);
if (progress) {
NIR_PASS(_, s, nir_copy_prop);
NIR_PASS(_, s, nir_opt_copy_prop);
NIR_PASS(_, s, nir_opt_dce);
NIR_PASS(_, s, nir_opt_cse);
}


@ -209,7 +209,7 @@ optimize(nir_shader *nir)
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
nir_opt_peephole_select_options peephole_select_options = {
@ -231,7 +231,7 @@ optimize(nir_shader *nir)
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_remove_phis);
}


@ -619,7 +619,7 @@ fn opt_nir(nir: &mut NirShader, dev: &Device, has_explicit_types: bool) {
while {
let mut progress = false;
progress |= nir_pass!(nir, nir_copy_prop);
progress |= nir_pass!(nir, nir_opt_copy_prop);
progress |= nir_pass!(nir, nir_opt_copy_prop_vars);
progress |= nir_pass!(nir, nir_opt_dead_write_vars);
@ -717,7 +717,7 @@ fn compile_nir_to_args(
while {
let mut progress = false;
nir_pass!(nir, nir_split_var_copies);
progress |= nir_pass!(nir, nir_copy_prop);
progress |= nir_pass!(nir, nir_opt_copy_prop);
progress |= nir_pass!(nir, nir_opt_copy_prop_vars);
progress |= nir_pass!(nir, nir_opt_dead_write_vars);
progress |= nir_pass!(nir, nir_opt_deref);


@ -471,7 +471,7 @@ static void pco_nir_opt(pco_ctx *ctx, nir_shader *nir, bool algebraic)
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
NIR_PASS(progress, nir, nir_opt_dead_write_vars);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_dead_cf);
@ -882,7 +882,7 @@ void pco_lower_nir(pco_ctx *ctx, nir_shader *nir, pco_data *data)
? nir_var_shader_out
: nir_var_shader_in;
NIR_PASS(_, nir, nir_lower_io_to_scalar, vec_modes, NULL, NULL);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
NIR_PASS(_, nir, nir_opt_vectorize_io, vec_modes, false);
@ -1092,7 +1092,7 @@ void pco_postprocess_nir(pco_ctx *ctx, nir_shader *nir, pco_data *data)
NIR_PASS(_, nir, pco_nir_lower_algebraic_late);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_lower_load_const_to_scalar);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
} while (progress);
@ -1110,7 +1110,7 @@ void pco_postprocess_nir(pco_ctx *ctx, nir_shader *nir, pco_data *data)
NULL);
NIR_PASS(_, nir, nir_convert_from_ssa, true, false);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_move_vec_src_uses_to_dest, false);
NIR_PASS(_, nir, nir_opt_dce);


@ -125,7 +125,7 @@ static void optimize(nir_shader *nir)
NIR_PASS(progress, nir, nir_lower_var_copies);
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
NIR_PASS(progress, nir, nir_opt_dce);
@ -192,7 +192,7 @@ spv_to_nir(void *mem_ctx, uint32_t *spirv_map, unsigned spirv_len)
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_exported(nir);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_deref);
/* We can't deal with constant data, get rid of it */


@ -1067,11 +1067,11 @@ brw_nir_optimize(nir_shader *nir,
LOOP_OPT(nir_lower_alu_to_scalar, NULL, NULL);
LOOP_OPT(nir_copy_prop);
LOOP_OPT(nir_opt_copy_prop);
LOOP_OPT(nir_lower_phis_to_scalar, NULL, NULL);
LOOP_OPT(nir_copy_prop);
LOOP_OPT(nir_opt_copy_prop);
LOOP_OPT(nir_opt_dce);
LOOP_OPT(nir_opt_cse);
LOOP_OPT(nir_opt_combine_stores, nir_var_all);
@ -1126,7 +1126,7 @@ brw_nir_optimize(nir_shader *nir,
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
LOOP_OPT(nir_copy_prop);
LOOP_OPT(nir_opt_copy_prop);
LOOP_OPT(nir_opt_dce);
}
LOOP_OPT_NOT_IDEMPOTENT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
@ -1961,11 +1961,11 @@ brw_vectorize_lower_mem_access(nir_shader *nir,
OPT(nir_opt_load_store_vectorize, &options);
OPT(nir_opt_constant_folding);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
if (OPT(brw_nir_rebase_const_offset_ubo_loads)) {
OPT(nir_opt_cse);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
nir_load_store_vectorize_options ubo_options = {
.modes = nir_var_mem_ubo,
@ -1998,7 +1998,7 @@ brw_vectorize_lower_mem_access(nir_shader *nir,
progress = false;
OPT(nir_lower_pack);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
OPT(nir_opt_algebraic);
@ -2311,7 +2311,7 @@ brw_postprocess_nir_opts(nir_shader *nir, const struct brw_compiler *compiler,
OPT(intel_nir_opt_peephole_imul32x16);
if (OPT(nir_opt_comparison_pre)) {
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
@ -2339,7 +2339,7 @@ brw_postprocess_nir_opts(nir_shader *nir, const struct brw_compiler *compiler,
if (progress) {
OPT(nir_opt_constant_folding);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
@ -2352,12 +2352,12 @@ brw_postprocess_nir_opts(nir_shader *nir, const struct brw_compiler *compiler,
while (OPT(nir_opt_algebraic_distribute_src_mods)) {
OPT(nir_opt_constant_folding);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
nir_move_options move_all = nir_move_const_undef | nir_move_load_ubo |
@ -2454,7 +2454,7 @@ brw_postprocess_nir_out_of_ssa(nir_shader *nir,
}
OPT(nir_lower_bool_to_int32);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_lower_locals_to_regs, 32);


@ -707,13 +707,13 @@ elk_nir_optimize(nir_shader *nir, bool is_scalar,
OPT(nir_opt_shrink_vectors, false);
}
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
if (is_scalar) {
OPT(nir_lower_phis_to_scalar, NULL, NULL);
}
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
OPT(nir_opt_combine_stores, nir_var_all);
@ -777,7 +777,7 @@ elk_nir_optimize(nir_shader *nir, bool is_scalar,
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
}
OPT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
@ -1443,7 +1443,7 @@ elk_vectorize_lower_mem_access(nir_shader *nir,
progress = false;
OPT(nir_lower_pack);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
OPT(nir_opt_algebraic);
@ -1525,7 +1525,7 @@ elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
OPT(intel_nir_opt_peephole_imul32x16);
if (OPT(nir_opt_comparison_pre)) {
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
@ -1561,7 +1561,7 @@ elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
if (is_scalar)
OPT(nir_opt_constant_folding);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
@ -1586,12 +1586,12 @@ elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
if (is_scalar)
OPT(nir_opt_constant_folding);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_opt_cse);
}
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
nir_move_options common = nir_move_const_undef | nir_move_load_input |
@ -1623,7 +1623,7 @@ elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
}
OPT(nir_lower_bool_to_int32);
OPT(nir_copy_prop);
OPT(nir_opt_copy_prop);
OPT(nir_opt_dce);
OPT(nir_lower_locals_to_regs, 32);


@ -125,7 +125,7 @@ compile_shader(struct anv_device *device,
nir->info.shared_size = 0;
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_opt_dce);


@ -1407,7 +1407,7 @@ anv_shader_lower_nir(struct anv_device *device,
do {
progress = false;
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_opt_dce);
} while (progress);


@ -521,7 +521,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
* calculations often create and then constant-fold so that, when we
* get to anv_nir_lower_ubo_loads, we can detect constant offsets.
*/
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, anv_nir_lower_ubo_loads);


@ -1897,7 +1897,7 @@ msl_optimize_nir(struct nir_shader *nir)
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_cse);
NIR_PASS(progress, nir, nir_opt_dead_cf);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_deref);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
@ -1917,7 +1917,7 @@ msl_optimize_nir(struct nir_shader *nir)
NIR_PASS(_, nir, msl_nir_lower_algebraic_late);
NIR_PASS(_, nir, nir_convert_from_ssa, true, false);
nir_trivialize_registers(nir);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
return progress;
}


@ -715,7 +715,7 @@ nir_opts(nir_shader *nir)
progress = false;
NIR_PASS(progress, nir, nir_opt_loop);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);


@ -316,7 +316,7 @@ _mesa_spirv_to_nir(struct gl_context *ctx,
NIR_PASS(_, nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */


@ -820,7 +820,7 @@ clc_spirv_to_dxil(struct clc_libclc *lib,
do
{
progress = false;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
NIR_PASS(progress, nir, nir_opt_deref);
NIR_PASS(progress, nir, nir_opt_dce);
@ -849,7 +849,7 @@ clc_spirv_to_dxil(struct clc_libclc *lib,
do
{
progress = false;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
NIR_PASS(progress, nir, nir_opt_deref);
NIR_PASS(progress, nir, nir_opt_dce);
@ -953,7 +953,7 @@ clc_spirv_to_dxil(struct clc_libclc *lib,
do {
progress = false;
NIR_PASS(progress, nir, nir_opt_memcpy);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
NIR_PASS(progress, nir, nir_opt_deref);
NIR_PASS(progress, nir, nir_opt_dce);


@ -602,7 +602,7 @@ dxil_nir_lower_shared_to_var(nir_shader *nir)
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_cse);
NIR_PASS(progress, nir, nir_opt_dce);
} while (progress);


@ -6316,7 +6316,7 @@ optimize_nir(struct nir_shader *s, const struct nir_to_dxil_options *opts)
NIR_PASS(progress, s, nir_lower_vars_to_ssa);
NIR_PASS(progress, s, nir_lower_indirect_derefs, nir_var_function_temp, 4);
NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
NIR_PASS(progress, s, nir_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop);
NIR_PASS(progress, s, nir_opt_copy_prop_vars);
NIR_PASS(progress, s, nir_lower_bit_size, lower_bit_size_callback, (void*)opts);
NIR_PASS(progress, s, dxil_nir_lower_8bit_conv);


@ -119,7 +119,7 @@ dxil_spirv_nir_prep(nir_shader *nir)
NIR_PASS(_, nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_deref);
/* Pick off the single entrypoint that we want */
@ -1064,7 +1064,7 @@ dxil_spirv_nir_passes(nir_shader *nir,
do
{
progress = false;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop_vars);
NIR_PASS(progress, nir, nir_opt_deref);
NIR_PASS(progress, nir, nir_opt_dce);
@ -1073,7 +1073,7 @@ dxil_spirv_nir_passes(nir_shader *nir,
NIR_PASS(progress, nir, nir_opt_cse);
if (nir_opt_loop(nir)) {
progress = true;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
}
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);


@ -172,7 +172,7 @@ optimize_nir(nir_shader *nir, const struct nak_compiler *nak, bool allow_copies)
OPT(nir, nir_opt_vectorize, vectorize_filter_cb, NULL);
OPT(nir, nir_lower_phis_to_scalar, phi_vectorize_cb, NULL);
OPT(nir, nir_lower_frexp);
OPT(nir, nir_copy_prop);
OPT(nir, nir_opt_copy_prop);
OPT(nir, nir_opt_dce);
OPT(nir, nir_opt_cse);
@ -199,7 +199,7 @@ optimize_nir(nir_shader *nir, const struct nak_compiler *nak, bool allow_copies)
* if we want any hope of nir_opt_if or nir_opt_loop_unroll to make
* progress.
*/
OPT(nir, nir_copy_prop);
OPT(nir, nir_opt_copy_prop);
OPT(nir, nir_opt_dce);
}
OPT(nir, nir_opt_if, nir_opt_if_optimize_phi_true_false);
@ -1120,7 +1120,7 @@ nak_postprocess_nir(nir_shader *nir,
if (progress) {
OPT(nir, nir_opt_constant_folding);
OPT(nir, nir_copy_prop);
OPT(nir, nir_opt_copy_prop);
OPT(nir, nir_opt_dce);
OPT(nir, nir_opt_cse);
}
@ -1150,7 +1150,7 @@ nak_postprocess_nir(nir_shader *nir,
if (nak->sm >= 73) {
OPT(nir, nak_nir_mark_lcssa_invariants);
if (OPT(nir, nak_nir_lower_non_uniform_ldcx, nak)) {
OPT(nir, nir_copy_prop);
OPT(nir, nir_opt_copy_prop);
OPT(nir, nir_opt_dce);
}
}


@ -64,7 +64,7 @@ optimize(nir_shader *nir)
NIR_PASS(progress, nir, nir_lower_var_copies);
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_lower_all_phis_to_scalar);
NIR_PASS(progress, nir, nir_opt_dce);
@ -131,7 +131,7 @@ compile(void *memctx, const uint32_t *spirv, size_t spirv_size, unsigned arch)
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
nir_remove_non_exported(nir);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_deref);
/* We can't deal with constant data, get rid of it */


@ -5613,7 +5613,7 @@ bi_optimize_loop_nir(nir_shader *nir, unsigned gpu_id, bool allow_copies)
NIR_PASS(progress, nir, nir_lower_alu_width, bi_vectorize_filter, &gpu_id);
NIR_PASS(progress, nir, nir_opt_vectorize, bi_vectorize_filter, &gpu_id);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_cse);
@ -5638,7 +5638,7 @@ bi_optimize_loop_nir(nir_shader *nir, unsigned gpu_id, bool allow_copies)
* if we want any hope of nir_opt_if or nir_opt_loop_unroll to make
* progress.
*/
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
}
@ -5709,7 +5709,7 @@ bi_optimize_nir(nir_shader *nir, unsigned gpu_id, nir_variable_mode robust2_mode
late_algebraic = false;
NIR_PASS(late_algebraic, nir, nir_opt_algebraic_late);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
}
@ -5735,7 +5735,7 @@ bi_optimize_nir(nir_shader *nir, unsigned gpu_id, nir_variable_mode robust2_mode
late_algebraic = false;
NIR_PASS(late_algebraic, nir, nir_opt_algebraic_late);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);
}
@ -6849,7 +6849,7 @@ bifrost_compile_shader_nir(nir_shader *nir,
/* If shader output lower made progress, ensure to merge adjacent if that were added */
if (shader_output_pass) {
/* First we clean up and deduplicate added condition logic */
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_dce);
NIR_PASS(_, nir, nir_opt_cse);


@ -475,7 +475,7 @@ optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_dead_cf);
@ -509,7 +509,7 @@ optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
} while (progress);
NIR_PASS(progress, nir, nir_opt_algebraic_late);
@ -529,7 +529,7 @@ optimise_nir(nir_shader *nir, unsigned quirks, bool is_blend)
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_constant_folding);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
} while (progress);
/* Backend scheduler is purely local, so do some global optimizations


@ -673,7 +673,7 @@ lower_load_push_consts(nir_shader *nir, struct panvk_shader_variant *shader)
bool progress = false;
do {
progress = false;
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_copy_prop);
NIR_PASS(progress, nir, nir_opt_remove_phis);
NIR_PASS(progress, nir, nir_opt_dce);
NIR_PASS(progress, nir, nir_opt_dead_cf);


@ -1212,7 +1212,7 @@ poly_nir_lower_gs(nir_shader *gs, nir_shader **gs_count, nir_shader **gs_copy,
NIR_PASS(progress, gs, nir_lower_variable_initializers,
nir_var_shader_temp);
NIR_PASS(progress, gs, nir_lower_vars_to_ssa);
NIR_PASS(progress, gs, nir_copy_prop);
NIR_PASS(progress, gs, nir_opt_copy_prop);
NIR_PASS(progress, gs, nir_opt_constant_folding);
NIR_PASS(progress, gs, nir_opt_algebraic);
NIR_PASS(progress, gs, nir_opt_cse);
@ -1307,7 +1307,7 @@ poly_nir_lower_gs(nir_shader *gs, nir_shader **gs_count, nir_shader **gs_copy,
NIR_PASS(progress, gs, nir_lower_variable_initializers,
nir_var_shader_temp);
NIR_PASS(progress, gs, nir_lower_vars_to_ssa);
NIR_PASS(progress, gs, nir_copy_prop);
NIR_PASS(progress, gs, nir_opt_copy_prop);
NIR_PASS(progress, gs, nir_opt_constant_folding);
NIR_PASS(progress, gs, nir_opt_algebraic);
NIR_PASS(progress, gs, nir_opt_cse);


@ -163,7 +163,7 @@ vk_spirv_to_nir(struct vk_device *device,
NIR_PASS(_, nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_copy_prop);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_opt_deref);