lavapipe: use NIR_PASS(_, ...) instead of NIR_PASS_V

Reviewed-by: Emma Anholt <emma@anholt.net>
Reviewed-by: Konstantin Seurer <konstantin.seurer@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36291>
This commit is contained in:
Georg Lehmann 2025-07-22 18:06:29 +02:00 committed by Marge Bot
parent 544d0c03e2
commit 18be5603ed

View file

@@ -258,9 +258,9 @@ void
lvp_shader_optimize(nir_shader *nir)
{
optimize(nir);
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
NIR_PASS_V(nir, nir_opt_dce);
NIR_PASS(_, nir, nir_lower_var_copies);
NIR_PASS(_, nir, nir_remove_dead_variables, nir_var_function_temp, NULL);
NIR_PASS(_, nir, nir_opt_dce);
nir_sweep(nir);
}
@@ -328,13 +328,13 @@ lvp_shader_lower(struct lvp_device *pdevice, nir_shader *nir, struct lvp_pipelin
struct vk_pipeline_robustness_state *robustness)
{
if (nir->info.stage != MESA_SHADER_TESS_CTRL)
NIR_PASS_V(nir, remove_barriers, nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_MESH || nir->info.stage == MESA_SHADER_TASK);
NIR_PASS(_, nir, remove_barriers, nir->info.stage == MESA_SHADER_COMPUTE || nir->info.stage == MESA_SHADER_MESH || nir->info.stage == MESA_SHADER_TASK);
const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
.frag_coord = true,
.point_coord = true,
};
NIR_PASS_V(nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);
NIR_PASS(_, nir, nir_lower_sysvals_to_varyings, &sysvals_to_varyings);
struct nir_lower_subgroups_options subgroup_opts = {0};
subgroup_opts.lower_quad = true;
@@ -343,36 +343,36 @@ lvp_shader_lower(struct lvp_device *pdevice, nir_shader *nir, struct lvp_pipelin
subgroup_opts.ballot_bit_size = 32;
subgroup_opts.lower_inverse_ballot = true;
subgroup_opts.lower_rotate_to_shuffle = true;
NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_opts);
NIR_PASS(_, nir, nir_lower_subgroups, &subgroup_opts);
if (nir->info.stage == MESA_SHADER_FRAGMENT)
lvp_lower_input_attachments(nir, false);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_is_helper_invocation);
NIR_PASS(_, nir, nir_lower_system_values);
NIR_PASS(_, nir, nir_lower_is_helper_invocation);
const struct nir_lower_compute_system_values_options compute_system_values = {0};
NIR_PASS_V(nir, nir_lower_compute_system_values, &compute_system_values);
NIR_PASS(_, nir, nir_lower_compute_system_values, &compute_system_values);
NIR_PASS_V(nir, nir_remove_dead_variables,
nir_var_uniform | nir_var_image, NULL);
NIR_PASS(_, nir, nir_remove_dead_variables,
nir_var_uniform | nir_var_image, NULL);
optimize(nir);
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries, nir_shader_get_entrypoint(nir), true, true);
NIR_PASS(_, nir, nir_split_var_copies);
NIR_PASS(_, nir, nir_lower_global_vars_to_local);
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_push_const,
nir_address_format_32bit_offset);
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_push_const,
nir_address_format_32bit_offset);
NIR_PASS_V(nir, nir_lower_explicit_io,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_address_format_vec2_index_32bit_offset);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_mem_ubo | nir_var_mem_ssbo,
nir_address_format_vec2_index_32bit_offset);
NIR_PASS_V(nir, nir_lower_explicit_io,
nir_var_mem_global | nir_var_mem_constant,
nir_address_format_64bit_global);
NIR_PASS(_, nir, nir_lower_explicit_io,
nir_var_mem_global | nir_var_mem_constant,
nir_address_format_64bit_global);
NIR_PASS(_, nir, nir_vk_lower_ycbcr_tex, lvp_ycbcr_conversion_lookup, layout);
@@ -388,23 +388,23 @@ lvp_shader_lower(struct lvp_device *pdevice, nir_shader *nir, struct lvp_pipelin
if (nir->info.stage == MESA_SHADER_COMPUTE ||
nir->info.stage == MESA_SHADER_TASK ||
nir->info.stage == MESA_SHADER_MESH) {
NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
NIR_PASS(_, nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared, shared_var_info);
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_shared, nir_address_format_32bit_offset);
}
if (nir->info.stage == MESA_SHADER_TASK ||
nir->info.stage == MESA_SHADER_MESH) {
NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_task_payload, shared_var_info);
NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_task_payload, nir_address_format_32bit_offset);
NIR_PASS(_, nir, nir_lower_vars_to_explicit_types, nir_var_mem_task_payload, shared_var_info);
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_task_payload, nir_address_format_32bit_offset);
}
NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);
NIR_PASS(_, nir, nir_remove_dead_variables, nir_var_shader_temp, NULL);
if (nir->info.stage == MESA_SHADER_VERTEX ||
nir->info.stage == MESA_SHADER_GEOMETRY) {
NIR_PASS_V(nir, nir_lower_io_array_vars_to_elements_no_indirects, false);
NIR_PASS(_, nir, nir_lower_io_array_vars_to_elements_no_indirects, false);
} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_io_array_vars_to_elements_no_indirects, true);
NIR_PASS(_, nir, nir_lower_io_array_vars_to_elements_no_indirects, true);
}
// TODO: also optimize the tex srcs. see radeonSI for reference */
@@ -413,7 +413,7 @@ lvp_shader_lower(struct lvp_device *pdevice, nir_shader *nir, struct lvp_pipelin
.rounding_mode = nir_rounding_mode_undef,
.opt_tex_dest_types = nir_type_float | nir_type_uint | nir_type_int,
};
NIR_PASS_V(nir, nir_opt_16bit_tex_image, &opt_16bit_options);
NIR_PASS(_, nir, nir_opt_16bit_tex_image, &opt_16bit_options);
/* Lower texture OPs llvmpipe supports to reduce the amount of sample
* functions that need to be pre-compiled.