anv: Remove NIR_PASS_V usage

Reviewed-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Christian Gmeiner <cgmeiner@igalia.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>

anv: Fix for metadata failure

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35889>
This commit is contained in:
jhananit 2025-06-24 16:17:55 +00:00 committed by Marge Bot
parent d455074cc8
commit a74ac59220
10 changed files with 49 additions and 44 deletions

View file

@ -68,19 +68,19 @@ compile_shader(struct anv_device *device,
nir_shader *nir = b.shader;
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
NIR_PASS_V(nir, nir_opt_cse);
NIR_PASS_V(nir, nir_opt_gcm, true);
NIR_PASS(_, nir, nir_lower_vars_to_ssa);
NIR_PASS(_, nir, nir_opt_cse);
NIR_PASS(_, nir, nir_opt_gcm, true);
nir_opt_peephole_select_options peephole_select_options = {
.limit = 1,
};
NIR_PASS_V(nir, nir_opt_peephole_select, &peephole_select_options);
NIR_PASS(_, nir, nir_opt_peephole_select, &peephole_select_options);
NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
NIR_PASS(_, nir, nir_lower_variable_initializers, ~0);
NIR_PASS_V(nir, nir_split_var_copies);
NIR_PASS_V(nir, nir_split_per_member_structs);
NIR_PASS(_, nir, nir_split_var_copies);
NIR_PASS(_, nir, nir_split_per_member_structs);
if (stage == MESA_SHADER_COMPUTE) {
nir->info.workgroup_size[0] = 16;
@ -92,10 +92,10 @@ compile_shader(struct anv_device *device,
struct brw_nir_compiler_opts opts = {};
brw_preprocess_nir(compiler, nir, &opts);
NIR_PASS_V(nir, nir_propagate_invariant, false);
NIR_PASS(_, nir, nir_propagate_invariant, false);
if (stage == MESA_SHADER_FRAGMENT) {
NIR_PASS_V(nir, nir_lower_input_attachments,
NIR_PASS(_, nir, nir_lower_input_attachments,
&(nir_input_attachment_options) {
.use_fragcoord_sysval = true,
.use_layer_id_sysval = true,
@ -106,8 +106,8 @@ compile_shader(struct anv_device *device,
.lower_cs_local_id_to_index = true,
.lower_workgroup_id_to_index = gl_shader_stage_is_mesh(stage),
};
NIR_PASS_V(nir, nir_lower_compute_system_values, &options);
NIR_PASS_V(nir, nir_shader_intrinsics_pass, lower_base_workgroup_id,
NIR_PASS(_, nir, nir_lower_compute_system_values, &options);
NIR_PASS(_, nir, nir_shader_intrinsics_pass, lower_base_workgroup_id,
nir_metadata_control_flow, NULL);
}
@ -117,9 +117,9 @@ compile_shader(struct anv_device *device,
nir->info.shared_size = 0;
nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
NIR_PASS_V(nir, nir_copy_prop);
NIR_PASS_V(nir, nir_opt_constant_folding);
NIR_PASS_V(nir, nir_opt_dce);
NIR_PASS(_, nir, nir_copy_prop);
NIR_PASS(_, nir, nir_opt_constant_folding);
NIR_PASS(_, nir, nir_opt_dce);
union brw_any_prog_key key;
memset(&key, 0, sizeof(key));
@ -128,7 +128,7 @@ compile_shader(struct anv_device *device,
memset(&prog_data, 0, sizeof(prog_data));
if (stage == MESA_SHADER_COMPUTE) {
NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics,
NIR_PASS(_, nir, brw_nir_lower_cs_intrinsics,
device->info, &prog_data.cs);
}
@ -140,7 +140,7 @@ compile_shader(struct anv_device *device,
.callback = brw_nir_should_vectorize_mem,
.robust_modes = (nir_variable_mode)0,
};
NIR_PASS_V(nir, nir_opt_load_store_vectorize, &options);
NIR_PASS(_, nir, nir_opt_load_store_vectorize, &options);
nir->num_uniforms = uniform_size;

View file

@ -101,7 +101,7 @@ anv_nir_ubo_addr_format(const struct anv_physical_device *pdevice,
bool anv_nir_lower_ubo_loads(nir_shader *shader);
void anv_nir_apply_pipeline_layout(nir_shader *shader,
bool anv_nir_apply_pipeline_layout(nir_shader *shader,
const struct anv_physical_device *pdevice,
enum brw_robustness_flags robust_flags,
bool independent_sets,
@ -110,7 +110,7 @@ void anv_nir_apply_pipeline_layout(nir_shader *shader,
struct anv_pipeline_push_map *push_map,
void *push_map_mem_ctx);
void anv_nir_compute_push_layout(nir_shader *nir,
bool anv_nir_compute_push_layout(nir_shader *nir,
const struct anv_physical_device *pdevice,
enum brw_robustness_flags robust_flags,
bool fragment_dynamic,

View file

@ -2551,7 +2551,7 @@ build_packed_binding_table(struct apply_pipeline_layout_state *state,
}
}
void
bool
anv_nir_apply_pipeline_layout(nir_shader *shader,
const struct anv_physical_device *pdevice,
enum brw_robustness_flags robust_flags,
@ -2561,6 +2561,7 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
struct anv_pipeline_push_map *push_map,
void *push_map_mem_ctx)
{
bool progress = false;
#ifndef NDEBUG
/* We should not have any reference to a descriptor set that is not
* given through the pipeline layout (layout->set[set].layout = NULL).
@ -2596,7 +2597,7 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
}
/* Find all use sets/bindings */
nir_shader_instructions_pass(shader, get_used_bindings,
progress |= nir_shader_instructions_pass(shader, get_used_bindings,
nir_metadata_all, &state);
/* Build the binding table */
@ -2632,16 +2633,16 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
* information by the time we get to the load/store/atomic
* intrinsics in that pass.
*/
nir_shader_instructions_pass(shader, lower_direct_buffer_instr,
progress |= nir_shader_instructions_pass(shader, lower_direct_buffer_instr,
nir_metadata_control_flow,
&state);
/* We just got rid of all the direct access. Delete it so it's not in the
* way when we do our indirect lowering.
*/
nir_opt_dce(shader);
progress |= nir_opt_dce(shader);
nir_shader_instructions_pass(shader, apply_pipeline_layout,
progress |= nir_shader_instructions_pass(shader, apply_pipeline_layout,
nir_metadata_none,
&state);
@ -2683,4 +2684,5 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
_mesa_sha1_compute(map->sampler_to_descriptor,
map->sampler_count * sizeof(struct anv_pipeline_binding),
map->sampler_sha1);
return progress;
}

View file

@ -26,7 +26,7 @@
#include "compiler/brw_nir.h"
#include "util/mesa-sha1.h"
void
bool
anv_nir_compute_push_layout(nir_shader *nir,
const struct anv_physical_device *pdevice,
enum brw_robustness_flags robust_flags,
@ -387,6 +387,7 @@ anv_nir_compute_push_layout(nir_shader *nir,
_mesa_sha1_compute(map->push_ranges,
sizeof(map->push_ranges),
map->push_sha1);
return false;
}
void

View file

@ -98,7 +98,7 @@ anv_shader_stage_to_nir(struct anv_device *device,
}
}
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries,
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
return nir;
@ -1005,7 +1005,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
if (nir->info.stage == MESA_SHADER_COMPUTE && nir->info.cs.has_cooperative_matrix) {
anv_fixup_subgroup_size(pipeline->device, &nir->info);
NIR_PASS(_, nir, brw_nir_lower_cmat, nir->info.subgroup_size);
NIR_PASS_V(nir, nir_lower_indirect_derefs, nir_var_function_temp, 16);
NIR_PASS(_, nir, nir_lower_indirect_derefs, nir_var_function_temp, 16);
}
/* The patch control points are delivered through a push constant when
@ -1064,7 +1064,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
struct anv_pipeline_push_map push_map = {};
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
NIR_PASS_V(nir, anv_nir_apply_pipeline_layout,
NIR_PASS(_, nir, anv_nir_apply_pipeline_layout,
pdevice, stage->key.base.robust_flags,
layout->independent_sets,
layout, &stage->bind_map, &push_map, mem_ctx);
@ -1127,16 +1127,16 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
NIR_PASS(_, nir, nir_opt_dce);
}
NIR_PASS_V(nir, anv_nir_update_resource_intel_block);
NIR_PASS(_, nir, anv_nir_update_resource_intel_block);
NIR_PASS_V(nir, anv_nir_compute_push_layout,
NIR_PASS(_, nir, anv_nir_compute_push_layout,
pdevice, stage->key.base.robust_flags,
anv_graphics_pipeline_stage_fragment_dynamic(stage),
anv_graphics_pipeline_stage_mesh_dynamic(stage),
prog_data, &stage->bind_map, &push_map,
pipeline->layout.type, mem_ctx);
NIR_PASS_V(nir, anv_nir_lower_resource_intel, pdevice,
NIR_PASS(_, nir, anv_nir_lower_resource_intel, pdevice,
pipeline->layout.type);
if (gl_shader_stage_uses_workgroup(nir->info.stage)) {
@ -3408,12 +3408,12 @@ compile_upload_rt_shader(struct anv_ray_tracing_pipeline *pipeline,
NIR_PASS(_, nir, nir_lower_shader_calls, &opts,
&resume_shaders, &num_resume_shaders, mem_ctx);
NIR_PASS(_, nir, brw_nir_lower_shader_calls, &lowering_state);
NIR_PASS_V(nir, brw_nir_lower_rt_intrinsics, &stage->key.base, devinfo);
NIR_PASS(_, nir, brw_nir_lower_rt_intrinsics, &stage->key.base, devinfo);
}
for (unsigned i = 0; i < num_resume_shaders; i++) {
NIR_PASS(_,resume_shaders[i], brw_nir_lower_shader_calls, &lowering_state);
NIR_PASS_V(resume_shaders[i], brw_nir_lower_rt_intrinsics, &stage->key.base, devinfo);
NIR_PASS(_, resume_shaders[i], brw_nir_lower_rt_intrinsics, &stage->key.base, devinfo);
}
struct brw_compile_bs_params params = {
@ -3949,7 +3949,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
nir_shader *trivial_return_nir =
brw_nir_create_trivial_return_shader(device->physical->compiler, tmp_ctx);
NIR_PASS_V(trivial_return_nir, brw_nir_lower_rt_intrinsics,
NIR_PASS(_, trivial_return_nir, brw_nir_lower_rt_intrinsics,
&return_key.key.base, device->info);
struct brw_bs_prog_data return_prog_data = { 0, };
@ -4007,7 +4007,7 @@ anv_device_init_rt_shaders(struct anv_device *device)
nir_shader *null_ahs_nir =
brw_nir_create_null_ahs_shader(device->physical->compiler, tmp_ctx);
NIR_PASS_V(null_ahs_nir, brw_nir_lower_rt_intrinsics,
NIR_PASS(_, null_ahs_nir, brw_nir_lower_rt_intrinsics,
&null_return_key.key.base, device->info);
struct brw_bs_prog_data return_prog_data = { 0, };

View file

@ -670,9 +670,9 @@ anv_load_fp64_shader(struct anv_device *device)
nir_validate_shader(nir, "after spirv_to_nir");
NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS_V(nir, nir_lower_returns);
NIR_PASS_V(nir, nir_inline_functions);
NIR_PASS(_, nir, nir_lower_variable_initializers, nir_var_function_temp);
NIR_PASS(_, nir, nir_lower_returns);
NIR_PASS(_, nir, nir_inline_functions);
anv_device_upload_nir(device, device->internal_cache,
nir, sha1);

View file

@ -66,13 +66,13 @@ anv_nir_ubo_addr_format(const struct anv_physical_device *pdevice,
bool anv_nir_lower_ubo_loads(nir_shader *shader);
void anv_nir_apply_pipeline_layout(nir_shader *shader,
bool anv_nir_apply_pipeline_layout(nir_shader *shader,
const struct anv_physical_device *pdevice,
enum elk_robustness_flags robust_flags,
const struct anv_pipeline_layout *layout,
struct anv_pipeline_bind_map *map);
void anv_nir_compute_push_layout(nir_shader *nir,
bool anv_nir_compute_push_layout(nir_shader *nir,
const struct anv_physical_device *pdevice,
enum elk_robustness_flags robust_flags,
struct elk_stage_prog_data *prog_data,

View file

@ -1224,7 +1224,7 @@ compare_binding_infos(const void *_a, const void *_b)
return a->binding - b->binding;
}
void
bool
anv_nir_apply_pipeline_layout(nir_shader *shader,
const struct anv_physical_device *pdevice,
enum elk_robustness_flags robust_flags,
@ -1474,4 +1474,5 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
_mesa_sha1_compute(map->sampler_to_descriptor,
map->sampler_count * sizeof(struct anv_pipeline_binding),
map->sampler_sha1);
return true;
}

View file

@ -28,7 +28,7 @@
#define sizeof_field(type, field) sizeof(((type *)0)->field)
void
bool
anv_nir_compute_push_layout(nir_shader *nir,
const struct anv_physical_device *pdevice,
enum elk_robustness_flags robust_flags,
@ -237,6 +237,7 @@ anv_nir_compute_push_layout(nir_shader *nir,
_mesa_sha1_compute(map->push_ranges,
sizeof(map->push_ranges),
map->push_sha1);
return false;
}
void

View file

@ -89,7 +89,7 @@ anv_shader_stage_to_nir(struct anv_device *device,
nir_print_shader(nir, stderr);
}
NIR_PASS_V(nir, nir_lower_io_vars_to_temporaries,
NIR_PASS(_, nir, nir_lower_io_vars_to_temporaries,
nir_shader_get_entrypoint(nir), true, false);
const struct nir_lower_sysvals_to_varyings_options sysvals_to_varyings = {
@ -511,7 +511,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
nir_address_format_32bit_offset);
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
NIR_PASS_V(nir, anv_nir_apply_pipeline_layout,
NIR_PASS(_, nir, anv_nir_apply_pipeline_layout,
pdevice, stage->key.base.robust_flags,
layout, &stage->bind_map);
@ -550,7 +550,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
});
}
NIR_PASS_V(nir, anv_nir_compute_push_layout,
NIR_PASS(_, nir, anv_nir_compute_push_layout,
pdevice, stage->key.base.robust_flags,
prog_data, &stage->bind_map, mem_ctx);