ir3: reformat after previous commit

Signed-off-by: Job Noorman <jnoorman@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33701>
Author: Job Noorman <jnoorman@igalia.com>
Date: 2025-02-24 07:52:10 +01:00 (committed by Marge Bot)
Commit: 7210054db8, parent: 2fedc82c0c
2 changed files with 34 additions and 33 deletions

File 1 of 2:

@@ -490,7 +490,7 @@ ir3_nir_lower_io_to_temporaries(nir_shader *s)
                        s->info.stage != MESA_SHADER_GEOMETRY;
    if (lower_input || lower_output) {
       NIR_PASS(_, s, nir_lower_io_to_temporaries, nir_shader_get_entrypoint(s),
-                 lower_output, lower_input);
+               lower_output, lower_input);

       /* nir_lower_io_to_temporaries() creates global variables and copy
        * instructions which need to be cleaned up.
@@ -756,8 +756,9 @@ ir3_nir_post_finalize(struct ir3_shader *shader)
    MESA_TRACE_FUNC();

    NIR_PASS(_, s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
-            ir3_glsl_type_size, nir_lower_io_lower_64bit_to_32 |
-            nir_lower_io_use_interpolated_input_intrinsics);
+            ir3_glsl_type_size,
+            nir_lower_io_lower_64bit_to_32 |
+               nir_lower_io_use_interpolated_input_intrinsics);

    if (s->info.stage == MESA_SHADER_FRAGMENT) {
       /* NOTE: lower load_barycentric_at_sample first, since it
@@ -804,10 +805,8 @@ ir3_nir_post_finalize(struct ir3_shader *shader)
    }

    if (mediump_varyings) {
-      NIR_PASS(_, s, nir_lower_mediump_io,
-               nir_var_shader_in,
-               mediump_varyings,
-               false);
+      NIR_PASS(_, s, nir_lower_mediump_io, nir_var_shader_in,
+               mediump_varyings, false);
    }

    /* This should come after input lowering, to opportunistically lower non-mediump outputs. */
@@ -882,8 +881,8 @@ ir3_nir_post_finalize(struct ir3_shader *shader)
    const nir_lower_idiv_options lower_idiv_options = {
       .allow_fp16 = true,
    };
-   NIR_PASS(_, s, nir_lower_idiv, &lower_idiv_options); /* idiv generated by cube lowering */
+   NIR_PASS(_, s, nir_lower_idiv,
+            &lower_idiv_options); /* idiv generated by cube lowering */

    /* The resinfo opcode returns the size in dwords on a4xx */
    if (compiler->gen == 4)
@@ -1010,12 +1009,12 @@ ir3_nir_lower_variant(struct ir3_shader_variant *so,
    switch (so->type) {
    case MESA_SHADER_VERTEX:
       NIR_PASS(_, s, ir3_nir_lower_to_explicit_output, so,
-                so->key.tessellation);
+              so->key.tessellation);
       progress = true;
       break;
    case MESA_SHADER_TESS_CTRL:
       NIR_PASS(_, s, nir_lower_io_to_scalar,
-                nir_var_shader_in | nir_var_shader_out, NULL, NULL);
+              nir_var_shader_in | nir_var_shader_out, NULL, NULL);
       NIR_PASS(_, s, ir3_nir_lower_tess_ctrl, so, so->key.tessellation);
       NIR_PASS(_, s, ir3_nir_lower_to_explicit_input, so);
       progress = true;
@@ -1024,7 +1023,7 @@ ir3_nir_lower_variant(struct ir3_shader_variant *so,
       NIR_PASS(_, s, ir3_nir_lower_tess_eval, so, so->key.tessellation);
       if (so->key.has_gs)
          NIR_PASS(_, s, ir3_nir_lower_to_explicit_output, so,
-                   so->key.tessellation);
+                 so->key.tessellation);
       progress = true;
       break;
    case MESA_SHADER_GEOMETRY:
@ -1065,7 +1064,7 @@ ir3_nir_lower_variant(struct ir3_shader_variant *so,
* could theoretically do better.
*/
OPT(s, nir_opt_large_constants, glsl_get_vec4_size_align_bytes,
32 /* bytes */);
32 /* bytes */);
progress |= OPT(s, ir3_nir_lower_load_constant, so);
/* Lower large temporaries to scratch, which in Qualcomm terms is private

File 2 of 2:

@@ -121,7 +121,8 @@ tu_spirv_to_nir(struct tu_device *dev,
    NIR_PASS(_, nir, nir_split_var_copies);
    NIR_PASS(_, nir, nir_lower_var_copies);

-   NIR_PASS(_, nir, nir_lower_mediump_vars, nir_var_function_temp | nir_var_shader_temp | nir_var_mem_shared);
+   NIR_PASS(_, nir, nir_lower_mediump_vars,
+            nir_var_function_temp | nir_var_shader_temp | nir_var_mem_shared);

    NIR_PASS(_, nir, nir_opt_copy_prop_vars);
    NIR_PASS(_, nir, nir_opt_combine_stores, nir_var_all);
@@ -2519,7 +2520,6 @@ tu_shader_create(struct tu_device *dev,
    };

    NIR_PASS(_, nir, tu_nir_lower_fdm, &fdm_options);
-
    /* This needs to happen before multiview lowering which rewrites store
     * instructions of the position variable, so that we can just rewrite one
     * store at the end instead of having to rewrite every store specified by
@@ -2539,24 +2539,21 @@ tu_shader_create(struct tu_device *dev,
    }

    NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_push_const,
-             nir_address_format_32bit_offset);
-   NIR_PASS(_, nir, nir_lower_explicit_io,
-            nir_var_mem_ubo | nir_var_mem_ssbo,
-            nir_address_format_vec2_index_32bit_offset);
-   NIR_PASS(_, nir, nir_lower_explicit_io,
-            nir_var_mem_global,
-            nir_address_format_64bit_global);
+            nir_address_format_32bit_offset);
+   NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_ubo | nir_var_mem_ssbo,
+            nir_address_format_vec2_index_32bit_offset);
+   NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_global,
+            nir_address_format_64bit_global);

    if (nir->info.stage == MESA_SHADER_COMPUTE) {
       if (!nir->info.shared_memory_explicit_layout) {
          NIR_PASS(_, nir, nir_lower_vars_to_explicit_types,
-                   nir_var_mem_shared, shared_type_info);
+                  nir_var_mem_shared, shared_type_info);
       }
-      NIR_PASS(_, nir, nir_lower_explicit_io,
-               nir_var_mem_shared,
-               nir_address_format_32bit_offset);
+      NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_shared,
+               nir_address_format_32bit_offset);

       if (nir->info.zero_initialize_shared_memory && nir->info.shared_size > 0) {
          const unsigned chunk_size = 16; /* max single store size */
@@ -2566,13 +2563,15 @@ tu_shader_create(struct tu_device *dev,
           * that accesses are limited to those bounds.
           */
          const unsigned shared_size = ALIGN(nir->info.shared_size, chunk_size);
-         NIR_PASS(_, nir, nir_zero_initialize_shared_memory, shared_size, chunk_size);
+         NIR_PASS(_, nir, nir_zero_initialize_shared_memory, shared_size,
+                  chunk_size);
       }

       const struct nir_lower_compute_system_values_options compute_sysval_options = {
          .has_base_workgroup_id = true,
       };
-      NIR_PASS(_, nir, nir_lower_compute_system_values, &compute_sysval_options);
+      NIR_PASS(_, nir, nir_lower_compute_system_values,
+               &compute_sysval_options);
    }

    nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, nir->info.stage);
@@ -2611,8 +2610,8 @@ tu_shader_create(struct tu_device *dev,
    struct ir3_const_allocations const_allocs = {};
    NIR_PASS(_, nir, tu_lower_io, dev, shader, layout,
-             key->read_only_input_attachments, key->dynamic_renderpass,
-             &const_allocs);
+            key->read_only_input_attachments, key->dynamic_renderpass,
+            &const_allocs);

    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
@@ -2745,9 +2744,11 @@ tu_link_shaders(nir_shader **shaders, unsigned shaders_count)
    const nir_remove_dead_variables_options out_var_opts = {
       .can_remove_var = nir_vk_is_not_xfb_output,
    };
-   NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, &out_var_opts);
+   NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out,
+            &out_var_opts);

-   NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
+   NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in,
+            NULL);

    bool progress = nir_remove_unused_varyings(producer, consumer);
@@ -2755,7 +2756,8 @@ tu_link_shaders(nir_shader **shaders, unsigned shaders_count)
    if (progress) {
       if (nir_lower_global_vars_to_local(producer)) {
          /* Remove dead writes, which can remove input loads */
-         NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_temp, NULL);
+         NIR_PASS(_, producer, nir_remove_dead_variables,
+                  nir_var_shader_temp, NULL);
          NIR_PASS(_, producer, nir_opt_dce);
       }

       nir_lower_global_vars_to_local(consumer);