nir: Use nir_shader_intrinsics_pass in nir_lower_io_to_scalar

Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38816>
Author: Arcady Goldmints-Orlov, 2025-10-20 16:53:21 -04:00 (committed by Marge Bot)
parent e9ad86db08
commit 0df8aa940c
6 changed files with 17 additions and 27 deletions
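
The change is mechanical: nir_lower_io_to_scalar now walks the shader with nir_shader_intrinsics_pass instead of nir_shader_instructions_pass, so the per-instruction callback and the caller-supplied filter receive a nir_intrinsic_instr directly. That lets the pass and every driver filter drop the instr->type check and the nir_instr_as_intrinsic() cast, and the filter parameter changes type from nir_instr_filter_cb to nir_intrin_filter_cb. Roughly, the two callback shapes involved look like this (inferred from the hunks below, not quoted verbatim from nir.h):

typedef bool (*nir_instr_filter_cb)(const nir_instr *instr, const void *data);
typedef bool (*nir_intrin_filter_cb)(const nir_intrinsic_instr *intrin, const void *data);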

View file

@@ -652,10 +652,8 @@ ac_nir_mem_vectorize_callback(unsigned align_mul, unsigned align_offset, unsigne
    return false;
 }
-bool ac_nir_scalarize_overfetching_loads_callback(const nir_instr *instr, const void *data)
+bool ac_nir_scalarize_overfetching_loads_callback(const nir_intrinsic_instr *intr, const void *data)
 {
-   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    /* Reject opcodes we don't scalarize. */
    switch (intr->intrinsic) {
    case nir_intrinsic_load_ubo:

View file

@@ -433,7 +433,7 @@ ac_nir_mem_vectorize_callback(unsigned align_mul, unsigned align_offset, unsigne
                                    nir_intrinsic_instr *low, nir_intrinsic_instr *high, void *data);
 bool
-ac_nir_scalarize_overfetching_loads_callback(const nir_instr *instr, const void *data);
+ac_nir_scalarize_overfetching_loads_callback(const nir_intrinsic_instr *intr, const void *data);
 bool
 ac_nir_store_may_be_subdword(const nir_intrinsic_instr *instr);

View file

@@ -5596,7 +5596,7 @@ bool nir_lower_all_phis_to_scalar(nir_shader *shader);
 void nir_lower_io_array_vars_to_elements(nir_shader *producer, nir_shader *consumer);
 bool nir_lower_io_array_vars_to_elements_no_indirects(nir_shader *shader,
                                                        bool outputs_only);
-bool nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask, nir_instr_filter_cb filter, void *filter_data);
+bool nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask, nir_intrin_filter_cb filter, void *filter_data);
 bool nir_lower_io_vars_to_scalar(nir_shader *shader, nir_variable_mode mask);
 bool nir_opt_vectorize_io_vars(nir_shader *shader, nir_variable_mode mask);
 bool nir_lower_tess_level_array_vars_to_vec(nir_shader *shader);

View file

@@ -268,20 +268,15 @@ lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
 struct scalarize_state {
    nir_variable_mode mask;
-   nir_instr_filter_cb filter;
+   nir_intrin_filter_cb filter;
    void *filter_data;
 };
 static bool
-nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
+nir_lower_io_to_scalar_instr(nir_builder *b, nir_intrinsic_instr *intr, void *data)
 {
    struct scalarize_state *state = data;
-   if (instr->type != nir_instr_type_intrinsic)
-      return false;
-   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    if (intr->num_components == 1)
       return false;
@@ -291,7 +286,7 @@ nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
         intr->intrinsic == nir_intrinsic_load_interpolated_input ||
         intr->intrinsic == nir_intrinsic_load_input_vertex) &&
        (state->mask & nir_var_shader_in) &&
-       (!state->filter || state->filter(instr, state->filter_data))) {
+       (!state->filter || state->filter(intr, state->filter_data))) {
       lower_load_input_to_scalar(b, intr);
       return true;
    }
@@ -301,7 +296,7 @@ nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
         intr->intrinsic == nir_intrinsic_load_per_view_output ||
         intr->intrinsic == nir_intrinsic_load_per_primitive_output) &&
        (state->mask & nir_var_shader_out) &&
-       (!state->filter || state->filter(instr, state->filter_data))) {
+       (!state->filter || state->filter(intr, state->filter_data))) {
       lower_load_input_to_scalar(b, intr);
       return true;
    }
@@ -311,7 +306,7 @@ nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
        (intr->intrinsic == nir_intrinsic_load_global && (state->mask & nir_var_mem_global)) ||
        (intr->intrinsic == nir_intrinsic_load_shared && (state->mask & nir_var_mem_shared)) ||
        (intr->intrinsic == nir_intrinsic_load_push_constant && (state->mask & nir_var_mem_push_const))) &&
-       (!state->filter || state->filter(instr, state->filter_data))) {
+       (!state->filter || state->filter(intr, state->filter_data))) {
       lower_load_to_scalar(b, intr);
       return true;
    }
@@ -321,7 +316,7 @@ nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
         intr->intrinsic == nir_intrinsic_store_per_view_output ||
         intr->intrinsic == nir_intrinsic_store_per_primitive_output) &&
        state->mask & nir_var_shader_out &&
-       (!state->filter || state->filter(instr, state->filter_data))) {
+       (!state->filter || state->filter(intr, state->filter_data))) {
       lower_store_output_to_scalar(b, intr);
       return true;
    }
@@ -329,7 +324,7 @@ nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
    if (((intr->intrinsic == nir_intrinsic_store_ssbo && (state->mask & nir_var_mem_ssbo)) ||
         (intr->intrinsic == nir_intrinsic_store_global && (state->mask & nir_var_mem_global)) ||
         (intr->intrinsic == nir_intrinsic_store_shared && (state->mask & nir_var_mem_shared))) &&
-       (!state->filter || state->filter(instr, state->filter_data))) {
+       (!state->filter || state->filter(intr, state->filter_data))) {
       lower_store_to_scalar(b, intr);
       return true;
    }
@@ -338,15 +333,15 @@ nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
 }
 bool
-nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask, nir_instr_filter_cb filter, void *filter_data)
+nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask, nir_intrin_filter_cb filter, void *filter_data)
 {
    struct scalarize_state state = {
       mask,
       filter,
       filter_data
    };
-   return nir_shader_instructions_pass(shader,
-                                       nir_lower_io_to_scalar_instr,
-                                       nir_metadata_control_flow,
-                                       &state);
+   return nir_shader_intrinsics_pass(shader,
+                                     nir_lower_io_to_scalar_instr,
+                                     nir_metadata_control_flow,
+                                     &state);
 }
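
For callers, only the filter's signature changes. A minimal caller-side sketch under the new nir_intrin_filter_cb shape (the filter name and its policy are hypothetical, made up for illustration; only nir_lower_io_to_scalar and the intrinsic names come from this change):

static bool
only_scalarize_inputs(const nir_intrinsic_instr *intr, const void *data)
{
   /* Hypothetical policy: scalarize plain and interpolated input loads only. */
   return intr->intrinsic == nir_intrinsic_load_input ||
          intr->intrinsic == nir_intrinsic_load_interpolated_input;
}

/* Invoked once per shader, e.g. from a driver's NIR optimization loop: */
nir_lower_io_to_scalar(shader, nir_var_shader_in, only_scalarize_inputs, NULL);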

View file

@@ -137,10 +137,9 @@ ir3_load_driver_ubo_indirect(nir_builder *b, unsigned components,
 }
 static bool
-ir3_nir_should_scalarize_mem(const nir_instr *instr, const void *data)
+ir3_nir_should_scalarize_mem(const nir_intrinsic_instr *intrin, const void *data)
 {
    const struct ir3_compiler *compiler = data;
-   const nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    /* Scalarize load_ssbo's that we could otherwise lower to isam,
     * as the tex cache benefit outweighs the benefit of vectorizing

View file

@@ -144,12 +144,10 @@ static uint8_t vectorize_filter(const nir_instr *instr, UNUSED const void *data)
  * \param[in] data User data.
  * \return True if the instruction was found.
  */
-static bool frag_in_scalar_filter(const nir_instr *instr, const void *data)
+static bool frag_in_scalar_filter(const nir_intrinsic_instr *intr, const void *data)
 {
-   assert(instr->type == nir_instr_type_intrinsic);
    nir_shader *nir = (nir_shader *)data;
-   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    if (intr->intrinsic != nir_intrinsic_load_input &&
        intr->intrinsic != nir_intrinsic_load_interpolated_input) {
       return false;