treewide: Use nir_shader_intrinsics_pass sometimes

This converts a lot of trivial passes. Nice boilerplate deletion. Done via the
Coccinelle patch below (with a small manual fix-up for panfrost, where
Coccinelle got confused by genxml, plus a ninja clang-format run squashed in,
and for Zink, where my semantic patch was slightly buggy).

    @def@
    typedef bool;
    typedef nir_builder;
    typedef nir_instr;
    typedef nir_def;
    identifier fn, instr, intr, x, builder, data;
    @@

    static bool fn(nir_builder *builder,
    -nir_instr *instr,
    +nir_intrinsic_instr *intr,
    ...)
    {
    (
    -   if (instr->type != nir_instr_type_intrinsic)
    -      return false;
    -   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    |
    -   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
    -   if (instr->type != nir_instr_type_intrinsic)
    -      return false;
    )

    <...
    (
    -instr->x
    +intr->instr.x
    |
    -instr
    +&intr->instr
    )
    ...>

    }

    @pass depends on def@
    identifier def.fn;
    expression shader, progress;
    @@

    (
    -nir_shader_instructions_pass(shader, fn,
    +nir_shader_intrinsics_pass(shader, fn,
    ...)
    |
    -NIR_PASS_V(shader, nir_shader_instructions_pass, fn,
    +NIR_PASS_V(shader, nir_shader_intrinsics_pass, fn,
    ...)
    |
    -NIR_PASS(progress, shader, nir_shader_instructions_pass, fn,
    +NIR_PASS(progress, shader, nir_shader_intrinsics_pass, fn,
    ...)
    )
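
For illustration, this is the shape of the conversion on a typical callback.
It is a hand-written sketch, not code taken from the diff; the pass name
lower_foo, the wrapper foo_lower_pass and the intrinsic nir_intrinsic_load_foo
are placeholder names. The point is that nir_shader_intrinsics_pass does the
intrinsic filtering itself, so the type check and the nir_instr_as_intrinsic()
cast drop out, and any remaining uses of instr become &intr->instr.

    #include "nir_builder.h"

    /* Before: callback registered with nir_shader_instructions_pass,
     * filtering for intrinsics by hand. (lower_foo / load_foo are
     * placeholder names for this sketch.)
     */
    static bool
    lower_foo(nir_builder *b, nir_instr *instr, void *data)
    {
       if (instr->type != nir_instr_type_intrinsic)
          return false;

       nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
       if (intr->intrinsic != nir_intrinsic_load_foo)
          return false;

       b->cursor = nir_before_instr(instr);
       /* ...rewrite the intrinsic... */
       return true;
    }

    bool
    foo_lower_pass(nir_shader *s)
    {
       return nir_shader_instructions_pass(s, lower_foo,
                                           nir_metadata_block_index |
                                           nir_metadata_dominance, NULL);
    }

    /* After: the walker only visits intrinsics, so the callback takes
     * the nir_intrinsic_instr directly.
     */
    static bool
    lower_foo(nir_builder *b, nir_intrinsic_instr *intr, void *data)
    {
       if (intr->intrinsic != nir_intrinsic_load_foo)
          return false;

       b->cursor = nir_before_instr(&intr->instr);
       /* ...rewrite the intrinsic... */
       return true;
    }

    bool
    foo_lower_pass(nir_shader *s)
    {
       return nir_shader_intrinsics_pass(s, lower_foo,
                                         nir_metadata_block_index |
                                         nir_metadata_dominance, NULL);
    }

The semantic patch can be applied treewide with something along the lines of
spatch --sp-file intrinsics-pass.cocci --in-place --dir src/ (hypothetical
file name; the exact invocation used is not recorded in the commit).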

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Acked-by: Faith Ekstrand <faith.ekstrand@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24852>
commit 465b138f01 (parent 5fa9f842b0)
Alyssa Rosenzweig, 2023-08-23 12:48:10 -04:00, committed by Marge Bot
68 changed files with 417 additions and 761 deletions

@ -106,14 +106,9 @@ emit_split_buffer_store(nir_builder *b, nir_def *d, nir_def *desc, nir_def *v_of
static bool
lower_es_output_store(nir_builder *b,
nir_instr *instr,
nir_intrinsic_instr *intrin,
void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_store_output)
return false;
@ -145,14 +140,14 @@ lower_es_output_store(nir_builder *b,
*/
unsigned semantic = nir_intrinsic_io_semantics(intrin).location;
if (semantic == VARYING_SLOT_LAYER || semantic == VARYING_SLOT_VIEWPORT) {
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
lower_esgs_io_state *st = (lower_esgs_io_state *) state;
unsigned write_mask = nir_intrinsic_write_mask(intrin);
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *io_off = ac_nir_calc_io_offset(b, intrin, nir_imm_int(b, 16u), 4u, st->map_io);
if (st->gfx_level <= GFX8) {
@ -169,7 +164,7 @@ lower_es_output_store(nir_builder *b,
nir_store_shared(b, intrin->src[0].ssa, off, .write_mask = write_mask);
}
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -298,8 +293,7 @@ ac_nir_lower_es_outputs_to_mem(nir_shader *shader,
.map_io = map,
};
nir_shader_instructions_pass(shader,
lower_es_output_store,
nir_shader_intrinsics_pass(shader, lower_es_output_store,
nir_metadata_block_index | nir_metadata_dominance,
&state);
}

@ -50,13 +50,8 @@ try_extract_additions(nir_builder *b, nir_scalar scalar, uint64_t *out_const,
}
static bool
process_instr(nir_builder *b, nir_instr *instr, void *_)
process_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *_)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
nir_intrinsic_op op;
switch (intrin->intrinsic) {
case nir_intrinsic_load_global:
@ -130,6 +125,6 @@ process_instr(nir_builder *b, nir_instr *instr, void *_)
bool
ac_nir_lower_global_access(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, process_instr,
return nir_shader_intrinsics_pass(shader, process_instr,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}

@ -199,14 +199,9 @@ tcs_output_needs_lds(nir_intrinsic_instr *intrin,
static bool
lower_ls_output_store(nir_builder *b,
nir_instr *instr,
nir_intrinsic_instr *intrin,
void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_store_output)
return false;
@ -227,7 +222,7 @@ lower_ls_output_store(nir_builder *b,
*/
unsigned semantic = nir_intrinsic_io_semantics(intrin).location;
if (semantic == VARYING_SLOT_LAYER || semantic == VARYING_SLOT_VIEWPORT) {
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -237,7 +232,7 @@ lower_ls_output_store(nir_builder *b,
if (match_mask(MESA_SHADER_VERTEX, intrin, st->tcs_temp_only_inputs, false))
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *vertex_idx = nir_load_local_invocation_index(b);
nir_def *base_off_var = nir_imul(b, vertex_idx, nir_load_lshs_vertex_stride_amd(b));
@ -252,7 +247,7 @@ lower_ls_output_store(nir_builder *b,
* it will be used by same-invocation TCS input loads.
*/
if (!st->tcs_in_out_eq)
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -753,8 +748,7 @@ ac_nir_lower_ls_outputs_to_mem(nir_shader *shader,
.map_io = map,
};
nir_shader_instructions_pass(shader,
lower_ls_output_store,
nir_shader_intrinsics_pass(shader, lower_ls_output_store,
nir_metadata_block_index | nir_metadata_dominance,
&state);
}

@ -68,17 +68,12 @@ shader_query_bool_setting(nir_builder *b, unsigned mask, lower_abi_state *s)
}
static bool
lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
lower_abi_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
lower_abi_state *s = (lower_abi_state *)state;
gl_shader_stage stage = b->shader->info.stage;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *replacement = NULL;
bool progress = true;
@ -507,8 +502,8 @@ lower_abi_instr(nir_builder *b, nir_instr *instr, void *state)
if (replacement)
nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);
nir_instr_remove(&intrin->instr);
nir_instr_free(&intrin->instr);
return true;
}
@ -560,5 +555,5 @@ radv_nir_lower_abi(nir_shader *shader, enum amd_gfx_level gfx_level, const struc
state.gsvs_ring[i] = load_gsvs_ring(&b, &state, i);
}
nir_shader_instructions_pass(shader, lower_abi_instr, nir_metadata_dominance | nir_metadata_block_index, &state);
nir_shader_intrinsics_pass(shader, lower_abi_instr, nir_metadata_dominance | nir_metadata_block_index, &state);
}

@ -385,18 +385,14 @@ lower_load_vs_input(nir_builder *b, nir_intrinsic_instr *intrin, lower_vs_inputs
}
static bool
lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
lower_vs_input_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_input)
return false;
lower_vs_inputs_state *s = (lower_vs_inputs_state *)state;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *replacement = NULL;
@ -407,8 +403,8 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
}
nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);
nir_instr_remove(&intrin->instr);
nir_instr_free(&intrin->instr);
return true;
}
@ -426,6 +422,6 @@ radv_nir_lower_vs_inputs(nir_shader *shader, const struct radv_shader_stage *vs_
.rad_info = rad_info,
};
return nir_shader_instructions_pass(shader, lower_vs_input_instr, nir_metadata_dominance | nir_metadata_block_index,
&state);
return nir_shader_intrinsics_pass(shader, lower_vs_input_instr, nir_metadata_dominance | nir_metadata_block_index,
&state);
}

@ -71,17 +71,13 @@ lower_zs_emit(nir_block *block)
}
static bool
lower_discard(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_discard(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_discard &&
intr->intrinsic != nir_intrinsic_discard_if)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *all_samples = nir_imm_intN_t(b, ALL_SAMPLES, 16);
nir_def *no_samples = nir_imm_intN_t(b, 0, 16);
@ -92,7 +88,7 @@ lower_discard(nir_builder *b, nir_instr *instr, UNUSED void *data)
/* This will get lowered later as needed */
nir_discard_agx(b, killed_samples);
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -102,7 +98,7 @@ agx_nir_lower_discard(nir_shader *s)
if (!s->info.fs.uses_discard)
return false;
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
s, lower_discard, nir_metadata_block_index | nir_metadata_dominance,
NULL);
}

@ -65,19 +65,15 @@
#define BASE_S 2
static bool
lower_sample_mask_to_zs(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_sample_mask_to_zs(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
bool depth_written =
b->shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_DEPTH);
bool stencil_written =
b->shader->info.outputs_written & BITFIELD64_BIT(FRAG_RESULT_STENCIL);
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
/* Existing zs_emit instructions need to be fixed up to write their own depth
* for consistency.
@ -106,24 +102,20 @@ lower_sample_mask_to_zs(nir_builder *b, nir_instr *instr, UNUSED void *data)
: nir_undef(b, 1, 16) /* stencil */,
.base = BASE_Z | (stencil_written ? BASE_S : 0));
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
static bool
lower_discard_to_sample_mask_0(nir_builder *b, nir_instr *instr,
lower_discard_to_sample_mask_0(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_discard_agx)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_sample_mask_agx(b, intr->src[0].ssa, nir_imm_intN_t(b, 0, 16));
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -162,7 +154,7 @@ agx_nir_lower_sample_mask(nir_shader *shader, unsigned nr_samples)
/* sample_mask can't be used with zs_emit, so lower sample_mask to zs_emit */
if (shader->info.outputs_written & (BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
BITFIELD64_BIT(FRAG_RESULT_STENCIL))) {
bool progress = nir_shader_instructions_pass(
bool progress = nir_shader_intrinsics_pass(
shader, lower_sample_mask_to_zs,
nir_metadata_block_index | nir_metadata_dominance, NULL);
@ -219,9 +211,9 @@ agx_nir_lower_sample_mask(nir_shader *shader, unsigned nr_samples)
}
}
nir_shader_instructions_pass(
shader, lower_discard_to_sample_mask_0,
nir_metadata_block_index | nir_metadata_dominance, NULL);
nir_shader_intrinsics_pass(shader, lower_discard_to_sample_mask_0,
nir_metadata_block_index | nir_metadata_dominance,
NULL);
return true;
}

@ -483,12 +483,8 @@ lower_sampler_bias(nir_builder *b, nir_instr *instr, UNUSED void *data)
}
static bool
legalize_image_lod(nir_builder *b, nir_instr *instr, UNUSED void *data)
legalize_image_lod(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
nir_src *src;
#define CASE(op, idx) \
@ -510,7 +506,7 @@ legalize_image_lod(nir_builder *b, nir_instr *instr, UNUSED void *data)
if (src->ssa->bit_size == 16)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_src_rewrite(src, nir_i2i16(b, src->ssa));
return true;
}
@ -717,13 +713,9 @@ lower_1d_image(nir_builder *b, nir_intrinsic_instr *intr)
}
static bool
lower_images(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_images(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
switch (intr->intrinsic) {
case nir_intrinsic_image_load:
@ -808,9 +800,9 @@ agx_nir_lower_texture(nir_shader *s, bool support_lod_bias)
nir_metadata_block_index | nir_metadata_dominance, NULL);
}
NIR_PASS(progress, s, nir_shader_instructions_pass, legalize_image_lod,
NIR_PASS(progress, s, nir_shader_intrinsics_pass, legalize_image_lod,
nir_metadata_block_index | nir_metadata_dominance, NULL);
NIR_PASS(progress, s, nir_shader_instructions_pass, lower_images,
NIR_PASS(progress, s, nir_shader_intrinsics_pass, lower_images,
nir_metadata_block_index | nir_metadata_dominance, NULL);
NIR_PASS(progress, s, nir_legalize_16bit_sampler_srcs, tex_constraints);
@ -827,13 +819,10 @@ agx_nir_lower_texture(nir_shader *s, bool support_lod_bias)
}
static bool
lower_multisampled_store(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_multisampled_store(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
if (intr->intrinsic != nir_intrinsic_bindless_image_store)
return false;
@ -854,7 +843,7 @@ lower_multisampled_store(nir_builder *b, nir_instr *instr, UNUSED void *data)
bool
agx_nir_lower_multisampled_image_store(nir_shader *s)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
s, lower_multisampled_store,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}

@ -140,13 +140,10 @@ lower_sample_mask_write(nir_builder *b, nir_instr *instr, void *data)
* sample_mask_in --> sample_mask_in & api_sample_mask
*/
static bool
lower_sample_mask_read(nir_builder *b, nir_instr *instr, UNUSED void *_)
lower_sample_mask_read(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *_)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
if (intr->intrinsic != nir_intrinsic_load_sample_mask_in)
return false;
@ -196,9 +193,9 @@ agx_nir_lower_monolithic_msaa(nir_shader *shader, struct agx_msaa_state *state)
insert_sample_mask_write(shader);
/* Additional, sample_mask_in needs to account for the API-level mask */
nir_shader_instructions_pass(
shader, lower_sample_mask_read,
nir_metadata_block_index | nir_metadata_dominance, &state->nr_samples);
nir_shader_intrinsics_pass(shader, lower_sample_mask_read,
nir_metadata_block_index | nir_metadata_dominance,
&state->nr_samples);
/* In single sampled programs, interpolateAtSample needs to return the
* center pixel. TODO: Generalize for dynamic sample count.

@ -16,13 +16,9 @@ mask_by_sample_id(nir_builder *b, nir_def *mask)
}
static bool
lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
lower_to_sample(nir_builder *b, nir_intrinsic_instr *intr, void *_)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_sample_pos: {
@ -57,7 +53,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
/* Collect and rewrite */
nir_def_rewrite_uses(&intr->def, nir_vec2(b, xy[0], xy[1]));
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -66,7 +62,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
* of the sample currently being shaded when sample shading is used. Mask
* by the sample ID to make that happen.
*/
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def *old = &intr->def;
nir_def *lowered = mask_by_sample_id(b, old);
nir_def_rewrite_uses_after(old, lowered, lowered->parent_instr);
@ -77,7 +73,7 @@ lower_to_sample(nir_builder *b, nir_instr *instr, void *_)
/* Lower fragment varyings with "sample" interpolation to
* interpolateAtSample() with the sample ID
*/
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def *old = &intr->def;
nir_def *lowered = nir_load_barycentric_at_sample(
@ -116,7 +112,7 @@ agx_nir_lower_sample_intrinsics(nir_shader *shader)
if (!shader->info.fs.uses_sample_shading)
return false;
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
shader, lower_to_sample,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}

@ -204,15 +204,9 @@ v3d_nir_lower_image_load(nir_builder *b, nir_intrinsic_instr *instr)
static bool
v3d_nir_lower_image_load_store_cb(nir_builder *b,
nir_instr *instr,
nir_intrinsic_instr *intr,
void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr =
nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_image_load:
return v3d_nir_lower_image_load(b, intr);
@ -228,7 +222,8 @@ v3d_nir_lower_image_load_store_cb(nir_builder *b,
bool
v3d_nir_lower_image_load_store(nir_shader *s)
{
return nir_shader_instructions_pass(s, v3d_nir_lower_image_load_store_cb,
return nir_shader_intrinsics_pass(s,
v3d_nir_lower_image_load_store_cb,
nir_metadata_block_index |
nir_metadata_dominance, NULL);
}

@ -229,12 +229,9 @@ lower_store_bitsize(nir_builder *b,
}
static bool
lower_load_store_bitsize(nir_builder *b, nir_instr *instr, void *data)
lower_load_store_bitsize(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_ssbo:
case nir_intrinsic_load_ubo:
@ -256,8 +253,7 @@ lower_load_store_bitsize(nir_builder *b, nir_instr *instr, void *data)
bool
v3d_nir_lower_load_store_bitsize(nir_shader *s)
{
return nir_shader_instructions_pass(s,
lower_load_store_bitsize,
return nir_shader_intrinsics_pass(s, lower_load_store_bitsize,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);

@ -117,15 +117,9 @@ v3d_nir_lower_store_scratch(nir_builder *b, nir_intrinsic_instr *instr)
static bool
v3d_nir_lower_scratch_cb(nir_builder *b,
nir_instr *instr,
nir_intrinsic_instr *intr,
void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr =
nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_scratch:
v3d_nir_lower_load_scratch(b, intr);
@ -143,7 +137,7 @@ v3d_nir_lower_scratch_cb(nir_builder *b,
bool
v3d_nir_lower_scratch(nir_shader *s)
{
return nir_shader_instructions_pass(s, v3d_nir_lower_scratch_cb,
return nir_shader_intrinsics_pass(s, v3d_nir_lower_scratch_cb,
nir_metadata_block_index |
nir_metadata_dominance, NULL);
}

@ -893,12 +893,8 @@ lower_pipeline_layout_info(nir_shader *shader,
/* This flips gl_PointCoord.y to match Vulkan requirements */
static bool
lower_point_coord_cb(nir_builder *b, nir_instr *instr, void *_state)
lower_point_coord_cb(nir_builder *b, nir_intrinsic_instr *intr, void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_input)
return false;
@ -919,7 +915,7 @@ static bool
v3d_nir_lower_point_coord(nir_shader *s)
{
assert(s->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_instructions_pass(s, lower_point_coord_cb,
return nir_shader_intrinsics_pass(s, lower_point_coord_cb,
nir_metadata_block_index |
nir_metadata_dominance, NULL);
}

@ -60,13 +60,9 @@ recursive_if_chain(nir_builder *b, nir_deref_instr *deref, nir_def *value, unsig
* so we rewrite disabled clip planes to a zero value in order to disable them
*/
static bool
lower_clip_plane_store(nir_builder *b, nir_instr *instr_, void *cb_data)
lower_clip_plane_store(nir_builder *b, nir_intrinsic_instr *instr,
void *cb_data)
{
if (instr_->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(instr_);
unsigned clip_plane_enable = *(unsigned *)cb_data;
nir_variable *out;
unsigned plane;
@ -128,7 +124,7 @@ nir_lower_clip_disable(nir_shader *shader, unsigned clip_plane_enable)
if (clip_plane_enable == u_bit_consecutive(0, shader->info.clip_distance_array_size))
return false;
return nir_shader_instructions_pass(shader, lower_clip_plane_store,
return nir_shader_intrinsics_pass(shader, lower_clip_plane_store,
nir_metadata_block_index |
nir_metadata_dominance,
&clip_plane_enable);

@ -24,12 +24,9 @@
#include "nir_builder.h"
static bool
lower_pos_write(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
lower_pos_write(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_deref)
return false;
@ -62,7 +59,7 @@ nir_lower_clip_halfz(nir_shader *shader)
shader->info.stage != MESA_SHADER_TESS_EVAL)
return;
nir_shader_instructions_pass(shader, lower_pos_write,
nir_shader_intrinsics_pass(shader, lower_pos_write,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);

@ -47,12 +47,9 @@
*/
static bool
nir_lower_fb_read_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
nir_lower_fb_read_instr(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_output)
return false;
@ -89,7 +86,7 @@ nir_lower_fb_read(nir_shader *shader)
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_instructions_pass(shader, nir_lower_fb_read_instr,
return nir_shader_intrinsics_pass(shader, nir_lower_fb_read_instr,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);

@ -8,19 +8,15 @@
#include "nir_builder_opcodes.h"
static bool
lower(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_frag_coord)
return false;
/* load_pixel_coord gives the top-left corner of the pixel, but frag_coord
* should return the centre of the pixel.
*/
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *top_left_xy = nir_u2f32(b, nir_load_pixel_coord(b));
nir_def *xy = nir_fadd_imm(b, top_left_xy, 0.5);
@ -34,6 +30,7 @@ lower(nir_builder *b, nir_instr *instr, UNUSED void *data)
bool
nir_lower_frag_coord_to_pixel_coord(nir_shader *shader)
{
return nir_shader_instructions_pass(
shader, lower, nir_metadata_block_index | nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(shader, lower,
nir_metadata_block_index | nir_metadata_dominance,
NULL);
}

@ -26,12 +26,8 @@
#include "compiler/nir/nir_builder.h"
static bool
lower(nir_builder *b, nir_instr *instr, void *data)
lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
bool *lower_plain_stores = data;
switch (intr->intrinsic) {
@ -53,14 +49,14 @@ lower(nir_builder *b, nir_instr *instr, void *data)
return false;
}
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
bool has_dest = nir_intrinsic_infos[intr->intrinsic].has_dest;
nir_def *undef = NULL;
nir_def *helper = nir_load_helper_invocation(b, 1);
nir_push_if(b, nir_inot(b, helper));
nir_instr_remove(instr);
nir_builder_instr_insert(b, instr);
nir_instr_remove(&intr->instr);
nir_builder_instr_insert(b, &intr->instr);
/* Per the spec, it does not matter what we return for helper threads.
* Represent this by an ssa_undef in the hopes the backend will be clever
@ -96,7 +92,7 @@ lower(nir_builder *b, nir_instr *instr, void *data)
nir_instr *phi_instr = phi->parent_instr;
nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
instr->block);
intr->instr.block);
nir_src_rewrite(&phi_src->src, &intr->def);
}
@ -107,6 +103,6 @@ bool
nir_lower_helper_writes(nir_shader *shader, bool lower_plain_stores)
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_instructions_pass(shader, lower, nir_metadata_none,
return nir_shader_intrinsics_pass(shader, lower, nir_metadata_none,
&lower_plain_stores);
}

@ -32,13 +32,9 @@
*/
static bool
lower_point_smooth(nir_builder *b, nir_instr *instr, UNUSED void *_state)
lower_point_smooth(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_output &&
intr->intrinsic != nir_intrinsic_store_deref)
return false;
@ -99,7 +95,7 @@ bool
nir_lower_point_smooth(nir_shader *shader)
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_instructions_pass(shader, lower_point_smooth,
return nir_shader_intrinsics_pass(shader, lower_point_smooth,
nir_metadata_loop_analysis |
nir_metadata_block_index |
nir_metadata_dominance,

@ -26,23 +26,18 @@
static bool
lower_single_sampled_instr(nir_builder *b,
nir_instr *instr,
nir_intrinsic_instr *intrin,
UNUSED void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
nir_def *lowered;
switch (intrin->intrinsic) {
case nir_intrinsic_load_sample_id:
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
lowered = nir_imm_int(b, 0);
break;
case nir_intrinsic_load_sample_pos:
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
lowered = nir_imm_vec2(b, 0.5, 0.5);
break;
@ -53,20 +48,20 @@ lower_single_sampled_instr(nir_builder *b,
if (b->shader->options->lower_helper_invocation)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
lowered = nir_b2i32(b, nir_inot(b, nir_load_helper_invocation(b, 1)));
break;
case nir_intrinsic_interp_deref_at_centroid:
case nir_intrinsic_interp_deref_at_sample:
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
lowered = nir_load_deref(b, nir_src_as_deref(intrin->src[0]));
break;
case nir_intrinsic_load_barycentric_centroid:
case nir_intrinsic_load_barycentric_sample:
case nir_intrinsic_load_barycentric_at_sample:
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
lowered = nir_load_barycentric(b, nir_intrinsic_load_barycentric_pixel,
nir_intrinsic_interp_mode(intrin));
@ -84,7 +79,7 @@ lower_single_sampled_instr(nir_builder *b,
}
nir_def_rewrite_uses(&intrin->def, lowered);
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -120,7 +115,7 @@ nir_lower_single_sampled(nir_shader *shader)
BITSET_CLEAR(shader->info.system_values_read,
SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID);
return nir_shader_instructions_pass(shader, lower_single_sampled_instr,
return nir_shader_intrinsics_pass(shader, lower_single_sampled_instr,
nir_metadata_block_index |
nir_metadata_dominance,
NULL) ||

@ -9,16 +9,12 @@
#include "shader_enums.h"
static bool
lower_tess_coord_z(nir_builder *b, nir_instr *instr, void *state)
lower_tess_coord_z(nir_builder *b, nir_intrinsic_instr *intr, void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_tess_coord)
return false;
b->cursor = nir_instr_remove(instr);
b->cursor = nir_instr_remove(&intr->instr);
nir_def *xy = nir_load_tess_coord_xy(b);
nir_def *x = nir_channel(b, xy, 0);
nir_def *y = nir_channel(b, xy, 1);
@ -37,7 +33,7 @@ lower_tess_coord_z(nir_builder *b, nir_instr *instr, void *state)
bool
nir_lower_tess_coord_z(nir_shader *shader, bool triangles)
{
return nir_shader_instructions_pass(shader, lower_tess_coord_z,
return nir_shader_intrinsics_pass(shader, lower_tess_coord_z,
nir_metadata_block_index |
nir_metadata_dominance,
&triangles);

@ -114,12 +114,8 @@ nir_lower_deref_copy_instr(nir_builder *b, nir_intrinsic_instr *copy)
}
static bool
lower_var_copies_instr(nir_builder *b, nir_instr *instr, void *data)
lower_var_copies_instr(nir_builder *b, nir_intrinsic_instr *copy, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *copy = nir_instr_as_intrinsic(instr);
if (copy->intrinsic != nir_intrinsic_copy_deref)
return false;
@ -141,8 +137,7 @@ nir_lower_var_copies(nir_shader *shader)
{
shader->info.var_copies_lowered = true;
return nir_shader_instructions_pass(shader,
lower_var_copies_instr,
return nir_shader_intrinsics_pass(shader, lower_var_copies_instr,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);

@ -41,13 +41,9 @@
#include "nir/nir_builder.h"
static bool
lower_viewport_transform_instr(nir_builder *b, nir_instr *instr,
lower_viewport_transform_instr(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_deref)
return false;
@ -56,7 +52,7 @@ lower_viewport_transform_instr(nir_builder *b, nir_instr *instr,
var->data.location != VARYING_SLOT_POS)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
/* Grab the source and viewport */
nir_def *input_point = nir_ssa_for_src(b, intr->src[1], 4);
@ -95,8 +91,7 @@ nir_lower_viewport_transform(nir_shader *shader)
{
assert((shader->info.stage == MESA_SHADER_VERTEX) || (shader->info.stage == MESA_SHADER_GEOMETRY) || (shader->info.stage == MESA_SHADER_TESS_EVAL));
return nir_shader_instructions_pass(shader,
lower_viewport_transform_instr,
return nir_shader_intrinsics_pass(shader, lower_viewport_transform_instr,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);

@ -65,12 +65,8 @@ update_fragcoord(nir_builder *b, nir_intrinsic_instr *intr)
}
static bool
lower_wpos_center_instr(nir_builder *b, nir_instr *instr, void *data)
lower_wpos_center_instr(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_frag_coord)
return false;
@ -83,8 +79,7 @@ nir_lower_wpos_center(nir_shader *shader)
{
assert(shader->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_instructions_pass(shader,
lower_wpos_center_instr,
return nir_shader_intrinsics_pass(shader, lower_wpos_center_instr,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);

@ -83,12 +83,9 @@ split_deref_copy_instr(nir_builder *b,
}
static bool
split_var_copies_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
split_var_copies_instr(nir_builder *b, nir_intrinsic_instr *copy,
UNUSED void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *copy = nir_instr_as_intrinsic(instr);
if (copy->intrinsic != nir_intrinsic_copy_deref)
return false;
@ -106,7 +103,7 @@ split_var_copies_instr(nir_builder *b, nir_instr *instr, UNUSED void *cb_data)
bool
nir_split_var_copies(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, split_var_copies_instr,
return nir_shader_intrinsics_pass(shader, split_var_copies_instr,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);

@ -584,12 +584,8 @@ lower_instr(nir_builder *b, nir_instr *instr, void *cb_data)
* them to load_uniform which turns into constant memory loads.
*/
static bool
lower_inline_ubo(nir_builder *b, nir_instr *instr, void *cb_data)
lower_inline_ubo(nir_builder *b, nir_intrinsic_instr *intrin, void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_ubo)
return false;
@ -635,7 +631,7 @@ lower_inline_ubo(nir_builder *b, nir_instr *instr, void *cb_data)
nir_def *offset = intrin->src[1].ssa;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *val;
if (use_load) {
@ -651,7 +647,7 @@ lower_inline_ubo(nir_builder *b, nir_instr *instr, void *cb_data)
}
nir_def_rewrite_uses(&intrin->def, val);
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -794,8 +790,7 @@ tu_lower_io(nir_shader *shader, struct tu_device *dev,
bool progress = false;
if (const_state->num_inline_ubos) {
progress |= nir_shader_instructions_pass(shader,
lower_inline_ubo,
progress |= nir_shader_intrinsics_pass(shader, lower_inline_ubo,
nir_metadata_none,
&params);
}

@ -191,12 +191,8 @@ lower_sysvals(nir_builder *b, nir_instr *instr, void *data)
/* Step 2: Record system value loads */
static bool
record_loads(nir_builder *b, nir_instr *instr, void *data)
record_loads(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_sysval_agx)
return false;
@ -360,9 +356,9 @@ agx_nir_layout_uniforms(nir_shader *shader, bool internal_bindless,
unsigned *push_size)
{
struct state state = {0};
nir_shader_instructions_pass(
shader, record_loads, nir_metadata_block_index | nir_metadata_dominance,
&state);
nir_shader_intrinsics_pass(shader, record_loads,
nir_metadata_block_index | nir_metadata_dominance,
&state);
if (internal_bindless)
reserve_internal_bindless(&state, shader->info.stage);

@ -389,12 +389,8 @@ lower_xfb_output(nir_builder *b, nir_intrinsic_instr *intr,
}
static bool
lower_xfb(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_xfb(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_output)
return false;
@ -419,7 +415,7 @@ lower_xfb(nir_builder *b, nir_instr *instr, UNUSED void *data)
}
}
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return progress;
}
@ -552,7 +548,7 @@ agx_nir_lower_xfb(nir_shader *nir, struct agx_xfb_key *key)
NIR_PASS_V(nir, nir_io_add_intrinsic_xfb_info);
NIR_PASS_V(nir, insert_overflow_check, key);
NIR_PASS_V(nir, nir_shader_instructions_pass, lower_xfb,
NIR_PASS_V(nir, nir_shader_intrinsics_pass, lower_xfb,
nir_metadata_block_index | nir_metadata_dominance, key);
NIR_PASS_V(nir, nir_shader_instructions_pass, lower_xfb_intrinsics,
nir_metadata_block_index | nir_metadata_dominance, key);

@ -189,12 +189,8 @@ convert_value(nir_builder *b, nir_def *value,
}
static bool
lower_image_cast_instr(nir_builder *b, nir_instr *instr, void *_data)
lower_image_cast_instr(nir_builder *b, nir_intrinsic_instr *intr, void *_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_image_deref_load &&
intr->intrinsic != nir_intrinsic_image_deref_store)
return false;
@ -216,12 +212,12 @@ lower_image_cast_instr(nir_builder *b, nir_instr *instr, void *_data)
nir_def *value;
const struct util_format_description *from_desc, *to_desc;
if (intr->intrinsic == nir_intrinsic_image_deref_load) {
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
value = &intr->def;
from_desc = util_format_description(emulation_format);
to_desc = util_format_description(real_format);
} else {
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
value = intr->src[3].ssa;
from_desc = util_format_description(real_format);
to_desc = util_format_description(emulation_format);
@ -251,8 +247,9 @@ lower_image_cast_instr(nir_builder *b, nir_instr *instr, void *_data)
bool
d3d12_lower_image_casts(nir_shader *s, struct d3d12_image_format_conversion_info_arr *info)
{
bool progress = nir_shader_instructions_pass(s, lower_image_cast_instr,
nir_metadata_block_index | nir_metadata_dominance, info);
bool progress = nir_shader_intrinsics_pass(s, lower_image_cast_instr,
nir_metadata_block_index | nir_metadata_dominance,
info);
if (progress) {
nir_foreach_image_variable(var, s) {

@ -255,13 +255,9 @@ d3d12_lower_uint_cast(nir_shader *nir, bool is_signed)
}
static bool
lower_load_draw_params(nir_builder *b, nir_instr *instr, void *draw_params)
lower_load_draw_params(nir_builder *b, nir_intrinsic_instr *intr,
void *draw_params)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_first_vertex &&
intr->intrinsic != nir_intrinsic_load_base_instance &&
intr->intrinsic != nir_intrinsic_load_draw_id &&
@ -276,7 +272,7 @@ lower_load_draw_params(nir_builder *b, nir_instr *instr, void *draw_params)
intr->intrinsic == nir_intrinsic_load_base_instance ? 1 :
intr->intrinsic == nir_intrinsic_load_draw_id ? 2 : 3;
nir_def_rewrite_uses(&intr->def, nir_channel(b, load, channel));
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -288,16 +284,15 @@ d3d12_lower_load_draw_params(struct nir_shader *nir)
if (nir->info.stage != MESA_SHADER_VERTEX)
return false;
return nir_shader_instructions_pass(nir, lower_load_draw_params,
nir_metadata_block_index | nir_metadata_dominance, &draw_params);
return nir_shader_intrinsics_pass(nir, lower_load_draw_params,
nir_metadata_block_index | nir_metadata_dominance,
&draw_params);
}
static bool
lower_load_patch_vertices_in(nir_builder *b, nir_instr *instr, void *_state)
lower_load_patch_vertices_in(nir_builder *b, nir_intrinsic_instr *intr,
void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_patch_vertices_in)
return false;
@ -306,7 +301,7 @@ lower_load_patch_vertices_in(nir_builder *b, nir_instr *instr, void *_state)
d3d12_get_state_var(b, D3D12_STATE_VAR_PATCH_VERTICES_IN, "d3d12_FirstVertex", glsl_uint_type(), _state) :
nir_imm_int(b, b->shader->info.tess.tcs_vertices_out);
nir_def_rewrite_uses(&intr->def, load);
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -319,8 +314,9 @@ d3d12_lower_load_patch_vertices_in(struct nir_shader *nir)
nir->info.stage != MESA_SHADER_TESS_EVAL)
return false;
return nir_shader_instructions_pass(nir, lower_load_patch_vertices_in,
nir_metadata_block_index | nir_metadata_dominance, &var);
return nir_shader_intrinsics_pass(nir, lower_load_patch_vertices_in,
nir_metadata_block_index | nir_metadata_dominance,
&var);
}
struct invert_depth_state
@ -857,11 +853,9 @@ struct multistream_state {
};
static bool
split_multistream_varying_stores(nir_builder *b, nir_instr *instr, void *_state)
split_multistream_varying_stores(nir_builder *b, nir_intrinsic_instr *intr,
void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_deref)
return false;
@ -891,7 +885,7 @@ split_multistream_varying_stores(nir_builder *b, nir_instr *instr, void *_state)
new_path = nir_build_deref_follower(b, new_path, path.path[i]);
}
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
unsigned mask_num_channels = (1 << var_state->subvars[subvar].num_components) - 1;
unsigned orig_write_mask = nir_intrinsic_write_mask(intr);
nir_def *sub_value = nir_channels(b, intr->src[1].ssa, mask_num_channels << first_channel);
@ -903,7 +897,7 @@ split_multistream_varying_stores(nir_builder *b, nir_instr *instr, void *_state)
}
nir_deref_path_finish(&path);
nir_instr_free_and_dce(instr);
nir_instr_free_and_dce(&intr->instr);
return true;
}
@ -958,8 +952,9 @@ d3d12_split_multistream_varyings(nir_shader *s)
}
if (progress) {
nir_shader_instructions_pass(s, split_multistream_varying_stores,
nir_metadata_block_index | nir_metadata_dominance, &state);
nir_shader_intrinsics_pass(s, split_multistream_varying_stores,
nir_metadata_block_index | nir_metadata_dominance,
&state);
} else {
nir_shader_preserve_all_metadata(s);
}

@ -25,12 +25,8 @@
#include "pan_context.h"
static bool
pass(nir_builder *b, nir_instr *instr, void *data)
pass(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_output)
return false;
@ -39,7 +35,7 @@ pass(nir_builder *b, nir_instr *instr, void *data)
if (location >= FRAG_RESULT_DATA0 &&
(location - FRAG_RESULT_DATA0) >= (*nr_cbufs)) {
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
} else {
return false;
@ -49,6 +45,6 @@ pass(nir_builder *b, nir_instr *instr, void *data)
bool
panfrost_nir_remove_fragcolor_stores(nir_shader *s, unsigned nr_cbufs)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
s, pass, nir_metadata_block_index | nir_metadata_dominance, &nr_cbufs);
}

@ -375,13 +375,9 @@ r600_map_atomic(nir_intrinsic_op op)
}
static bool
r600_lower_deref_instr(nir_builder *b, nir_instr *instr_, UNUSED void *cb_data)
r600_lower_deref_instr(nir_builder *b, nir_intrinsic_instr *instr,
UNUSED void *cb_data)
{
if (instr_->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(instr_);
nir_intrinsic_op op = r600_map_atomic(instr->intrinsic);
if (nir_num_intrinsics == op)
return false;
@ -464,10 +460,9 @@ r600_nir_lower_atomics(nir_shader *shader)
shader->variables.push_tail(&var->node);
}
return nir_shader_instructions_pass(shader,
r600_lower_deref_instr,
nir_metadata_block_index | nir_metadata_dominance,
NULL);
return nir_shader_intrinsics_pass(shader, r600_lower_deref_instr,
nir_metadata_block_index | nir_metadata_dominance,
NULL);
}
using r600::r600_lower_fs_out_to_vector;
using r600::r600_lower_scratch_addresses;

@ -568,18 +568,14 @@ load_vs_input_from_vertex_buffer(nir_builder *b, unsigned input_index,
}
static bool
lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
lower_vs_input_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_input)
return false;
struct lower_vs_inputs_state *s = (struct lower_vs_inputs_state *)state;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
unsigned input_index = nir_intrinsic_base(intrin);
unsigned component = nir_intrinsic_component(intrin);
@ -594,8 +590,8 @@ lower_vs_input_instr(nir_builder *b, nir_instr *instr, void *state)
nir_def *replacement = nir_vec(b, &comp[component], num_components);
nir_def_rewrite_uses(&intrin->def, replacement);
nir_instr_remove(instr);
nir_instr_free(instr);
nir_instr_remove(&intrin->instr);
nir_instr_free(&intrin->instr);
return true;
}
@ -617,7 +613,7 @@ si_nir_lower_vs_inputs(nir_shader *nir, struct si_shader *shader, struct si_shad
if (!sel->info.base.vs.blit_sgprs_amd)
get_vertex_index_for_all_inputs(nir, &state);
return nir_shader_instructions_pass(nir, lower_vs_input_instr,
return nir_shader_intrinsics_pass(nir, lower_vs_input_instr,
nir_metadata_dominance | nir_metadata_block_index,
&state);
}

@ -106,12 +106,9 @@ struct replace_param {
};
static bool
store_instr_depends_on_tex(nir_builder *b, nir_instr *instr, void *state)
store_instr_depends_on_tex(nir_builder *b, nir_intrinsic_instr *intrin,
void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_store_output)
return false;
@ -160,7 +157,7 @@ si_nir_is_output_const_if_tex_is_const(nir_shader *shader, float *in, float *out
p.texunit = texunit;
/* Test if the single store_output only depends on constants and a single texture op */
if (nir_shader_instructions_pass(shader, store_instr_depends_on_tex, nir_metadata_all, &p)) {
if (nir_shader_intrinsics_pass(shader, store_instr_depends_on_tex, nir_metadata_all, &p)) {
assert(*p.texunit != -1);
/* Replace nir_tex_instr using texunit by vec4(v) */

@ -1671,12 +1671,9 @@ static bool si_nir_kill_outputs(nir_shader *nir, const union si_shader_key *key)
return progress;
}
static bool clamp_vertex_color_instr(nir_builder *b, nir_instr *instr, void *state)
static bool clamp_vertex_color_instr(nir_builder *b,
nir_intrinsic_instr *intrin, void *state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_store_output)
return false;
@ -1690,7 +1687,7 @@ static bool clamp_vertex_color_instr(nir_builder *b, nir_instr *instr, void *sta
/* only scalar output */
assert(intrin->src[0].ssa->num_components == 1);
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *color = intrin->src[0].ssa;
nir_def *clamp = nir_load_clamp_vertex_color_amd(b);
@ -1706,7 +1703,7 @@ static bool si_nir_clamp_vertex_color(nir_shader *nir)
if (!(nir->info.outputs_written & mask))
return false;
return nir_shader_instructions_pass(nir, clamp_vertex_color_instr,
return nir_shader_intrinsics_pass(nir, clamp_vertex_color_instr,
nir_metadata_dominance | nir_metadata_block_index,
NULL);
}

@ -288,12 +288,9 @@ v3d_shader_precompile(struct v3d_context *v3d,
}
static bool
lower_uniform_offset_to_bytes_cb(nir_builder *b, nir_instr *instr, void *_state)
lower_uniform_offset_to_bytes_cb(nir_builder *b, nir_intrinsic_instr *intr,
void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_uniform)
return false;
@ -324,7 +321,7 @@ lower_textures_cb(nir_builder *b, nir_instr *instr, void *_state)
static bool
v3d_nir_lower_uniform_offset_to_bytes(nir_shader *s)
{
return nir_shader_instructions_pass(s, lower_uniform_offset_to_bytes_cb,
return nir_shader_intrinsics_pass(s, lower_uniform_offset_to_bytes_cb,
nir_metadata_block_index |
nir_metadata_dominance, NULL);
}

@ -101,11 +101,8 @@ fields[member_idx].offset = offsetof(struct zink_gfx_push_constant, field);
}
static bool
lower_basevertex_instr(nir_builder *b, nir_instr *in, void *data)
lower_basevertex_instr(nir_builder *b, nir_intrinsic_instr *instr, void *data)
{
if (in->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in);
if (instr->intrinsic != nir_intrinsic_load_base_vertex)
return false;
@ -136,16 +133,14 @@ lower_basevertex(nir_shader *shader)
if (!BITSET_TEST(shader->info.system_values_read, SYSTEM_VALUE_BASE_VERTEX))
return false;
return nir_shader_instructions_pass(shader, lower_basevertex_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(shader, lower_basevertex_instr,
nir_metadata_dominance, NULL);
}
static bool
lower_drawid_instr(nir_builder *b, nir_instr *in, void *data)
lower_drawid_instr(nir_builder *b, nir_intrinsic_instr *instr, void *data)
{
if (in->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in);
if (instr->intrinsic != nir_intrinsic_load_draw_id)
return false;
@ -170,7 +165,8 @@ lower_drawid(nir_shader *shader)
if (!BITSET_TEST(shader->info.system_values_read, SYSTEM_VALUE_DRAW_ID))
return false;
return nir_shader_instructions_pass(shader, lower_drawid_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(shader, lower_drawid_instr,
nir_metadata_dominance, NULL);
}
struct lower_gl_point_state {
@ -1167,13 +1163,10 @@ zink_create_quads_emulation_gs(const nir_shader_compiler_options *options,
}
static bool
lower_system_values_to_inlined_uniforms_instr(nir_builder *b, nir_instr *instr, void *data)
lower_system_values_to_inlined_uniforms_instr(nir_builder *b,
nir_intrinsic_instr *intrin,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
int inlined_uniform_offset;
switch (intrin->intrinsic) {
case nir_intrinsic_load_flat_mask:
@ -1192,14 +1185,15 @@ lower_system_values_to_inlined_uniforms_instr(nir_builder *b, nir_instr *instr,
.align_mul = 4, .align_offset = 0,
.range_base = 0, .range = ~0);
nir_def_rewrite_uses(&intrin->def, new_dest_def);
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
bool
zink_lower_system_values_to_inlined_uniforms(nir_shader *nir)
{
return nir_shader_instructions_pass(nir, lower_system_values_to_inlined_uniforms_instr,
return nir_shader_intrinsics_pass(nir,
lower_system_values_to_inlined_uniforms_instr,
nir_metadata_dominance, NULL);
}
@ -2614,11 +2608,8 @@ rewrite_read_as_0(nir_builder *b, nir_instr *instr, void *data)
static bool
delete_psiz_store_instr(nir_builder *b, nir_instr *instr, void *data)
delete_psiz_store_instr(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_store_output:
case nir_intrinsic_store_per_primitive_output:
@ -2629,14 +2620,15 @@ delete_psiz_store_instr(nir_builder *b, nir_instr *instr, void *data)
}
if (nir_intrinsic_io_semantics(intr).location != VARYING_SLOT_PSIZ)
return false;
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
static bool
delete_psiz_store(nir_shader *nir)
{
return nir_shader_instructions_pass(nir, delete_psiz_store_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(nir, delete_psiz_store_instr,
nir_metadata_dominance, NULL);
}
void
@ -3387,14 +3379,12 @@ lower_zs_swizzle_tex(nir_shader *nir, const void *swizzle, bool shadow_only)
}
static bool
invert_point_coord_instr(nir_builder *b, nir_instr *instr, void *data)
invert_point_coord_instr(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_point_coord)
return false;
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def *def = nir_vec2(b, nir_channel(b, &intr->def, 0),
nir_fsub_imm(b, 1.0, nir_channel(b, &intr->def, 1)));
nir_def_rewrite_uses_after(&intr->def, def, def->parent_instr);
@ -3406,15 +3396,13 @@ invert_point_coord(nir_shader *nir)
{
if (!BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_POINT_COORD))
return false;
return nir_shader_instructions_pass(nir, invert_point_coord_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(nir, invert_point_coord_instr,
nir_metadata_dominance, NULL);
}
static bool
add_derefs_instr(nir_builder *b, nir_instr *instr, void *data)
add_derefs_instr(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
bool is_load = false;
bool is_input = false;
bool is_interp = false;
@ -3465,7 +3453,7 @@ add_derefs_instr(nir_builder *b, nir_instr *instr, void *data)
if (var->data.location_frac + size <= c || var->data.location_frac > c)
continue;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (nir_is_arrayed_io(var, b->shader->info.stage)) {
assert(intr->intrinsic != nir_intrinsic_store_output);
@ -3562,7 +3550,7 @@ add_derefs_instr(nir_builder *b, nir_instr *instr, void *data)
}
nir_store_deref(b, deref, store, BITFIELD_RANGE(c - var->data.location_frac, intr->num_components));
}
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
unreachable("failed to find variable for explicit io!");
@ -3572,7 +3560,8 @@ add_derefs_instr(nir_builder *b, nir_instr *instr, void *data)
static bool
add_derefs(nir_shader *nir)
{
return nir_shader_instructions_pass(nir, add_derefs_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(nir, add_derefs_instr,
nir_metadata_dominance, NULL);
}
static struct zink_shader_object
@ -3880,14 +3869,12 @@ zink_shader_compile_separate(struct zink_screen *screen, struct zink_shader *zs)
}
static bool
lower_baseinstance_instr(nir_builder *b, nir_instr *instr, void *data)
lower_baseinstance_instr(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_instance_id)
return false;
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def *def = nir_isub(b, &intr->def, nir_load_base_instance(b));
nir_def_rewrite_uses_after(&intr->def, def, def->parent_instr);
return true;
@ -3898,7 +3885,8 @@ lower_baseinstance(nir_shader *shader)
{
if (shader->info.stage != MESA_SHADER_VERTEX)
return false;
return nir_shader_instructions_pass(shader, lower_baseinstance_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(shader, lower_baseinstance_instr,
nir_metadata_dominance, NULL);
}
/* gl_nir_lower_buffers makes variables unusable for all UBO/SSBO access
@ -4193,11 +4181,9 @@ lower_bindless(nir_shader *shader, struct zink_bindless_info *bindless)
/* convert shader image/texture io variables to int64 handles for bindless indexing */
static bool
lower_bindless_io_instr(nir_builder *b, nir_instr *in, void *data)
lower_bindless_io_instr(nir_builder *b, nir_intrinsic_instr *instr,
void *data)
{
if (in->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in);
bool is_load = false;
bool is_input = false;
bool is_interp = false;
@ -4221,7 +4207,8 @@ lower_bindless_io_instr(nir_builder *b, nir_instr *in, void *data)
static bool
lower_bindless_io(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, lower_bindless_io_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(shader, lower_bindless_io_instr,
nir_metadata_dominance, NULL);
}
static uint32_t
@ -4463,11 +4450,8 @@ is_residency_code(nir_def *src)
}
static bool
lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
lower_sparse_instr(nir_builder *b, nir_intrinsic_instr *instr, void *data)
{
if (in->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in);
if (instr->intrinsic == nir_intrinsic_sparse_residency_code_and) {
b->cursor = nir_before_instr(&instr->instr);
nir_def *src0;
@ -4481,8 +4465,8 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
else
src1 = instr->src[1].ssa;
nir_def *def = nir_iand(b, src0, src1);
nir_def_rewrite_uses_after(&instr->def, def, in);
nir_instr_remove(in);
nir_def_rewrite_uses_after(&instr->def, def, &instr->instr);
nir_instr_remove(&instr->instr);
return true;
}
if (instr->intrinsic != nir_intrinsic_is_sparse_texels_resident)
@ -4517,7 +4501,7 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
src = nir_u2uN(b, src, instr->def.bit_size);
}
nir_def_rewrite_uses(&instr->def, src);
nir_instr_remove(in);
nir_instr_remove(&instr->instr);
}
return true;
}
@ -4525,7 +4509,8 @@ lower_sparse_instr(nir_builder *b, nir_instr *in, void *data)
static bool
lower_sparse(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, lower_sparse_instr, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(shader, lower_sparse_instr,
nir_metadata_dominance, NULL);
}
static bool

@ -93,31 +93,28 @@ lvp_build_initialize_node_payloads(nir_builder *b, nir_intrinsic_instr *intr)
}
static bool
lvp_lower_node_payload_intrinsic(nir_builder *b, nir_instr *instr, void *data)
lvp_lower_node_payload_intrinsic(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic == nir_intrinsic_enqueue_node_payloads) {
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return false;
}
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
switch (intr->intrinsic) {
case nir_intrinsic_initialize_node_payloads:
lvp_build_initialize_node_payloads(b, intr);
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
case nir_intrinsic_finalize_incoming_node_payload:
nir_def_rewrite_uses(&intr->def, nir_imm_true(b));
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
case nir_intrinsic_load_coalesced_input_count:
nir_def_rewrite_uses(&intr->def, nir_imm_int(b, 1));
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
default:
return false;
@ -127,7 +124,7 @@ lvp_lower_node_payload_intrinsic(nir_builder *b, nir_instr *instr, void *data)
static bool
lvp_lower_exec_graph_intrinsics(nir_shader *nir)
{
return nir_shader_instructions_pass(nir, lvp_lower_node_payload_intrinsic,
return nir_shader_intrinsics_pass(nir, lvp_lower_node_payload_intrinsic,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}


@ -148,12 +148,8 @@ lower_image_intrinsic(nir_builder *b,
}
static bool
lower_load_ubo(nir_builder *b, nir_instr *instr, void *data_cb)
lower_load_ubo(nir_builder *b, nir_intrinsic_instr *intrin, void *data_cb)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_ubo)
return false;
@ -169,7 +165,7 @@ lower_load_ubo(nir_builder *b, nir_instr *instr, void *data_cb)
if (bind_layout->type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_src_rewrite(&intrin->src[0], nir_imm_int(b, binding.desc_set + 1));
@ -229,6 +225,8 @@ void lvp_lower_pipeline_layout(const struct lvp_device *device,
struct lvp_pipeline_layout *layout,
nir_shader *shader)
{
nir_shader_instructions_pass(shader, lower_load_ubo, nir_metadata_block_index | nir_metadata_dominance, layout);
nir_shader_intrinsics_pass(shader, lower_load_ubo,
nir_metadata_block_index | nir_metadata_dominance,
layout);
nir_shader_lower_instructions(shader, lower_vulkan_resource_index, lower_vri_instr, layout);
}


@ -132,11 +132,8 @@ shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
}
static bool
remove_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
remove_barriers_impl(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_barrier)
return false;
if (data) {
@ -145,22 +142,21 @@ remove_barriers_impl(nir_builder *b, nir_instr *instr, void *data)
nir_intrinsic_memory_scope(intr) == SCOPE_QUEUE_FAMILY)
return false;
}
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
static bool
remove_barriers(nir_shader *nir, bool is_compute)
{
return nir_shader_instructions_pass(nir, remove_barriers_impl, nir_metadata_dominance, (void*)is_compute);
return nir_shader_intrinsics_pass(nir, remove_barriers_impl,
nir_metadata_dominance,
(void*)is_compute);
}
static bool
lower_demote_impl(nir_builder *b, nir_instr *instr, void *data)
lower_demote_impl(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic == nir_intrinsic_demote || intr->intrinsic == nir_intrinsic_terminate) {
intr->intrinsic = nir_intrinsic_discard;
return true;
@ -175,7 +171,8 @@ lower_demote_impl(nir_builder *b, nir_instr *instr, void *data)
static bool
lower_demote(nir_shader *nir)
{
return nir_shader_instructions_pass(nir, lower_demote_impl, nir_metadata_dominance, NULL);
return nir_shader_intrinsics_pass(nir, lower_demote_impl,
nir_metadata_dominance, NULL);
}
static bool


@ -345,13 +345,9 @@ blorp_compile_vs(struct blorp_context *blorp, void *mem_ctx,
}
static bool
lower_base_workgroup_id(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intrin,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_base_workgroup_id)
return false;
@ -387,7 +383,7 @@ blorp_compile_cs(struct blorp_context *blorp, void *mem_ctx,
cs_prog_data->base.param = rzalloc_array(NULL, uint32_t, nr_params);
NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);
NIR_PASS_V(nir, nir_shader_instructions_pass, lower_base_workgroup_id,
NIR_PASS_V(nir, nir_shader_intrinsics_pass, lower_base_workgroup_id,
nir_metadata_block_index | nir_metadata_dominance, NULL);
struct brw_compile_cs_params params = {


@ -95,13 +95,10 @@ shared_type_info(const struct glsl_type *type, unsigned *size, unsigned *align)
}
static bool
brw_nir_lower_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, void *data)
brw_nir_lower_launch_mesh_workgroups_instr(nir_builder *b,
nir_intrinsic_instr *intrin,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_launch_mesh_workgroups)
return false;
@ -131,7 +128,7 @@ brw_nir_lower_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, voi
}
nir_pop_if(b, if_stmt);
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -139,7 +136,7 @@ brw_nir_lower_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, voi
static bool
brw_nir_lower_launch_mesh_workgroups(nir_shader *nir)
{
return nir_shader_instructions_pass(nir,
return nir_shader_intrinsics_pass(nir,
brw_nir_lower_launch_mesh_workgroups_instr,
nir_metadata_none,
NULL);
@ -240,13 +237,10 @@ brw_nir_adjust_payload(nir_shader *shader, const struct brw_compiler *compiler)
}
static bool
brw_nir_align_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, void *data)
brw_nir_align_launch_mesh_workgroups_instr(nir_builder *b,
nir_intrinsic_instr *intrin,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_launch_mesh_workgroups)
return false;
@ -263,7 +257,7 @@ brw_nir_align_launch_mesh_workgroups_instr(nir_builder *b, nir_instr *instr, voi
static bool
brw_nir_align_launch_mesh_workgroups(nir_shader *nir)
{
return nir_shader_instructions_pass(nir,
return nir_shader_intrinsics_pass(nir,
brw_nir_align_launch_mesh_workgroups_instr,
nir_metadata_block_index |
nir_metadata_dominance,
@ -1194,13 +1188,10 @@ brw_nir_adjust_offset(nir_builder *b, nir_intrinsic_instr *intrin, uint32_t pitc
}
static bool
brw_nir_adjust_offset_for_arrayed_indices_instr(nir_builder *b, nir_instr *instr, void *data)
brw_nir_adjust_offset_for_arrayed_indices_instr(nir_builder *b,
nir_intrinsic_instr *intrin,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
const struct brw_mue_map *map = (const struct brw_mue_map *) data;
/* Remap per_vertex and per_primitive offsets using the extra source and
@ -1235,7 +1226,7 @@ brw_nir_adjust_offset_for_arrayed_indices_instr(nir_builder *b, nir_instr *instr
static bool
brw_nir_adjust_offset_for_arrayed_indices(nir_shader *nir, const struct brw_mue_map *map)
{
return nir_shader_instructions_pass(nir,
return nir_shader_intrinsics_pass(nir,
brw_nir_adjust_offset_for_arrayed_indices_instr,
nir_metadata_block_index |
nir_metadata_dominance,
@ -1335,12 +1326,9 @@ brw_can_pack_primitive_indices(nir_shader *nir, struct index_packing_state *stat
}
static bool
brw_pack_primitive_indices_instr(nir_builder *b, nir_instr *instr, void *data)
brw_pack_primitive_indices_instr(nir_builder *b, nir_intrinsic_instr *intrin,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_store_deref)
return false;
@ -1385,7 +1373,7 @@ brw_pack_primitive_indices_instr(nir_builder *b, nir_instr *instr, void *data)
nir_build_store_deref(b, &new_array_deref->def, new_data);
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -1407,8 +1395,7 @@ brw_pack_primitive_indices(nir_shader *nir, void *data)
state->packed_prim_indices->data.interpolation = INTERP_MODE_NONE;
state->packed_prim_indices->data.per_primitive = 1;
return nir_shader_instructions_pass(nir,
brw_pack_primitive_indices_instr,
return nir_shader_intrinsics_pass(nir, brw_pack_primitive_indices_instr,
nir_metadata_block_index |
nir_metadata_dominance,
data);


@ -439,23 +439,19 @@ brw_nir_lower_tes_inputs(nir_shader *nir, const struct brw_vue_map *vue_map)
static bool
lower_barycentric_per_sample(nir_builder *b,
nir_instr *instr,
nir_intrinsic_instr *intrin,
UNUSED void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_barycentric_pixel &&
intrin->intrinsic != nir_intrinsic_load_barycentric_centroid)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *centroid =
nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
nir_intrinsic_interp_mode(intrin));
nir_def_rewrite_uses(&intrin->def, centroid);
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -475,17 +471,13 @@ lower_barycentric_per_sample(nir_builder *b,
* FRAGMENT_INTERPOLATION_OFFSET_BITS."
*/
static bool
lower_barycentric_at_offset(nir_builder *b, nir_instr *instr, void *data)
lower_barycentric_at_offset(nir_builder *b, nir_intrinsic_instr *intrin,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
assert(intrin->src[0].ssa);
nir_def *offset =
@ -537,13 +529,13 @@ brw_nir_lower_fs_inputs(nir_shader *nir,
if (key->multisample_fbo == BRW_NEVER) {
nir_lower_single_sampled(nir);
} else if (key->persample_interp == BRW_ALWAYS) {
nir_shader_instructions_pass(nir, lower_barycentric_per_sample,
nir_shader_intrinsics_pass(nir, lower_barycentric_per_sample,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);
}
nir_shader_instructions_pass(nir, lower_barycentric_at_offset,
nir_shader_intrinsics_pass(nir, lower_barycentric_at_offset,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);


@ -34,12 +34,9 @@
#include "compiler/nir/nir_deref.h"
static bool
clamp_per_vertex_loads_instr(nir_builder *b, nir_instr *instr, void *cb_data)
clamp_per_vertex_loads_instr(nir_builder *b, nir_intrinsic_instr *intrin,
void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_deref)
return false;
@ -75,7 +72,7 @@ brw_nir_clamp_per_vertex_loads(nir_shader *shader)
{
void *mem_ctx = ralloc_context(NULL);
bool ret = nir_shader_instructions_pass(shader, clamp_per_vertex_loads_instr,
bool ret = nir_shader_intrinsics_pass(shader, clamp_per_vertex_loads_instr,
nir_metadata_block_index |
nir_metadata_dominance,
mem_ctx);
@ -86,18 +83,15 @@ brw_nir_clamp_per_vertex_loads(nir_shader *shader)
}
static bool
lower_patch_vertices_instr(nir_builder *b, nir_instr *instr, void *cb_data)
lower_patch_vertices_instr(nir_builder *b, nir_intrinsic_instr *intrin,
void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_patch_vertices_in)
return false;
unsigned *input_vertices = cb_data;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def_rewrite_uses(&intrin->def, nir_imm_int(b, *input_vertices));
@ -107,7 +101,7 @@ lower_patch_vertices_instr(nir_builder *b, nir_instr *instr, void *cb_data)
bool
brw_nir_lower_patch_vertices_in(nir_shader *shader, unsigned input_vertices)
{
return nir_shader_instructions_pass(shader, lower_patch_vertices_instr,
return nir_shader_intrinsics_pass(shader, lower_patch_vertices_instr,
nir_metadata_block_index |
nir_metadata_dominance,
&input_vertices);


@ -280,13 +280,9 @@ skip_resource_intel_cleanup(nir_instr *instr)
static bool
brw_nir_cleanup_resource_intel_instr(nir_builder *b,
nir_instr *instr,
nir_intrinsic_instr *intrin,
void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_resource_intel)
return false;
@ -311,7 +307,7 @@ brw_nir_cleanup_resource_intel(nir_shader *shader)
{
void *mem_ctx = ralloc_context(NULL);
bool ret = nir_shader_instructions_pass(shader,
bool ret = nir_shader_intrinsics_pass(shader,
brw_nir_cleanup_resource_intel_instr,
nir_metadata_block_index |
nir_metadata_dominance,


@ -47,13 +47,9 @@
#include "compiler/nir/nir_builder.h"
static bool
lower_shading_rate_output_instr(nir_builder *b, nir_instr *instr,
lower_shading_rate_output_instr(nir_builder *b, nir_intrinsic_instr *intrin,
UNUSED void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
nir_intrinsic_op op = intrin->intrinsic;
if (op != nir_intrinsic_load_output &&
@ -69,7 +65,7 @@ lower_shading_rate_output_instr(nir_builder *b, nir_instr *instr,
bool is_store = op == nir_intrinsic_store_output ||
op == nir_intrinsic_store_per_primitive_output;
b->cursor = is_store ? nir_before_instr(instr) : nir_after_instr(instr);
b->cursor = is_store ? nir_before_instr(&intrin->instr) : nir_after_instr(&intrin->instr);
if (is_store) {
nir_def *bit_field = intrin->src[0].ssa;
@ -106,7 +102,7 @@ lower_shading_rate_output_instr(nir_builder *b, nir_instr *instr,
bool
brw_nir_lower_shading_rate_output(nir_shader *nir)
{
return nir_shader_instructions_pass(nir, lower_shading_rate_output_instr,
return nir_shader_intrinsics_pass(nir, lower_shading_rate_output_instr,
nir_metadata_block_index |
nir_metadata_dominance, NULL);
}


@ -39,12 +39,9 @@
#include "shaders/query_copy_fragment_spv.h"
static bool
lower_vulkan_descriptors_instr(nir_builder *b, nir_instr *instr, void *cb_data)
lower_vulkan_descriptors_instr(nir_builder *b, nir_intrinsic_instr *intrin,
void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_vulkan_descriptor)
return false;
@ -54,7 +51,7 @@ lower_vulkan_descriptors_instr(nir_builder *b, nir_instr *instr, void *cb_data)
nir_instr_as_intrinsic(res_index_instr);
assert(res_index_intrin->intrinsic == nir_intrinsic_vulkan_resource_index);
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intrin->instr);
const struct anv_internal_kernel_bind_map *bind_map = cb_data;
uint32_t binding = nir_intrinsic_binding(res_index_intrin);
@ -102,21 +99,16 @@ static bool
lower_vulkan_descriptors(nir_shader *shader,
const struct anv_internal_kernel_bind_map *bind_map)
{
return nir_shader_instructions_pass(shader,
lower_vulkan_descriptors_instr,
return nir_shader_intrinsics_pass(shader, lower_vulkan_descriptors_instr,
nir_metadata_block_index |
nir_metadata_dominance,
(void *)bind_map);
}
static bool
lower_base_workgroup_id(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_base_workgroup_id(nir_builder *b, nir_intrinsic_instr *intrin,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_base_workgroup_id)
return false;
@ -126,16 +118,13 @@ lower_base_workgroup_id(nir_builder *b, nir_instr *instr, UNUSED void *data)
}
static bool
lower_load_ubo_to_uniforms(nir_builder *b, nir_instr *instr, void *cb_data)
lower_load_ubo_to_uniforms(nir_builder *b, nir_intrinsic_instr *intrin,
void *cb_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_ubo)
return false;
b->cursor = nir_instr_remove(instr);
b->cursor = nir_instr_remove(&intrin->instr);
nir_def_rewrite_uses(
&intrin->def,
@ -210,7 +199,7 @@ compile_upload_spirv(struct anv_device *device,
.lower_workgroup_id_to_index = true,
};
NIR_PASS_V(nir, nir_lower_compute_system_values, &options);
NIR_PASS_V(nir, nir_shader_instructions_pass, lower_base_workgroup_id,
NIR_PASS_V(nir, nir_shader_intrinsics_pass, lower_base_workgroup_id,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}
@ -238,8 +227,7 @@ compile_upload_spirv(struct anv_device *device,
NIR_PASS_V(nir, nir_opt_dce);
if (stage == MESA_SHADER_COMPUTE) {
NIR_PASS_V(nir, nir_shader_instructions_pass,
lower_load_ubo_to_uniforms,
NIR_PASS_V(nir, nir_shader_intrinsics_pass, lower_load_ubo_to_uniforms,
nir_metadata_block_index | nir_metadata_dominance,
NULL);
NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);


@ -36,16 +36,13 @@
#define sizeof_field(type, field) sizeof(((type *)0)->field)
static bool
lower_patch_vertices_in_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
lower_patch_vertices_in_instr(nir_builder *b, nir_intrinsic_instr *load,
UNUSED void *_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
if (load->intrinsic != nir_intrinsic_load_patch_vertices_in)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&load->instr);
nir_def_rewrite_uses(
&load->def,
@ -62,7 +59,7 @@ lower_patch_vertices_in_instr(nir_builder *b, nir_instr *instr, UNUSED void *_da
bool
anv_nir_lower_load_patch_vertices_in(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, lower_patch_vertices_in_instr,
return nir_shader_intrinsics_pass(shader, lower_patch_vertices_in_instr,
nir_metadata_block_index |
nir_metadata_dominance,
NULL);


@ -30,12 +30,9 @@
* This pass must be run before anv_nir_compute_push_layout().
*/
static bool
update_resource_intel_block(nir_builder *b, nir_instr *instr, UNUSED void *data)
update_resource_intel_block(nir_builder *b, nir_intrinsic_instr *intrin,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_resource_intel)
return false;
@ -69,7 +66,7 @@ update_resource_intel_block(nir_builder *b, nir_instr *instr, UNUSED void *data)
bool
anv_nir_update_resource_intel_block(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, update_resource_intel_block,
return nir_shader_intrinsics_pass(shader, update_resource_intel_block,
nir_metadata_all,
NULL);
}
@ -87,12 +84,8 @@ struct lower_resource_state {
* combined the constant detection does not work anymore.
*/
static bool
lower_resource_intel(nir_builder *b, nir_instr *instr, void *data)
lower_resource_intel(nir_builder *b, nir_intrinsic_instr *intrin, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_resource_intel)
return false;
@ -107,7 +100,7 @@ lower_resource_intel(nir_builder *b, nir_instr *instr, void *data)
if (!is_bindless)
return true;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *set_offset = intrin->src[0].ssa;
nir_def *binding_offset = intrin->src[1].ssa;
@ -168,7 +161,7 @@ anv_nir_lower_resource_intel(nir_shader *shader,
.desc_type = desc_type,
.device = device,
};
return nir_shader_instructions_pass(shader, lower_resource_intel,
return nir_shader_intrinsics_pass(shader, lower_resource_intel,
nir_metadata_block_index |
nir_metadata_dominance,
&state);


@ -25,17 +25,14 @@
#include "nir_builder.h"
static bool
lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
lower_ubo_load_instr(nir_builder *b, nir_intrinsic_instr *load,
UNUSED void *_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
if (load->intrinsic != nir_intrinsic_load_global_constant_offset &&
load->intrinsic != nir_intrinsic_load_global_constant_bounded)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&load->instr);
nir_def *base_addr = load->src[0].ssa;
nir_def *bound = NULL;
@ -117,7 +114,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
bool
anv_nir_lower_ubo_loads(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, lower_ubo_load_instr,
return nir_shader_intrinsics_pass(shader, lower_ubo_load_instr,
nir_metadata_none,
NULL);
}


@ -73,12 +73,10 @@ anv_nir_prim_count_store(nir_builder *b, nir_def *val)
}
static bool
anv_nir_lower_set_vtx_and_prim_count_instr(nir_builder *b, nir_instr *instr, void *data)
anv_nir_lower_set_vtx_and_prim_count_instr(nir_builder *b,
nir_intrinsic_instr *intrin,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_set_vertex_and_primitive_count)
return false;
@ -98,7 +96,7 @@ anv_nir_lower_set_vtx_and_prim_count_instr(nir_builder *b, nir_instr *instr, voi
state->primitive_count = anv_nir_prim_count_store(b, intrin->src[1].ssa);
nir_instr_remove(instr);
nir_instr_remove(&intrin->instr);
return true;
}
@ -108,8 +106,7 @@ anv_nir_lower_set_vtx_and_prim_count(nir_shader *nir)
{
struct lower_set_vtx_and_prim_count_state state = { NULL, };
nir_shader_instructions_pass(nir,
anv_nir_lower_set_vtx_and_prim_count_instr,
nir_shader_intrinsics_pass(nir, anv_nir_lower_set_vtx_and_prim_count_instr,
nir_metadata_none,
&state);


@ -25,17 +25,14 @@
#include "nir_builder.h"
static bool
lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
lower_ubo_load_instr(nir_builder *b, nir_intrinsic_instr *load,
UNUSED void *_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
if (load->intrinsic != nir_intrinsic_load_global_constant_offset &&
load->intrinsic != nir_intrinsic_load_global_constant_bounded)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&load->instr);
nir_def *base_addr = load->src[0].ssa;
nir_def *bound = NULL;
@ -117,7 +114,7 @@ lower_ubo_load_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
bool
anv_nir_lower_ubo_loads(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, lower_ubo_load_instr,
return nir_shader_intrinsics_pass(shader, lower_ubo_load_instr,
nir_metadata_none,
NULL);
}


@ -154,12 +154,9 @@ get_variable(nir_builder *b, nir_deref_path *path,
}
static bool
lower_builtin_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
lower_builtin_instr(nir_builder *b, nir_intrinsic_instr *intrin,
UNUSED void *_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_deref)
return false;
@ -200,7 +197,7 @@ lower_builtin_instr(nir_builder *b, nir_instr *instr, UNUSED void *_data)
nir_variable *new_var = get_variable(b, &path, element);
nir_deref_path_finish(&path);
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intrin->instr);
nir_def *def = nir_load_var(b, new_var);
@ -245,7 +242,7 @@ st_nir_lower_builtin(nir_shader *shader)
*/
nir_lower_indirect_var_derefs(shader, vars);
if (nir_shader_instructions_pass(shader, lower_builtin_instr,
if (nir_shader_intrinsics_pass(shader, lower_builtin_instr,
nir_metadata_block_index |
nir_metadata_dominance, NULL))
nir_remove_dead_derefs(shader);


@ -284,11 +284,8 @@ dxil_nir_lower_constant_to_temp(nir_shader *nir)
}
static bool
flatten_var_arrays(nir_builder *b, nir_instr *instr, void *data)
flatten_var_arrays(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_deref:
case nir_intrinsic_store_deref:
@ -335,7 +332,7 @@ flatten_var_arrays(nir_builder *b, nir_instr *instr, void *data)
unsigned vector_comps = intr->num_components;
if (vector_comps > 1) {
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
if (intr->intrinsic == nir_intrinsic_load_deref) {
nir_def *components[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < vector_comps; ++i) {
@ -353,7 +350,7 @@ flatten_var_arrays(nir_builder *b, nir_instr *instr, void *data)
nir_store_deref(b, comp_deref, nir_channel(b, intr->src[1].ssa, i), 1);
}
}
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
} else {
nir_src_rewrite(&intr->src[0], &nir_build_deref_array(b, new_var_deref, index)->def);
}
@ -419,7 +416,7 @@ dxil_nir_flatten_var_arrays(nir_shader *shader, nir_variable_mode modes)
if (!progress)
return false;
nir_shader_instructions_pass(shader, flatten_var_arrays,
nir_shader_intrinsics_pass(shader, flatten_var_arrays,
nir_metadata_block_index |
nir_metadata_dominance |
nir_metadata_loop_analysis,
@ -429,11 +426,8 @@ dxil_nir_flatten_var_arrays(nir_shader *shader, nir_variable_mode modes)
}
static bool
lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data)
lower_deref_bit_size(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_deref:
case nir_intrinsic_store_deref:
@ -461,12 +455,12 @@ lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data)
deref->type = var_scalar_type;
if (intr->intrinsic == nir_intrinsic_load_deref) {
intr->def.bit_size = glsl_get_bit_size(var_scalar_type);
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def *downcast = nir_type_convert(b, &intr->def, new_type, old_type, nir_rounding_mode_undef);
nir_def_rewrite_uses_after(&intr->def, downcast, downcast->parent_instr);
}
else {
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *upcast = nir_type_convert(b, intr->src[1].ssa, old_type, new_type, nir_rounding_mode_undef);
nir_src_rewrite(&intr->src[1], upcast);
}
@ -486,7 +480,7 @@ lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data)
deref = nir_build_deref_array_imm(b, parent, 0);
nir_deref_instr *deref2 = nir_build_deref_array(b, parent,
nir_iadd_imm(b, deref->arr.index.ssa, 1));
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
if (intr->intrinsic == nir_intrinsic_load_deref) {
nir_def *src1 = nir_load_deref(b, deref);
nir_def *src2 = nir_load_deref(b, deref2);
@ -497,7 +491,7 @@ lower_deref_bit_size(nir_builder *b, nir_instr *instr, void *data)
nir_store_deref(b, deref, src1, 1);
nir_store_deref(b, deref, src2, 1);
}
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
}
return true;
}
@ -605,7 +599,7 @@ dxil_nir_lower_var_bit_size(nir_shader *shader, nir_variable_mode modes,
if (!progress)
return false;
nir_shader_instructions_pass(shader, lower_deref_bit_size,
nir_shader_intrinsics_pass(shader, lower_deref_bit_size,
nir_metadata_block_index |
nir_metadata_dominance |
nir_metadata_loop_analysis,
@ -1163,11 +1157,9 @@ lower_load_local_group_size(nir_builder *b, nir_intrinsic_instr *intr)
}
static bool
lower_system_values_impl(nir_builder *b, nir_instr *instr, void *_state)
lower_system_values_impl(nir_builder *b, nir_intrinsic_instr *intr,
void *_state)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_workgroup_size:
lower_load_local_group_size(b, intr);
@ -1180,8 +1172,9 @@ lower_system_values_impl(nir_builder *b, nir_instr *instr, void *_state)
bool
dxil_nir_lower_system_values(nir_shader *shader)
{
return nir_shader_instructions_pass(shader, lower_system_values_impl,
nir_metadata_block_index | nir_metadata_dominance | nir_metadata_loop_analysis, NULL);
return nir_shader_intrinsics_pass(shader, lower_system_values_impl,
nir_metadata_block_index | nir_metadata_dominance | nir_metadata_loop_analysis,
NULL);
}
static const struct glsl_type *
@ -1393,12 +1386,9 @@ dxil_nir_split_typed_samplers(nir_shader *nir)
static bool
lower_sysval_to_load_input_impl(nir_builder *b, nir_instr *instr, void *data)
lower_sysval_to_load_input_impl(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
gl_system_value sysval = SYSTEM_VALUE_MAX;
switch (intr->intrinsic) {
case nir_intrinsic_load_front_face:
@ -1423,7 +1413,7 @@ lower_sysval_to_load_input_impl(nir_builder *b, nir_instr *instr, void *data)
const unsigned bit_size = (sysval == SYSTEM_VALUE_FRONT_FACE)
? 32 : intr->def.bit_size;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *result = nir_load_input(b, intr->def.num_components, bit_size, nir_imm_int(b, 0),
.base = var->data.driver_location, .dest_type = dest_type);
@ -1441,8 +1431,9 @@ lower_sysval_to_load_input_impl(nir_builder *b, nir_instr *instr, void *data)
bool
dxil_nir_lower_sysval_to_load_input(nir_shader *s, nir_variable **sysval_vars)
{
return nir_shader_instructions_pass(s, lower_sysval_to_load_input_impl,
nir_metadata_block_index | nir_metadata_dominance, sysval_vars);
return nir_shader_intrinsics_pass(s, lower_sysval_to_load_input_impl,
nir_metadata_block_index | nir_metadata_dominance,
sysval_vars);
}
/* Comparison function to sort io values so that first come normal varyings,
@ -1908,11 +1899,8 @@ dxil_nir_lower_sample_pos(nir_shader *s)
}
static bool
lower_subgroup_id(nir_builder *b, nir_instr *instr, void *data)
lower_subgroup_id(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_subgroup_id)
return false;
@ -1961,19 +1949,17 @@ bool
dxil_nir_lower_subgroup_id(nir_shader *s)
{
nir_def *subgroup_id = NULL;
return nir_shader_instructions_pass(s, lower_subgroup_id, nir_metadata_none, &subgroup_id);
return nir_shader_intrinsics_pass(s, lower_subgroup_id, nir_metadata_none,
&subgroup_id);
}
static bool
lower_num_subgroups(nir_builder *b, nir_instr *instr, void *data)
lower_num_subgroups(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_num_subgroups)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *subgroup_size = nir_load_subgroup_size(b);
nir_def *size_minus_one = nir_iadd_imm(b, subgroup_size, -1);
nir_def *workgroup_size_vec = nir_load_workgroup_size(b);
@ -1988,7 +1974,7 @@ lower_num_subgroups(nir_builder *b, nir_instr *instr, void *data)
bool
dxil_nir_lower_num_subgroups(nir_shader *s)
{
return nir_shader_instructions_pass(s, lower_num_subgroups,
return nir_shader_intrinsics_pass(s, lower_num_subgroups,
nir_metadata_block_index |
nir_metadata_dominance |
nir_metadata_loop_analysis, NULL);
@ -2140,11 +2126,8 @@ lower_inclusive_to_exclusive(nir_builder *b, nir_intrinsic_instr *intr)
}
static bool
lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data)
lower_subgroup_scan(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_exclusive_scan:
case nir_intrinsic_inclusive_scan:
@ -2165,7 +2148,7 @@ lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data)
return false;
}
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_op op = nir_intrinsic_reduction_op(intr);
nir_def *subgroup_id = nir_load_subgroup_invocation(b);
nir_def *active_threads = nir_ballot(b, 4, 32, nir_imm_true(b));
@ -2216,7 +2199,8 @@ lower_subgroup_scan(nir_builder *b, nir_instr *instr, void *data)
bool
dxil_nir_lower_unsupported_subgroup_scan(nir_shader *s)
{
bool ret = nir_shader_instructions_pass(s, lower_subgroup_scan, nir_metadata_none, NULL);
bool ret = nir_shader_intrinsics_pass(s, lower_subgroup_scan,
nir_metadata_none, NULL);
if (ret) {
/* Lower the ballot bitfield tests */
nir_lower_subgroups_options options = { .ballot_bit_size = 32, .ballot_components = 4 };
@ -2226,12 +2210,8 @@ dxil_nir_lower_unsupported_subgroup_scan(nir_shader *s)
}
static bool
lower_load_face(nir_builder *b, nir_instr *instr, void *data)
lower_load_face(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_front_face)
return false;
@ -2241,7 +2221,7 @@ lower_load_face(nir_builder *b, nir_instr *instr, void *data)
nir_def *load = nir_ine_imm(b, nir_load_var(b, var), 0);
nir_def_rewrite_uses(&intr->def, load);
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -2256,7 +2236,7 @@ dxil_nir_forward_front_face(nir_shader *nir)
var->data.location = VARYING_SLOT_VAR12;
var->data.interpolation = INTERP_MODE_FLAT;
return nir_shader_instructions_pass(nir, lower_load_face,
return nir_shader_intrinsics_pass(nir, lower_load_face,
nir_metadata_block_index | nir_metadata_dominance,
var);
}
@ -2615,11 +2595,9 @@ guess_image_format_for_var(nir_shader *s, nir_variable *var)
}
static bool
update_intrinsic_formats(nir_builder *b, nir_instr *instr, void *data)
update_intrinsic_formats(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (!nir_intrinsic_has_format(intr))
return false;
nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
@ -2651,6 +2629,7 @@ dxil_nir_guess_image_formats(nir_shader *s)
nir_foreach_variable_with_modes(var, s, nir_var_image) {
progress |= guess_image_format_for_var(s, var);
}
nir_shader_instructions_pass(s, update_intrinsic_formats, nir_metadata_all, NULL);
nir_shader_intrinsics_pass(s, update_intrinsic_formats, nir_metadata_all,
NULL);
return progress;
}


@ -774,11 +774,8 @@ dxil_spirv_write_pntc(nir_shader *nir, const struct dxil_spirv_runtime_conf *con
}
static bool
lower_pntc_read(nir_builder *b, nir_instr *instr, void *data)
lower_pntc_read(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_deref)
return false;
nir_variable *var = nir_intrinsic_get_var(intr, 0);
@ -788,7 +785,7 @@ lower_pntc_read(nir_builder *b, nir_instr *instr, void *data)
nir_def *point_center = &intr->def;
nir_variable *pos_var = (nir_variable *)data;
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def *pos;
if (var->data.sample == pos_var->data.sample)
@ -818,7 +815,7 @@ dxil_spirv_compute_pntc(nir_shader *nir)
pos->data.location = VARYING_SLOT_POS;
pos->data.sample = nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_PNTC)->data.sample;
}
nir_shader_instructions_pass(nir, lower_pntc_read,
nir_shader_intrinsics_pass(nir, lower_pntc_read,
nir_metadata_block_index |
nir_metadata_dominance |
nir_metadata_loop_analysis,
@ -826,11 +823,9 @@ dxil_spirv_compute_pntc(nir_shader *nir)
}
static bool
lower_view_index_to_rt_layer_instr(nir_builder *b, nir_instr *instr, void *data)
lower_view_index_to_rt_layer_instr(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_deref)
return false;
@ -840,7 +835,7 @@ lower_view_index_to_rt_layer_instr(nir_builder *b, nir_instr *instr, void *data)
var->data.location != VARYING_SLOT_LAYER)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *layer = intr->src[1].ssa;
nir_def *new_layer = nir_iadd(b, layer,
nir_load_view_index(b));
@ -869,8 +864,7 @@ static void
lower_view_index_to_rt_layer(nir_shader *nir)
{
bool existing_write =
nir_shader_instructions_pass(nir,
lower_view_index_to_rt_layer_instr,
nir_shader_intrinsics_pass(nir, lower_view_index_to_rt_layer_instr,
nir_metadata_block_index |
nir_metadata_dominance |
nir_metadata_loop_analysis, NULL);


@ -101,12 +101,9 @@ nvk_physical_device_spirv_options(const struct nvk_physical_device *pdev,
}
static bool
lower_image_size_to_txs(nir_builder *b, nir_instr *instr, UNUSED void *_data)
lower_image_size_to_txs(nir_builder *b, nir_intrinsic_instr *intrin,
UNUSED void *_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_image_deref_size)
return false;
@ -138,13 +135,10 @@ lower_image_size_to_txs(nir_builder *b, nir_instr *instr, UNUSED void *_data)
}
static bool
lower_load_global_constant_offset_instr(nir_builder *b, nir_instr *instr,
lower_load_global_constant_offset_instr(nir_builder *b,
nir_intrinsic_instr *intrin,
UNUSED void *_data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_load_global_constant_offset &&
intrin->intrinsic != nir_intrinsic_load_global_constant_bounded)
return false;
@ -463,7 +457,7 @@ nvk_lower_nir(struct nvk_device *dev, nir_shader *nir,
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_push_const,
nir_address_format_32bit_offset);
NIR_PASS(_, nir, nir_shader_instructions_pass, lower_image_size_to_txs,
NIR_PASS(_, nir, nir_shader_intrinsics_pass, lower_image_size_to_txs,
nir_metadata_block_index | nir_metadata_dominance, NULL);
/* Lower non-uniform access before lower_descriptors */
@ -494,7 +488,7 @@ nvk_lower_nir(struct nvk_device *dev, nir_shader *nir,
nvk_buffer_addr_format(rs->storage_buffers));
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_ubo,
nvk_buffer_addr_format(rs->uniform_buffers));
NIR_PASS(_, nir, nir_shader_instructions_pass,
NIR_PASS(_, nir, nir_shader_intrinsics_pass,
lower_load_global_constant_offset_instr,
nir_metadata_block_index | nir_metadata_dominance, NULL);


@ -38,12 +38,9 @@
*/
static bool
bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
bi_lower_divergent_indirects_impl(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
gl_shader_stage stage = b->shader->info.stage;
nir_src *offset;
@ -78,7 +75,7 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
/* This indirect does need it */
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *lane = nir_load_subgroup_invocation(b);
unsigned *lanes = data;
@ -93,7 +90,7 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
for (unsigned i = 0; i < (*lanes); ++i) {
nir_push_if(b, nir_ieq_imm(b, lane, i));
nir_instr *c = nir_instr_clone(b->shader, instr);
nir_instr *c = nir_instr_clone(b->shader, &intr->instr);
nir_intrinsic_instr *c_intr = nir_instr_as_intrinsic(c);
nir_builder_instr_insert(b, c);
nir_pop_if(b, NULL);
@ -107,13 +104,13 @@ bi_lower_divergent_indirects_impl(nir_builder *b, nir_instr *instr, void *data)
if (has_dest)
nir_def_rewrite_uses(&intr->def, res);
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
bool
bi_lower_divergent_indirects(nir_shader *shader, unsigned lanes)
{
return nir_shader_instructions_pass(
shader, bi_lower_divergent_indirects_impl, nir_metadata_none, &lanes);
return nir_shader_intrinsics_pass(shader, bi_lower_divergent_indirects_impl,
nir_metadata_none, &lanes);
}


@ -4575,12 +4575,9 @@ bi_fp32_varying_mask(nir_shader *nir)
}
static bool
bi_lower_sample_mask_writes(nir_builder *b, nir_instr *instr, void *data)
bi_lower_sample_mask_writes(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_output)
return false;
@ -4601,12 +4598,9 @@ bi_lower_sample_mask_writes(nir_builder *b, nir_instr *instr, void *data)
}
static bool
bi_lower_load_output(nir_builder *b, nir_instr *instr, UNUSED void *data)
bi_lower_load_output(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_output)
return false;
@ -4633,7 +4627,7 @@ bifrost_nir_lower_load_output(nir_shader *nir)
{
assert(nir->info.stage == MESA_SHADER_FRAGMENT);
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
nir, bi_lower_load_output,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}
@ -4691,7 +4685,7 @@ bifrost_preprocess_nir(nir_shader *nir, unsigned gpu_id)
nir_var_shader_in | nir_var_shader_out,
~bi_fp32_varying_mask(nir), false);
NIR_PASS_V(nir, nir_shader_instructions_pass, bi_lower_sample_mask_writes,
NIR_PASS_V(nir, nir_shader_intrinsics_pass, bi_lower_sample_mask_writes,
nir_metadata_block_index | nir_metadata_dominance, NULL);
NIR_PASS_V(nir, bifrost_nir_lower_load_output);


@ -604,12 +604,9 @@ get_equation_str(const struct pan_blend_rt_state *rt_state, char *str,
}
static bool
pan_inline_blend_constants(nir_builder *b, nir_instr *instr, void *data)
pan_inline_blend_constants(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_blend_const_color_rgba)
return false;
@ -620,10 +617,10 @@ pan_inline_blend_constants(nir_builder *b, nir_instr *instr, void *data)
nir_const_value_for_float(floats[2], 32),
nir_const_value_for_float(floats[3], 32)};
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def *constant = nir_build_imm(b, 4, 32, constants);
nir_def_rewrite_uses(&intr->def, constant);
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -720,10 +717,9 @@ GENX(pan_blend_create_shader)(const struct panfrost_device *dev,
b.shader->info.io_lowered = true;
NIR_PASS_V(b.shader, nir_lower_blend, &options);
nir_shader_instructions_pass(
b.shader, pan_inline_blend_constants,
nir_metadata_block_index | nir_metadata_dominance,
(void *)state->constants);
nir_shader_intrinsics_pass(b.shader, pan_inline_blend_constants,
nir_metadata_block_index | nir_metadata_dominance,
(void *)state->constants);
return b.shader;
}
@ -791,12 +787,8 @@ struct rt_conversion_inputs {
};
static bool
inline_rt_conversion(nir_builder *b, nir_instr *instr, void *data)
inline_rt_conversion(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_rt_conversion_pan)
return false;
@ -806,7 +798,7 @@ inline_rt_conversion(nir_builder *b, nir_instr *instr, void *data)
uint64_t conversion = GENX(pan_blend_get_internal_desc)(
inputs->dev, inputs->formats[rt], rt, size, false);
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def_rewrite_uses(&intr->def, nir_imm_int(b, conversion >> 32));
return true;
}
@ -815,7 +807,7 @@ bool
GENX(pan_inline_rt_conversion)(nir_shader *s, const struct panfrost_device *dev,
enum pipe_format *formats)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
s, inline_rt_conversion,
nir_metadata_block_index | nir_metadata_dominance,
&(struct rt_conversion_inputs){.dev = dev, .formats = formats});


@ -386,12 +386,9 @@ pan_blitter_get_blend_shaders(struct panfrost_device *dev, unsigned rt_count,
* unnecessary lowering.
*/
static bool
lower_sampler_parameters(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_sampler_parameters(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_sampler_lod_parameters_pan)
return false;
@ -401,7 +398,7 @@ lower_sampler_parameters(nir_builder *b, nir_instr *instr, UNUSED void *data)
nir_const_value_for_float(0.0f, 32), /* lod_bias */
};
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
nir_def_rewrite_uses(&intr->def, nir_build_imm(b, 3, 32, constants));
return true;
}
@ -627,8 +624,7 @@ pan_blitter_get_blit_shader(struct panfrost_device *dev,
pan_shader_preprocess(b.shader, inputs.gpu_id);
if (PAN_ARCH == 4) {
NIR_PASS_V(b.shader, nir_shader_instructions_pass,
lower_sampler_parameters,
NIR_PASS_V(b.shader, nir_shader_intrinsics_pass, lower_sampler_parameters,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}


@ -212,13 +212,9 @@ glsl_type_size(const struct glsl_type *type, bool bindless)
}
static bool
midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
midgard_nir_lower_global_load_instr(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_global &&
intr->intrinsic != nir_intrinsic_load_shared)
return false;
@ -229,7 +225,7 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
if (util_bitcount(totalsz) < 2 && totalsz <= 128)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *addr = intr->src[0].ssa;
@ -273,7 +269,7 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_instr *instr,
static bool
midgard_nir_lower_global_load(nir_shader *shader)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
shader, midgard_nir_lower_global_load_instr,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}


@ -29,13 +29,9 @@
#include "midgard_nir.h"
static bool
nir_lower_image_bitsize(nir_builder *b, nir_instr *instr, UNUSED void *data)
nir_lower_image_bitsize(nir_builder *b, nir_intrinsic_instr *intr,
UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_image_load:
case nir_intrinsic_image_store:
@ -48,7 +44,7 @@ nir_lower_image_bitsize(nir_builder *b, nir_instr *instr, UNUSED void *data)
if (nir_src_bit_size(intr->src[1]) == 16)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *coord =
nir_ssa_for_src(b, intr->src[1], nir_src_num_components(intr->src[1]));
@ -63,7 +59,7 @@ nir_lower_image_bitsize(nir_builder *b, nir_instr *instr, UNUSED void *data)
bool
midgard_nir_lower_image_bitsize(nir_shader *shader)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
shader, nir_lower_image_bitsize,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}


@ -34,13 +34,9 @@
*/
static bool
nir_lower_64bit_intrin_instr(nir_builder *b, nir_instr *instr, void *data)
nir_lower_64bit_intrin_instr(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_global_invocation_id:
case nir_intrinsic_load_global_invocation_id_zero_base:
@ -55,7 +51,7 @@ nir_lower_64bit_intrin_instr(nir_builder *b, nir_instr *instr, void *data)
if (intr->def.bit_size != 64)
return false;
b->cursor = nir_after_instr(instr);
b->cursor = nir_after_instr(&intr->instr);
intr->def.bit_size = 32;
@ -69,7 +65,7 @@ nir_lower_64bit_intrin_instr(nir_builder *b, nir_instr *instr, void *data)
bool
pan_nir_lower_64bit_intrin(nir_shader *shader)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
shader, nir_lower_64bit_intrin_instr,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}


@ -30,16 +30,13 @@
* way to implement load_sample_id_no_per_sample. */
static bool
pan_lower_helper_invocation_instr(nir_builder *b, nir_instr *instr, void *data)
pan_lower_helper_invocation_instr(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_helper_invocation)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *mask = nir_load_sample_mask_in(b);
nir_def *eq = nir_ieq_imm(b, mask, 0);
@ -51,7 +48,7 @@ pan_lower_helper_invocation_instr(nir_builder *b, nir_instr *instr, void *data)
bool
pan_lower_helper_invocation(nir_shader *shader)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
shader, pan_lower_helper_invocation_instr,
nir_metadata_block_index | nir_metadata_dominance, NULL);
}


@ -34,13 +34,8 @@
* Midgard and Bifrost is slot-based, writing out an entire vec4 slot at a time.
*/
static bool
lower_store_component(nir_builder *b, nir_instr *instr, void *data)
lower_store_component(nir_builder *b, nir_intrinsic_instr *intr, void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_store_output)
return false;
@ -95,7 +90,7 @@ pan_nir_lower_store_component(nir_shader *s)
assert(s->info.stage == MESA_SHADER_VERTEX);
struct hash_table_u64 *stores = _mesa_hash_table_u64_create(NULL);
bool progress = nir_shader_instructions_pass(
bool progress = nir_shader_intrinsics_pass(
s, lower_store_component,
nir_metadata_block_index | nir_metadata_dominance, stores);
_mesa_hash_table_u64_destroy(stores);


@ -58,18 +58,13 @@ lower_xfb_output(nir_builder *b, nir_intrinsic_instr *intr,
}
static bool
lower_xfb(nir_builder *b, nir_instr *instr, UNUSED void *data)
lower_xfb(nir_builder *b, nir_intrinsic_instr *intr, UNUSED void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
/* In transform feedback programs, vertex ID becomes zero-based, so apply
* that lowering even on Valhall.
*/
if (intr->intrinsic == nir_intrinsic_load_vertex_id) {
b->cursor = nir_instr_remove(instr);
b->cursor = nir_instr_remove(&intr->instr);
nir_def *repl =
nir_iadd(b, nir_load_vertex_id_zero_base(b), nir_load_first_vertex(b));
@ -98,13 +93,13 @@ lower_xfb(nir_builder *b, nir_instr *instr, UNUSED void *data)
}
}
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return progress;
}
bool
pan_lower_xfb(nir_shader *nir)
{
return nir_shader_instructions_pass(
return nir_shader_intrinsics_pass(
nir, lower_xfb, nir_metadata_block_index | nir_metadata_dominance, NULL);
}


@ -179,16 +179,13 @@ panvk_lower_blend(struct panfrost_device *pdev, nir_shader *nir,
}
static bool
panvk_lower_load_push_constant(nir_builder *b, nir_instr *instr, void *data)
panvk_lower_load_push_constant(nir_builder *b, nir_intrinsic_instr *intr,
void *data)
{
if (instr->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if (intr->intrinsic != nir_intrinsic_load_push_constant)
return false;
b->cursor = nir_before_instr(instr);
b->cursor = nir_before_instr(&intr->instr);
nir_def *ubo_load =
nir_load_ubo(b, intr->def.num_components, intr->def.bit_size,
nir_imm_int(b, PANVK_PUSH_CONST_UBO_INDEX), intr->src[0].ssa,
@ -196,7 +193,7 @@ panvk_lower_load_push_constant(nir_builder *b, nir_instr *instr, void *data)
.range_base = nir_intrinsic_base(intr),
.range = nir_intrinsic_range(intr));
nir_def_rewrite_uses(&intr->def, ubo_load);
nir_instr_remove(instr);
nir_instr_remove(&intr->instr);
return true;
}
@ -321,7 +318,7 @@ panvk_per_arch(shader_create)(struct panvk_device *dev, gl_shader_stage stage,
nir_address_format_32bit_offset);
}
NIR_PASS_V(nir, nir_shader_instructions_pass, panvk_lower_load_push_constant,
NIR_PASS_V(nir, nir_shader_intrinsics_pass, panvk_lower_load_push_constant,
nir_metadata_block_index | nir_metadata_dominance,
(void *)layout);
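
A note on where the deleted boilerplate goes: each converted callback now receives a nir_intrinsic_instr * directly and reaches the underlying instruction through &intr->instr when it needs a cursor or a removal, while the intrinsic-type filtering is centralized in nir_shader_intrinsics_pass. The sketch below is illustrative only and is not the in-tree definition; the names intrinsics_pass_state, forward_intrinsics_cb and example_intrinsics_pass are invented for this example, and the real helper may well be implemented differently.

    #include "nir_builder.h"

    /* Hypothetical sketch: one way a helper with the shape of
     * nir_shader_intrinsics_pass could be layered on top of
     * nir_shader_instructions_pass. Not Mesa's actual implementation.
     */
    struct intrinsics_pass_state {
       bool (*cb)(nir_builder *b, nir_intrinsic_instr *intr, void *data);
       void *data;
    };

    static bool
    forward_intrinsics_cb(nir_builder *b, nir_instr *instr, void *_state)
    {
       struct intrinsics_pass_state *state = _state;

       /* The filtering every converted pass used to open-code by hand. */
       if (instr->type != nir_instr_type_intrinsic)
          return false;

       return state->cb(b, nir_instr_as_intrinsic(instr), state->data);
    }

    static bool
    example_intrinsics_pass(nir_shader *shader,
                            bool (*cb)(nir_builder *, nir_intrinsic_instr *, void *),
                            nir_metadata preserved, void *data)
    {
       struct intrinsics_pass_state state = { .cb = cb, .data = data };

       return nir_shader_instructions_pass(shader, forward_intrinsics_cb,
                                           preserved, &state);
    }

Either way, the call sites shown in this diff are unaffected beyond the rename: they still pass the callback, the preserved-metadata flags and the user data pointer, just through the intrinsics-specific entry point.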