treewide: add & use parent instr helpers
We add a bunch of new helpers to avoid the need to touch ->parent_instr,
including the full set of:

* nir_def_is_*
* nir_def_as_*_or_null
* nir_def_as_* [assumes the right instr type]
* nir_src_is_*
* nir_src_as_*
* nir_scalar_is_*
* nir_scalar_as_*

Plus nir_def_instr() where there's no more suitable helper.

Also an existing helper is renamed to unify all the names, while we're
churning the tree:

* nir_src_as_alu_instr -> nir_src_as_alu

..and then we port the tree to use the helpers as much as possible, using
nir_def_instr() where that does not work.

Acked-by: Marek Olšák <maraeo@gmail.com>
---
To eliminate nir_def::parent_instr we need to churn the tree anyway, so I'm
taking this opportunity to clean up a lot of NIR patterns.

Co-authored-by: Konstantin Seurer <konstantin.seurer@gmail.com>
Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/38313>
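As a rough before/after sketch of the pattern this commit replaces (the
function below is hypothetical, written only to illustrate the helpers; it is
not part of the diff):

   /* Before: chase ->parent_instr and cast by hand. */
   static bool
   is_fneg(nir_src src)
   {
      if (src.ssa->parent_instr->type != nir_instr_type_alu)
         return false;
      return nir_instr_as_alu(src.ssa->parent_instr)->op == nir_op_fneg;
   }

   /* After: nir_src_as_alu() does the type check and the cast, returning
    * NULL when the source is not an ALU instruction.
    */
   static bool
   is_fneg(nir_src src)
   {
      nir_alu_instr *alu = nir_src_as_alu(src);
      return alu != NULL && alu->op == nir_op_fneg;
   }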
This commit is contained in:

parent 34e7fa2fe6
commit de32f9275f

164 changed files with 648 additions and 668 deletions

@@ -91,7 +91,7 @@ process_instr(nir_builder *b, nir_intrinsic_instr *intrin, void *_)
    uint64_t off_const = 0;
    nir_def *offset = NULL;
    nir_scalar src = {addr_src->ssa, 0};
-   b->cursor = nir_after_instr(addr_src->ssa->parent_instr);
+   b->cursor = nir_after_def(addr_src->ssa);
    nir_def *addr = try_extract_additions(b, src, &off_const, &offset, false);
    addr = addr ? addr : addr_src->ssa;

@@ -31,11 +31,11 @@ set_smem_access_flags(nir_builder *b, nir_intrinsic_instr *intrin, void *cb_data
       return false;
    case nir_intrinsic_load_ubo:
    case nir_intrinsic_load_ssbo:
-      if (intrin->src[0].ssa->parent_instr->block->cf_node.parent->type != nir_cf_node_function)
+      if (nir_def_block(intrin->src[0].ssa)->cf_node.parent->type != nir_cf_node_function)
         break;
      FALLTHROUGH;
   case nir_intrinsic_load_constant:
-      intrin->src[0].ssa->parent_instr->pass_flags = 1;
+      nir_def_instr(intrin->src[0].ssa)->pass_flags = 1;
      break;
   default:
      break;

@@ -72,7 +72,7 @@ set_smem_access_flags(nir_builder *b, nir_intrinsic_instr *intrin, void *cb_data
    nir_intrinsic_set_access(intrin, access | ACCESS_SMEM_AMD);

    /* Check if this instruction can be executed speculatively. */
-   if (intrin->src[0].ssa->parent_instr->pass_flags == 1)
+   if (nir_def_instr(intrin->src[0].ssa)->pass_flags == 1)
       nir_intrinsic_set_access(intrin, nir_intrinsic_access(intrin) | ACCESS_CAN_SPECULATE);

    return access != nir_intrinsic_access(intrin);

@@ -400,7 +400,7 @@ remove_culling_shader_outputs(nir_shader *culling_shader, lower_ngg_nogs_state *
 static void
 replace_scalar_component_uses(nir_builder *b, nir_scalar old, nir_scalar rep)
 {
-   if (old.def->parent_instr->type == nir_instr_type_load_const)
+   if (nir_def_is_const(old.def))
       return;

    assert(old.def->bit_size == rep.def->bit_size);

@@ -437,7 +437,7 @@ apply_repacked_pos_output(nir_builder *b, nir_intrinsic_instr *intrin, void *sta
    for (unsigned comp = 0; comp < store_val->num_components; ++comp) {
       nir_scalar val = nir_scalar_chase_movs(nir_get_scalar(store_val, comp));
-      b->cursor = nir_after_instr_and_phis(val.def->parent_instr);
+      b->cursor = nir_after_instr_and_phis(nir_def_instr(val.def));
       nir_def *reloaded = nir_load_var(b, s->position_value_var);

       replace_scalar_component_uses(b, val, nir_get_scalar(reloaded, store_pos_component + comp));

@@ -604,7 +604,7 @@ analyze_shader_before_culling_walk(nir_def *ssa,
                                    uint8_t flag,
                                    lower_ngg_nogs_state *s)
 {
-   nir_instr *instr = ssa->parent_instr;
+   nir_instr *instr = nir_def_instr(ssa);
    uint8_t old_pass_flags = instr->pass_flags;
    instr->pass_flags |= flag;

@@ -426,7 +426,7 @@ lower_ps_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin, void *state)
    }

    if (s->options->ps_iter_samples >= 2 &&
-       sample_id->parent_instr->type == nir_instr_type_intrinsic &&
+       nir_def_is_intrinsic(sample_id) &&
        nir_def_as_intrinsic(sample_id)->intrinsic == nir_intrinsic_load_sample_id) {
       nir_def_replace(&intrin->def, nir_load_barycentric_sample(b, 32, .interp_mode = mode));
       return true;

@@ -387,7 +387,7 @@ filter_load_tcs_per_vertex_input(const nir_instr *instr,
    nir_src *off_src = nir_get_io_offset_src(intrin);
    nir_src *vertex_index_src = nir_get_io_arrayed_index_src(intrin);
-   nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
+   nir_instr *vertex_index_instr = nir_def_instr(vertex_index_src->ssa);
    const nir_io_semantics io_sem = nir_intrinsic_io_semantics(intrin);

    /* If this is accessed via gl_InvocationIndex, don't use LDS if tcs_inputs_via_temp is also set,

@@ -230,7 +230,7 @@ static bool can_move_coord(nir_scalar scalar, coord_info *info, nir_block *tople
     * optimizing nir_texop_txd. Otherwise, we only use nir_strict_wqm_coord_amd
     * for cases that D3D11 requires.
     */
-   if (txd && nir_block_dominates(scalar.def->parent_instr->block, toplevel_block)) {
+   if (txd && nir_block_dominates(nir_def_block(scalar.def), toplevel_block)) {
       info->load = NULL;
       return true;
    }

@@ -270,7 +270,7 @@ bool ac_nir_optimize_outputs(nir_shader *nir, bool sprite_tex_disallowed,
    /* nir_lower_io_to_scalar is required before this */
    assert(intr->src[0].ssa->num_components == 1);
    /* No intrinsic should store undef. */
-   assert(intr->src[0].ssa->parent_instr->type != nir_instr_type_undef);
+   assert(!nir_src_is_undef(intr->src[0]));

    /* Gather the output. */
    struct ac_out_info *out_info = &outputs[sem.location];

@@ -283,7 +283,7 @@ bool ac_nir_optimize_outputs(nir_shader *nir, bool sprite_tex_disallowed,
       unsigned chan = sem.high_16bits * 4 + nir_intrinsic_component(intr);
       out_info->chan[chan].store_intr = intr;
-      out_info->chan[chan].value = intr->src[0].ssa->parent_instr;
+      out_info->chan[chan].value = nir_def_instr(intr->src[0].ssa);
    }
 }

@@ -65,10 +65,8 @@ Temp merged_wave_info_to_mask(isel_context* ctx, unsigned i);
 void
 get_const_vec(nir_def* vec, nir_const_value* cv[4])
 {
-   if (vec->parent_instr->type != nir_instr_type_alu)
-      return;
-   nir_alu_instr* vec_instr = nir_def_as_alu(vec);
-   if (vec_instr->op != nir_op_vec(vec->num_components))
+   nir_alu_instr* vec_instr = nir_def_as_alu_or_null(vec);
+   if (!vec_instr || vec_instr->op != nir_op_vec(vec->num_components))
       return;

    for (unsigned i = 0; i < vec->num_components; i++) {

@@ -729,9 +727,9 @@ Operand
 get_phi_operand(isel_context* ctx, nir_def* ssa, RegClass rc)
 {
    Temp tmp = get_ssa_temp(ctx, ssa);
-   if (ssa->parent_instr->type == nir_instr_type_undef) {
+   if (nir_def_is_undef(ssa)) {
       return Operand(rc);
-   } else if (ssa->bit_size == 1 && ssa->parent_instr->type == nir_instr_type_load_const) {
+   } else if (ssa->bit_size == 1 && nir_def_is_const(ssa)) {
       bool val = nir_def_as_load_const(ssa)->value[0].b;
       return Operand::c32_or_c64(val ? -1 : 0, ctx->program->lane_mask == s2);
    } else {

@@ -1314,7 +1314,8 @@ visit_store_output(isel_context* ctx, nir_intrinsic_instr* instr)
    if (ls_need_output || ps_need_output) {
       bool stored_to_temps = store_output_to_temps(ctx, instr);
       if (!stored_to_temps) {
-         isel_err(instr->src[1].ssa->parent_instr, "Unimplemented output offset instruction");
+         isel_err(nir_def_instr(instr->src[1].ssa),
+                  "Unimplemented output offset instruction");
          abort();
       }
    } else {

@@ -1459,7 +1460,8 @@ visit_load_fs_input(isel_context* ctx, nir_intrinsic_instr* instr)
    nir_src offset = *nir_get_io_offset_src(instr);

    if (!nir_src_is_const(offset) || nir_src_as_uint(offset))
-      isel_err(offset.ssa->parent_instr, "Unimplemented non-zero nir_intrinsic_load_input offset");
+      isel_err(nir_def_instr(offset.ssa),
+               "Unimplemented non-zero nir_intrinsic_load_input offset");

    Temp prim_mask = get_arg(ctx, ctx->args->prim_mask);

@@ -25,7 +25,7 @@
 static inline unsigned
 src_get_fotid_mask(nir_src src)
 {
-   return src.ssa->parent_instr->pass_flags;
+   return nir_def_instr(src.ssa)->pass_flags;
 }

 static inline unsigned

@@ -437,7 +437,7 @@ opt_fotid_shuffle(nir_builder *b, nir_intrinsic_instr *instr, const radv_nir_opt
 {
    if (instr->intrinsic != nir_intrinsic_shuffle)
       return false;
-   if (!instr->src[1].ssa->parent_instr->pass_flags)
+   if (!nir_def_instr(instr->src[1].ssa)->pass_flags)
       return false;

    unsigned src_idx = 0;

@@ -530,7 +530,7 @@ opt_fotid_bool(nir_builder *b, nir_alu_instr *instr, const radv_nir_opt_tid_func
    nir_def *ballot = nir_vec(b, ballot_comp, options->hw_ballot_num_comp);
    nir_def *res = nir_inverse_ballot(b, ballot);
-   res->parent_instr->pass_flags = 1;
+   nir_def_instr(res)->pass_flags = 1;

    nir_def_replace(&instr->def, res);
    return true;

@@ -550,7 +550,7 @@ visit_instr(nir_builder *b, nir_instr *instr, void *params)
    /* revist shuffles that we skipped previously */
    bool progress = false;
    for (unsigned i = 1; i < 3; i++) {
-      nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
+      nir_instr *src_instr = nir_def_instr(alu->src[i].src.ssa);
       if (src_instr->type == nir_instr_type_intrinsic) {
          nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src_instr);
          progress |= opt_fotid_shuffle(b, intrin, options, true);

@@ -3164,38 +3164,23 @@ agx_optimize_nir(nir_shader *nir, bool soft_fault, uint16_t *preamble_size,
  * conformant not to, but every app gets this wrong.
  */
 static bool
-gather_texcoords(nir_builder *b, nir_instr *instr, void *data)
+gather_texcoords(nir_builder *b, nir_tex_instr *tex, void *data)
 {
-   uint64_t *mask = data;
-
-   if (instr->type != nir_instr_type_tex)
+   nir_def *coord = nir_get_tex_src(tex, nir_tex_src_coord);
+   if (!coord)
       return false;

-   nir_tex_instr *tex = nir_instr_as_tex(instr);
+   nir_scalar x = nir_scalar_resolved(coord, 0);
+   nir_scalar y = nir_scalar_resolved(coord, 1);
+   nir_intrinsic_instr *intr = nir_scalar_as_intrinsic(x);

-   int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
-   if (coord_idx < 0)
-      return false;
-
-   nir_src src = tex->src[coord_idx].src;
-   nir_scalar x = nir_scalar_resolved(src.ssa, 0);
-   nir_scalar y = nir_scalar_resolved(src.ssa, 1);
-
-   if (x.def != y.def)
-      return false;
-
-   nir_instr *parent = x.def->parent_instr;
-
-   if (parent->type != nir_instr_type_intrinsic)
-      return false;
-
-   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(parent);
-
-   if (intr->intrinsic != nir_intrinsic_load_interpolated_input)
-      return false;
-
-   nir_io_semantics sem = nir_intrinsic_io_semantics(intr);
-   *mask |= BITFIELD64_BIT(sem.location);
+   if (x.def == y.def && intr &&
+       intr->intrinsic == nir_intrinsic_load_interpolated_input) {
+      uint64_t *mask = data;
+      *mask |= BITFIELD64_BIT(nir_intrinsic_io_semantics(intr).location);
+   }
+
    return false;
 }

@@ -3209,7 +3194,7 @@ agx_gather_texcoords(nir_shader *nir)
    assert(nir->info.stage == MESA_SHADER_FRAGMENT);

    uint64_t mask = 0;
-   nir_shader_instructions_pass(nir, gather_texcoords, nir_metadata_all, &mask);
+   nir_shader_tex_pass(nir, gather_texcoords, nir_metadata_all, &mask);
    return mask;
 }

@@ -16,14 +16,11 @@
 static nir_preamble_class
 preamble_class(nir_def *def)
 {
-   nir_instr *instr = def->parent_instr;
-   if (instr->type != nir_instr_type_intrinsic)
+   nir_intrinsic_instr *intr = nir_def_as_intrinsic_or_null(def);
+   if (!intr || (nir_intrinsic_has_desc_set(intr) &&
+                 nir_intrinsic_desc_set(intr) >= 32 /* encoding restriction */))
       return nir_preamble_class_general;

-   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-   if (nir_intrinsic_has_desc_set(intr) && nir_intrinsic_desc_set(intr) >= 32)
-      return nir_preamble_class_general /* encoding restriction */;
-
    if (intr->intrinsic == nir_intrinsic_bindless_image_agx)
       return nir_preamble_class_image;
    else if (intr->intrinsic == nir_intrinsic_bindless_sampler_agx)

@@ -89,11 +89,11 @@ lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
    if (sem.location == VARYING_SLOT_LAYER) {
       assert(ctx->layer == NULL && "only written once");
       ctx->layer = value;
-      ctx->after_layer_viewport = nir_after_instr(index->parent_instr);
+      ctx->after_layer_viewport = nir_after_def(index);
    } else if (sem.location == VARYING_SLOT_VIEWPORT) {
       assert(ctx->viewport == NULL && "only written once");
       ctx->viewport = value;
-      ctx->after_layer_viewport = nir_after_instr(index->parent_instr);
+      ctx->after_layer_viewport = nir_after_def(index);
    } else if (sem.location == VARYING_SLOT_CLIP_DIST0 ||
               sem.location == VARYING_SLOT_CLIP_DIST1) {

@@ -1231,7 +1231,7 @@ ntq_emit_comparison(struct v3d_compile *c,
 static struct nir_alu_instr *
 ntq_get_alu_parent(nir_src src)
 {
-   if (src.ssa->parent_instr->type != nir_instr_type_alu)
+   if (!nir_src_is_alu(src))
       return NULL;
    nir_alu_instr *instr = nir_def_as_alu(src.ssa);
    if (!instr)

@@ -3103,7 +3103,7 @@ nir_src_derived_from_reg(nir_src src)
    if (nir_load_reg_for_def(def))
       return true;

-   nir_instr *parent = def->parent_instr;
+   nir_instr *parent = nir_def_instr(def);
    switch (parent->type) {
    case nir_instr_type_alu: {
       nir_alu_instr *alu = nir_instr_as_alu(parent);

@@ -631,7 +631,7 @@ v3d_nir_lower_null_pointers_cb(nir_builder *b,
       return false;

    /* Otherwise, see if it comes from a bcsel including a null pointer */
-   if (src->ssa->parent_instr->type != nir_instr_type_alu)
+   if (!nir_def_is_alu(src->ssa))
       return false;

    nir_alu_instr *alu = nir_def_as_alu(src->ssa);

@@ -1578,7 +1578,7 @@ v3d_nir_sort_constant_ubo_load(nir_block *block, nir_intrinsic_instr *ref)
        */
       break;
    }
-   if (intr->src[1].ssa->parent_instr == tmp) {
+   if (nir_def_instr(intr->src[1].ssa) == tmp) {
       offset_inst = tmp;
       break;
    }

@@ -1392,8 +1392,8 @@ nir_instr_dce_add_dead_srcs_cb(nir_src *src, void *state)
    nir_instr_worklist *wl = state;

    list_del(&src->use_link);
-   if (!nir_instr_free_and_dce_is_live(src->ssa->parent_instr))
-      nir_instr_worklist_push_tail(wl, src->ssa->parent_instr);
+   if (!nir_instr_free_and_dce_is_live(nir_def_instr(src->ssa)))
+      nir_instr_worklist_push_tail(wl, nir_def_instr(src->ssa));

    /* Stop nir_instr_remove from trying to delete the link again. */
    src->ssa = NULL;

@@ -1543,12 +1543,8 @@ nir_const_value_as_float(nir_const_value value, unsigned bit_size)
 nir_const_value *
 nir_src_as_const_value(nir_src src)
 {
-   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
-      return NULL;
-
-   nir_load_const_instr *load = nir_def_as_load_const(src.ssa);
-
-   return load->value;
+   nir_load_const_instr *load = nir_src_as_load_const(src);
+   return load ? load->value : NULL;
 }

 /**

@@ -1562,11 +1558,11 @@ bool
 nir_src_is_always_uniform(nir_src src)
 {
    /* Constants are trivially uniform */
-   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
+   if (nir_src_is_const(src))
       return true;

-   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
-      nir_intrinsic_instr *intr = nir_def_as_intrinsic(src.ssa);
+   if (nir_src_is_intrinsic(src)) {
+      nir_intrinsic_instr *intr = nir_src_as_intrinsic(src);
       /* As are uniform variables */
       if (intr->intrinsic == nir_intrinsic_load_uniform &&
           nir_src_is_always_uniform(intr->src[0]))

@@ -1583,8 +1579,8 @@ nir_src_is_always_uniform(nir_src src)
    }

    /* Operating together uniform expressions produces a uniform result */
-   if (src.ssa->parent_instr->type == nir_instr_type_alu) {
-      nir_alu_instr *alu = nir_def_as_alu(src.ssa);
+   if (nir_src_is_alu(src)) {
+      nir_alu_instr *alu = nir_src_as_alu(src);
       for (int i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
          if (!nir_src_is_always_uniform(alu->src[i].src))
             return false;

@@ -1747,7 +1743,7 @@ is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
  * want without touching the fixup code.
  *
  * This function assumes that after_me is in the same block as
- * def->parent_instr and that after_me comes after def->parent_instr.
+ * nir_def_instr(def) and that after_me comes after nir_def_instr(def).
  */
 void
 nir_def_rewrite_uses_after_instr(nir_def *def, nir_def *new_ssa,

@@ -1758,13 +1754,13 @@ nir_def_rewrite_uses_after_instr(nir_def *def, nir_def *new_ssa,
    nir_foreach_use_including_if_safe(use_src, def) {
       if (!nir_src_is_if(use_src)) {
-         assert(nir_src_parent_instr(use_src) != def->parent_instr);
+         assert(nir_src_parent_instr(use_src) != nir_def_instr(def));

          /* Since def already dominates all of its uses, the only way a use can
           * not be dominated by after_me is if it is between def and after_me in
           * the instruction list.
           */
-         if (is_instr_between(def->parent_instr, after_me, nir_src_parent_instr(use_src)))
+         if (is_instr_between(nir_def_instr(def), after_me, nir_src_parent_instr(use_src)))
            continue;
      }

@@ -2910,10 +2906,10 @@ nir_binding
 nir_chase_binding(nir_src rsrc)
 {
    nir_binding res = { 0 };
-   if (rsrc.ssa->parent_instr->type == nir_instr_type_deref) {
+   if (nir_src_is_deref(rsrc)) {
       const struct glsl_type *type = glsl_without_array(nir_src_as_deref(rsrc)->type);
       bool is_image = glsl_type_is_image(type) || glsl_type_is_sampler(type);
-      while (rsrc.ssa->parent_instr->type == nir_instr_type_deref) {
+      while (nir_src_is_deref(rsrc)) {
         nir_deref_instr *deref = nir_src_as_deref(rsrc);

         if (deref->deref_type == nir_deref_type_var) {

@@ -2939,7 +2935,7 @@ nir_chase_binding(nir_src rsrc)
    */
   unsigned num_components = nir_src_num_components(rsrc);
   while (true) {
-      nir_alu_instr *alu = nir_src_as_alu_instr(rsrc);
+      nir_alu_instr *alu = nir_src_as_alu(rsrc);
      nir_intrinsic_instr *intrin = nir_src_as_intrinsic(rsrc);
      if (alu && alu->op == nir_op_mov) {
         for (unsigned i = 0; i < num_components; i++) {

@@ -3048,7 +3044,7 @@ nir_scalar
 nir_scalar_chase_movs(nir_scalar s)
 {
    while (nir_scalar_is_alu(s)) {
-      nir_alu_instr *alu = nir_def_as_alu(s.def);
+      nir_alu_instr *alu = nir_scalar_as_alu(s);
       if (alu->op == nir_op_mov) {
          s.def = alu->src[0].src.ssa;
          s.comp = alu->src[0].swizzle[s.comp];

@@ -3831,3 +3827,10 @@ nir_atomic_op_to_alu(nir_atomic_op op)
    UNREACHABLE("Invalid nir_atomic_op");
 }
+
+const nir_instr *
+nir_def_instr_noninline(const nir_def *def)
+{
+   /* Wrapper for Rust bindgen */
+   return nir_def_instr_const(def);
+}

@@ -1030,12 +1030,6 @@ typedef struct nir_def {
    bool loop_invariant;
 } nir_def;

-static inline nir_block *
-nir_def_block(nir_def *def)
-{
-   return def->parent_instr->block;
-}
-
 typedef struct nir_src {
    /* Instruction or if-statement that consumes this value as a source. This
     * should only be accessed through nir_src_* helpers.

@@ -1173,24 +1167,6 @@ nir_src_num_components(nir_src src)
    return src.ssa->num_components;
 }

-static inline bool
-nir_src_is_const(nir_src src)
-{
-   return src.ssa->parent_instr->type == nir_instr_type_load_const;
-}
-
-static inline bool
-nir_src_is_undef(nir_src src)
-{
-   return src.ssa->parent_instr->type == nir_instr_type_undef;
-}
-
-static inline bool
-nir_src_is_deref(nir_src src)
-{
-   return src.ssa->parent_instr->type == nir_instr_type_deref;
-}
-
 bool nir_src_is_divergent(nir_src *src);

 /* Are all components the same, ie. .xxxx */

@@ -2779,6 +2755,69 @@ nir_phi_get_src_from_block(nir_phi_instr *phi, nir_block *block)
    return NULL;
 }

+const nir_instr *nir_def_instr_noninline(const nir_def *def);
+
+static inline nir_instr *
+nir_def_instr_nonconst(nir_def *def)
+{
+   static_assert(offsetof(nir_alu_instr, def) == offsetof(nir_undef_instr, def),
+                 "nir_alu_instr: nir_def always has to be at the same offset relative to nir_instr.");
+   static_assert(offsetof(nir_deref_instr, def) == offsetof(nir_undef_instr, def),
+                 "nir_deref_instr: nir_def always has to be at the same offset relative to nir_instr.");
+   static_assert(offsetof(nir_tex_instr, def) == offsetof(nir_undef_instr, def),
+                 "nir_tex_instr: nir_def always has to be at the same offset relative to nir_instr.");
+   static_assert(offsetof(nir_intrinsic_instr, def) == offsetof(nir_undef_instr, def),
+                 "nir_intrinsic_instr: nir_def always has to be at the same offset relative to nir_instr.");
+   static_assert(offsetof(nir_load_const_instr, def) == offsetof(nir_undef_instr, def),
+                 "nir_load_const_instr: nir_def always has to be at the same offset relative to nir_instr.");
+   static_assert(offsetof(nir_phi_instr, def) == offsetof(nir_undef_instr, def),
+                 "nir_phi_instr: nir_def always has to be at the same offset relative to nir_instr.");
+   return &container_of(def, nir_undef_instr, def)->instr;
+}
+
+static inline const nir_instr *
+nir_def_instr_const(const nir_def *def)
+{
+   return (const nir_instr *)nir_def_instr_nonconst((nir_def *)def);
+}
+
+#ifdef __cplusplus
+/* Required for function overloading */
+extern "C++" {
+
+static inline nir_instr *
+nir_def_instr(nir_def *def)
+{
+   return nir_def_instr_nonconst(def);
+}
+
+static inline const nir_instr *
+nir_def_instr(const nir_def *def)
+{
+   return nir_def_instr_const(def);
+}
+}
+#else
+/*
+ * For C we can use _Generic to overload on the constantness properly.
+ */
+#define nir_def_instr(def) _Generic((def),     \
+   const nir_def *: nir_def_instr_const,       \
+   nir_def *: nir_def_instr_nonconst)(def)
+#endif
+
+static inline nir_block *
+nir_def_block(nir_def *def)
+{
+   return nir_def_instr(def)->block;
+}
+
+static inline nir_instr_type
+nir_def_instr_type(const nir_def *def)
+{
+   return nir_def_instr(def)->type;
+}
+
 /* This struct contains metadata for correlating the final nir shader
  * (after many lowering and optimization passes) with the source spir-v
  * or glsl. To avoid adding unnecessary overhead when the driver does not

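The extern "C++" overloads and the C _Generic macro above make nir_def_instr()
const-correct in both languages. A minimal caller-side sketch (the two
functions are hypothetical, assuming only the definitions in this hunk):

   /* Non-const def: returns nir_instr *, so mutation is allowed. */
   static void
   mark_instr(nir_def *def)
   {
      nir_def_instr(def)->pass_flags = 1;
   }

   /* Const def: _Generic (or the C++ overload) selects the const variant. */
   static bool
   defined_by_alu(const nir_def *def)
   {
      const nir_instr *instr = nir_def_instr(def);
      return instr->type == nir_instr_type_alu;
   }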
@@ -2813,6 +2852,11 @@ typedef struct nir_instr_debug_info {
    nir_instr instr;
 } nir_instr_debug_info;

+typedef struct nir_scalar {
+   nir_def *def;
+   unsigned comp;
+} nir_scalar;
+
 NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
                 type, nir_instr_type_alu)
 NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,

@@ -2832,18 +2876,49 @@ NIR_DEFINE_CAST(nir_instr_as_undef, nir_instr, nir_undef_instr, instr,
 NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                 type, nir_instr_type_phi)

-#define NIR_DEFINE_DEF_AS_INSTR(type, suffix)                    \
-   static inline type *nir_def_as_##suffix(const nir_def *def)   \
-   {                                                             \
-      return nir_instr_as_##suffix(def->parent_instr);           \
+#define NIR_DEFINE_DEF_AS_INSTR(instr_type, suffix, cast)                    \
+   static inline instr_type *nir_def_as_##cast(const nir_def *def)           \
+   {                                                                         \
+      return nir_instr_as_##cast(nir_def_instr(def));                        \
+   }                                                                         \
+                                                                             \
+   static inline bool nir_def_is_##suffix(const nir_def *def)                \
+   {                                                                         \
+      return nir_def_instr(def)->type == nir_instr_type_##cast;              \
+   }                                                                         \
+                                                                             \
+   static inline instr_type *nir_def_as_##cast##_or_null(const nir_def *def) \
+   {                                                                         \
+      return nir_def_is_##suffix(def) ? nir_def_as_##cast(def) : NULL;       \
+   }                                                                         \
+                                                                             \
+   static inline bool nir_src_is_##suffix(nir_src src)                       \
+   {                                                                         \
+      return nir_def_is_##suffix(src.ssa);                                   \
+   }                                                                         \
+                                                                             \
+   static inline bool nir_scalar_is_##suffix(nir_scalar s)                   \
+   {                                                                         \
+      return nir_def_is_##suffix(s.def);                                     \
+   }                                                                         \
+                                                                             \
+   static inline instr_type *nir_src_as_##cast(nir_src src)                  \
+   {                                                                         \
+      return nir_src_is_##suffix(src) ? nir_def_as_##cast(src.ssa) : NULL;   \
+   }                                                                         \
+                                                                             \
+   static inline instr_type *nir_scalar_as_##cast(nir_scalar s)              \
+   {                                                                         \
+      return nir_def_is_##suffix(s.def) ? nir_def_as_##cast(s.def) : NULL;   \
    }

-NIR_DEFINE_DEF_AS_INSTR(nir_alu_instr, alu)
-NIR_DEFINE_DEF_AS_INSTR(nir_intrinsic_instr, intrinsic)
-NIR_DEFINE_DEF_AS_INSTR(nir_tex_instr, tex)
-NIR_DEFINE_DEF_AS_INSTR(nir_phi_instr, phi)
-NIR_DEFINE_DEF_AS_INSTR(nir_deref_instr, deref)
-NIR_DEFINE_DEF_AS_INSTR(nir_load_const_instr, load_const)
+NIR_DEFINE_DEF_AS_INSTR(nir_alu_instr, alu, alu)
+NIR_DEFINE_DEF_AS_INSTR(nir_intrinsic_instr, intrinsic, intrinsic)
+NIR_DEFINE_DEF_AS_INSTR(nir_tex_instr, tex, tex)
+NIR_DEFINE_DEF_AS_INSTR(nir_phi_instr, phi, phi)
+NIR_DEFINE_DEF_AS_INSTR(nir_deref_instr, deref, deref)
+NIR_DEFINE_DEF_AS_INSTR(nir_load_const_instr, const, load_const)
+NIR_DEFINE_DEF_AS_INSTR(nir_undef_instr, undef, undef)

 #undef NIR_DEFINE_DEF_AS_INSTR

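For a single instantiation such as NIR_DEFINE_DEF_AS_INSTR(nir_alu_instr, alu,
alu), the macro above expands to roughly this helper family (a sketch of the
expansion, bodies elided):

   static inline nir_alu_instr *nir_def_as_alu(const nir_def *def);         /* assumes an ALU def */
   static inline bool nir_def_is_alu(const nir_def *def);                   /* type check */
   static inline nir_alu_instr *nir_def_as_alu_or_null(const nir_def *def); /* NULL if not ALU */
   static inline bool nir_src_is_alu(nir_src src);
   static inline nir_alu_instr *nir_src_as_alu(nir_src src);                /* NULL if not ALU */
   static inline bool nir_scalar_is_alu(nir_scalar s);
   static inline nir_alu_instr *nir_scalar_as_alu(nir_scalar s);            /* NULL if not ALU */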
@@ -2873,28 +2948,11 @@ NIR_DEFINE_SRC_AS_CONST(double, float)

 #undef NIR_DEFINE_SRC_AS_CONST

-typedef struct nir_scalar {
-   nir_def *def;
-   unsigned comp;
-} nir_scalar;
-
-static inline bool
-nir_scalar_is_const(nir_scalar s)
-{
-   return s.def->parent_instr->type == nir_instr_type_load_const;
-}
-
-static inline bool
-nir_scalar_is_undef(nir_scalar s)
-{
-   return s.def->parent_instr->type == nir_instr_type_undef;
-}
-
 static inline nir_const_value
 nir_scalar_as_const_value(nir_scalar s)
 {
    assert(s.comp < s.def->num_components);
-   nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr);
+   nir_load_const_instr *load = nir_scalar_as_load_const(s);
    return load->value[s.comp];
 }

@@ -2913,24 +2971,12 @@ NIR_DEFINE_SCALAR_AS_CONST(double, float)

 #undef NIR_DEFINE_SCALAR_AS_CONST

-static inline bool
-nir_scalar_is_alu(nir_scalar s)
-{
-   return s.def->parent_instr->type == nir_instr_type_alu;
-}
-
 static inline nir_op
 nir_scalar_alu_op(nir_scalar s)
 {
    return nir_def_as_alu(s.def)->op;
 }

-static inline bool
-nir_scalar_is_intrinsic(nir_scalar s)
-{
-   return s.def->parent_instr->type == nir_instr_type_intrinsic;
-}
-
 static inline nir_intrinsic_op
 nir_scalar_intrinsic_op(nir_scalar s)
 {

@@ -2942,7 +2988,7 @@ nir_scalar_chase_alu_src(nir_scalar s, unsigned alu_src_idx)
 {
    nir_scalar out = { NULL, 0 };

-   nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
+   nir_alu_instr *alu = nir_scalar_as_alu(s);
    assert(alu_src_idx < nir_op_infos[alu->op].num_inputs);

    /* Our component must be written */

@@ -4299,6 +4345,18 @@ nir_after_impl(nir_function_impl *impl)
    return nir_after_cf_list(&impl->body);
 }

+static inline nir_cursor
+nir_before_def(nir_def *def)
+{
+   return nir_before_instr(nir_def_instr(def));
+}
+
+static inline nir_cursor
+nir_after_def(nir_def *def)
+{
+   return nir_after_instr(nir_def_instr(def));
+}
+
 /**
  * Insert a NIR instruction at the given cursor.
  *

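A builder-side sketch of what the new cursor helpers buy (the value being
doubled and the pass around it are made up for illustration; nir_fmul_imm is
assumed to be the stock builder multiply-by-immediate):

   /* Before */
   b->cursor = nir_after_instr(val->parent_instr);

   /* After: position the builder right after the instruction defining val */
   b->cursor = nir_after_def(val);
   nir_def *doubled = nir_fmul_imm(b, val, 2.0);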
@@ -4408,20 +4466,6 @@ bool nir_foreach_phi_src_leaving_block(nir_block *instr,

 nir_const_value *nir_src_as_const_value(nir_src src);

-#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro)  \
-   static inline c_type *                                 \
-   nir_src_as_##name(nir_src src)                         \
-   {                                                      \
-      return src.ssa->parent_instr->type == type_enum     \
-                ? cast_macro(src.ssa->parent_instr)       \
-                : NULL;                                   \
-   }
-
-NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
-NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
-            nir_instr_type_intrinsic, nir_instr_as_intrinsic)
-NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
-
 const char *nir_src_as_string(nir_src src);

 bool nir_src_is_always_uniform(nir_src src);

@@ -4495,14 +4539,14 @@ void nir_def_rewrite_uses_after_instr(nir_def *def, nir_def *new_ssa,
 static inline void
 nir_def_rewrite_uses_after(nir_def *def, nir_def *new_ssa)
 {
-   nir_def_rewrite_uses_after_instr(def, new_ssa, new_ssa->parent_instr);
+   nir_def_rewrite_uses_after_instr(def, new_ssa, nir_def_instr(new_ssa));
 }

 static inline void
 nir_def_replace(nir_def *def, nir_def *new_ssa)
 {
    nir_def_rewrite_uses(def, new_ssa);
-   nir_instr_remove(def->parent_instr);
+   nir_instr_remove(nir_def_instr(def));
 }

 nir_component_mask_t nir_src_components_read(const nir_src *src);

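nir_def_replace() collapses the common rewrite-then-remove pair into one call,
as the image-atomic hunk later in this diff also shows. A sketch, where
"replacement" is a placeholder def:

   /* Before */
   nir_def_rewrite_uses(&intr->def, replacement);
   nir_instr_remove(&intr->instr);

   /* After */
   nir_def_replace(&intr->def, replacement);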
@@ -6561,7 +6605,7 @@ nir_static_workgroup_size(const nir_shader *s);
 static inline nir_intrinsic_instr *
 nir_reg_get_decl(nir_def *reg)
 {
-   assert(reg->parent_instr->type == nir_instr_type_intrinsic);
+   assert(nir_def_is_intrinsic(reg));
    nir_intrinsic_instr *decl = nir_def_as_intrinsic(reg);
    assert(decl->intrinsic == nir_intrinsic_decl_reg);

@@ -6653,7 +6697,7 @@ nir_is_store_reg(nir_intrinsic_instr *intr)
 static inline nir_intrinsic_instr *
 nir_load_reg_for_def(const nir_def *def)
 {
-   if (def->parent_instr->type != nir_instr_type_intrinsic)
+   if (!nir_def_is_intrinsic(def))
       return NULL;

    nir_intrinsic_instr *intr = nir_def_as_intrinsic(def);

@@ -1027,7 +1027,7 @@ static inline nir_def *
 nir_iadd_imm_nuw(nir_builder *b, nir_def *x, uint64_t y)
 {
    nir_def *d = nir_iadd_imm(b, x, y);
-   if (d != x && d->parent_instr->type == nir_instr_type_alu)
+   if (d != x && nir_def_is_alu(d))
       nir_def_as_alu(d)->no_unsigned_wrap = true;
    return d;
 }

@@ -1036,7 +1036,7 @@ static inline nir_def *
 nir_iadd_nuw(nir_builder *b, nir_def *x, nir_def *y)
 {
    nir_def *d = nir_iadd(b, x, y);
-   if (d->parent_instr->type == nir_instr_type_alu)
+   if (nir_def_is_alu(d))
       nir_def_as_alu(d)->no_unsigned_wrap = true;
    return d;
 }

@@ -1118,7 +1118,7 @@ static inline nir_def *
 nir_imul_imm_nuw(nir_builder *build, nir_def *x, uint64_t y)
 {
    nir_def *d = nir_imul_imm(build, x, y);
-   if (d != x && d->parent_instr->type == nir_instr_type_alu)
+   if (d != x && nir_def_is_alu(d))
       nir_def_as_alu(d)->no_unsigned_wrap = true;
    return d;
 }

@@ -1127,7 +1127,7 @@ static inline nir_def *
 nir_imul_nuw(nir_builder *build, nir_def *x, nir_def *y)
 {
    nir_def *d = nir_imul(build, x, y);
-   if (d->parent_instr->type == nir_instr_type_alu)
+   if (nir_def_is_alu(d))
       nir_def_as_alu(d)->no_unsigned_wrap = true;
    return d;
 }

@@ -963,11 +963,10 @@ opt_alu_of_cast(nir_alu_instr *alu)
    bool progress = false;

    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
-      nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
-      if (src_instr->type != nir_instr_type_deref)
+      if (!nir_src_is_deref(alu->src[i].src))
          continue;

-      nir_deref_instr *src_deref = nir_instr_as_deref(src_instr);
+      nir_deref_instr *src_deref = nir_src_as_deref(alu->src[i].src);
       if (src_deref->deref_type != nir_deref_type_cast)
          continue;

@@ -1301,7 +1301,7 @@ visit_if_merge_phi(nir_phi_instr *phi, bool if_cond_divergent, bool ignore_undef
       phi->def.divergent = true;
       return true;
    }
-   if (src->src.ssa->parent_instr->type != nir_instr_type_undef) {
+   if (!nir_src_is_undef(src->src)) {
       defined_srcs++;
    }
 }

@@ -67,17 +67,17 @@ struct block_parallel_copies {
 static bool
 def_after(nir_def *a, nir_def *b)
 {
-   if (a->parent_instr->type == nir_instr_type_undef)
+   if (nir_def_is_undef(a))
       return false;

-   if (b->parent_instr->type == nir_instr_type_undef)
+   if (nir_def_is_undef(b))
       return true;

    /* If they're in the same block, we can rely on whichever instruction
     * comes first in the block.
     */
    if (nir_def_block(a) == nir_def_block(b))
-      return a->parent_instr->index > b->parent_instr->index;
+      return nir_def_instr(a)->index > nir_def_instr(b)->index;

    /* Otherwise, if blocks are distinct, we sort them in DFS pre-order */
    return nir_def_block(a)->dom_pre_index > nir_def_block(b)->dom_pre_index;

@@ -87,7 +87,7 @@ def_after(nir_def *a, nir_def *b)
 static bool
 ssa_def_dominates(nir_def *a, nir_def *b)
 {
-   if (a->parent_instr->type == nir_instr_type_undef) {
+   if (nir_def_is_undef(a)) {
       /* SSA undefs always dominate */
       return true;
    }

@@ -349,7 +349,7 @@ isolate_phi_nodes_block(nir_shader *shader, nir_block *block, struct from_ssa_st
       pred_copy->divergent = state->consider_divergence && nir_src_is_divergent(&src->src);

       struct block_parallel_copies *pred_copies = &state->parallel_copies[src->pred->index];
-      util_dynarray_append(&pred_copies->end, nir_instr_as_intrinsic(pred_copy->parent_instr));
+      util_dynarray_append(&pred_copies->end, nir_def_as_intrinsic(pred_copy));

       nir_src_rewrite(&src->src, pred_copy);
    }

@@ -414,7 +414,7 @@ aggressive_coalesce_parallel_copy(struct util_dynarray *pcopy,
       /* Since load_const instructions are SSA only, we can't replace their
        * destinations with registers and, therefore, can't coalesce them.
        */
-      if (copy->src[0].ssa->parent_instr->type == nir_instr_type_load_const)
+      if (nir_src_is_const(copy->src[0]))
         continue;

      merge_node *src_node = get_merge_node(copy->src[0].ssa, state);

@@ -491,18 +491,17 @@ static bool
 def_replace_with_reg(nir_def *def, nir_function_impl *impl)
 {
    /* These are handled elsewhere */
-   assert(def->parent_instr->type != nir_instr_type_undef &&
-          def->parent_instr->type != nir_instr_type_load_const);
+   assert(!nir_def_is_undef(def) && !nir_def_is_const(def));

    nir_builder b = nir_builder_create(impl);

    nir_def *reg = decl_reg_for_ssa_def(&b, def);
    nir_rewrite_uses_to_load_reg(&b, def, reg);

-   if (def->parent_instr->type == nir_instr_type_phi)
+   if (nir_def_is_phi(def))
       b.cursor = nir_before_block_after_phis(nir_def_block(def));
    else
-      b.cursor = nir_after_instr(def->parent_instr);
+      b.cursor = nir_after_def(def);

    nir_store_reg(&b, def, reg);
    return true;

@@ -572,7 +571,7 @@ rewrite_ssa_def(nir_def *def, void *void_state)
    /* At this point we know a priori that this SSA def is part of a
     * nir_dest. We can use exec_node_data to get the dest pointer.
     */
-   assert(def->parent_instr->type != nir_instr_type_load_const);
+   assert(!nir_def_is_const(def));
    nir_store_reg(&state->builder, def, reg);

    state->progress = true;

|||
|
|
@ -29,7 +29,7 @@ static bool
|
|||
accum_src_deps(nir_src *src, void *opaque)
|
||||
{
|
||||
foreach_src_data *data = (foreach_src_data *)opaque;
|
||||
nir_instr *src_instr = src->ssa->parent_instr;
|
||||
nir_instr *src_instr = nir_def_instr(src->ssa);
|
||||
|
||||
if (src_instr->type == nir_instr_type_load_const ||
|
||||
src_instr->type == nir_instr_type_undef)
|
||||
|
|
@ -154,7 +154,7 @@ nir_gather_output_dependencies(nir_shader *nir, nir_output_deps *deps)
|
|||
*/
|
||||
util_dynarray_foreach(&if_cond_stack, nir_def *, cond) {
|
||||
accum_deps(this_instr_deps,
|
||||
instr_deps[(*cond)->parent_instr->index],
|
||||
instr_deps[nir_def_instr(*cond)->index],
|
||||
num_bitset_words);
|
||||
}
|
||||
|
||||
|
|
@ -195,7 +195,7 @@ nir_gather_output_dependencies(nir_shader *nir, nir_output_deps *deps)
|
|||
*/
|
||||
util_dynarray_foreach(&if_cond_stack, nir_def *, cond) {
|
||||
accum_deps(instr_deps[phi->instr.index],
|
||||
instr_deps[(*cond)->parent_instr->index],
|
||||
instr_deps[nir_def_instr(*cond)->index],
|
||||
num_bitset_words);
|
||||
}
|
||||
|
||||
|
|
@ -265,7 +265,7 @@ nir_gather_output_dependencies(nir_shader *nir, nir_output_deps *deps)
|
|||
nir_foreach_phi(phi, nir_cf_node_cf_tree_next(parent_cf)) {
|
||||
util_dynarray_foreach(&if_cond_stack, nir_def *, cond) {
|
||||
accum_deps(instr_deps[phi->instr.index],
|
||||
instr_deps[(*cond)->parent_instr->index],
|
||||
instr_deps[nir_def_instr(*cond)->index],
|
||||
num_bitset_words);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -76,7 +76,7 @@ nir_collect_src_uniforms(const nir_src *src, int component,
|
|||
assert(max_num_bo > 0 && max_num_bo <= MAX_NUM_BO);
|
||||
assert(component < src->ssa->num_components);
|
||||
|
||||
nir_instr *instr = src->ssa->parent_instr;
|
||||
nir_instr *instr = nir_def_instr(src->ssa);
|
||||
|
||||
switch (instr->type) {
|
||||
case nir_instr_type_alu: {
|
||||
|
|
|
|||
|
|
@ -358,7 +358,7 @@ nir_srcs_equal(nir_src src1, nir_src src2)
|
|||
static nir_alu_instr *
|
||||
get_neg_instr(nir_src s, nir_alu_type base_type)
|
||||
{
|
||||
nir_alu_instr *alu = nir_src_as_alu_instr(s);
|
||||
nir_alu_instr *alu = nir_src_as_alu(s);
|
||||
|
||||
return alu != NULL && (alu->op == (base_type == nir_type_float ? nir_op_fneg : nir_op_ineg))
|
||||
? alu
|
||||
|
|
|
|||
|
|
@ -67,11 +67,8 @@ chase_alu_src_helper(const nir_src *src)
|
|||
static inline bool
|
||||
chase_source_mod(nir_def **ssa, nir_op op, uint8_t *swizzle)
|
||||
{
|
||||
if ((*ssa)->parent_instr->type != nir_instr_type_alu)
|
||||
return false;
|
||||
|
||||
nir_alu_instr *alu = nir_def_as_alu((*ssa));
|
||||
if (alu->op != op)
|
||||
nir_alu_instr *alu = nir_def_as_alu_or_null(*ssa);
|
||||
if (!alu || alu->op != op)
|
||||
return false;
|
||||
|
||||
/* If there are other uses of the modifier that don't fold, we can't fold it
|
||||
|
|
@ -97,7 +94,7 @@ chase_source_mod(nir_def **ssa, nir_op op, uint8_t *swizzle)
|
|||
nir_legacy_alu_src
|
||||
nir_legacy_chase_alu_src(const nir_alu_src *src, bool fuse_fabs)
|
||||
{
|
||||
if (src->src.ssa->parent_instr->type == nir_instr_type_alu) {
|
||||
if (nir_src_is_alu(src->src)) {
|
||||
nir_legacy_alu_src out = {
|
||||
.src.is_ssa = true,
|
||||
.src.ssa = src->src.ssa,
|
||||
|
|
@ -164,23 +161,18 @@ nir_legacy_fsat_folds(nir_alu_instr *fsat)
|
|||
assert(&fsat->src[0].src ==
|
||||
list_first_entry(&def->uses, nir_src, use_link));
|
||||
|
||||
nir_instr *generate = def->parent_instr;
|
||||
if (generate->type != nir_instr_type_alu)
|
||||
return false;
|
||||
|
||||
nir_alu_instr *generate_alu = nir_instr_as_alu(generate);
|
||||
nir_alu_type dest_type = nir_op_infos[generate_alu->op].output_type;
|
||||
if (dest_type != nir_type_float)
|
||||
nir_alu_instr *generate = nir_def_as_alu_or_null(def);
|
||||
if (!generate || nir_op_infos[generate->op].output_type != nir_type_float)
|
||||
return false;
|
||||
|
||||
/* If we are a saturating a source modifier fsat(fabs(x)), we need to emit
|
||||
* either the fsat or the modifier or else the sequence disappears.
|
||||
*/
|
||||
if (generate_alu->op == nir_op_fabs || generate_alu->op == nir_op_fneg)
|
||||
if (generate->op == nir_op_fabs || generate->op == nir_op_fneg)
|
||||
return false;
|
||||
|
||||
/* We can't do expansions without a move in the middle */
|
||||
unsigned nr_components = generate_alu->def.num_components;
|
||||
unsigned nr_components = generate->def.num_components;
|
||||
if (fsat->def.num_components != nr_components)
|
||||
return false;
|
||||
|
||||
|
|
|
|||
|
|
@ -1140,12 +1140,8 @@ is_direct_uniform_load(nir_def *def, nir_scalar *s)
|
|||
*/
|
||||
*s = nir_scalar_resolved(def, 0);
|
||||
|
||||
nir_def *ssa = s->def;
|
||||
if (ssa->parent_instr->type != nir_instr_type_intrinsic)
|
||||
return false;
|
||||
|
||||
nir_intrinsic_instr *intr = nir_def_as_intrinsic(ssa);
|
||||
if (intr->intrinsic != nir_intrinsic_load_deref)
|
||||
nir_intrinsic_instr *intr = nir_scalar_as_intrinsic(*s);
|
||||
if (!intr || intr->intrinsic != nir_intrinsic_load_deref)
|
||||
return false;
|
||||
|
||||
nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
|
||||
|
|
@ -1406,7 +1402,7 @@ nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
|
|||
continue;
|
||||
|
||||
nir_def *ssa = intr->src[1].ssa;
|
||||
if (ssa->parent_instr->type == nir_instr_type_load_const) {
|
||||
if (nir_def_is_const(ssa)) {
|
||||
progress |= replace_varying_input_by_constant_load(consumer, intr);
|
||||
continue;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -294,18 +294,20 @@ nir_def_is_live_at(nir_def *def, nir_instr *instr)
|
|||
bool
|
||||
nir_defs_interfere(nir_def *a, nir_def *b)
|
||||
{
|
||||
if (a->parent_instr == b->parent_instr) {
|
||||
nir_instr *a_instr = nir_def_instr(a);
|
||||
nir_instr *b_instr = nir_def_instr(b);
|
||||
if (a_instr == b_instr) {
|
||||
/* Two variables defined at the same time interfere assuming at
|
||||
* least one isn't dead.
|
||||
*/
|
||||
return true;
|
||||
} else if (a->parent_instr->type == nir_instr_type_undef ||
|
||||
b->parent_instr->type == nir_instr_type_undef) {
|
||||
} else if (a_instr->type == nir_instr_type_undef ||
|
||||
b_instr->type == nir_instr_type_undef) {
|
||||
/* If either variable is an ssa_undef, then there's no interference */
|
||||
return false;
|
||||
} else if (a->parent_instr->index < b->parent_instr->index) {
|
||||
return nir_def_is_live_at(a, b->parent_instr);
|
||||
} else if (a_instr->index < b_instr->index) {
|
||||
return nir_def_is_live_at(a, b_instr);
|
||||
} else {
|
||||
return nir_def_is_live_at(b, a->parent_instr);
|
||||
return nir_def_is_live_at(b, a_instr);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -176,10 +176,10 @@ phi_instr_as_alu(nir_phi_instr *phi)
|
|||
{
|
||||
nir_alu_instr *first = NULL;
|
||||
nir_foreach_phi_src(src, phi) {
|
||||
if (src->src.ssa->parent_instr->type != nir_instr_type_alu)
|
||||
nir_alu_instr *alu = nir_src_as_alu(src->src);
|
||||
if (!alu)
|
||||
return NULL;
|
||||
|
||||
nir_alu_instr *alu = nir_def_as_alu(src->src.ssa);
|
||||
if (first == NULL) {
|
||||
first = alu;
|
||||
} else {
|
||||
|
|
@ -206,7 +206,7 @@ alu_src_has_identity_swizzle(nir_alu_instr *alu, unsigned src_idx)
|
|||
static bool
|
||||
is_only_uniform_src(nir_src *src)
|
||||
{
|
||||
nir_instr *instr = src->ssa->parent_instr;
|
||||
nir_instr *instr = nir_def_instr(src->ssa);
|
||||
|
||||
switch (instr->type) {
|
||||
case nir_instr_type_alu: {
|
||||
|
|
@ -266,7 +266,7 @@ compute_induction_information(loop_info_state *state)
|
|||
/* Detect inductions variables that are incremented in both branches
|
||||
* of an unnested if rather than in a loop block.
|
||||
*/
|
||||
if (src->parent_instr->type == nir_instr_type_phi) {
|
||||
if (nir_def_is_phi(src)) {
|
||||
nir_phi_instr *src_phi = nir_def_as_phi(src);
|
||||
nir_alu_instr *src_phi_alu = phi_instr_as_alu(src_phi);
|
||||
if (src_phi_alu) {
|
||||
|
|
@ -274,7 +274,7 @@ compute_induction_information(loop_info_state *state)
|
|||
}
|
||||
}
|
||||
|
||||
if (src->parent_instr->type == nir_instr_type_alu && !var.update_src) {
|
||||
if (nir_def_is_alu(src) && !var.update_src) {
|
||||
var.def = src;
|
||||
nir_alu_instr *alu = nir_def_as_alu(src);
|
||||
|
||||
|
|
@ -357,7 +357,7 @@ find_loop_terminators(loop_info_state *state)
|
|||
if (!break_blk)
|
||||
continue;
|
||||
|
||||
if (nif->condition.ssa->parent_instr->type == nir_instr_type_phi) {
|
||||
if (nir_src_is_phi(nif->condition)) {
|
||||
state->loop->info->complex_loop = true;
|
||||
return false;
|
||||
}
|
||||
|
|
@ -372,7 +372,7 @@ find_loop_terminators(loop_info_state *state)
|
|||
terminator->break_block = break_blk;
|
||||
terminator->continue_from_block = continue_from_blk;
|
||||
terminator->continue_from_then = continue_from_then;
|
||||
terminator->conditional_instr = nif->condition.ssa->parent_instr;
|
||||
terminator->conditional_instr = nir_def_instr(nif->condition.ssa);
|
||||
|
||||
success = true;
|
||||
}
|
||||
|
|
@ -1109,7 +1109,7 @@ find_trip_count(loop_info_state *state, unsigned execution_mode,
|
|||
|
||||
alu_op = nir_scalar_alu_op(cond);
|
||||
trip_count_known = false;
|
||||
terminator->conditional_instr = cond.def->parent_instr;
|
||||
terminator->conditional_instr = nir_def_instr(cond.def);
|
||||
terminator->exact_trip_count_unknown = true;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -75,7 +75,7 @@ lower_large_src(nir_src *src, void *s)
|
|||
{
|
||||
lower_state *state = s;
|
||||
|
||||
nir_instr *parent = src->ssa->parent_instr;
|
||||
nir_instr *parent = nir_def_instr(src->ssa);
|
||||
|
||||
/* No need to visit instructions we've already visited.. this also
|
||||
* avoids infinite recursion when phi's are involved:
|
||||
|
|
|
|||
|
|
@ -119,7 +119,7 @@ nir_lower_array_deref_of_vec_impl(nir_function_impl *impl,
|
|||
nir_def *index = deref->arr.index.ssa;
|
||||
nir_def *scalar =
|
||||
nir_vector_extract(&b, &intrin->def, index);
|
||||
if (scalar->parent_instr->type == nir_instr_type_undef) {
|
||||
if (nir_def_is_undef(scalar)) {
|
||||
nir_def_replace(&intrin->def, scalar);
|
||||
} else {
|
||||
nir_def_rewrite_uses_after(&intrin->def, scalar);
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ convert_to_bit_size(nir_builder *bld, nir_def *src,
|
|||
assert(src->bit_size < bit_size);
|
||||
|
||||
/* create b2i32(a) instead of i2i32(b2i8(a))/i2i32(b2i16(a)) */
|
||||
nir_alu_instr *alu = nir_src_as_alu_instr(nir_src_for_ssa(src));
|
||||
nir_alu_instr *alu = nir_def_as_alu_or_null(src);
|
||||
if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&
|
||||
alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {
|
||||
nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);
|
||||
|
|
@ -67,7 +67,7 @@ before_conversion(nir_builder *bld, nir_alu_type type, unsigned bit_size, nir_de
|
|||
default:
|
||||
return NULL;
|
||||
}
|
||||
if (def->parent_instr->type != nir_instr_type_alu) {
|
||||
if (!nir_def_is_alu(def)) {
|
||||
return NULL;
|
||||
}
|
||||
nir_alu_instr *alu_instr = nir_def_as_alu(def);
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ set_const_initialiser(nir_deref_instr **p, nir_constant *top_level_init,
|
|||
/* Now that we have selected the corrent nir_constant we copy the constant
|
||||
* values to it.
|
||||
*/
|
||||
nir_instr *src_instr = const_src->ssa->parent_instr;
|
||||
nir_instr *src_instr = nir_def_instr(const_src->ssa);
|
||||
assert(src_instr->type == nir_instr_type_load_const);
|
||||
nir_load_const_instr *load = nir_instr_as_load_const(src_instr);
|
||||
|
||||
|
|
|
|||
|
|
@ -474,7 +474,7 @@ nir_get_explicit_deref_range(nir_deref_instr *deref,
|
|||
}
|
||||
|
||||
case nir_deref_type_cast: {
|
||||
nir_instr *parent_instr = deref->parent.ssa->parent_instr;
|
||||
nir_instr *parent_instr = nir_def_instr(deref->parent.ssa);
|
||||
|
||||
switch (parent_instr->type) {
|
||||
case nir_instr_type_load_const: {
|
||||
|
|
|
|||
|
|
@ -547,7 +547,7 @@ convert_flrp_instruction(nir_builder *bld,
|
|||
* There is no need to handle t = 0.5 specially. nir_opt_algebraic
|
||||
* already has optimizations to convert 0.5x + 0.5y to 0.5(x + y).
|
||||
*/
|
||||
if (alu->src[2].src.ssa->parent_instr->type == nir_instr_type_load_const) {
|
||||
if (nir_src_is_const(alu->src[2].src)) {
|
||||
replace_with_strict(bld, dead_flrp, alu);
|
||||
return;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -92,9 +92,8 @@ lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
|
|||
*/
|
||||
nir_def_rewrite_uses(&intr->def, phi);
|
||||
|
||||
nir_instr *phi_instr = phi->parent_instr;
|
||||
nir_phi_instr *phi_as_phi = nir_instr_as_phi(phi_instr);
|
||||
nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_as_phi,
|
||||
nir_phi_instr *phi_instr = nir_def_as_phi(phi);
|
||||
nir_phi_src *phi_src = nir_phi_get_src_from_block(phi_instr,
|
||||
intr->instr.block);
|
||||
nir_src_rewrite(&phi_src->src, &intr->def);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -76,8 +76,7 @@ lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
|
|||
.format = format,
|
||||
.access = nir_intrinsic_access(intr));
|
||||
|
||||
nir_instr *address_instr = address->parent_instr;
|
||||
nir_intrinsic_instr *address_intr = nir_instr_as_intrinsic(address_instr);
|
||||
nir_intrinsic_instr *address_intr = nir_def_as_intrinsic(address);
|
||||
|
||||
address_intr->intrinsic = address_op;
|
||||
if (address_op == nir_intrinsic_image_texel_address) {
|
||||
|
|
@ -100,8 +99,7 @@ lower(nir_builder *b, nir_intrinsic_instr *intr, void *data)
|
|||
/* Replace the image atomic with the global atomic. Remove the image
|
||||
* explicitly because it has side effects so is not DCE'd.
|
||||
*/
|
||||
nir_def_rewrite_uses(&intr->def, global);
|
||||
nir_instr_remove(&intr->instr);
|
||||
nir_def_replace(&intr->def, global);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ check_for_lowered_ffloor(nir_alu_instr *fadd)
|
|||
nir_alu_instr *fneg = NULL;
|
||||
nir_src x;
|
||||
for (unsigned i = 0; i < 2; i++) {
|
||||
nir_alu_instr *fadd_src_alu = nir_src_as_alu_instr(fadd->src[i].src);
|
||||
nir_alu_instr *fadd_src_alu = nir_src_as_alu(fadd->src[i].src);
|
||||
if (fadd_src_alu && fadd_src_alu->op == nir_op_fneg) {
|
||||
fneg = fadd_src_alu;
|
||||
x = fadd->src[1 - i].src;
|
||||
|
|
@ -69,7 +69,7 @@ check_for_lowered_ffloor(nir_alu_instr *fadd)
|
|||
if (!fneg || !instr_has_only_trivial_swizzles(fneg))
|
||||
return false;
|
||||
|
||||
nir_alu_instr *ffract = nir_src_as_alu_instr(fneg->src[0].src);
|
||||
nir_alu_instr *ffract = nir_src_as_alu(fneg->src[0].src);
|
||||
if (ffract && ffract->op == nir_op_ffract &&
|
||||
nir_srcs_equal(ffract->src[0].src, x) &&
|
||||
instr_has_only_trivial_swizzles(ffract))
|
||||
|
|
@ -123,7 +123,7 @@ lower_alu_instr(nir_builder *b, nir_alu_instr *alu)
|
|||
/* If the source was already integer, then we did't need to truncate and
|
||||
* can switch it to a mov that can be copy-propagated away.
|
||||
*/
|
||||
nir_alu_instr *src_alu = nir_src_as_alu_instr(alu->src[0].src);
|
||||
nir_alu_instr *src_alu = nir_src_as_alu(alu->src[0].src);
|
||||
if (src_alu) {
|
||||
switch (src_alu->op) {
|
||||
/* Check for the y = x - ffract(x) patterns from lowered ffloor. */
|
||||
|
|
|
|||
|
|
@ -321,8 +321,7 @@ static nir_def *
|
|||
simplify_offset_src(nir_builder *b, nir_def *offset, unsigned num_slots)
|
||||
{
|
||||
/* Force index=0 for any indirect access to array[1]. */
|
||||
if (num_slots == 1 &&
|
||||
offset->parent_instr->type != nir_instr_type_load_const)
|
||||
if (num_slots == 1 && !nir_def_is_const(offset))
|
||||
return nir_imm_int(b, 0);
|
||||
|
||||
return offset;
|
||||
|
|
|
|||
|
|
@ -94,7 +94,7 @@ nir_lower_mediump_io(nir_shader *nir, nir_variable_mode modes,
|
|||
bool is_fragdepth = (nir->info.stage == MESA_SHADER_FRAGMENT &&
|
||||
sem.location == FRAG_RESULT_DEPTH);
|
||||
if (!sem.medium_precision &&
|
||||
(is_varying || is_fragdepth || val->parent_instr->type != nir_instr_type_alu ||
|
||||
(is_varying || is_fragdepth || !nir_def_is_alu(val) ||
|
||||
nir_def_as_alu(val)->op != upconvert_op)) {
|
||||
continue;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -177,7 +177,7 @@ add_non_uniform_instr(struct nu_state *state, struct nu_handle *handles,
|
|||
key.instr_index = instr->index;
|
||||
|
||||
for (uint32_t i = 0; i < handle_count; i++)
|
||||
key.handle_indixes[i] = handles[i].handle->parent_instr->index;
|
||||
key.handle_indixes[i] = nir_def_instr(handles[i].handle)->index;
|
||||
|
||||
struct hash_entry *entry = _mesa_hash_table_search(state->accesses, &key);
|
||||
if (!entry) {
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ static bool
|
|||
is_phi_src_scalarizable(nir_phi_src *src)
|
||||
{
|
||||
|
||||
nir_instr *src_instr = src->src.ssa->parent_instr;
|
||||
nir_instr *src_instr = nir_def_instr(src->src.ssa);
|
||||
switch (src_instr->type) {
|
||||
case nir_instr_type_alu: {
|
||||
nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);
|
||||
|
|
|
|||
|
|
@ -170,7 +170,7 @@ can_remat_instr(nir_instr *instr, struct u_sparse_bitset *remat)
|
|||
static bool
|
||||
can_remat_ssa_def(nir_def *def, struct u_sparse_bitset *remat)
|
||||
{
|
||||
return can_remat_instr(def->parent_instr, remat);
|
||||
return can_remat_instr(nir_def_instr(def), remat);
|
||||
}
|
||||
|
||||
struct add_instr_data {
|
||||
|
|
@ -186,7 +186,7 @@ add_src_instr(nir_src *src, void *state)
|
|||
return true;
|
||||
|
||||
util_dynarray_foreach(data->buf, nir_instr *, instr_ptr) {
|
||||
if (*instr_ptr == src->ssa->parent_instr)
|
||||
if (*instr_ptr == nir_def_instr(src->ssa))
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -194,7 +194,7 @@ add_src_instr(nir_src *src, void *state)
|
|||
if (data->buf->size >= data->buf->capacity)
|
||||
return false;
|
||||
|
||||
util_dynarray_append(data->buf, src->ssa->parent_instr);
|
||||
util_dynarray_append(data->buf, nir_def_instr(src->ssa));
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -215,7 +215,7 @@ can_remat_chain_ssa_def(nir_def *def, struct u_sparse_bitset *remat, struct util
|
|||
void *mem_ctx = ralloc_context(NULL);
|
||||
|
||||
/* Add all the instructions involved in build this ssa_def */
|
||||
util_dynarray_append(buf, def->parent_instr);
|
||||
util_dynarray_append(buf, nir_def_instr(def));
|
||||
|
||||
unsigned idx = 0;
|
||||
struct add_instr_data data = {
|
||||
|
|
@ -271,7 +271,7 @@ fail:
|
|||
static nir_def *
|
||||
remat_ssa_def(nir_builder *b, nir_def *def, struct hash_table *remap_table)
|
||||
{
|
||||
nir_instr *clone = nir_instr_clone_deep(b->shader, def->parent_instr, remap_table);
|
||||
nir_instr *clone = nir_instr_clone_deep(b->shader, nir_def_instr(def), remap_table);
|
||||
nir_builder_instr_insert(b, clone);
|
||||
return nir_instr_def(clone);
|
||||
}
|
||||
|
|
@@ -491,7 +491,7 @@ spill_ssa_defs_and_lower_shader_calls(nir_shader *shader, uint32_t num_calls,
          if (!u_sparse_bitset_test(call_live[c], def->index))
             continue;
 
-         if (!options->should_remat_callback(def->parent_instr,
+         if (!options->should_remat_callback(nir_def_instr(def),
                                              options->should_remat_data))
            continue;
 
@@ -809,7 +809,7 @@ duplicate_loop_bodies(nir_function_impl *impl, nir_instr *resume_instr)
    /* Initialize resume to true at the start of the shader, right after
     * the register is declared at the start.
     */
-   b.cursor = nir_after_instr(resume_reg->parent_instr);
+   b.cursor = nir_after_def(resume_reg);
    nir_store_reg(&b, nir_imm_true(&b), resume_reg);
 
    /* Set resume to false right after the resume instruction */
@@ -1383,7 +1383,7 @@ opt_remove_respills_instr(struct nir_builder *b,
    if (store_intrin->intrinsic != nir_intrinsic_store_stack)
       return false;
 
-   nir_instr *value_instr = store_intrin->src[0].ssa->parent_instr;
+   nir_instr *value_instr = nir_def_instr(store_intrin->src[0].ssa);
    if (value_instr->type != nir_instr_type_intrinsic)
       return false;
 
@@ -549,7 +549,7 @@ lower_compute_system_value_instr(nir_builder *b,
        b->shader->info.derivative_group == DERIVATIVE_GROUP_QUADS &&
        _mesa_set_search(state->lower_once_list, instr) == NULL) {
       nir_def *ids = nir_load_local_invocation_id(b);
-      _mesa_set_add(state->lower_once_list, ids->parent_instr);
+      _mesa_set_add(state->lower_once_list, nir_def_instr(ids));
 
       nir_def *x = nir_channel(b, ids, 0);
       nir_def *y = nir_channel(b, ids, 1);
@@ -704,7 +704,7 @@ lower_compute_system_value_instr(nir_builder *b,
         nir_def *group_size = nir_u2uN(b, nir_load_workgroup_size(b), bit_size);
         nir_def *base_group_id = nir_load_base_workgroup_id(b, bit_size);
 
-        _mesa_set_add(state->lower_once_list, global_id->parent_instr);
+        _mesa_set_add(state->lower_once_list, nir_def_instr(global_id));
 
        return nir_iadd(b, global_id, nir_imul(b, base_group_id, group_size));
      } else if (options && options->global_id_is_32bit && bit_size > 32) {
@@ -1560,7 +1560,7 @@ nir_tex_parse_txd_coords(nir_shader *shader, nir_tex_instr *tex, nir_instr **ddx
           op != nir_intrinsic_ddy_coarse)
          return 0;
 
-      ddxy_instrs[i * 2 + j] = ddxy_comp.def->parent_instr;
+      ddxy_instrs[i * 2 + j] = nir_def_instr(ddxy_comp.def);
 
      nir_def *def = nir_def_as_intrinsic(ddxy_comp.def)->src[0].ssa;
      ddxy_comp = nir_scalar_resolved(def, ddxy_comp.comp);
@@ -606,7 +606,7 @@ nir_def_set_name(nir_shader *shader, nir_def *def, char *name)
    if (!name || likely(!shader->has_debug_info))
       return;
 
-   nir_instr_debug_info *debug_info = nir_instr_get_debug_info(def->parent_instr);
+   nir_instr_debug_info *debug_info = nir_instr_get_debug_info(nir_def_instr(def));
    debug_info->variable_name = name;
 }
 
@@ -45,7 +45,7 @@ insert_store(nir_builder *b, nir_def *reg, nir_alu_instr *vec,
    }
 
    /* No sense storing from undef, just return the write mask */
-   if (src->parent_instr->type == nir_instr_type_undef)
+   if (nir_def_is_undef(src))
       return write_mask;
 
    b->cursor = nir_before_instr(&vec->instr);
@@ -85,12 +85,10 @@ try_coalesce(nir_builder *b, nir_def *reg, nir_alu_instr *vec,
       return 0;
    }
 
-   if (vec->src[start_idx].src.ssa->parent_instr->type != nir_instr_type_alu)
+   nir_alu_instr *src_alu = nir_src_as_alu(vec->src[start_idx].src);
+   if (!src_alu)
       return 0;
 
-   nir_alu_instr *src_alu =
-      nir_def_as_alu(vec->src[start_idx].src.ssa);
-
    if (has_replicated_dest(src_alu)) {
       /* The fdot instruction is special: It replicates its result to all
        * components. This means that we can always rewrite its destination
@@ -54,7 +54,7 @@ nir_mod_analysis(nir_scalar val, nir_alu_type val_type, unsigned div, unsigned *
 
    assert(util_is_power_of_two_nonzero(div));
 
-   switch (val.def->parent_instr->type) {
+   switch (nir_def_instr_type(val.def)) {
    case nir_instr_type_load_const: {
       nir_load_const_instr *load =
          nir_def_as_load_const(val.def);
@@ -48,10 +48,10 @@
 static bool
 ssa_def_dominates_instr(nir_def *def, nir_instr *instr)
 {
-   if (instr->index <= def->parent_instr->index) {
+   if (instr->index <= nir_def_instr(def)->index) {
       return false;
    } else if (nir_def_block(def) == instr->block) {
-      return def->parent_instr->index < instr->index;
+      return nir_def_instr(def)->index < instr->index;
    } else {
       return nir_block_dominates(nir_def_block(def), instr->block);
    }
@@ -55,7 +55,7 @@ opt_bary_at_sample(nir_builder *b, nir_intrinsic_instr *intr, bool lower_sample_
 static bool
 src_is_vec2_sample_pos_minus_half(nir_src src)
 {
-   nir_alu_instr *alu = nir_src_as_alu_instr(src);
+   nir_alu_instr *alu = nir_src_as_alu(src);
    if (!alu || alu->op != nir_op_vec2)
       return false;
 
@@ -58,7 +58,7 @@ remat_ssa_def(nir_builder *b, nir_def *def, struct hash_table *remap_table,
                                   def->bit_size, def_blocks);
    _mesa_hash_table_insert(phi_value_table, def, val);
 
-   nir_instr *clone = nir_instr_clone_deep(b->shader, def->parent_instr,
+   nir_instr *clone = nir_instr_clone_deep(b->shader, nir_def_instr(def),
                                            remap_table);
    nir_builder_instr_insert(b, clone);
    nir_def *new_def = nir_instr_def(clone);
@@ -83,13 +83,13 @@ can_remat_chain(nir_src *src, void *data)
    if (_mesa_hash_table_search(check_data->remap_table, src->ssa))
       return true;
 
-   if (!can_remat_instr(src->ssa->parent_instr))
+   if (!can_remat_instr(nir_def_instr(src->ssa)))
      return false;
 
    if (check_data->chain_length++ >= 16)
      return false;
 
-   return nir_foreach_src(src->ssa->parent_instr, can_remat_chain, check_data);
+   return nir_foreach_src(nir_def_instr(src->ssa), can_remat_chain, check_data);
 }
 
 struct remat_chain_data {
@@ -108,7 +108,7 @@ do_remat_chain(nir_src *src, void *data)
    if (_mesa_hash_table_search(remat_data->remap_table, src->ssa))
      return true;
 
-   nir_foreach_src(src->ssa->parent_instr, do_remat_chain, remat_data);
+   nir_foreach_src(nir_def_instr(src->ssa), do_remat_chain, remat_data);
 
    remat_ssa_def(remat_data->b, src->ssa, remat_data->remap_table,
                  remat_data->phi_value_table, remat_data->phi_builder,
@@ -139,7 +139,7 @@ rewrite_instr_src_from_phi_builder(nir_src *src, void *data)
    bool can_rewrite = true;
    if (nir_def_block(new_def) == block && new_def->index != UINT32_MAX)
       can_rewrite =
-         !nir_instr_is_before(nir_src_parent_instr(src), new_def->parent_instr);
+         !nir_instr_is_before(nir_src_parent_instr(src), nir_def_instr(new_def));
 
    if (can_rewrite)
       nir_src_rewrite(src, new_def);
@@ -208,7 +208,7 @@ nir_minimize_call_live_states_impl(nir_function_impl *impl)
          .chain_length = 1,
       };
 
-      if (!nir_foreach_src(rematerializable[i]->parent_instr,
+      if (!nir_foreach_src(nir_def_instr(rematerializable[i]),
                           can_remat_chain, &check_data))
         continue;
 
@@ -220,7 +220,7 @@ nir_minimize_call_live_states_impl(nir_function_impl *impl)
         .def_blocks = def_blocks,
      };
 
-      nir_foreach_src(rematerializable[i]->parent_instr, do_remat_chain,
+      nir_foreach_src(nir_def_instr(rematerializable[i]), do_remat_chain,
                      &remat_data);
 
      remat_ssa_def(&b, rematerializable[i], remap_table, phi_value_table,
@@ -59,11 +59,9 @@ nir_try_constant_fold_alu(nir_builder *b, nir_alu_instr *alu)
           !nir_alu_type_get_type_size(nir_op_infos[alu->op].input_types[i]))
          bit_size = alu->src[i].src.ssa->bit_size;
 
-      nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
-
-      if (src_instr->type != nir_instr_type_load_const)
+      nir_load_const_instr *load_const = nir_src_as_load_const(alu->src[i].src);
+      if (!load_const)
         return NULL;
-      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);
 
      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(alu, i);
           j++) {
@@ -754,7 +754,7 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
      if (load_def == NULL)
        load_def = nir_load_deref(b, entry->dst.instr);
 
-      if (load_def->parent_instr == &intrin->instr)
+      if (nir_def_instr(load_def) == &intrin->instr)
        keep_intrin = true;
 
      comps[i] = nir_get_scalar(load_def, i);
@@ -35,11 +35,10 @@ bool
 nir_def_is_frag_coord_z(nir_def *def)
 {
    nir_scalar scalar = nir_scalar_resolved(def, 0);
-   nir_instr *instr = scalar.def->parent_instr;
-   if (instr->type != nir_instr_type_intrinsic)
+   nir_intrinsic_instr *intrin = nir_scalar_as_intrinsic(scalar);
+   if (!intrin)
       return false;
 
-   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
    switch (intrin->intrinsic) {
    case nir_intrinsic_load_frag_coord:
       /* Depth is gl_FragCoord.z */
@@ -166,7 +166,7 @@ static bool
 is_src_scalarizable(nir_src *src)
 {
 
-   nir_instr *src_instr = src->ssa->parent_instr;
+   nir_instr *src_instr = nir_def_instr(src->ssa);
    switch (src_instr->type) {
    case nir_instr_type_alu: {
       nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);
@@ -416,7 +416,7 @@ gcm_schedule_early_src(nir_src *src, void *void_state)
    struct gcm_state *state = void_state;
    nir_instr *instr = state->instr;
 
-   gcm_schedule_early_instr(src->ssa->parent_instr, void_state);
+   gcm_schedule_early_instr(nir_def_instr(src->ssa), void_state);
 
    /* While the index isn't a proper dominance depth, it does have the
    * property that if A dominates B then A->index <= B->index. Since we
@@ -426,7 +426,7 @@ gcm_schedule_early_src(nir_src *src, void *void_state)
    * Therefore, we can just go ahead and just compare indices.
    */
    struct gcm_instr_info *src_info =
-      &state->instr_infos[src->ssa->parent_instr->index];
+      &state->instr_infos[nir_def_instr(src->ssa)->index];
    struct gcm_instr_info *info = &state->instr_infos[instr->index];
    if (info->early_block->index < src_info->early_block->index)
       info->early_block = src_info->early_block;
@@ -657,17 +657,17 @@ gcm_schedule_late_def(nir_def *def, void *void_state)
    }
 
    nir_block *early_block =
-      state->instr_infos[def->parent_instr->index].early_block;
+      state->instr_infos[nir_def_instr(def)->index].early_block;
 
    /* Some instructions may never be used. Flag them and the instruction
    * placement code will get rid of them for us.
    */
    if (lca == NULL) {
-      def->parent_instr->block = NULL;
+      nir_def_instr(def)->block = NULL;
      return true;
    }
 
-   if (def->parent_instr->pass_flags & GCM_INSTR_SCHEDULE_EARLIER_ONLY &&
+   if (nir_def_instr(def)->pass_flags & GCM_INSTR_SCHEDULE_EARLIER_ONLY &&
       lca != nir_def_block(def) &&
       nir_block_dominates(nir_def_block(def), lca)) {
      lca = nir_def_block(def);
@@ -679,12 +679,12 @@ gcm_schedule_late_def(nir_def *def, void *void_state)
    * as far outside loops as we can get.
    */
    nir_block *best_block =
-      gcm_choose_block_for_instr(def->parent_instr, early_block, lca, state);
+      gcm_choose_block_for_instr(nir_def_instr(def), early_block, lca, state);
 
    if (nir_def_block(def) != best_block)
      state->progress = true;
 
-   def->parent_instr->block = best_block;
+   nir_def_instr(def)->block = best_block;
 
    return true;
 }
@@ -69,7 +69,7 @@ get_load_resource(nir_instr *instr)
          switch (tex->src[i].src_type) {
          case nir_tex_src_texture_deref:
          case nir_tex_src_texture_handle:
-            return (opaque_resource*)tex->src[i].src.ssa->parent_instr;
+            return (opaque_resource*)nir_def_instr(tex->src[i].src.ssa);
         default:
            break;
         }
@@ -112,7 +112,7 @@ get_load_resource(nir_instr *instr)
    /* load_ubo is ignored because it's usually cheap. */
    case nir_intrinsic_load_ssbo:
    case nir_intrinsic_load_global:
-      return (opaque_resource*)nir_instr_as_intrinsic(instr)->src[0].ssa->parent_instr;
+      return (opaque_resource*)nir_def_instr(nir_instr_as_intrinsic(instr)->src[0].ssa);
    default:
      return NULL;
    }
@@ -167,7 +167,7 @@ has_only_sources_less_than(nir_src *src, void *data)
 
    /* true if nir_foreach_src should keep going */
    return state->block != nir_def_block(src->ssa) ||
-          state->infos[src->ssa->parent_instr->index].instr_index <
+          state->infos[nir_def_instr(src->ssa)->index].instr_index <
          state->first_instr_index;
 }
 
@@ -329,11 +329,11 @@ static bool
 gather_indirections(nir_src *src, void *data)
 {
    struct indirection_state *state = (struct indirection_state *)data;
-   nir_instr *instr = src->ssa->parent_instr;
+   nir_instr *instr = nir_def_instr(src->ssa);
 
    /* We only count indirections within the same block. */
    if (instr->block == state->block) {
-      unsigned indirections = get_num_indirections(src->ssa->parent_instr,
+      unsigned indirections = get_num_indirections(nir_def_instr(src->ssa),
                                                   state->infos);
 
      if (instr->type == nir_instr_type_tex || is_grouped_load(instr))
@@ -159,7 +159,7 @@ opt_peel_loop_initial_if(nir_loop *loop)
    nir_if *nif = nir_cf_node_as_if(if_node);
 
    nir_def *cond = nif->condition.ssa;
-   if (cond->parent_instr->type != nir_instr_type_phi)
+   if (!nir_def_is_phi(cond))
       return false;
 
    nir_phi_instr *cond_phi = nir_def_as_phi(cond);
@@ -288,7 +288,7 @@ is_trivial_bcsel(const nir_instr *instr, bool allow_non_phi_src)
          nir_def_block(bcsel->src[i].src.ssa) != instr->block)
        return false;
 
-      if (bcsel->src[i].src.ssa->parent_instr->type != nir_instr_type_phi) {
+      if (!nir_src_is_phi(bcsel->src[i].src)) {
        /* opt_split_alu_of_phi() is able to peel that src from the loop */
        if (i == 0 || !allow_non_phi_src)
           return false;
@@ -420,7 +420,7 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop, nir_opt_if_options options)
    nir_def *continue_srcs[8]; // FINISHME: Array size?
 
    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
-      nir_instr *const src_instr = alu->src[i].src.ssa->parent_instr;
+      nir_instr *const src_instr = nir_def_instr(alu->src[i].src.ssa);
 
      /* If the source is a phi in the loop header block, then the
       * prev_srcs and continue_srcs will come from the different sources
@@ -440,13 +440,11 @@ opt_split_alu_of_phi(nir_builder *b, nir_loop *loop, nir_opt_if_options options)
 
    nir_foreach_phi_src(src_of_phi, phi) {
       if (src_of_phi->pred == prev_block) {
-         if (src_of_phi->src.ssa->parent_instr->type !=
-             nir_instr_type_undef) {
+         if (!nir_src_is_undef(src_of_phi->src)) {
            is_prev_result_undef = false;
         }
 
-         if (src_of_phi->src.ssa->parent_instr->type !=
-             nir_instr_type_load_const) {
+         if (!nir_src_is_const(src_of_phi->src)) {
           is_prev_result_const = false;
         }
 
@@ -1026,7 +1024,7 @@ opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
    }
 
    bool invert = false;
-   nir_alu_instr *alu = nir_src_as_alu_instr(nif->condition);
+   nir_alu_instr *alu = nir_src_as_alu(nif->condition);
    if (alu != NULL && alu->op == nir_op_inot &&
       nir_src_num_components(alu->src[0].src) == 1) {
      /* Consider
@@ -1047,7 +1045,7 @@ opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
    }
 
      invert = true;
-     alu = nir_src_as_alu_instr(alu->src[0].src);
+     alu = nir_src_as_alu(alu->src[0].src);
    }
 
    if (alu != NULL) {
@@ -1073,7 +1071,7 @@ opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
         /* Just like above, if the op is inot, peel the inot off and try
          * some more.
          */
-         nir_alu_instr *alu_src = nir_src_as_alu_instr(alu->src[i].src);
+         nir_alu_instr *alu_src = nir_src_as_alu(alu->src[i].src);
        if (alu_src != NULL && alu_src->op == nir_op_inot &&
            nir_src_num_components(alu_src->src[0].src) == 1) {
           nir_foreach_use_including_if_safe(use_src, alu_src->src[0].src.ssa) {
@@ -1223,7 +1221,7 @@ opt_phi_src_unused(nir_builder *b, nir_phi_instr *phi,
 {
    /* Return early, if either of the sources is already undef. */
    nir_foreach_phi_src(phi_src, phi) {
-      if (phi_src->src.ssa->parent_instr->type == nir_instr_type_undef)
+      if (nir_src_is_undef(phi_src->src))
        return false;
    }
 
@@ -132,7 +132,7 @@ src_is_quad_broadcast(nir_block *block, nir_src src, nir_intrinsic_instr **intri
 static bool
 src_is_alu(nir_op op, nir_src src, nir_src srcs[2])
 {
-   nir_alu_instr *alu = nir_src_as_alu_instr(src);
+   nir_alu_instr *alu = nir_src_as_alu(src);
    if (alu == NULL || alu->op != op)
       return false;
 
@@ -64,9 +64,9 @@ set_src_needs_helpers(nir_src *src, void *_data)
 {
    struct helper_state *hs = _data;
    if (!BITSET_TEST(hs->needs_helpers, src->ssa->index) &&
-       !instr_never_needs_helpers(src->ssa->parent_instr)) {
+       !instr_never_needs_helpers(nir_def_instr(src->ssa))) {
       BITSET_SET(hs->needs_helpers, src->ssa->index);
-      nir_instr_worklist_push_tail(&hs->worklist, src->ssa->parent_instr);
+      nir_instr_worklist_push_tail(&hs->worklist, nir_def_instr(src->ssa));
    }
    return true;
 }
@@ -446,7 +446,7 @@ parse_offset(nir_scalar base, uint64_t *offset)
       }
    }
 
-   if (base.def->parent_instr->type == nir_instr_type_intrinsic) {
+   if (nir_def_is_intrinsic(base.def)) {
       nir_intrinsic_instr *intrin = nir_def_as_intrinsic(base.def);
       if (intrin->intrinsic == nir_intrinsic_load_vulkan_descriptor)
          base.def = NULL;
@@ -911,7 +911,7 @@ hoist_base_addr(nir_instr *instr, nir_instr *to_hoist)
      /* For ALU, recursively hoist the sources. */
      nir_alu_instr *alu = nir_instr_as_alu(to_hoist);
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
-         hoist_base_addr(instr, alu->src[i].src.ssa->parent_instr);
+         hoist_base_addr(instr, nir_def_instr(alu->src[i].src.ssa));
    }
 
    nir_instr_move(nir_before_instr(instr), to_hoist);
@@ -970,7 +970,7 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
    /* update uses */
    if (first == low) {
       nir_def_rewrite_uses_after_instr(&low->intrin->def, low_def,
-                                       high_def->parent_instr);
+                                       nir_def_instr(high_def));
       nir_def_rewrite_uses(&high->intrin->def, high_def);
    } else {
      nir_def_rewrite_uses(&low->intrin->def, low_def);
@@ -989,7 +989,7 @@ vectorize_loads(nir_builder *b, struct vectorize_ctx *ctx,
 
    /* Hoist low base addr before first intrinsic. */
    nir_def *base = low->intrin->src[info->base_src].ssa;
-   hoist_base_addr(first->instr, base->parent_instr);
+   hoist_base_addr(first->instr, nir_def_instr(base));
    nir_src_rewrite(&first->intrin->src[info->base_src], base);
 
    if (nir_intrinsic_has_offset_shift(first->intrin)) {
@@ -1539,7 +1539,7 @@ try_vectorize_shared2(struct vectorize_ctx *ctx,
    /* Take low as base address. */
    nir_def *offset = low->intrin->src[first->is_store].ssa;
    if (first != low)
-      hoist_base_addr(&first->intrin->instr, offset->parent_instr);
+      hoist_base_addr(&first->intrin->instr, nir_def_instr(offset));
    nir_builder b = nir_builder_at(nir_after_instr(first->is_store ? second->instr : first->instr));
    offset = nir_iadd_imm(&b, offset, nir_intrinsic_base(low->intrin));
 
@@ -313,7 +313,7 @@ can_constant_fold(nir_scalar scalar, nir_block *loop_header)
       return true;
    }
 
-   if (scalar.def->parent_instr->type == nir_instr_type_phi) {
+   if (nir_def_instr_type(scalar.def) == nir_instr_type_phi) {
      /* If this is a phi from anything but the loop header, we cannot constant-fold. */
      if (nir_def_block(scalar.def) != loop_header)
        return false;
@@ -643,8 +643,8 @@ comparison_contains_instr(nir_scalar cond_scalar, nir_instr *instr)
    if (nir_is_terminator_condition_with_two_inputs(cond_scalar)) {
      nir_alu_instr *comparison =
        nir_def_as_alu(cond_scalar.def);
-      return comparison->src[0].src.ssa->parent_instr == instr ||
-             comparison->src[1].src.ssa->parent_instr == instr;
+      return nir_def_instr(comparison->src[0].src.ssa) == instr ||
+             nir_def_instr(comparison->src[1].src.ssa) == instr;
    }
 
    return false;
@@ -692,7 +692,7 @@ remove_out_of_bounds_induction_use(nir_shader *shader, nir_loop *loop,
          if (intrin->intrinsic == nir_intrinsic_load_deref) {
             nir_alu_instr *term_alu =
                nir_def_as_alu(term->nif->condition.ssa);
-            b.cursor = nir_before_instr(term->nif->condition.ssa->parent_instr);
+            b.cursor = nir_before_def(term->nif->condition.ssa);
 
            /* If the out of bounds load is used in the comparison of the
             * loop terminator replace the condition with true so that the
@@ -57,7 +57,7 @@ nir_opt_move_block(nir_block *block, nir_move_options options)
    bool progress = false;
    nir_instr *last_instr = nir_block_ends_in_jump(block) ? nir_block_last_instr(block) : NULL;
    const nir_if *iff = nir_block_get_following_if(block);
-   const nir_instr *if_cond_instr = iff ? iff->condition.ssa->parent_instr : NULL;
+   const nir_instr *if_cond_instr = iff ? nir_def_instr(iff->condition.ssa) : NULL;
 
    /* Walk the instructions backwards.
    * The instructions get indexed while iterating.
@@ -43,7 +43,7 @@ static bool
 add_src_to_worklist(nir_src *src, void *state_)
 {
    struct move_discard_state *state = state_;
-   nir_instr *instr = src->ssa->parent_instr;
+   nir_instr *instr = nir_def_instr(src->ssa);
    if (instr->pass_flags)
       return true;
 
@@ -33,7 +33,7 @@ static bool
 can_move_src_to_top(nir_src *src, void *_state)
 {
    opt_move_to_top_state *state = (opt_move_to_top_state *)_state;
-   nir_instr *instr = src->ssa->parent_instr;
+   nir_instr *instr = nir_def_instr(src->ssa);
 
    assert(util_bitcount(instr->pass_flags & (PASS_FLAG_CANT_MOVE |
                                              PASS_FLAG_CAN_MOVE)) <= 1);
@@ -111,7 +111,7 @@ can_move_src_to_top(nir_src *src, void *_state)
 static bool
 move_src(nir_src *src, void *_state)
 {
-   nir_instr *instr = src->ssa->parent_instr;
+   nir_instr *instr = nir_def_instr(src->ssa);
    nir_builder *b = (nir_builder *)_state;
 
    if (instr->pass_flags & PASS_FLAG_MOVED)
@@ -237,7 +237,7 @@ try_move_narrowing_dst(nir_builder *b, nir_phi_instr *phi)
    /* Push the conversion into the new phi sources: */
    nir_foreach_phi_src(src, phi) {
      /* insert new conversion instr in block of original phi src: */
-      b->cursor = nir_after_instr_and_phis(src->src.ssa->parent_instr);
+      b->cursor = nir_after_instr_and_phis(nir_def_instr(src->src.ssa));
      nir_def *old_src = src->src.ssa;
      nir_def *new_src = nir_build_alu(b, op, old_src, NULL, NULL, NULL);
 
@@ -309,7 +309,7 @@ find_widening_op(nir_phi_instr *phi, unsigned *bit_size)
    *bit_size = 0;
 
    nir_foreach_phi_src(src, phi) {
-      nir_instr *instr = src->src.ssa->parent_instr;
+      nir_instr *instr = nir_def_instr(src->src.ssa);
      if (instr->type == nir_instr_type_load_const) {
        has_load_const = true;
        continue;
@@ -345,7 +345,7 @@ find_widening_op(nir_phi_instr *phi, unsigned *bit_size)
    * sequence to make the rest of the transformation possible:
    */
    nir_foreach_phi_src(src, phi) {
-      nir_instr *instr = src->src.ssa->parent_instr;
+      nir_instr *instr = nir_def_instr(src->src.ssa);
      if (instr->type != nir_instr_type_load_const)
        continue;
 
@@ -379,7 +379,7 @@ try_move_widening_src(nir_builder *b, nir_phi_instr *phi)
 
    /* Remove the widening conversions from the phi sources: */
    nir_foreach_phi_src(src, phi) {
-      nir_instr *instr = src->src.ssa->parent_instr;
+      nir_instr *instr = nir_def_instr(src->src.ssa);
      nir_def *new_src;
 
      b->cursor = nir_after_instr(instr);
@@ -30,7 +30,7 @@ enum bool_type {
 static inline uint8_t
 src_pass_flags(nir_src *src)
 {
-   return src->ssa->parent_instr->pass_flags;
+   return nir_def_instr(src->ssa)->pass_flags;
 }
 
 static inline nir_block *
@@ -189,7 +189,7 @@ phi_to_bool(nir_builder *b, nir_phi_instr *phi, void *unused)
 
    nir_foreach_use_safe(src, &phi->def) {
      if (nir_src_parent_instr(src) == &phi->instr ||
-          nir_src_parent_instr(src) == res->parent_instr)
+          nir_src_parent_instr(src) == nir_def_instr(res))
        continue;
      nir_src_rewrite(src, res);
    }
@@ -515,7 +515,7 @@ replace_for_block(nir_builder *b, opt_preamble_ctx *ctx,
          nir_before_block_after_phis(nir_cursor_current_block(b->cursor));
 
       nir_def *repl = nir_if_phi(b, then_def, else_def);
-      clone = repl->parent_instr;
+      clone = nir_def_instr(repl);
 
       _mesa_hash_table_insert(remap_table, &phi->def, repl);
    } else {
@@ -42,13 +42,13 @@ mark_query_read(struct set *queries,
    nir_def *rq_def = intrin->src[0].ssa;
 
    nir_variable *query;
-   if (rq_def->parent_instr->type == nir_instr_type_intrinsic) {
+   if (nir_def_is_intrinsic(rq_def)) {
       nir_intrinsic_instr *load_deref =
         nir_def_as_intrinsic(rq_def);
      assert(load_deref->intrinsic == nir_intrinsic_load_deref);
 
      query = nir_intrinsic_get_var(load_deref, 0);
-   } else if (rq_def->parent_instr->type == nir_instr_type_deref) {
+   } else if (nir_def_is_deref(rq_def)) {
      query = nir_deref_instr_get_variable(nir_def_as_deref(rq_def));
    } else {
      return;
@@ -234,7 +234,7 @@ build_chain(struct chain *c, nir_scalar def, unsigned reserved_count)
           c->length + reserved_plus_remaining + 2 <= MAX_CHAIN_LENGTH) {
 
          /* Any interior nodes cannot be the root */
-         src.def->parent_instr->pass_flags = PASS_FLAG_INTERIOR;
+         nir_def_instr(src.def)->pass_flags = PASS_FLAG_INTERIOR;
 
         /* Recurse, reserving space for the next sources */
         build_chain(c, src, reserved_count + remaining);
@@ -34,18 +34,21 @@ phi_srcs_equal(nir_def *a, nir_def *b)
    if (a == b)
       return true;
 
-   if (a->parent_instr->type != b->parent_instr->type)
+   nir_instr *a_instr = nir_def_instr(a);
+   nir_instr *b_instr = nir_def_instr(b);
+
+   if (a_instr->type != b_instr->type)
       return false;
 
-   if (a->parent_instr->type != nir_instr_type_alu &&
-       a->parent_instr->type != nir_instr_type_load_const)
+   if (a_instr->type != nir_instr_type_alu &&
+       a_instr->type != nir_instr_type_load_const)
      return false;
 
-   if (!nir_instrs_equal(a->parent_instr, b->parent_instr))
+   if (!nir_instrs_equal(a_instr, b_instr))
      return false;
 
    /* nir_instrs_equal ignores exact/fast_math */
-   if (a->parent_instr->type == nir_instr_type_alu) {
+   if (a_instr->type == nir_instr_type_alu) {
      nir_alu_instr *a_alu = nir_def_as_alu(a);
      nir_alu_instr *b_alu = nir_def_as_alu(b);
      if (a_alu->exact != b_alu->exact || a_alu->fp_fast_math != b_alu->fp_fast_math)
@@ -65,9 +68,9 @@ src_dominates_block(nir_src *src, void *state)
 static bool
 can_rematerialize_phi_src(nir_block *imm_dom, nir_def *def)
 {
-   if (def->parent_instr->type == nir_instr_type_alu) {
-      return nir_foreach_src(def->parent_instr, src_dominates_block, imm_dom);
-   } else if (def->parent_instr->type == nir_instr_type_load_const) {
+   if (nir_def_is_alu(def)) {
+      return nir_foreach_src(nir_def_instr(def), src_dominates_block, imm_dom);
+   } else if (nir_def_is_const(def)) {
      return true;
    }
    return false;
@@ -136,7 +139,7 @@ remove_phis_instr(nir_builder *b, nir_phi_instr *phi, void *unused)
      def = nir_undef(b, phi->def.num_components, phi->def.bit_size);
    } else if (needs_remat) {
      b->cursor = nir_after_block_before_jump(block->imm_dom);
-      nir_instr *remat = nir_instr_clone(b->shader, def->parent_instr);
+      nir_instr *remat = nir_instr_clone(b->shader, nir_def_instr(def));
      nir_builder_instr_insert(b, remat);
      def = nir_instr_def(remat);
    }
@@ -94,7 +94,7 @@ shrink_dest_to_read_mask(nir_def *def, bool shrink_start)
    nir_intrinsic_instr *intr = NULL;
    nir_src *offset_src = NULL;
 
-   if (def->parent_instr->type == nir_instr_type_intrinsic) {
+   if (nir_def_is_intrinsic(def)) {
       intr = nir_def_as_intrinsic(def);
       offset_src = nir_get_io_offset_src(intr);
    }
@@ -555,7 +555,7 @@ opt_shrink_vectors_phi(nir_builder *b, nir_phi_instr *instr)
    * used only in the phi, the movs will disappear later after copy propagate.
    */
    nir_foreach_phi_src(phi_src, instr) {
-      b->cursor = nir_after_instr_and_phis(phi_src->src.ssa->parent_instr);
+      b->cursor = nir_after_instr_and_phis(nir_def_instr(phi_src->src.ssa));
 
      nir_alu_src alu_src = {
        .src = nir_src_for_ssa(phi_src->src.ssa)
@@ -47,7 +47,7 @@ is_constant_like(nir_src *src)
       return true;
 
    /* Otherwise, look for constant-like intrinsics */
-   nir_instr *parent = src->ssa->parent_instr;
+   nir_instr *parent = nir_def_instr(src->ssa);
    if (parent->type != nir_instr_type_intrinsic)
       return false;
 
@@ -112,7 +112,7 @@ opt_undef_vecN(nir_builder *b, nir_alu_instr *alu)
 static uint32_t
 nir_get_undef_mask(nir_def *def)
 {
-   nir_instr *instr = def->parent_instr;
+   nir_instr *instr = nir_def_instr(def);
 
    if (instr->type == nir_instr_type_undef)
      return BITSET_MASK(def->num_components);
@@ -132,7 +132,7 @@ match_invocation_comparison(nir_scalar scalar)
         return get_dim(nir_scalar_chase_alu_src(scalar, 1));
      if (!nir_scalar_chase_alu_src(scalar, 1).def->divergent)
        return get_dim(nir_scalar_chase_alu_src(scalar, 0));
-   } else if (scalar.def->parent_instr->type == nir_instr_type_intrinsic) {
+   } else if (nir_def_is_intrinsic(scalar.def)) {
      nir_intrinsic_instr *intrin = nir_def_as_intrinsic(scalar.def);
      if (intrin->intrinsic == nir_intrinsic_elect) {
        return 0x8;
@@ -74,7 +74,7 @@ opt_uub_iand(nir_builder *b, nir_alu_instr *alu, opt_uub_state *state)
    if (uub(state, src) > low_mask)
       return false;
 
-   b->cursor = nir_after_instr(src.def->parent_instr);
+   b->cursor = nir_after_def(src.def);
    nir_def_replace(&alu->def, nir_mov_scalar(b, src));
    return true;
 }
@@ -283,7 +283,7 @@ opt_uub(nir_builder *b, nir_alu_instr *alu, void *data)
 
    /* If the upper bound is zero, zero is the only possible value. */
    if (uub(state, nir_get_scalar(&alu->def, 0)) == 0) {
-      b->cursor = nir_after_instr(alu->def.parent_instr);
+      b->cursor = nir_after_def(&alu->def);
      nir_def_replace(&alu->def, nir_imm_zero(b, 1, alu->def.bit_size));
      return true;
    }
@@ -1466,7 +1466,7 @@ gather_inputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_d
    if (linkage->consumer_stage == MESA_SHADER_TESS_CTRL &&
        intr->intrinsic == nir_intrinsic_load_per_vertex_input) {
       nir_src *vertex_index_src = nir_get_io_arrayed_index_src(intr);
-      nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
+      nir_instr *vertex_index_instr = nir_def_instr(vertex_index_src->ssa);
 
      if (!is_sysval(vertex_index_instr, SYSTEM_VALUE_INVOCATION_ID)) {
        if (intr->def.bit_size == 32)
@@ -1506,8 +1506,7 @@ gather_outputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_
      /* nir_lower_io_to_scalar is required before this */
      assert(intr->src[0].ssa->num_components == 1);
      /* nit_opt_undef is required before this. */
-      assert(intr->src[0].ssa->parent_instr->type !=
-             nir_instr_type_undef);
+      assert(!nir_src_is_undef(intr->src[0]));
    } else {
      /* nir_lower_io_to_scalar is required before this */
      assert(intr->def.num_components == 1);
@@ -1648,7 +1647,7 @@ gather_outputs(struct nir_builder *builder, nir_intrinsic_instr *intr, void *cb_
    if (linkage->producer_stage == MESA_SHADER_MESH &&
        intr->intrinsic == nir_intrinsic_store_per_vertex_output) {
       nir_src *vertex_index_src = nir_get_io_arrayed_index_src(intr);
-      nir_instr *vertex_index_instr = vertex_index_src->ssa->parent_instr;
+      nir_instr *vertex_index_instr = nir_def_instr(vertex_index_src->ssa);
 
      if (!is_sysval(vertex_index_instr, SYSTEM_VALUE_INVOCATION_ID)) {
        if (value.def->bit_size == 32)
@@ -2171,7 +2170,7 @@ find_per_vertex_load_for_tes_interp(nir_instr *instr)
      unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
 
      for (unsigned i = 0; i < num_srcs; i++) {
-         nir_instr *src = alu->src[i].src.ssa->parent_instr;
+         nir_instr *src = nir_def_instr(alu->src[i].src.ssa);
        nir_intrinsic_instr *intr = find_per_vertex_load_for_tes_interp(src);
 
        if (intr)
@@ -2221,13 +2220,13 @@ static nir_def *
 clone_ssa_impl(struct linkage_info *linkage, nir_builder *b, nir_def *ssa)
 {
    struct hash_entry *entry = _mesa_hash_table_search(linkage->clones_ht,
-                                                      ssa->parent_instr);
+                                                      nir_def_instr(ssa));
    if (entry)
       return entry->data;
 
    nir_def *clone = NULL;
 
-   switch (ssa->parent_instr->type) {
+   switch (nir_def_instr_type(ssa)) {
    case nir_instr_type_load_const:
      clone = nir_build_imm(b, ssa->num_components, ssa->bit_size,
                            nir_def_as_load_const(ssa)->value);
@@ -2344,7 +2343,7 @@ clone_ssa_impl(struct linkage_info *linkage, nir_builder *b, nir_def *ssa)
      UNREACHABLE("unexpected instruction type");
    }
 
-   _mesa_hash_table_insert(linkage->clones_ht, ssa->parent_instr, clone);
+   _mesa_hash_table_insert(linkage->clones_ht, nir_def_instr(ssa), clone);
    return clone;
 }
 
@@ -2388,7 +2387,7 @@ is_uniform_expression(nir_instr *instr, struct is_uniform_expr_state *state);
 static bool
 src_is_uniform_expression(nir_src *src, void *data)
 {
-   return is_uniform_expression(src->ssa->parent_instr,
+   return is_uniform_expression(nir_def_instr(src->ssa),
                                 (struct is_uniform_expr_state *)data);
 }
 
@@ -2466,7 +2465,7 @@ propagate_uniform_expressions(struct linkage_info *linkage,
      */
     nir_shader_clear_pass_flags(linkage->producer_builder.shader);
 
-      if (!is_uniform_expression(slot->producer.value.def->parent_instr, &state))
+      if (!is_uniform_expression(nir_def_instr(slot->producer.value.def), &state))
        continue;
 
     if (state.cost > linkage->max_varying_expression_cost)
@@ -2613,7 +2612,7 @@ get_input_qualifier(struct linkage_info *linkage, unsigned i)
 
    assert(load->intrinsic == nir_intrinsic_load_interpolated_input);
 
-   nir_instr *baryc_instr = load->src[0].ssa->parent_instr;
+   nir_instr *baryc_instr = nir_def_instr(load->src[0].ssa);
    nir_intrinsic_instr *baryc = baryc_instr->type == nir_instr_type_intrinsic ? nir_instr_as_intrinsic(baryc_instr) : NULL;
 
    if (linkage->has_flexible_interp) {
@@ -2872,7 +2871,7 @@ gather_fmul_tess_coord(nir_intrinsic_instr *load, nir_alu_instr *fmul,
                        unsigned *tess_coord_used, nir_def **load_tess_coord)
 {
    unsigned other_src = fmul->src[0].src.ssa == &load->def;
-   nir_instr *other_instr = fmul->src[other_src].src.ssa->parent_instr;
+   nir_instr *other_instr = nir_def_instr(fmul->src[other_src].src.ssa);
 
    assert(fmul->src[!other_src].swizzle[0] == 0);
 
@@ -3110,7 +3109,7 @@ find_open_coded_tes_input_interpolation(struct linkage_info *linkage)
    (!((instr)->pass_flags & (FLAG_MOVABLE | FLAG_UNMOVABLE)))
 
 #define GET_SRC_INTERP(alu, i) \
-   ((alu)->src[i].src.ssa->parent_instr->pass_flags & FLAG_INTERP_MASK)
+   (nir_def_instr((alu)->src[i].src.ssa)->pass_flags & FLAG_INTERP_MASK)
 
 static bool
 can_move_alu_across_interp(struct linkage_info *linkage, nir_alu_instr *alu)
@@ -3206,7 +3205,7 @@ update_movable_flags(struct linkage_info *linkage, nir_instr *instr)
         alu_interp = FLAG_INTERP_CONVERGENT;
 
      for (unsigned i = 0; i < num_srcs; i++) {
-         nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;
+         nir_instr *src_instr = nir_def_instr(alu->src[i].src.ssa);
 
        if (NEED_UPDATE_MOVABLE_FLAGS(src_instr))
          update_movable_flags(linkage, src_instr);
@@ -3262,7 +3261,7 @@ update_movable_flags(struct linkage_info *linkage, nir_instr *instr)
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
 
      if (intr->intrinsic == nir_intrinsic_load_deref) {
-        nir_instr *deref = intr->src[0].ssa->parent_instr;
+        nir_instr *deref = nir_def_instr(intr->src[0].ssa);
 
        if (NEED_UPDATE_MOVABLE_FLAGS(deref))
          update_movable_flags(linkage, deref);
@@ -3305,7 +3304,7 @@ update_movable_flags(struct linkage_info *linkage, nir_instr *instr)
      return;
 
    case nir_deref_type_array: {
-      nir_instr *index = deref->arr.index.ssa->parent_instr;
+      nir_instr *index = nir_def_instr(deref->arr.index.ssa);
 
      if (NEED_UPDATE_MOVABLE_FLAGS(index))
        update_movable_flags(linkage, index);
@@ -3370,7 +3369,7 @@ gather_used_input_loads(nir_instr *instr,
      unsigned num_srcs = nir_op_infos[alu->op].num_inputs;
 
      for (unsigned i = 0; i < num_srcs; i++) {
-         gather_used_input_loads(alu->src[i].src.ssa->parent_instr,
+         gather_used_input_loads(nir_def_instr(alu->src[i].src.ssa),
                                 loads, num_loads);
      }
      return;
@@ -3384,7 +3383,7 @@ gather_used_input_loads(nir_instr *instr,
        return;
 
      case nir_intrinsic_load_deref:
-        gather_used_input_loads(intr->src[0].ssa->parent_instr,
+        gather_used_input_loads(nir_def_instr(intr->src[0].ssa),
                                loads, num_loads);
        return;
 
@@ -3417,7 +3416,7 @@ gather_used_input_loads(nir_instr *instr,
      return;
 
    case nir_deref_type_array:
-      gather_used_input_loads(deref->arr.index.ssa->parent_instr,
+      gather_used_input_loads(nir_def_instr(deref->arr.index.ssa),
                              loads, num_loads);
      return;
 
@@ -3509,7 +3508,7 @@ try_move_postdominator(struct linkage_info *linkage,
    unsigned slot_index = final_slot;
    struct scalar_slot *slot = &linkage->slot[slot_index];
    nir_builder *b = &linkage->consumer_builder;
-   b->cursor = nir_after_instr(load_def->parent_instr);
+   b->cursor = nir_after_instr(nir_def_instr(load_def));
    nir_def *postdom_def = nir_instr_def(postdom);
    unsigned alu_interp = postdom->pass_flags & FLAG_INTERP_MASK;
    nir_def *new_input, *new_tes_loads[3];
@@ -3905,18 +3904,18 @@ backward_inter_shader_code_motion(struct linkage_info *linkage,
      if (linkage->producer_stage == MESA_SHADER_VERTEX) {
        /* VS -> TES has no constraints on VS stores. */
        load_def = &slot->consumer.tes_interp_load->def;
-        load_def->parent_instr->pass_flags |= FLAG_ALU_IS_TES_INTERP_LOAD |
-                                              slot->consumer.tes_interp_mode;
+        nir_def_instr(load_def)->pass_flags |= FLAG_ALU_IS_TES_INTERP_LOAD |
+                                               slot->consumer.tes_interp_mode;
      } else {
        assert(linkage->producer_stage == MESA_SHADER_TESS_CTRL);
        assert(store->intrinsic == nir_intrinsic_store_per_vertex_output);
 
        /* The vertex index of the store must InvocationID. */
-        if (is_sysval(store->src[1].ssa->parent_instr,
+        if (is_sysval(nir_def_instr(store->src[1].ssa),
                      SYSTEM_VALUE_INVOCATION_ID)) {
          load_def = &slot->consumer.tes_interp_load->def;
-          load_def->parent_instr->pass_flags |= FLAG_ALU_IS_TES_INTERP_LOAD |
-                                                slot->consumer.tes_interp_mode;
+          nir_def_instr(load_def)->pass_flags |= FLAG_ALU_IS_TES_INTERP_LOAD |
+                                                 slot->consumer.tes_interp_mode;
        } else {
          continue;
        }
@@ -3945,7 +3944,7 @@ backward_inter_shader_code_motion(struct linkage_info *linkage,
      switch (load->intrinsic) {
      case nir_intrinsic_load_interpolated_input: {
        assert(linkage->consumer_stage == MESA_SHADER_FRAGMENT);
-        nir_instr *baryc_instr = load->src[0].ssa->parent_instr;
+        nir_instr *baryc_instr = nir_def_instr(load->src[0].ssa);
 
        /* This is either lowered barycentric_at_offset/at_sample or user
         * barycentrics. Treat it like barycentric_at_offset.
@@ -4011,7 +4010,7 @@ backward_inter_shader_code_motion(struct linkage_info *linkage,
        }
      }
 
-      load_def->parent_instr->pass_flags |= FLAG_MOVABLE;
+      nir_def_instr(load_def)->pass_flags |= FLAG_MOVABLE;
 
      /* Disallow transform feedback. The load is "movable" for the purpose of
       * finding a movable post-dominator, we just can't rewrite the store
@@ -4053,7 +4052,7 @@ backward_inter_shader_code_motion(struct linkage_info *linkage,
 
    for (unsigned i = 0; i < num_movable_loads; i++) {
      nir_def *load_def = movable_loads[i].def;
-      nir_instr *iter = load_def->parent_instr;
+      nir_instr *iter = nir_def_instr(load_def);
      nir_instr *movable_postdom = NULL;
 
      /* Find the farthest post-dominator that is movable. */
@@ -4087,10 +4086,8 @@ backward_inter_shader_code_motion(struct linkage_info *linkage,
            alu->src[0].src.ssa == load_def) ||
           (nir_op_infos[alu->op].num_inputs == 2 &&
            ((alu->src[0].src.ssa == load_def &&
-             alu->src[1].src.ssa->parent_instr->type ==
-             nir_instr_type_load_const) ||
-            (alu->src[0].src.ssa->parent_instr->type ==
-             nir_instr_type_load_const &&
+             nir_src_is_const(alu->src[1].src)) ||
+            (nir_src_is_const(alu->src[0].src) &&
             alu->src[1].src.ssa == load_def)))))
        continue;
 
@@ -81,7 +81,7 @@ hash_phi_src(uint32_t hash, const nir_phi_instr *phi, const nir_phi_src *src,
    } else if (src->pred->index < phi->instr.block->index) {
       hash = HASH(hash, chased.def);
    } else {
-      nir_instr *chased_instr = chased.def->parent_instr;
+      nir_instr *chased_instr = nir_def_instr(chased.def);
      hash = HASH(hash, chased_instr->type);
 
      if (chased_instr->type == nir_instr_type_alu)
@@ -178,8 +178,8 @@ phi_srcs_equal(nir_block *block, const nir_phi_src *src1,
    * (forward-edge) sources are vectorized, chances are the back-edge will
    * also be vectorized.
    */
-   nir_instr *chased_instr1 = chased1.def->parent_instr;
-   nir_instr *chased_instr2 = chased2.def->parent_instr;
+   nir_instr *chased_instr1 = nir_def_instr(chased1.def);
+   nir_instr *chased_instr2 = nir_def_instr(chased2.def);
 
    if (chased_instr1->type != chased_instr2->type)
      return false;
@@ -346,8 +346,8 @@ rewrite_uses(nir_builder *b, struct set *instr_set, nir_def *def1,
      nir_def_rewrite_uses(def2, new_def2);
    }
 
-   nir_instr_remove(def1->parent_instr);
-   nir_instr_remove(def2->parent_instr);
+   nir_instr_remove(nir_def_instr(def1));
+   nir_instr_remove(nir_def_instr(def2));
 }
 
 static nir_instr *
@@ -412,7 +412,7 @@ instr_try_combine_phi(struct set *instr_set, nir_phi_instr *phi1, nir_phi_instr
        swizzle[i] = new_srcs[i].comp;
      }
 
-      b.cursor = nir_after_instr_and_phis(def->parent_instr);
+      b.cursor = nir_after_instr_and_phis(nir_def_instr(def));
      new_src = nir_swizzle(&b, def, swizzle, total_components);
    } else {
      /* This is a loop back-edge so we haven't vectorized the sources yet.
@@ -148,8 +148,8 @@ print_def(nir_def *def, print_state *state)
               def->bit_size, sizes[def->num_components],
               padding, "", state->def_prefix, def->index);
 
-   if (def->parent_instr->has_debug_info) {
-      nir_instr_debug_info *debug_info = nir_instr_get_debug_info(def->parent_instr);
+   if (nir_def_instr(def)->has_debug_info) {
+      nir_instr_debug_info *debug_info = nir_instr_get_debug_info(nir_def_instr(def));
      if (debug_info->variable_name)
        fprintf(fp, ".%s", debug_info->variable_name);
    }
@@ -407,7 +407,7 @@ print_src(const nir_src *src, print_state *state, nir_alu_type src_type)
 {
    FILE *fp = state->fp;
    fprintf(fp, "%s%u", state->def_prefix, src->ssa->index);
-   nir_instr *instr = src->ssa->parent_instr;
+   nir_instr *instr = nir_def_instr(src->ssa);
 
    if (instr->has_debug_info) {
      nir_instr_debug_info *debug_info = nir_instr_get_debug_info(instr);
@@ -492,7 +492,7 @@ get_fp_key(struct analysis_query *q)
    struct fp_query *fp_q = (struct fp_query *)q;
    const nir_src *src = &fp_q->instr->src[fp_q->src].src;
 
-   if (src->ssa->parent_instr->type != nir_instr_type_alu)
+   if (!nir_def_is_alu(src->ssa))
       return 0;
 
    uintptr_t type_encoding;
@@ -589,7 +589,7 @@ process_fp_query(struct analysis_state *state, struct analysis_query *aq, uint32
      return;
    }
 
-   if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu) {
+   if (!nir_src_is_alu(instr->src[src].src)) {
      *result = pack_data((struct ssa_result_range){ unknown, false, false, false });
      return;
    }
@@ -1527,7 +1527,7 @@ search_phi_bcsel(nir_scalar scalar, nir_scalar *buf, unsigned buf_size, struct s
      return 0;
    _mesa_set_add(visited, scalar.def);
 
-   if (scalar.def->parent_instr->type == nir_instr_type_phi) {
+   if (nir_def_instr_type(scalar.def) == nir_instr_type_phi) {
      nir_phi_instr *phi = nir_def_as_phi(scalar.def);
      unsigned num_sources_left = exec_list_length(&phi->srcs);
      if (buf_size >= num_sources_left) {
@@ -2158,7 +2158,7 @@ process_uub_query(struct analysis_state *state, struct analysis_query *aq, uint3
      get_intrinsic_uub(state, q, result, src);
    else if (nir_scalar_is_alu(q.scalar))
      get_alu_uub(state, q, result, src);
-   else if (q.scalar.def->parent_instr->type == nir_instr_type_phi)
+   else if (nir_def_instr_type(q.scalar.def) == nir_instr_type_phi)
      get_phi_uub(state, q, result, src);
 }
 
@@ -113,7 +113,7 @@ repair_ssa_def(nir_def *def, void *void_state)
       * deref information.
       */
      if (!nir_src_is_if(src) &&
-          def->parent_instr->type == nir_instr_type_deref &&
+          nir_def_is_deref(def) &&
          nir_src_parent_instr(src)->type == nir_instr_type_deref &&
          nir_instr_as_deref(nir_src_parent_instr(src))->deref_type != nir_deref_type_cast) {
        nir_deref_instr *cast =
@@ -274,7 +274,7 @@ nir_schedule_ssa_deps(nir_def *def, void *in_state)
 {
    nir_deps_state *state = in_state;
    struct hash_table *instr_map = state->scoreboard->instr_map;
-   nir_schedule_node *def_n = nir_schedule_get_node(instr_map, def->parent_instr);
+   nir_schedule_node *def_n = nir_schedule_get_node(instr_map, nir_def_instr(def));
 
    nir_foreach_use(src, def) {
       nir_schedule_node *use_n = nir_schedule_get_node(instr_map,
@@ -908,7 +908,7 @@ nir_schedule_mark_src_scheduled(nir_src *src, void *state)
    * they're often folded as immediates into backend instructions and have
    * many unrelated instructions all referencing the same value (0).
    */
-   if (src->ssa->parent_instr->type != nir_instr_type_load_const) {
+   if (!nir_def_is_const(src->ssa)) {
      nir_foreach_use(other_src, src->ssa) {
        if (nir_src_parent_instr(other_src) == nir_src_parent_instr(src))
          continue;
@@ -943,7 +943,7 @@ nir_schedule_mark_def_scheduled(nir_def *def, void *state)
 {
    nir_schedule_scoreboard *scoreboard = state;
 
-   nir_schedule_mark_use(scoreboard, def, def->parent_instr,
+   nir_schedule_mark_use(scoreboard, def, nir_def_instr(def),
                          nir_schedule_def_pressure(def));
 
    return true;
@@ -1185,8 +1185,8 @@ nir_schedule_ssa_def_init_scoreboard(nir_def *def, void *state)
    /* We don't consider decl_reg to be a use to avoid extending register live
    * ranges any further than needed.
    */
-   if (!is_decl_reg(def->parent_instr))
-      _mesa_set_add(def_uses, def->parent_instr);
+   if (!is_decl_reg(nir_def_instr(def)))
+      _mesa_set_add(def_uses, nir_def_instr(def));
 
    nir_foreach_use(src, def) {
      _mesa_set_add(def_uses, nir_src_parent_instr(src));
@@ -82,7 +82,7 @@ src_is_type(nir_src src, nir_alu_type type)
 {
    assert(type != nir_type_invalid);
 
-   if (src.ssa->parent_instr->type == nir_instr_type_alu) {
+   if (nir_src_is_alu(src)) {
       nir_alu_instr *src_alu = nir_def_as_alu(src.ssa);
       nir_alu_type output_type = nir_op_infos[src_alu->op].output_type;
 
@@ -101,7 +101,7 @@ src_is_type(nir_src src, nir_alu_type type)
      }
 
      return nir_alu_type_get_base_type(output_type) == type;
-   } else if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
+   } else if (nir_src_is_intrinsic(src)) {
      nir_intrinsic_instr *intr = nir_def_as_intrinsic(src.ssa);
 
      if (type == nir_type_bool) {
@@ -263,7 +263,7 @@ match_value(const nir_algebraic_table *table,
 
    switch (value->type) {
    case nir_search_value_expression:
-      if (instr->src[src].src.ssa->parent_instr->type != nir_instr_type_alu)
+      if (!nir_src_is_alu(instr->src[src].src))
        return false;
 
      return match_expression(table, nir_search_value_as_expression(value),
@@ -275,7 +275,7 @@ match_value(const nir_algebraic_table *table,
      assert(var->variable < NIR_SEARCH_MAX_VARIABLES);
 
      if (var->is_constant &&
-          instr->src[src].src.ssa->parent_instr->type != nir_instr_type_load_const)
+          !nir_src_is_const(instr->src[src].src))
        return false;
 
      if (var->cond_index != -1 && !table->variable_cond[var->cond_index](state->state, instr,
@@ -508,7 +508,7 @@ construct_value(nir_builder *build,
         assert(def->index ==
                util_dynarray_num_elements(state->states, uint16_t));
         util_dynarray_append_typed(state->states, uint16_t, 0);
-         nir_algebraic_automaton(def->parent_instr, state->states, state->pass_op_table);
+         nir_algebraic_automaton(nir_def_instr(def), state->states, state->pass_op_table);
 
      nir_alu_src val;
      val.src = nir_src_for_ssa(def);
@@ -560,7 +560,7 @@ construct_value(nir_builder *build,
         assert(cval->index ==
                util_dynarray_num_elements(state->states, uint16_t));
         util_dynarray_append_typed(state->states, uint16_t, 0);
-         nir_algebraic_automaton(cval->parent_instr, state->states,
+         nir_algebraic_automaton(nir_def_instr(cval), state->states,
                                 state->pass_op_table);
 
      nir_alu_src val;
@@ -758,7 +758,7 @@ nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
    * keeping algebraic optimizations and code motion optimizations separate
    * seems safest.
    */
-   nir_alu_instr *const src_instr = nir_src_as_alu_instr(instr->src[0].src);
+   nir_alu_instr *const src_instr = nir_src_as_alu(instr->src[0].src);
    if (src_instr != NULL &&
       (instr->op == nir_op_fneg || instr->op == nir_op_fabs ||
        instr->op == nir_op_ineg || instr->op == nir_op_iabs ||
@@ -791,14 +791,14 @@ nir_replace_instr(nir_builder *build, nir_alu_instr *instr,
      nir_mov_alu(build, val, instr->def.num_components);
    if (ssa_val->index == util_dynarray_num_elements(states, uint16_t)) {
      util_dynarray_append_typed(states, uint16_t, 0);
-      nir_algebraic_automaton(ssa_val->parent_instr, states, table->pass_op_table);
+      nir_algebraic_automaton(nir_def_instr(ssa_val), states, table->pass_op_table);
    }
 
    /* Rewrite the uses of the old SSA value to the new one, and recurse
    * through the uses updating the automaton's state.
    */
    nir_def_rewrite_uses(&instr->def, ssa_val);
-   nir_algebraic_update_automaton(ssa_val->parent_instr, algebraic_worklist,
+   nir_algebraic_update_automaton(nir_def_instr(ssa_val), algebraic_worklist,
                                   states, table->pass_op_table);
 
    /* Nothing uses the instr any more, so drop it out of the program. Note
@@ -444,7 +444,7 @@ is_not_fmul(const nir_search_state *state, const nir_alu_instr *instr, unsigned
             UNUSED unsigned num_components, UNUSED const uint8_t *swizzle)
 {
    nir_alu_instr *src_alu =
-      nir_src_as_alu_instr(instr->src[src].src);
+      nir_src_as_alu(instr->src[src].src);
 
    if (src_alu == NULL)
       return true;
@@ -460,7 +460,7 @@ is_fmul(const nir_search_state *state, const nir_alu_instr *instr, unsigned src,
        UNUSED unsigned num_components, UNUSED const uint8_t *swizzle)
 {
    nir_alu_instr *src_alu =
-      nir_src_as_alu_instr(instr->src[src].src);
+      nir_src_as_alu(instr->src[src].src);
 
    if (src_alu == NULL)
      return false;
@@ -476,13 +476,13 @@ is_fsign(const nir_alu_instr *instr, unsigned src,
          UNUSED unsigned num_components, UNUSED const uint8_t *swizzle)
 {
    nir_alu_instr *src_alu =
-      nir_src_as_alu_instr(instr->src[src].src);
+      nir_src_as_alu(instr->src[src].src);
 
    if (src_alu == NULL)
       return false;
 
    if (src_alu->op == nir_op_fneg)
-      src_alu = nir_src_as_alu_instr(src_alu->src[0].src);
+      src_alu = nir_src_as_alu(src_alu->src[0].src);
 
    return src_alu != NULL && src_alu->op == nir_op_fsign;
 }
@@ -1258,7 +1258,7 @@ get_non_self_referential_store_comps(nir_intrinsic_instr *store)
 {
    nir_component_mask_t comps = nir_intrinsic_write_mask(store);
 
-   nir_instr *src_instr = store->src[1].ssa->parent_instr;
+   nir_instr *src_instr = nir_def_instr(store->src[1].ssa);
    if (src_instr->type != nir_instr_type_alu)
       return comps;
 
@@ -92,7 +92,7 @@ is_use_inside_loop(nir_src *use, nir_loop *loop)
 static bool
 is_defined_before_loop(nir_def *def, nir_loop *loop)
 {
-   nir_instr *instr = def->parent_instr;
+   nir_instr *instr = nir_def_instr(def);
    nir_block *block_before_loop =
       nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
 
@@ -114,10 +114,12 @@ def_is_invariant(nir_def *def, nir_loop *loop)
    if (is_defined_before_loop(def, loop))
       return invariant;
 
-   if (def->parent_instr->pass_flags == undefined)
-      def->parent_instr->pass_flags = instr_is_invariant(def->parent_instr, loop);
+   nir_instr *instr = nir_def_instr(def);
 
-   return def->parent_instr->pass_flags == invariant;
+   if (instr->pass_flags == undefined)
+      instr->pass_flags = instr_is_invariant(instr, loop);
+
+   return instr->pass_flags == invariant;
 }
 
 static bool
@ -196,8 +198,8 @@ convert_loop_exit_for_ssa(nir_def *def, void *void_state)
|
|||
/* Don't create LCSSA-Phis for loop-invariant variables */
|
||||
if (state->skip_invariants &&
|
||||
(def->bit_size != 1 || state->skip_bool_invariants)) {
|
||||
assert(def->parent_instr->pass_flags != undefined);
|
||||
if (def->parent_instr->pass_flags == invariant)
|
||||
assert(nir_def_instr(def)->pass_flags != undefined);
|
||||
if (nir_def_instr(def)->pass_flags == invariant)
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -223,7 +225,7 @@ convert_loop_exit_for_ssa(nir_def *def, void *void_state)
|
|||
if (all_uses_inside_loop)
|
||||
return true;
|
||||
|
||||
if (def->parent_instr->type == nir_instr_type_deref) {
|
||||
if (nir_def_is_deref(def)) {
|
||||
nir_rematerialize_deref_in_use_blocks(nir_def_as_deref(def));
|
||||
return true;
|
||||
}
|
||||
|
|
|
|||
|
|
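The LCSSA hunk pairs a checking predicate with an asserting cast. A minimal sketch of the deref flavor, assuming nir_def_as_deref asserts the instruction type while the _or_null variant checks first; illustrative only, see nir.h for the real definitions.

static inline bool
nir_def_is_deref(nir_def *def)
{
   return nir_def_instr(def)->type == nir_instr_type_deref;
}

static inline nir_deref_instr *
nir_def_as_deref_or_null(nir_def *def)
{
   return nir_def_is_deref(def) ? nir_def_as_deref(def) : NULL;
}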
@ -116,7 +116,7 @@ trivialize_src(nir_src *src, void *state_)
{
struct trivialize_src_state *state = state_;

nir_instr *parent = src->ssa->parent_instr;
nir_instr *parent = nir_def_instr(src->ssa);
if (parent->type != nir_instr_type_intrinsic)
return true;

@ -429,7 +429,7 @@ trivialize_stores(nir_function_impl *impl, nir_block *block)
nontrivial |= !list_is_singular(&value->uses);

/* SSA-only instruction types */
nir_instr *parent = value->parent_instr;
nir_instr *parent = nir_def_instr(value);
nontrivial |= (parent->type == nir_instr_type_load_const) ||
(parent->type == nir_instr_type_undef);

@ -140,7 +140,7 @@ parse_intrinsic(nir_shader *nir, nir_intrinsic_instr *intr,
}

if (intr->intrinsic == nir_intrinsic_load_interpolated_input &&
intr->src[0].ssa->parent_instr->type == nir_instr_type_intrinsic)
nir_src_is_intrinsic(intr->src[0]))
desc->baryc = nir_def_as_intrinsic(intr->src[0].ssa);

/* Find the variable if it exists. */
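The intrinsic flavor follows the same pattern, which is presumably why parse_intrinsic() above checks nir_src_is_intrinsic() before calling the asserting cast. A sketch under that assumption:

static inline bool
nir_src_is_intrinsic(nir_src src)
{
   return nir_def_instr(src.ssa)->type == nir_instr_type_intrinsic;
}

static inline nir_intrinsic_instr *
nir_def_as_intrinsic(nir_def *def)
{
   /* Asserting flavor: callers must have checked the type already. */
   return nir_instr_as_intrinsic(nir_def_instr(def));
}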
@ -227,7 +227,6 @@ validate_def(nir_def *def, validate_state *state)
validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
BITSET_SET(state->ssa_defs_found, def->index);

validate_assert(state, def->parent_instr == state->instr);
validate_num_components(state, def->num_components);

list_validate(&def->uses);

@ -353,7 +352,7 @@ validate_deref_instr(nir_deref_instr *instr, validate_state *state)
validate_sized_src(&instr->parent, state, instr->def.bit_size,
instr->def.num_components);

nir_instr *parent_instr = instr->parent.ssa->parent_instr;
nir_instr *parent_instr = nir_def_instr(instr->parent.ssa);

/* The parent must come from another deref instruction */
validate_assert(state, parent_instr->type == nir_instr_type_deref);

@ -483,7 +482,7 @@ validate_register_handle(nir_src handle_src,
validate_state *state)
{
nir_def *handle = handle_src.ssa;
nir_instr *parent = handle->parent_instr;
nir_instr *parent = nir_def_instr(handle);

if (!validate_assert(state, parent->type == nir_instr_type_intrinsic))
return;

@ -892,7 +891,7 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
validate_assert(state,
(nir_src_is_const(*offset_src) &&
nir_src_as_uint(*offset_src) == 0) ||
offset_src->ssa->parent_instr->type == nir_instr_type_phi);
nir_def_is_phi(offset_src->ssa));
}
}
}
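nir_def_is_phi(), used in the validator hunk above, is presumably the same one-line predicate shape as the other flavors; a sketch, not the actual nir.h definition:

static inline bool
nir_def_is_phi(nir_def *def)
{
   return nir_def_instr(def)->type == nir_instr_type_phi;
}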
@ -36,7 +36,7 @@ static bool
nir_instr_worklist_add_srcs_cb(nir_src *src, void *state)
{
nir_instr_worklist *wl = state;
nir_instr_worklist_push_tail(wl, src->ssa->parent_instr);
nir_instr_worklist_push_tail(wl, nir_def_instr(src->ssa));
return true;
}

@ -73,13 +73,13 @@ TEST_F(nir_core_test, nir_instr_free_and_dce_test)
nir_def *add01 = nir_iadd(b, zero, one);
nir_def *add11 = nir_iadd(b, one, one);

nir_cursor c = nir_instr_free_and_dce(add01->parent_instr);
nir_cursor c = nir_instr_free_and_dce(nir_def_instr(add01));
ASSERT_FALSE(shader_contains_def(add01));
ASSERT_TRUE(shader_contains_def(add11));
ASSERT_FALSE(shader_contains_def(zero));
ASSERT_TRUE(shader_contains_def(one));

ASSERT_TRUE(nir_cursors_equal(c, nir_before_instr(add11->parent_instr)));
ASSERT_TRUE(nir_cursors_equal(c, nir_before_def(add11)));

nir_validate_shader(b->shader, "after remove_and_dce");
}

@ -89,11 +89,11 @@ TEST_F(nir_core_test, nir_instr_free_and_dce_all_test)
nir_def *one = nir_imm_int(b, 1);
nir_def *add = nir_iadd(b, one, one);

nir_cursor c = nir_instr_free_and_dce(add->parent_instr);
nir_cursor c = nir_instr_free_and_dce(nir_def_instr(add));
ASSERT_FALSE(shader_contains_def(add));
ASSERT_FALSE(shader_contains_def(one));

ASSERT_TRUE(nir_cursors_equal(c, nir_before_block(nir_start_block(b->impl))));
ASSERT_TRUE(nir_cursors_equal(c, nir_before_impl(b->impl)));

nir_validate_shader(b->shader, "after remove_and_dce");
}

@ -107,12 +107,12 @@ TEST_F(nir_core_test, nir_instr_free_and_dce_multiple_src_test)
* nir_instr_remove for instructions with srcs. */
nir_def *add2 = nir_iadd(b, add, add);

nir_cursor c = nir_instr_free_and_dce(add2->parent_instr);
nir_cursor c = nir_instr_free_and_dce(nir_def_instr(add2));
ASSERT_FALSE(shader_contains_def(add2));
ASSERT_FALSE(shader_contains_def(add));
ASSERT_FALSE(shader_contains_def(one));

ASSERT_TRUE(nir_cursors_equal(c, nir_before_block(nir_start_block(b->impl))));
ASSERT_TRUE(nir_cursors_equal(c, nir_before_impl(b->impl)));

nir_validate_shader(b->shader, "after remove_and_dce");
}
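The core tests above also pick up the def-based cursor helpers (nir_before_def, nir_before_impl, both visible in the hunks). A hypothetical fragment showing the resulting call shape, assuming a nir_builder *b from a test fixture:

/* Hypothetical fragment; builder setup omitted. */
nir_def *one = nir_imm_int(b, 1);
nir_def *add = nir_iadd(b, one, one);

nir_cursor c = nir_instr_free_and_dce(nir_def_instr(add)); /* was add->parent_instr */
assert(nir_cursors_equal(c, nir_before_impl(b->impl)));    /* was nir_before_block(nir_start_block(b->impl)) */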
@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/

#include "nir.h"
#include "nir_test.h"

/* This is a macro so you get good line numbers */

@ -66,8 +67,8 @@ protected:
void create_shared_store(nir_deref_instr *deref, uint32_t id,
unsigned bit_size=32, unsigned components=1, unsigned wrmask=0xf);

bool test_alu(nir_instr *instr, nir_op op);
bool test_alu_def(nir_instr *instr, unsigned index, nir_def *def, unsigned swizzle=0);
bool test_alu(nir_def *def, nir_op op);
bool test_alu_def(nir_def *def1, unsigned index, nir_def *def2, unsigned swizzle=0);

static bool mem_vectorize_callback(unsigned align_mul, unsigned align_offset,
unsigned bit_size,

@ -321,22 +322,23 @@ void nir_load_store_vectorize_test::create_shared_store(
nir_store_deref(b, deref, value, wrmask & ((1 << components) - 1));
}

bool nir_load_store_vectorize_test::test_alu(nir_instr *instr, nir_op op)
bool nir_load_store_vectorize_test::test_alu(nir_def *def, nir_op op)
{
return instr->type == nir_instr_type_alu && nir_instr_as_alu(instr)->op == op;
return nir_def_instr(def)->type == nir_instr_type_alu &&
nir_instr_as_alu(nir_def_instr(def))->op == op;
}

bool nir_load_store_vectorize_test::test_alu_def(
nir_instr *instr, unsigned index, nir_def *def, unsigned swizzle)
nir_def *def1, unsigned index, nir_def *def2, unsigned swizzle)
{
if (instr->type != nir_instr_type_alu)
if (nir_def_instr(def1)->type != nir_instr_type_alu)
return false;

nir_alu_instr *alu = nir_instr_as_alu(instr);
nir_alu_instr *alu = nir_instr_as_alu(nir_def_instr(def1));

if (index >= nir_op_infos[alu->op].num_inputs)
return false;
if (alu->src[index].src.ssa != def)
if (alu->src[index].src.ssa != def2)
return false;
if (alu->src[index].swizzle[0] != swizzle)
return false;

@ -615,9 +617,9 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_indirect_neg_stride)
EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");

/* nir_opt_algebraic optimizes the imul */
ASSERT_TRUE(test_alu(load->src[1].ssa->parent_instr, nir_op_ineg));
ASSERT_TRUE(test_alu(load->src[1].ssa, nir_op_ineg));
nir_def *offset = nir_def_as_alu(load->src[1].ssa)->src[0].src.ssa;
ASSERT_TRUE(test_alu(offset->parent_instr, nir_op_ishl));
ASSERT_TRUE(test_alu(offset, nir_op_ishl));
nir_alu_instr *shl = nir_def_as_alu(offset);
ASSERT_EQ(shl->src[0].src.ssa, inv_plus_one);
ASSERT_EQ(nir_src_as_uint(shl->src[1].src), 2);

@ -957,15 +959,15 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_8_8_16)
nir_def *val = loads[0x3]->src.ssa;
ASSERT_EQ(val->bit_size, 16);
ASSERT_EQ(val->num_components, 1);
ASSERT_TRUE(test_alu(val->parent_instr, nir_op_ior));
ASSERT_TRUE(test_alu(val, nir_op_ior));
nir_def *low = nir_def_as_alu(val)->src[0].src.ssa;
nir_def *high = nir_def_as_alu(val)->src[1].src.ssa;
ASSERT_TRUE(test_alu(high->parent_instr, nir_op_ishl));
ASSERT_TRUE(test_alu(high, nir_op_ishl));
high = nir_def_as_alu(high)->src[0].src.ssa;
ASSERT_TRUE(test_alu(low->parent_instr, nir_op_u2u16));
ASSERT_TRUE(test_alu(high->parent_instr, nir_op_u2u16));
ASSERT_TRUE(test_alu_def(low->parent_instr, 0, &load->def, 2));
ASSERT_TRUE(test_alu_def(high->parent_instr, 0, &load->def, 3));
ASSERT_TRUE(test_alu(low, nir_op_u2u16));
ASSERT_TRUE(test_alu(high, nir_op_u2u16));
ASSERT_TRUE(test_alu_def(low, 0, &load->def, 2));
ASSERT_TRUE(test_alu_def(high, 0, &load->def, 3));
}

TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64)

@ -989,7 +991,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64)
nir_def *val = loads[0x2]->src.ssa;
ASSERT_EQ(val->bit_size, 64);
ASSERT_EQ(val->num_components, 1);
ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
ASSERT_TRUE(test_alu(val, nir_op_pack_64_2x32));
nir_alu_instr *pack = nir_def_as_alu(val);
EXPECT_INSTR_SWIZZLES(pack, load, "zw");
}

@ -1016,14 +1018,14 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_adjacent_32_32_64_64)
nir_def *val = loads[0x2]->src.ssa;
ASSERT_EQ(val->bit_size, 64);
ASSERT_EQ(val->num_components, 1);
ASSERT_TRUE(test_alu(val->parent_instr, nir_op_mov));
ASSERT_TRUE(test_alu(val, nir_op_mov));
nir_alu_instr *mov = nir_def_as_alu(val);
EXPECT_INSTR_SWIZZLES(mov, load, "y");

val = loads[0x1]->src.ssa;
ASSERT_EQ(val->bit_size, 32);
ASSERT_EQ(val->num_components, 2);
ASSERT_TRUE(test_alu(val->parent_instr, nir_op_unpack_64_2x32));
ASSERT_TRUE(test_alu(val, nir_op_unpack_64_2x32));
nir_alu_instr *unpack = nir_def_as_alu(val);
EXPECT_INSTR_SWIZZLES(unpack, load, "x");
}

@ -1049,7 +1051,7 @@ TEST_F(nir_load_store_vectorize_test, ssbo_load_intersecting_32_32_64)
nir_def *val = loads[0x2]->src.ssa;
ASSERT_EQ(val->bit_size, 64);
ASSERT_EQ(val->num_components, 1);
ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
ASSERT_TRUE(test_alu(val, nir_op_pack_64_2x32));
nir_alu_instr *pack = nir_def_as_alu(val);
EXPECT_INSTR_SWIZZLES(pack, load, "yz");
}

@ -1470,10 +1472,10 @@ TEST_F(nir_load_store_vectorize_test, shared_load_bool)
ASSERT_EQ(deref->var, var);

/* The loaded value is converted to Boolean by (loaded != 0). */
ASSERT_TRUE(test_alu(loads[0x1]->src.ssa->parent_instr, nir_op_ine));
ASSERT_TRUE(test_alu(loads[0x2]->src.ssa->parent_instr, nir_op_ine));
ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->def, 0));
ASSERT_TRUE(test_alu_def(loads[0x2]->src.ssa->parent_instr, 0, &load->def, 1));
ASSERT_TRUE(test_alu(loads[0x1]->src.ssa, nir_op_ine));
ASSERT_TRUE(test_alu(loads[0x2]->src.ssa, nir_op_ine));
ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa, 0, &load->def, 0));
ASSERT_TRUE(test_alu_def(loads[0x2]->src.ssa, 0, &load->def, 1));
}

TEST_F(nir_load_store_vectorize_test, shared_load_bool_mixed)

@ -1510,8 +1512,8 @@ TEST_F(nir_load_store_vectorize_test, shared_load_bool_mixed)
ASSERT_EQ(deref->var, var);

/* The loaded value is converted to Boolean by (loaded != 0). */
ASSERT_TRUE(test_alu(loads[0x1]->src.ssa->parent_instr, nir_op_ine));
ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa->parent_instr, 0, &load->def, 0));
ASSERT_TRUE(test_alu(loads[0x1]->src.ssa, nir_op_ine));
ASSERT_TRUE(test_alu_def(loads[0x1]->src.ssa, 0, &load->def, 0));

EXPECT_INSTR_SWIZZLES(movs[0x2], load, "y");
}

@ -2363,8 +2365,8 @@ TEST_F(nir_load_store_vectorize_test, ssbo_shifted_different_bit_size_adjacent)
ASSERT_EQ(nir_def_components_read(&load->def), 0xf);
ASSERT_EQ(nir_src_as_uint(load->src[1]), 0);
EXPECT_INSTR_SWIZZLES(movs[0x1], load, "xy");
nir_instr *mov2_src = movs[0x2]->src[0].src.ssa->parent_instr;
nir_def *mov2_src = movs[0x2]->src[0].src.ssa;
ASSERT_TRUE(test_alu(mov2_src, nir_op_pack_32_2x16));
nir_alu_instr *pack = nir_instr_as_alu(mov2_src);
nir_alu_instr *pack = nir_def_as_alu(mov2_src);
EXPECT_INSTR_SWIZZLES(pack, load, "zw");
}
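For skimmers, the net effect on the vectorizer tests is that the helpers now take the SSA value itself instead of its instruction. Before/after of one assertion, taken from the hunks above:

/* Before: helpers took the instruction. */
ASSERT_TRUE(test_alu(val->parent_instr, nir_op_pack_64_2x32));
/* After: helpers take the nir_def and look up the instruction themselves. */
ASSERT_TRUE(test_alu(val, nir_op_pack_64_2x32));
nir_alu_instr *pack = nir_def_as_alu(val); /* asserting cast, replaces nir_instr_as_alu(val->parent_instr) */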
@ -325,7 +325,7 @@ shader_contains_instr(nir_builder *b, nir_instr *i)
static inline bool
shader_contains_def(nir_builder *b, nir_def *def)
{
return shader_contains_instr(b, def->parent_instr);
return shader_contains_instr(b, nir_def_instr(def));
}

static inline bool

@ -287,7 +287,7 @@ TEST_F(unsigned_upper_bound_test, loop_phi_bcsel)

nir_phi_instr_add_src(phi, nir_def_block(zero), zero);
nir_phi_instr_add_src(phi, nir_def_block(sel), sel);
b->cursor = nir_before_instr(sel->parent_instr);
b->cursor = nir_before_instr(nir_def_instr(sel));
nir_builder_instr_insert(b, &phi->instr);

nir_validate_shader(b->shader, NULL);

@ -1085,7 +1085,7 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previou
nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 2);

/* NOTE: The ALU instruction is how we get the vec.y. */
ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
ASSERT_TRUE(nir_src_as_alu(store->src[1]));
}

TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_reuses_previous_copy)

@ -1142,7 +1142,7 @@ TEST_F(nir_copy_prop_vars_test, load_direct_array_deref_on_vector_gets_reused)
ASSERT_EQ(count_intrinsics(nir_intrinsic_store_deref), 2);

nir_intrinsic_instr *store = get_intrinsic(nir_intrinsic_store_deref, 1);
ASSERT_TRUE(nir_src_as_alu_instr(store->src[1]));
ASSERT_TRUE(nir_src_as_alu(store->src[1]));
}

TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)

@ -1185,7 +1185,7 @@ TEST_F(nir_copy_prop_vars_test, store_load_direct_array_deref_on_vector)

/* Fourth store will compose first and second store values. */
nir_intrinsic_instr *fourth_store = get_intrinsic(nir_intrinsic_store_deref, 3);
EXPECT_TRUE(nir_src_as_alu_instr(fourth_store->src[1]));
EXPECT_TRUE(nir_src_as_alu(fourth_store->src[1]));
}

TEST_F(nir_copy_prop_vars_test, store_load_indirect_array_deref_on_vector)

@ -1807,7 +1807,7 @@ TEST_F(nir_combine_stores_test, non_overlapping_stores)
ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
nir_alu_instr *vec = nir_src_as_alu(combined->src[1]);
ASSERT_TRUE(vec);
for (int i = 0; i < 4; i++) {
nir_intrinsic_instr *load = nir_src_as_intrinsic(vec->src[i].src);

@ -1846,7 +1846,7 @@ TEST_F(nir_combine_stores_test, overlapping_stores)
ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
nir_alu_instr *vec = nir_src_as_alu(combined->src[1]);
ASSERT_TRUE(vec);

/* Component x comes from v[0]. */

@ -1912,7 +1912,7 @@ TEST_F(nir_combine_stores_test, direct_array_derefs)
ASSERT_EQ(nir_intrinsic_write_mask(combined), 0xf);
ASSERT_EQ(nir_intrinsic_get_var(combined, 0), out);

nir_alu_instr *vec = nir_src_as_alu_instr(combined->src[1]);
nir_alu_instr *vec = nir_src_as_alu(combined->src[1]);
ASSERT_TRUE(vec);

/* Component x comes from v[0]. */

@ -106,7 +106,7 @@ impl ALUType {

impl nir_def {
pub fn parent_instr(&self) -> &nir_instr {
unsafe { self.parent_instr.as_ref() }.unwrap()
unsafe { &*nir_def_instr_noninline(self as *const _) }
}

pub fn components_read(&self) -> nir_component_mask_t {
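The Rust binding above can no longer read the field directly, so it calls an exported symbol. Presumably a non-inline C wrapper along these lines exists so the FFI has something to link against; this is a guess, the inline helper itself has no linkable symbol:

/* Hypothetical exported wrapper for bindgen/FFI consumers. */
nir_instr *
nir_def_instr_noninline(const nir_def *def)
{
   return nir_def_instr((nir_def *)def);
}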
@ -116,5 +116,5 @@ TEST_F(NonSemantic, printf)

nir_intrinsic_instr *intrinsic = find_intrinsic(nir_intrinsic_printf, 0);
ASSERT_NE(intrinsic, nullptr);
ASSERT_TRUE(intrinsic->src[0].ssa->parent_instr->type == nir_instr_type_deref);
ASSERT_TRUE(nir_def_instr(intrinsic->src[0].ssa)->type == nir_instr_type_deref);
}

@ -894,7 +894,7 @@ vtn_add_printf_string(struct vtn_builder *b, uint32_t id, u_printf_info *info)

while (deref->deref_type != nir_deref_type_var) {
nir_scalar parent = nir_scalar_resolved(deref->parent.ssa, 0);
if (parent.def->parent_instr->type != nir_instr_type_deref) {
if (!nir_def_is_deref(parent.def)) {
deref = NULL;
break;
}
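The vtn hunk above reaches the def through a nir_scalar. The series also adds nir_scalar_is_*/nir_scalar_as_* helpers, which presumably just forward to the def flavor on the scalar's def; a sketch, not the actual definitions:

static inline bool
nir_scalar_is_alu(nir_scalar s)
{
   return nir_def_is_alu(s.def);
}

static inline nir_alu_instr *
nir_scalar_as_alu(nir_scalar s)
{
   return nir_def_as_alu(s.def);
}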
@ -598,7 +598,7 @@ get_deref_tail(nir_deref_instr *deref)
nir_def_as_deref(deref->parent.ssa);

if (parent->deref_type == nir_deref_type_cast &&
parent->parent.ssa->parent_instr->type == nir_instr_type_deref) {
nir_def_is_deref(parent->parent.ssa)) {
nir_deref_instr *grandparent =
nir_def_as_deref(parent->parent.ssa);
@ -4190,7 +4190,7 @@ read_phi_src(struct ir3_context *ctx, struct ir3_block *blk,

nir_foreach_phi_src (nsrc, nphi) {
if (blk->nblock == nsrc->pred) {
if (nsrc->src.ssa->parent_instr->type == nir_instr_type_undef) {
if (nir_src_is_undef(nsrc->src)) {
/* Create an ir3 undef */
return NULL;
} else {

@ -4397,7 +4397,7 @@ get_branch_condition(struct ir3_context *ctx, nir_src *src, unsigned comp,
{
struct ir3_instruction *condition = ir3_get_src(ctx, src)[comp];

if (src->ssa->parent_instr->type == nir_instr_type_alu) {
if (nir_def_is_alu(src->ssa)) {
nir_alu_instr *nir_cond = nir_def_as_alu(src->ssa);

if (nir_cond->op == nir_op_inot) {

@ -4420,7 +4420,7 @@ fold_conditional_branch(struct ir3_context *ctx, struct nir_src *nir_cond)
if (!ctx->compiler->has_branch_and_or)
return NULL;

if (nir_cond->ssa->parent_instr->type != nir_instr_type_alu)
if (!nir_def_is_alu(nir_cond->ssa))
return NULL;

nir_alu_instr *alu_cond = nir_def_as_alu(nir_cond->ssa);

@ -153,7 +153,7 @@ bool ir3_get_driver_param_info(const nir_shader *shader,
static inline nir_intrinsic_instr *
ir3_bindless_resource(nir_src src)
{
if (src.ssa->parent_instr->type != nir_instr_type_intrinsic)
if (!nir_src_is_intrinsic(src))
return NULL;

nir_intrinsic_instr *intrin = nir_def_as_intrinsic(src.ssa);
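A condensed sketch of the ir3 branch-condition pattern from get_branch_condition() above, as the new predicates leave it. Fragment only; the surrounding ir3 plumbing is omitted:

if (nir_def_is_alu(src->ssa)) {
   nir_alu_instr *nir_cond = nir_def_as_alu(src->ssa); /* safe after the check */

   if (nir_cond->op == nir_op_inot) {
      /* fold the inot into the branch rather than emitting it */
   }
}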
Some files were not shown because too many files have changed in this diff.