treewide: use nir_def_block

Via Coccinelle patch:

    @@
    expression definition;
    @@

    -definition->parent_instr->block
    +nir_def_block(definition)
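
For reference, nir_def_block is presumably a thin inline accessor equivalent
to the expression it replaces; a minimal sketch is shown below (the real
helper lives in the NIR headers and may carry extra assertions):

    /* Sketch only: assumed shape of the nir_def_block() accessor. */
    static inline nir_block *
    nir_def_block(nir_def *def)
    {
       return def->parent_instr->block;
    }

Because the rewrite is purely syntactic, the semantic patch above can be
applied mechanically across the tree with Coccinelle without changing
behavior.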

Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Emma Anholt <emma@anholt.net>
Reviewed-by: Marek Olšák <maraeo@gmail.com>
Acked-by: Karol Herbst <kherbst@redhat.com>
Acked-by: Konstantin Seurer <konstantin.seurer@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/36489>
Alyssa Rosenzweig, 2025-07-31 09:37:16 -04:00, committed by Marge Bot
commit bcf1a1c20b (parent 82ae8b1d33)
30 changed files with 62 additions and 65 deletions

@@ -2984,7 +2984,7 @@ optimize_bounds(nir_builder *b, nir_intrinsic_instr *intr, void *data)
 return false;
 /* To avoid dominance problems, we must sink loads. */
-if (this.def->parent_instr->block != intr->instr.block) {
+if (nir_def_block(this.def) != intr->instr.block) {
 return false;
 }
 }

@@ -2203,7 +2203,7 @@ nir_function_impl_lower_instructions(nir_function_impl *impl,
 if (new_def && new_def != NIR_LOWER_INSTR_PROGRESS &&
 new_def != NIR_LOWER_INSTR_PROGRESS_REPLACE) {
 assert(old_def != NULL);
-if (new_def->parent_instr->block != instr->block)
+if (nir_def_block(new_def) != instr->block)
 preserved = nir_metadata_none;
 list_for_each_entry_safe(nir_src, use_src, &old_uses, use_link)

@@ -74,7 +74,7 @@ nir_src_is_divergent(nir_src *src)
 return true;
 nir_cf_node *use_node = nir_src_get_block(src)->cf_node.parent;
-nir_cf_node *def_node = src->ssa->parent_instr->block->cf_node.parent;
+nir_cf_node *def_node = nir_def_block(src->ssa)->cf_node.parent;
 /* Short-cut the common case. */
 if (def_node == use_node)
@@ -121,14 +121,14 @@ src_invariant(nir_src *src, void *loop)
 nir_block *first_block = nir_loop_first_block(loop);
 /* Invariant if SSA is defined before the current loop. */
-if (src->ssa->parent_instr->block->index < first_block->index)
+if (nir_def_block(src->ssa)->index < first_block->index)
 return true;
 if (!src->ssa->loop_invariant)
 return false;
 /* The value might be defined in a nested loop. */
-nir_cf_node *cf_node = src->ssa->parent_instr->block->cf_node.parent;
+nir_cf_node *cf_node = nir_def_block(src->ssa)->cf_node.parent;
 while (cf_node->type != nir_cf_node_loop)
 cf_node = cf_node->parent;

@@ -68,12 +68,11 @@ def_after(nir_def *a, nir_def *b)
 /* If they're in the same block, we can rely on whichever instruction
 * comes first in the block.
 */
-if (a->parent_instr->block == b->parent_instr->block)
+if (nir_def_block(a) == nir_def_block(b))
 return a->parent_instr->index > b->parent_instr->index;
 /* Otherwise, if blocks are distinct, we sort them in DFS pre-order */
-return a->parent_instr->block->dom_pre_index >
-b->parent_instr->block->dom_pre_index;
+return nir_def_block(a)->dom_pre_index > nir_def_block(b)->dom_pre_index;
 }
 /* Returns true if a dominates b */
@@ -86,11 +85,10 @@ ssa_def_dominates(nir_def *a, nir_def *b)
 }
 if (def_after(a, b)) {
 return false;
-} else if (a->parent_instr->block == b->parent_instr->block) {
+} else if (nir_def_block(a) == nir_def_block(b)) {
 return def_after(b, a);
 } else {
-return nir_block_dominates(a->parent_instr->block,
-b->parent_instr->block);
+return nir_block_dominates(nir_def_block(a), nir_def_block(b));
 }
 }
@@ -593,7 +591,7 @@ def_replace_with_reg(nir_def *def, nir_function_impl *impl)
 nir_rewrite_uses_to_load_reg(&b, def, reg);
 if (def->parent_instr->type == nir_instr_type_phi)
-b.cursor = nir_before_block_after_phis(def->parent_instr->block);
+b.cursor = nir_before_block_after_phis(nir_def_block(def));
 else
 b.cursor = nir_after_instr(def->parent_instr);
@@ -1177,7 +1175,7 @@ nir_lower_phis_to_regs_block(nir_block *block)
 nir_foreach_phi_src(src, phi) {
-_mesa_set_add(visited_blocks, src->src.ssa->parent_instr->block);
+_mesa_set_add(visited_blocks, nir_def_block(src->src.ssa));
 place_phi_read(&b, reg, src->src.ssa, src->pred, visited_blocks);
 _mesa_set_clear(visited_blocks, NULL);
 }
@@ -1208,7 +1206,7 @@ def_replace_with_reg_state(nir_def *def, void *void_state)
 static bool
 ssa_def_is_local_to_block(nir_def *def, UNUSED void *state)
 {
-nir_block *block = def->parent_instr->block;
+nir_block *block = nir_def_block(def);
 nir_foreach_use_including_if(use_src, def) {
 if (nir_src_is_if(use_src) ||
 nir_src_parent_instr(use_src)->block != block ||

@@ -290,7 +290,7 @@ nir_def_is_live_at(nir_def *def, nir_instr *instr)
 return true;
 } else {
 if (BITSET_TEST(instr->block->live_in, def->index) ||
-def->parent_instr->block == instr->block) {
+nir_def_block(def) == instr->block) {
 /* In this case it is either live coming into instr's block or it
 * is defined in the same block. In this case, we simply need to
 * see if it is used after instr.

@@ -260,7 +260,7 @@ compute_induction_information(loop_info_state *state)
 /* If one of the sources is in an if branch or nested loop then don't
 * attempt to go any further.
 */
-if (src->parent_instr->block->cf_node.parent != &state->loop->cf_node)
+if (nir_def_block(src)->cf_node.parent != &state->loop->cf_node)
 break;
 /* Detect inductions variables that are incremented in both branches

@@ -1744,7 +1744,7 @@ nir_block_loop_depth(nir_block *block)
 static nir_block *
 find_last_dominant_use_block(nir_function_impl *impl, nir_def *value)
 {
-nir_block *old_block = value->parent_instr->block;
+nir_block *old_block = nir_def_block(value);
 unsigned old_block_loop_depth = nir_block_loop_depth(old_block);
 nir_foreach_block_reverse_safe(block, impl) {

@@ -50,10 +50,10 @@ ssa_def_dominates_instr(nir_def *def, nir_instr *instr)
 {
 if (instr->index <= def->parent_instr->index) {
 return false;
-} else if (def->parent_instr->block == instr->block) {
+} else if (nir_def_block(def) == instr->block) {
 return def->parent_instr->index < instr->index;
 } else {
-return nir_block_dominates(def->parent_instr->block, instr->block);
+return nir_block_dominates(nir_def_block(def), instr->block);
 }
 }

@@ -48,7 +48,7 @@ remat_ssa_def(nir_builder *b, nir_def *def, struct hash_table *remap_table,
 struct nir_phi_builder *phi_builder, BITSET_WORD *def_blocks)
 {
 memset(def_blocks, 0, BITSET_WORDS(b->impl->num_blocks) * sizeof(BITSET_WORD));
-BITSET_SET(def_blocks, def->parent_instr->block->index);
+BITSET_SET(def_blocks, nir_def_block(def)->index);
 BITSET_SET(def_blocks, nir_cursor_current_block(b->cursor)->index);
 struct nir_phi_builder_value *val =
 nir_phi_builder_add_value(phi_builder, def->num_components,
@@ -61,9 +61,8 @@ remat_ssa_def(nir_builder *b, nir_def *def, struct hash_table *remap_table,
 nir_def *new_def = nir_instr_def(clone);
 _mesa_hash_table_insert(remap_table, def, new_def);
-if (nir_cursor_current_block(b->cursor)->index !=
-def->parent_instr->block->index)
-nir_phi_builder_value_set_block_def(val, def->parent_instr->block, def);
+if (nir_cursor_current_block(b->cursor)->index != nir_def_block(def)->index)
+nir_phi_builder_value_set_block_def(val, nir_def_block(def), def);
 nir_phi_builder_value_set_block_def(val, nir_cursor_current_block(b->cursor),
 new_def);
 }
@@ -135,7 +134,7 @@ rewrite_instr_src_from_phi_builder(nir_src *src, void *data)
 nir_def *new_def = nir_phi_builder_value_get_block_def(entry->data, block);
 bool can_rewrite = true;
-if (new_def->parent_instr->block == block && new_def->index != UINT32_MAX)
+if (nir_def_block(new_def) == block && new_def->index != UINT32_MAX)
 can_rewrite =
 !nir_instr_is_before(nir_src_parent_instr(src), new_def->parent_instr);

@@ -145,7 +145,7 @@ def_only_used_in_cf_node(nir_def *def, void *_node)
 * corresponding predecessor is inside the loop or not because the value
 * can go through the phi into the outside world and escape the loop.
 */
-if (block != def->parent_instr->block && !block_in_cf_node(block, node))
+if (block != nir_def_block(def) && !block_in_cf_node(block, node))
 return false;
 }

@@ -668,9 +668,9 @@ gcm_schedule_late_def(nir_def *def, void *void_state)
 }
 if (def->parent_instr->pass_flags & GCM_INSTR_SCHEDULE_EARLIER_ONLY &&
-lca != def->parent_instr->block &&
-nir_block_dominates(def->parent_instr->block, lca)) {
-lca = def->parent_instr->block;
+lca != nir_def_block(def) &&
+nir_block_dominates(nir_def_block(def), lca)) {
+lca = nir_def_block(def);
 }
 /* We now have the LCA of all of the uses. If our invariants hold,
@@ -681,7 +681,7 @@ gcm_schedule_late_def(nir_def *def, void *void_state)
 nir_block *best_block =
 gcm_choose_block_for_instr(def->parent_instr, early_block, lca, state);
-if (def->parent_instr->block != best_block)
+if (nir_def_block(def) != best_block)
 state->progress = true;
 def->parent_instr->block = best_block;

@@ -166,9 +166,9 @@ has_only_sources_less_than(nir_src *src, void *data)
 struct check_sources_state *state = (struct check_sources_state *)data;
 /* true if nir_foreach_src should keep going */
-return state->block != src->ssa->parent_instr->block ||
-state->infos[src->ssa->parent_instr->index].instr_index <
-state->first_instr_index;
+return state->block != nir_def_block(src->ssa) ||
+state->infos[src->ssa->parent_instr->index].instr_index <
+state->first_instr_index;
 }
 static void

@@ -163,7 +163,7 @@ opt_peel_loop_initial_if(nir_loop *loop)
 return false;
 nir_phi_instr *cond_phi = nir_def_as_phi(cond);
-if (cond->parent_instr->block != header_block)
+if (nir_def_block(cond) != header_block)
 return false;
 bool entry_val = false, continue_val = false;
@@ -285,7 +285,7 @@ is_trivial_bcsel(const nir_instr *instr, bool allow_non_phi_src)
 for (unsigned i = 0; i < 3; i++) {
 if (!nir_alu_src_is_trivial_ssa(bcsel, i) ||
-bcsel->src[i].src.ssa->parent_instr->block != instr->block)
+nir_def_block(bcsel->src[i].src.ssa) != instr->block)
 return false;
 if (bcsel->src[i].src.ssa->parent_instr->type != nir_instr_type_phi) {

@@ -71,13 +71,13 @@ try_opt_bcsel_of_shuffle(nir_builder *b, nir_alu_instr *alu,
 nir_def *data1, *index1;
 if (!nir_alu_src_is_trivial_ssa(alu, 1) ||
-alu->src[1].src.ssa->parent_instr->block != alu->instr.block ||
+nir_def_block(alu->src[1].src.ssa) != alu->instr.block ||
 !src_is_single_use_shuffle(alu->src[1].src, &data1, &index1))
 return NULL;
 nir_def *data2, *index2;
 if (!nir_alu_src_is_trivial_ssa(alu, 2) ||
-alu->src[2].src.ssa->parent_instr->block != alu->instr.block ||
+nir_def_block(alu->src[2].src.ssa) != alu->instr.block ||
 !src_is_single_use_shuffle(alu->src[2].src, &data2, &index2))
 return NULL;

@@ -9,7 +9,7 @@ static bool
 defined_before_loop(nir_src *src, void *state)
 {
 unsigned *loop_preheader_idx = state;
-return src->ssa->parent_instr->block->index <= *loop_preheader_idx;
+return nir_def_block(src->ssa)->index <= *loop_preheader_idx;
 }
 static bool

@@ -315,7 +315,7 @@ can_constant_fold(nir_scalar scalar, nir_block *loop_header)
 if (scalar.def->parent_instr->type == nir_instr_type_phi) {
 /* If this is a phi from anything but the loop header, we cannot constant-fold. */
-if (scalar.def->parent_instr->block != loop_header)
+if (nir_def_block(scalar.def) != loop_header)
 return false;
 nir_block *preheader = nir_block_cf_tree_prev(loop_header);
@@ -468,7 +468,7 @@ insert_phis_after_terminator_merge(nir_def *def, void *state)
 }
 if (nir_src_is_if(src) ||
-(!nir_src_is_if(src) && nir_src_parent_instr(src)->block != def->parent_instr->block)) {
+(!nir_src_is_if(src) && nir_src_parent_instr(src)->block != nir_def_block(def))) {
 if (!phi_created) {
 phi_instr = nir_phi_instr_create(m_state->shader);
 nir_def_init(&phi_instr->instr, &phi_instr->def, def->num_components,

@@ -321,7 +321,7 @@ find_chains(nir_function_impl *impl, struct hash_table *pair_freq,
 for (unsigned i = 0; i < c.length; ++i) {
 lowest_rank = MIN2(rank(c.srcs[i]), lowest_rank);
 highest_rank = MAX2(rank(c.srcs[i]), highest_rank);
-local &= c.srcs[i].def->parent_instr->block == block;
+local &= nir_def_block(c.srcs[i].def) == block;
 }
 for (unsigned i = 0; i < c.length; ++i) {

@@ -59,7 +59,7 @@ static bool
 src_dominates_block(nir_src *src, void *state)
 {
 nir_block *block = state;
-return nir_block_dominates(src->ssa->parent_instr->block, block);
+return nir_block_dominates(nir_def_block(src->ssa), block);
 }
 static bool
@@ -120,7 +120,7 @@ remove_phis_instr(nir_builder *b, nir_phi_instr *phi, void *unused)
 if (def == NULL) {
 def = src->src.ssa;
-if (!nir_block_dominates(def->parent_instr->block, block->imm_dom)) {
+if (!nir_block_dominates(nir_def_block(def), block->imm_dom)) {
 if (!can_rematerialize_phi_src(block->imm_dom, def))
 return false;
 needs_remat = true;

@@ -344,9 +344,9 @@ get_preferred_block(nir_def *def, bool sink_out_of_loops)
 * This might occasionally increase register pressure, but seems overall
 * the better choice.
 */
-lca = adjust_block_for_loops(lca, def->parent_instr->block,
+lca = adjust_block_for_loops(lca, nir_def_block(def),
 sink_out_of_loops);
-assert(nir_block_dominates(def->parent_instr->block, lca));
+assert(nir_block_dominates(nir_def_block(def), lca));
 return lca;
 }

@@ -76,7 +76,7 @@ repair_ssa_def(nir_def *def, void *void_state)
 nir_block *src_block = get_src_block(src);
 if (nir_block_is_unreachable(src_block) ||
-!nir_block_dominates(def->parent_instr->block, src_block)) {
+!nir_block_dominates(nir_def_block(def), src_block)) {
 is_valid = false;
 break;
 }
@@ -87,18 +87,18 @@ repair_ssa_def(nir_def *def, void *void_state)
 struct nir_phi_builder *pb = prep_build_phi(state);
-BITSET_SET(state->def_set, def->parent_instr->block->index);
+BITSET_SET(state->def_set, nir_def_block(def)->index);
 struct nir_phi_builder_value *val =
 nir_phi_builder_add_value(pb, def->num_components, def->bit_size,
 state->def_set);
-nir_phi_builder_value_set_block_def(val, def->parent_instr->block, def);
+nir_phi_builder_value_set_block_def(val, nir_def_block(def), def);
 nir_foreach_use_including_if_safe(src, def) {
 nir_block *block = get_src_block(src);
-if (block == def->parent_instr->block) {
+if (block == nir_def_block(def)) {
 assert(nir_phi_builder_value_get_block_def(val, block) == def);
 continue;
 }

@@ -345,7 +345,7 @@ clear_def(nir_def *def, void *state)
 continue;
 /* Anything global has already been trivialized and can be ignored */
-if (parent->block != def->parent_instr->block)
+if (parent->block != nir_def_block(def))
 continue;
 if (def == store->src[0].ssa) {

@@ -1594,12 +1594,12 @@ validate_src_dominance(nir_src *src, void *_state)
 {
 validate_state *state = _state;
-if (src->ssa->parent_instr->block == nir_src_parent_instr(src)->block) {
+if (nir_def_block(src->ssa) == nir_src_parent_instr(src)->block) {
 validate_assert(state, src->ssa->index < state->impl->ssa_alloc);
 validate_assert(state, BITSET_TEST(state->ssa_defs_found,
 src->ssa->index));
 } else {
-validate_assert(state, nir_block_dominates(src->ssa->parent_instr->block,
+validate_assert(state, nir_block_dominates(nir_def_block(src->ssa),
 nir_src_parent_instr(src)->block));
 }
 return true;
@@ -1618,7 +1618,7 @@ validate_ssa_dominance(nir_function_impl *impl, validate_state *state)
 nir_phi_instr *phi = nir_instr_as_phi(instr);
 nir_foreach_phi_src(src, phi) {
 validate_assert(state,
-nir_block_dominates(src->src.ssa->parent_instr->block,
+nir_block_dominates(nir_def_block(src->src.ssa),
 src->pred));
 }
 } else {
@@ -1629,7 +1629,7 @@ validate_ssa_dominance(nir_function_impl *impl, validate_state *state)
 nir_if *nif = nir_block_get_following_if(block);
 if (nif) {
-validate_assert(state, nir_block_dominates(nif->condition.ssa->parent_instr->block,
+validate_assert(state, nir_block_dominates(nir_def_block(nif->condition.ssa),
 block));
 }
 }

@@ -96,7 +96,7 @@ loop_builder(nir_builder *b, loop_builder_param p)
 nir_def_init(&phi->instr, &phi->def, ssa_0->num_components,
 ssa_0->bit_size);
-nir_phi_instr_add_src(phi, ssa_0->parent_instr->block, ssa_0);
+nir_phi_instr_add_src(phi, nir_def_block(ssa_0), ssa_0);
 nir_def *ssa_5 = &phi->def;
 nir_def *ssa_3 = p.cond_instr(b, ssa_5, ssa_1);
@@ -116,7 +116,7 @@ loop_builder(nir_builder *b, loop_builder_param p)
 nir_def *ssa_4 = p.incr_instr(b, ssa_5, ssa_2);
-nir_phi_instr_add_src(phi, ssa_4->parent_instr->block, ssa_4);
+nir_phi_instr_add_src(phi, nir_def_block(ssa_4), ssa_4);
 }
 nir_pop_loop(b, loop);
@@ -170,7 +170,7 @@ loop_builder_invert(nir_builder *b, loop_builder_invert_param p)
 nir_def_init(&phi->instr, &phi->def, ssa_0->num_components,
 ssa_0->bit_size);
-nir_phi_instr_add_src(phi, ssa_0->parent_instr->block, ssa_0);
+nir_phi_instr_add_src(phi, nir_def_block(ssa_0), ssa_0);
 nir_def *ssa_5 = &phi->def;

@@ -370,7 +370,7 @@ ir3_def_is_rematerializable_for_preamble(nir_def *def,
 preamble_defs) &&
 ir3_def_is_rematerializable_for_preamble(intrin->src[1].ssa,
 preamble_defs) &&
-(def->parent_instr->block->cf_node.parent->type ==
+(nir_def_block(def)->cf_node.parent->type ==
 nir_cf_node_function ||
 (nir_intrinsic_access(intrin) & ACCESS_CAN_SPECULATE));
 case nir_intrinsic_bindless_resource_ir3:
@@ -404,7 +404,7 @@ static bool
 find_dominated_src(nir_src *src, void *data)
 {
 struct find_insert_block_state *state = data;
-nir_block *src_block = src->ssa->parent_instr->block;
+nir_block *src_block = nir_def_block(src->ssa);
 if (!state->insert_block) {
 state->insert_block = src_block;

@@ -53,7 +53,7 @@ static void register_node_ssa(gpir_block *block, gpir_node *node, nir_def *ssa)
 */
 bool needs_register = false;
 nir_foreach_use(use, ssa) {
-if (nir_src_parent_instr(use)->block != ssa->parent_instr->block) {
+if (nir_src_parent_instr(use)->block != nir_def_block(ssa)) {
 needs_register = true;
 break;
 }
@@ -62,7 +62,7 @@ static void register_node_ssa(gpir_block *block, gpir_node *node, nir_def *ssa)
 if (!needs_register) {
 nir_foreach_if_use(use, ssa) {
 if (nir_cf_node_prev(&nir_src_parent_if(use)->cf_node) !=
-&ssa->parent_instr->block->cf_node) {
+&nir_def_block(ssa)->cf_node) {
 needs_register = true;
 break;
 }

@@ -99,7 +99,7 @@ collect_reaching_defs(nir_alu_instr *fsat, nir_instr_worklist *sources)
 * do its job. Adding another fsat will not help.
 */
 if (def->parent_instr->type == nir_instr_type_alu &&
-def->parent_instr->block != fsat->instr.block) {
+nir_def_block(def) != fsat->instr.block) {
 nir_instr_worklist_push_tail(sources, def->parent_instr);
 }
 }

@@ -244,7 +244,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
 */
 nir_def *view_index = build_view_index(&state);
-assert(view_index->parent_instr->block == nir_start_block(entrypoint));
+assert(nir_def_block(view_index) == nir_start_block(entrypoint));
 b->cursor = nir_after_instr(view_index->parent_instr);
 /* Unless there is only one possible view index (that would be set

@@ -215,7 +215,7 @@ anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
 */
 nir_def *view_index = build_view_index(&state);
-assert(view_index->parent_instr->block == nir_start_block(entrypoint));
+assert(nir_def_block(view_index) == nir_start_block(entrypoint));
 b->cursor = nir_after_instr(view_index->parent_instr);
 /* Unless there is only one possible view index (that would be set

@@ -412,7 +412,7 @@ recompute_phi_divergence_impl(nir_function_impl *impl)
 * don't want to deal with inserting a r2ur somewhere.
 */
 if (phi_src->pred->divergent || phi_src->src.ssa->divergent ||
-phi_src->src.ssa->parent_instr->block->divergent) {
+nir_def_block(phi_src->src.ssa)->divergent) {
 divergent = true;
 break;
 }

@@ -112,7 +112,7 @@ add_live_handle(nir_def *handle, struct non_uniform_section *nus)
 static bool
 def_needs_hoist(nir_def *def, nir_block *target)
 {
-return def->parent_instr->block->index > target->index;
+return nir_def_block(def)->index > target->index;
 }
 static bool