pan/bi: Assume destinations are non-NULL

We guarantee this now, no need to check it in every pass. There's an exception
on Bifrost after clause scheduling, but few passes run after clause scheduling
so this doesn't affect much.

Signed-off-by: Alyssa Rosenzweig <alyssa@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17794>
This commit is contained in:
Alyssa Rosenzweig 2022-07-21 16:10:06 -04:00 committed by Marge Bot
parent e5b3faf5aa
commit 8375717de3
7 changed files with 14 additions and 36 deletions

View file

@@ -202,9 +202,6 @@ bi_helper_block_update(BITSET_WORD *deps, bi_block *block)
bi_foreach_instr_in_block_rev(block, I) {
/* If a destination is required by helper invocation... */
bi_foreach_dest(I, d) {
if (bi_is_null(I->dest[d]))
continue;
if (!BITSET_TEST(deps, bi_get_node(I->dest[d])))
continue;
@@ -269,8 +266,8 @@ bi_analyze_helper_requirements(bi_context *ctx)
bool exec = false;
bi_foreach_dest(I, d) {
if (I->dest[d].type == BI_INDEX_NORMAL)
exec |= BITSET_TEST(deps, bi_get_node(I->dest[d]));
assert(I->dest[d].type == BI_INDEX_NORMAL);
exec |= BITSET_TEST(deps, bi_get_node(I->dest[d]));
}
I->skip = !exec;

View file

@@ -114,13 +114,12 @@ create_dag(bi_context *ctx, bi_block *block, void *memctx)
/* Writes depend on reads and writes */
bi_foreach_dest(I, s) {
bi_index dest = I->dest[s];
assert(dest.type == BI_INDEX_NORMAL);
if (dest.type == BI_INDEX_NORMAL) {
add_dep(node, last_read[label_index(ctx, dest)]);
add_dep(node, last_write[label_index(ctx, dest)]);
add_dep(node, last_read[label_index(ctx, dest)]);
add_dep(node, last_write[label_index(ctx, dest)]);
last_write[label_index(ctx, dest)] = node;
}
last_write[label_index(ctx, dest)] = node;
}
bi_foreach_src(I, s) {
@@ -233,8 +232,9 @@ calculate_pressure_delta(bi_instr *I, uint8_t *live, unsigned max)
/* Destinations must be unique */
bi_foreach_dest(I, d) {
unsigned node = bi_get_node(I->dest[d]);
assert(node < max);
if (node < max && live[node])
if (live[node])
delta -= bi_count_write_registers(I, d);
}

View file

@@ -255,9 +255,7 @@ bi_mark_interference(bi_block *block, struct lcra_state *l, uint8_t *live, uint6
bi_foreach_dest(ins, d) {
unsigned node = bi_get_node(ins->dest[d]);
if (node >= node_count)
continue;
assert(node < node_count);
/* Don't allocate to anything that's read later as a
* preloaded register. The affinity is the intersection
@@ -374,9 +372,9 @@ bi_allocate_registers(bi_context *ctx, bool *success, bool full_regs)
bi_foreach_instr_global(ctx, ins) {
bi_foreach_dest(ins, d) {
unsigned dest = bi_get_node(ins->dest[d]);
assert(dest < node_count);
if (dest < node_count)
l->affinity[dest] = default_affinity;
l->affinity[dest] = default_affinity;
}
/* Blend shaders expect the src colour to be in r0-r3 */
@@ -545,9 +543,7 @@ bi_choose_spill_node(bi_context *ctx, struct lcra_state *l)
bi_foreach_instr_global(ctx, ins) {
bi_foreach_dest(ins, d) {
unsigned node = bi_get_node(ins->dest[d]);
if (node >= l->node_count)
continue;
assert(node < l->node_count);
/* Don't allow spilling coverage mask writes because the
* register preload logic assumes it will stay in R60.
@@ -719,8 +715,6 @@ bi_lower_vector(bi_context *ctx)
assert(src.offset == 0);
bi_foreach_dest(I, i) {
assert(!bi_is_null(I->dest[i]));
src.offset = i;
bi_mov_i32_to(&b, I->dest[i], src);
@@ -829,8 +823,7 @@ squeeze_index(bi_context *ctx)
bi_foreach_instr_global(ctx, I) {
bi_foreach_dest(I, d) {
if (I->dest[d].type == BI_INDEX_NORMAL)
I->dest[d].value = find_or_allocate_temp(map, I->dest[d].value, &ctx->ssa_alloc);
I->dest[d].value = find_or_allocate_temp(map, I->dest[d].value, &ctx->ssa_alloc);
}
bi_foreach_src(I, s) {

View file

@@ -264,7 +264,7 @@ bi_create_dependency_graph(struct bi_worklist st, bool inorder, bool is_blend)
}
bi_foreach_dest(ins, d) {
if (ins->dest[d].type != BI_INDEX_REGISTER) continue;
assert(ins->dest[d].type == BI_INDEX_REGISTER);
unsigned dest = ins->dest[d].value;
unsigned count = bi_count_write_registers(ins, d);
@@ -999,9 +999,6 @@ bi_write_count(bi_instr *instr, uint64_t live_after_temp)
if (d == 0 && bi_opcode_props[instr->op].sr_write)
continue;
if (bi_is_null(instr->dest[d]))
continue;
assert(instr->dest[0].type == BI_INDEX_REGISTER);
if (live_after_temp & BITFIELD64_BIT(instr->dest[0].value))
count++;
@@ -1071,9 +1068,6 @@ bi_instr_schedulable(bi_instr *instr,
* instruction can't be scheduled */
if (bi_opcode_props[instr->op].sr_write) {
bi_foreach_dest(instr, d) {
if (bi_is_null(instr->dest[d]))
continue;
unsigned nr = bi_count_write_registers(instr, d);
assert(instr->dest[d].type == BI_INDEX_REGISTER);
unsigned reg = instr->dest[d].value;

View file

@@ -117,7 +117,6 @@ bi_validate_width(bi_context *ctx)
bi_foreach_instr_global(ctx, I) {
bi_foreach_dest(I, d) {
if (bi_is_null(I->dest[d])) continue;
if (!bi_is_ssa(I->dest[d])) continue;
unsigned v = I->dest[d].value;

View file

@@ -90,8 +90,6 @@ bi_write_mask(bi_instr *I)
uint64_t mask = 0;
bi_foreach_dest(I, d) {
if (bi_is_null(I->dest[d])) continue;
assert(I->dest[d].type == BI_INDEX_REGISTER);
unsigned reg = I->dest[d].value;

View file

@@ -70,9 +70,6 @@ static bool
bi_writes_reg(const bi_instr *I, unsigned reg)
{
bi_foreach_dest(I, d) {
if (bi_is_null(I->dest[d]))
continue;
assert(I->dest[d].type == BI_INDEX_REGISTER);
unsigned count = bi_count_write_registers(I, d);