pan/util,midgard: Remove pan_block

pan_block is only used by midgard, so move everything still related to it
into midgard.

Signed-off-by: Mary Guillemard <mary.guillemard@collabora.com>
Acked-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Acked-by: Eric R. Smith <eric.smith@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32650>
Author: Mary Guillemard, 2024-12-16 09:27:50 +01:00 (committed by Marge Bot)
Parent: dce110a604
Commit: 330c3c68e2
13 changed files with 140 additions and 160 deletions
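
For orientation, here is a minimal, self-contained sketch of the successor
bookkeeping this commit moves from the shared pan_block onto midgard_block
(see mir_block_add_successor in the mir diff below). The types and names
(block, block_add_successor, MAX_SUCCESSORS) are simplified stand-ins for
illustration, not the real Mesa definitions, and the predecessor set kept by
the real code is only noted in a comment.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_SUCCESSORS 2

typedef struct block {
   unsigned name;
   struct block *successors[MAX_SUCCESSORS]; /* at most two CFG edges out */
   bool unconditional_jumps;                 /* edges past this block are impossible */
} block;

/* Mirrors the logic of mir_block_add_successor(): skip impossible or
 * duplicate edges, otherwise take the first free successor slot. */
static void
block_add_successor(block *blk, block *successor)
{
   assert(blk && successor);

   /* Cull impossible edges */
   if (blk->unconditional_jumps)
      return;

   for (unsigned i = 0; i < MAX_SUCCESSORS; ++i) {
      if (blk->successors[i]) {
         if (blk->successors[i] == successor)
            return; /* edge already recorded */
         continue;
      }

      blk->successors[i] = successor;
      /* The real mir_block_add_successor() also records the reverse edge:
       * _mesa_set_add(successor->predecessors, blk). */
      return;
   }

   assert(!"too many successors");
}

int
main(void)
{
   block entry = {.name = 0};
   block then_blk = {.name = 1};
   block else_blk = {.name = 2};

   block_add_successor(&entry, &then_blk);
   block_add_successor(&entry, &then_blk); /* duplicate is ignored */
   block_add_successor(&entry, &else_blk);

   for (unsigned i = 0; i < MAX_SUCCESSORS; ++i)
      printf("block%u -> block%u\n", entry.name, entry.successors[i]->name);

   return 0;
}

In the real compiler the reverse edge stored in successor->predecessors is
what mir_foreach_predecessor and the liveness/helper-analysis passes changed
in this commit iterate over.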

@@ -65,6 +65,7 @@ ForEachMacros: [
'mir_foreach_instr_in_block_safe_rev',
'mir_foreach_instr_in_block_scheduled_rev',
'mir_foreach_predecessor',
'mir_foreach_successor',
'mir_foreach_src',
'nir_foreach_variable_with_modes',
'nir_foreach_shader_in_variable',
@@ -73,9 +74,6 @@ ForEachMacros: [
'nodearray_sparse_foreach',
'pan_cast_and_pack',
'pan_cast_and_pack_nodefaults',
'pan_foreach_instr_in_block_rev',
'pan_foreach_predecessor',
'pan_foreach_successor',
'pan_pack',
'pan_pack_nodefaults',
'pan_section_pack',

@@ -189,7 +189,25 @@ typedef struct midgard_instruction {
} midgard_instruction;
typedef struct midgard_block {
pan_block base;
/* Link to next block. Must be first for mir_get_block */
struct list_head link;
/* List of instructions emitted for the current block */
struct list_head instructions;
/* Index of the block in source order */
unsigned name;
/* Control flow graph */
struct midgard_block *successors[2];
struct set *predecessors;
bool unconditional_jumps;
/* In liveness analysis, these are live masks (per-component) for
* indices for the block. Scalar compilers have the luxury of using
* simple bit fields, but for us, liveness is a vector idea. */
uint16_t *live_in;
uint16_t *live_out;
bool scheduled;
@@ -327,7 +345,7 @@ emit_mir_instruction(struct compiler_context *ctx,
const struct midgard_instruction *ins)
{
midgard_instruction *u = mir_upload_ins(ctx, ins);
list_addtail(&u->link, &ctx->current_block->base.instructions);
list_addtail(&u->link, &ctx->current_block->instructions);
return u;
}
@@ -353,34 +371,42 @@ mir_remove_instruction(struct midgard_instruction *ins)
#define mir_next_op(ins) \
list_first_entry(&((ins)->link), midgard_instruction, link)
static inline midgard_block *
mir_exit_block(struct list_head *blocks)
{
midgard_block *last = list_last_entry(blocks, midgard_block, link);
assert(!last->successors[0] && !last->successors[1]);
return last;
}
#define mir_foreach_block(ctx, v) \
list_for_each_entry(pan_block, v, &ctx->blocks, link)
list_for_each_entry(midgard_block, v, &ctx->blocks, link)
#define mir_foreach_block_from(ctx, from, v) \
list_for_each_entry_from(pan_block, v, &from->base, &ctx->blocks, link)
list_for_each_entry_from(midgard_block, v, from, &ctx->blocks, link)
#define mir_foreach_instr_in_block(block, v) \
list_for_each_entry(struct midgard_instruction, v, \
&block->base.instructions, link)
&block->instructions, link)
#define mir_foreach_instr_in_block_rev(block, v) \
list_for_each_entry_rev(struct midgard_instruction, v, \
&block->base.instructions, link)
&block->instructions, link)
#define mir_foreach_instr_in_block_safe(block, v) \
list_for_each_entry_safe(struct midgard_instruction, v, \
&block->base.instructions, link)
&block->instructions, link)
#define mir_foreach_instr_in_block_safe_rev(block, v) \
list_for_each_entry_safe_rev(struct midgard_instruction, v, \
&block->base.instructions, link)
&block->instructions, link)
#define mir_foreach_instr_in_block_from(block, v, from) \
list_for_each_entry_from(struct midgard_instruction, v, from, \
&block->base.instructions, link)
&block->instructions, link)
#define mir_foreach_instr_in_block_from_rev(block, v, from) \
list_for_each_entry_from_rev(struct midgard_instruction, v, from, \
&block->base.instructions, link)
&block->instructions, link)
#define mir_foreach_bundle_in_block(block, v) \
util_dynarray_foreach(&block->bundles, midgard_bundle, v)
@@ -405,22 +431,31 @@ mir_remove_instruction(struct midgard_instruction *ins)
/* Based on set_foreach, expanded with automatic type casts */
#define mir_foreach_successor(blk, v) \
struct midgard_block *v; \
struct midgard_block **_v; \
for (_v = (midgard_block **)&blk->successors[0], v = *_v; \
v != NULL && _v < (midgard_block **)&blk->successors[2]; \
_v++, v = *_v)
#define mir_foreach_predecessor(blk, v) \
struct set_entry *_entry_##v; \
struct midgard_block *v; \
for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
v = (struct midgard_block *)(_entry_##v ? _entry_##v->key : NULL); \
_entry_##v != NULL; \
_entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
_entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
v = (struct midgard_block *)(_entry_##v ? _entry_##v->key : NULL))
#define mir_foreach_src(ins, v) \
for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
void mir_block_add_successor(midgard_block *block, midgard_block *successor);
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
return list_last_entry(&block->base.instructions, struct midgard_instruction,
return list_last_entry(&block->instructions, struct midgard_instruction,
link);
}

@@ -70,10 +70,10 @@ create_empty_block(compiler_context *ctx)
{
midgard_block *blk = rzalloc(ctx, midgard_block);
blk->base.predecessors =
blk->predecessors =
_mesa_set_create(blk, _mesa_hash_pointer, _mesa_key_pointer_equal);
blk->base.name = ctx->block_source_count++;
blk->name = ctx->block_source_count++;
return blk;
}
@@ -84,9 +84,9 @@ schedule_barrier(compiler_context *ctx)
midgard_block *temp = ctx->after_block;
ctx->after_block = create_empty_block(ctx);
ctx->block_count++;
list_addtail(&ctx->after_block->base.link, &ctx->blocks);
list_inithead(&ctx->after_block->base.instructions);
pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
list_addtail(&ctx->after_block->link, &ctx->blocks);
list_inithead(&ctx->after_block->instructions);
mir_block_add_successor(ctx->current_block, ctx->after_block);
ctx->current_block = ctx->after_block;
ctx->after_block = temp;
}
@@ -274,9 +274,9 @@ midgard_nir_lower_global_load_instr(nir_builder *b, nir_intrinsic_instr *intr,
static bool
midgard_nir_lower_global_load(nir_shader *shader)
{
return nir_shader_intrinsics_pass(
shader, midgard_nir_lower_global_load_instr,
nir_metadata_control_flow, NULL);
return nir_shader_intrinsics_pass(shader,
midgard_nir_lower_global_load_instr,
nir_metadata_control_flow, NULL);
}
static bool
@@ -367,7 +367,7 @@ mem_access_size_align_cb(nir_intrinsic_op intrin, uint8_t bytes,
static uint8_t
lower_vec816_alu(const nir_instr *instr, const void *cb_data)
{
return 4;
return 4;
}
static bool
@@ -2473,7 +2473,7 @@ inline_alu_constants(compiler_context *ctx, midgard_block *block)
* possible.
*/
midgard_instruction *first = list_first_entry(
&block->base.instructions, midgard_instruction, link);
&block->instructions, midgard_instruction, link);
if (alu == first) {
mir_insert_instruction_before(ctx, alu, &ins);
@@ -2738,13 +2738,13 @@ emit_block_init(compiler_context *ctx)
if (!this_block)
this_block = create_empty_block(ctx);
list_addtail(&this_block->base.link, &ctx->blocks);
list_addtail(&this_block->link, &ctx->blocks);
this_block->scheduled = false;
++ctx->block_count;
/* Set up current block */
list_inithead(&this_block->base.instructions);
list_inithead(&this_block->instructions);
ctx->current_block = this_block;
return this_block;
@@ -2811,11 +2811,11 @@ emit_if(struct compiler_context *ctx, nir_if *nif)
ctx->after_block = create_empty_block(ctx);
pan_block_add_successor(&before_block->base, &then_block->base);
pan_block_add_successor(&before_block->base, &else_block->base);
mir_block_add_successor(before_block, then_block);
mir_block_add_successor(before_block, else_block);
pan_block_add_successor(&end_then_block->base, &ctx->after_block->base);
pan_block_add_successor(&end_else_block->base, &ctx->after_block->base);
mir_block_add_successor(end_then_block, ctx->after_block);
mir_block_add_successor(end_else_block, ctx->after_block);
}
static void
@@ -2841,8 +2841,8 @@ emit_loop(struct compiler_context *ctx, nir_loop *nloop)
emit_mir_instruction(ctx, &br_back);
/* Mark down that branch in the graph. */
pan_block_add_successor(&start_block->base, &loop_block->base);
pan_block_add_successor(&ctx->current_block->base, &loop_block->base);
mir_block_add_successor(start_block, loop_block);
mir_block_add_successor(ctx->current_block, loop_block);
/* Find the index of the block about to follow us (note: we don't add
* one; blocks are 0-indexed so we get a fencepost problem) */
@@ -2874,7 +2874,7 @@ emit_loop(struct compiler_context *ctx, nir_loop *nloop)
ins->branch.target_type = TARGET_GOTO;
ins->branch.target_block = break_block_idx;
pan_block_add_successor(_block, &ctx->after_block->base);
mir_block_add_successor((midgard_block *)_block, ctx->after_block);
}
}
@@ -2891,7 +2891,8 @@ emit_cf_list(struct compiler_context *ctx, struct exec_list *list)
{
midgard_block *start_block = NULL;
foreach_list_typed(nir_cf_node, node, node, list) {
foreach_list_typed(nir_cf_node, node, node, list)
{
switch (node->type) {
case nir_cf_node_block: {
midgard_block *block = emit_block(ctx, nir_cf_node_as_block(node));
@@ -2957,8 +2958,8 @@ mir_add_writeout_loops(compiler_context *ctx)
continue;
unsigned popped = br->branch.target_block;
pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base),
&ctx->current_block->base);
mir_block_add_successor(mir_get_block(ctx, popped - 1),
ctx->current_block);
br->branch.target_block = emit_fragment_epilogue(ctx, rt, s);
br->branch.target_type = TARGET_GOTO;
@@ -2977,8 +2978,8 @@ mir_add_writeout_loops(compiler_context *ctx)
uncond.branch.target_block = popped;
uncond.branch.target_type = TARGET_GOTO;
emit_mir_instruction(ctx, &uncond);
pan_block_add_successor(&ctx->current_block->base,
&(mir_get_block(ctx, popped)->base));
mir_block_add_successor(ctx->current_block,
mir_get_block(ctx, popped));
schedule_barrier(ctx);
} else {
/* We're last, so we can terminate here */
@@ -3113,7 +3114,8 @@ midgard_compile_shader_nir(nir_shader *nir,
int bundle_idx = 0;
mir_foreach_block(ctx, _block) {
midgard_block *block = (midgard_block *)_block;
util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
util_dynarray_foreach(&block->bundles, midgard_bundle, bundle)
{
source_order_bundles[bundle_idx++] = bundle;
}
}

@@ -743,10 +743,10 @@ emit_branch(midgard_instruction *ins, compiler_context *ctx,
quadword_offset = 0x2;
} else if (is_tilebuf_wait) {
quadword_offset = -1;
} else if (target_number > block->base.name) {
} else if (target_number > block->name) {
/* Jump forward */
for (int idx = block->base.name + 1; idx < target_number; ++idx) {
for (int idx = block->name + 1; idx < target_number; ++idx) {
midgard_block *blk = mir_get_block(ctx, idx);
assert(blk);
@@ -755,7 +755,7 @@
} else {
/* Jump backwards */
for (int idx = block->base.name; idx >= target_number; --idx) {
for (int idx = block->name; idx >= target_number; --idx) {
midgard_block *blk = mir_get_block(ctx, idx);
assert(blk);

@@ -84,8 +84,8 @@ mir_block_terminates_helpers(midgard_block *block)
return false;
/* Can't terminate if a successor needs helpers */
pan_foreach_successor((&block->base), succ) {
if (((midgard_block *)succ)->helpers_in)
mir_foreach_successor(block, succ) {
if (succ->helpers_in)
return false;
}
@@ -123,13 +123,13 @@ mir_analyze_helper_terminate(compiler_context *ctx)
while ((cur = _mesa_set_next_entry(worklist, NULL)) != NULL) {
/* Pop off a block requiring helpers */
pan_block *blk = (struct pan_block *)cur->key;
midgard_block *blk = (struct midgard_block *)cur->key;
_mesa_set_remove(worklist, cur);
/* Its predecessors also require helpers */
pan_foreach_predecessor(blk, pred) {
mir_foreach_predecessor(blk, pred) {
if (!_mesa_set_search(visited, pred)) {
((midgard_block *)pred)->helpers_in = true;
pred->helpers_in = true;
_mesa_set_add(worklist, pred);
}
}
@@ -161,11 +161,10 @@ mir_analyze_helper_terminate(compiler_context *ctx)
}
static bool
mir_helper_block_update(BITSET_WORD *deps, pan_block *_block,
mir_helper_block_update(BITSET_WORD *deps, midgard_block *block,
unsigned temp_count)
{
bool progress = false;
midgard_block *block = (midgard_block *)_block;
mir_foreach_instr_in_block_rev(block, ins) {
/* Ensure we write to a helper dependency */
@@ -219,16 +218,16 @@ mir_analyze_helper_requirements(compiler_context *ctx)
_mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
struct set_entry *cur =
_mesa_set_add(work_list, pan_exit_block(&ctx->blocks));
_mesa_set_add(work_list, mir_exit_block(&ctx->blocks));
do {
pan_block *blk = (struct pan_block *)cur->key;
midgard_block *blk = (struct midgard_block *)cur->key;
_mesa_set_remove(work_list, cur);
bool progress = mir_helper_block_update(deps, blk, temp_count);
if (progress || !_mesa_set_search(visited, blk)) {
pan_foreach_predecessor(blk, pred)
mir_foreach_predecessor(blk, pred)
_mesa_set_add(work_list, pred);
}

@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "panfrost/util/pan_ir.h"
#include "compiler.h"
/* Routines for liveness analysis. Liveness is tracked per byte per node. Per
@@ -73,9 +74,9 @@ mir_liveness_ins_update(uint16_t *live, const midgard_instruction *ins,
/* live_out[s] = sum { p in succ[s] } ( live_in[p] ) */
static void
liveness_block_live_out(pan_block *blk, unsigned temp_count)
liveness_block_live_out(midgard_block *blk, unsigned temp_count)
{
pan_foreach_successor(blk, succ) {
mir_foreach_successor(blk, succ) {
for (unsigned i = 0; i < temp_count; ++i)
blk->live_out[i] |= succ->live_in[i];
}
@@ -86,7 +87,7 @@ liveness_block_live_out(pan_block *blk, unsigned temp_count)
* returns whether progress was made. */
static bool
liveness_block_update(pan_block *blk, unsigned temp_count)
liveness_block_update(midgard_block *blk, unsigned temp_count)
{
bool progress = false;
@@ -95,7 +96,7 @@ liveness_block_update(pan_block *blk, unsigned temp_count)
uint16_t *live = ralloc_array(blk, uint16_t, temp_count);
memcpy(live, blk->live_out, temp_count * sizeof(uint16_t));
pan_foreach_instr_in_block_rev(blk, ins)
mir_foreach_instr_in_block_rev(blk, ins)
mir_liveness_ins_update(live, (midgard_instruction *)ins, temp_count);
/* To figure out progress, diff live_in */
@@ -112,10 +113,10 @@ liveness_block_update(pan_block *blk, unsigned temp_count)
/* Once liveness data is no longer valid, call this */
static void
mir_free_liveness(struct list_head *blocks)
mir_free_liveness(compiler_context *ctx)
{
list_for_each_entry(pan_block, block, blocks, link)
{
mir_foreach_block(ctx, _block) {
midgard_block *block = (midgard_block *)_block;
if (block->live_in)
ralloc_free(block->live_in);
@@ -142,7 +143,7 @@ mir_compute_liveness(compiler_context *ctx)
mir_compute_temp_count(ctx);
/* Set of pan_block */
/* Set of midgard_block */
struct set *work_list =
_mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
@@ -151,10 +152,10 @@ mir_compute_liveness(compiler_context *ctx)
/* Free any previous liveness, and allocate */
mir_free_liveness(&ctx->blocks);
mir_free_liveness(ctx);
list_for_each_entry(pan_block, block, &ctx->blocks, link)
{
mir_foreach_block(ctx, _block) {
midgard_block *block = (midgard_block *)_block;
block->live_in = rzalloc_array(block, uint16_t, ctx->temp_count);
block->live_out = rzalloc_array(block, uint16_t, ctx->temp_count);
}
@@ -162,13 +163,13 @@ mir_compute_liveness(compiler_context *ctx)
/* Initialize the work list with the exit block */
struct set_entry *cur;
cur = _mesa_set_add(work_list, pan_exit_block(&ctx->blocks));
cur = _mesa_set_add(work_list, mir_exit_block(&ctx->blocks));
/* Iterate the work list */
do {
/* Pop off a block */
pan_block *blk = (struct pan_block *)cur->key;
midgard_block *blk = (struct midgard_block *)cur->key;
_mesa_set_remove(work_list, cur);
/* Update its liveness information */
@@ -177,7 +178,7 @@ mir_compute_liveness(compiler_context *ctx)
/* If we made progress, we need to process the predecessors */
if (progress || !_mesa_set_search(visited, blk)) {
pan_foreach_predecessor(blk, pred)
mir_foreach_predecessor(blk, pred)
_mesa_set_add(work_list, pred);
}
@@ -198,7 +199,7 @@ mir_invalidate_liveness(compiler_context *ctx)
if (!(ctx->metadata & MIDGARD_METADATA_LIVENESS))
return;
mir_free_liveness(&ctx->blocks);
mir_free_liveness(ctx);
/* It's now invalid regardless */
ctx->metadata &= ~MIDGARD_METADATA_LIVENESS;
@@ -212,7 +213,7 @@ mir_is_live_after(compiler_context *ctx, const midgard_block *block,
/* Check whether we're live in the successors */
if (mir_liveness_get(block->base.live_out, src, ctx->temp_count))
if (mir_liveness_get(block->live_out, src, ctx->temp_count))
return true;
/* Check the rest of the block for liveness */

@@ -73,7 +73,7 @@ midgard_opt_dead_code_eliminate_block(compiler_context *ctx,
bool progress = false;
uint16_t *live =
mem_dup(block->base.live_out, ctx->temp_count * sizeof(uint16_t));
mem_dup(block->live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(block, ins) {
if (can_cull_mask(ctx, ins)) {

@@ -355,7 +355,7 @@ mir_print_instruction(const midgard_instruction *ins)
void
mir_print_block(const midgard_block *block)
{
printf("block%u: {\n", block->base.name);
printf("block%u: {\n", block->name);
if (block->scheduled) {
mir_foreach_bundle_in_block(block, bundle) {
@@ -372,15 +372,15 @@ mir_print_block(const midgard_block *block)
printf("}");
if (block->base.successors[0]) {
if (block->successors[0]) {
printf(" -> ");
pan_foreach_successor((&block->base), succ)
mir_foreach_successor(block, succ)
printf(" block%u ", succ->name);
}
printf(" from { ");
mir_foreach_predecessor(block, pred)
printf("block%u ", pred->base.name);
printf("block%u ", pred->name);
printf("}");
printf("\n\n");

@@ -285,7 +285,7 @@ bytes_for_instruction(midgard_instruction *ains)
static midgard_instruction **
flatten_mir(midgard_block *block, unsigned *len)
{
*len = list_length(&block->base.instructions);
*len = list_length(&block->instructions);
if (!(*len))
return NULL;
@@ -1524,7 +1524,7 @@ schedule_block(compiler_context *ctx, midgard_block *block)
}
mir_foreach_instr_in_block_scheduled_rev(block, ins) {
list_add(&ins->link, &block->base.instructions);
list_add(&ins->link, &block->instructions);
}
free(instructions); /* Allocated by flatten_mir() */
@@ -1569,7 +1569,7 @@ mir_lower_blend_input(compiler_context *ctx)
mir_foreach_block(ctx, _blk) {
midgard_block *blk = (midgard_block *)_blk;
if (list_is_empty(&blk->base.instructions))
if (list_is_empty(&blk->instructions))
continue;
midgard_instruction *I = mir_last_in_block(blk);

@@ -25,6 +25,32 @@
#include "compiler.h"
#include "midgard_ops.h"
void
mir_block_add_successor(midgard_block *block, midgard_block *successor)
{
assert(block);
assert(successor);
/* Cull impossible edges */
if (block->unconditional_jumps)
return;
for (unsigned i = 0; i < ARRAY_SIZE(block->successors); ++i) {
if (block->successors[i]) {
if (block->successors[i] == successor)
return;
else
continue;
}
block->successors[i] = successor;
_mesa_set_add(successor->predecessors, block);
return;
}
unreachable("Too many successors");
}
void
mir_rewrite_index_src_single(midgard_instruction *ins, unsigned old,
unsigned new)

@@ -202,7 +202,7 @@ mir_estimate_pressure(compiler_context *ctx)
mir_foreach_block(ctx, _block) {
midgard_block *block = (midgard_block *)_block;
uint16_t *live =
mem_dup(block->base.live_out, ctx->temp_count * sizeof(uint16_t));
mem_dup(block->live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(block, ins) {
unsigned count = mir_count_live(live, ctx->temp_count);

@@ -68,32 +68,6 @@ pan_to_bytemask(unsigned bytes, unsigned mask)
}
}
void
pan_block_add_successor(pan_block *block, pan_block *successor)
{
assert(block);
assert(successor);
/* Cull impossible edges */
if (block->unconditional_jumps)
return;
for (unsigned i = 0; i < ARRAY_SIZE(block->successors); ++i) {
if (block->successors[i]) {
if (block->successors[i] == successor)
return;
else
continue;
}
block->successors[i] = successor;
_mesa_set_add(successor->predecessors, block);
return;
}
unreachable("Too many successors");
}
/* Could optimize with a better data structure if anyone cares, TODO: profile */
unsigned

@@ -316,63 +316,8 @@ struct pan_shader_info {
};
};
typedef struct pan_block {
/* Link to next block. Must be first for mir_get_block */
struct list_head link;
/* List of instructions emitted for the current block */
struct list_head instructions;
/* Index of the block in source order */
unsigned name;
/* Control flow graph */
struct pan_block *successors[2];
struct set *predecessors;
bool unconditional_jumps;
/* In liveness analysis, these are live masks (per-component) for
* indices for the block. Scalar compilers have the luxury of using
* simple bit fields, but for us, liveness is a vector idea. */
uint16_t *live_in;
uint16_t *live_out;
} pan_block;
struct pan_instruction {
struct list_head link;
};
#define pan_foreach_instr_in_block_rev(block, v) \
list_for_each_entry_rev(struct pan_instruction, v, &block->instructions, \
link)
#define pan_foreach_successor(blk, v) \
pan_block *v; \
pan_block **_v; \
for (_v = (pan_block **)&blk->successors[0], v = *_v; \
v != NULL && _v < (pan_block **)&blk->successors[2]; _v++, v = *_v)
#define pan_foreach_predecessor(blk, v) \
struct set_entry *_entry_##v; \
struct pan_block *v; \
for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
v = (struct pan_block *)(_entry_##v ? _entry_##v->key : NULL); \
_entry_##v != NULL; \
_entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
v = (struct pan_block *)(_entry_##v ? _entry_##v->key : NULL))
static inline pan_block *
pan_exit_block(struct list_head *blocks)
{
pan_block *last = list_last_entry(blocks, pan_block, link);
assert(!last->successors[0] && !last->successors[1]);
return last;
}
uint16_t pan_to_bytemask(unsigned bytes, unsigned mask);
void pan_block_add_successor(pan_block *block, pan_block *successor);
/* NIR passes to do some backend-specific lowering */
#define PAN_WRITEOUT_C 1