pan/midgard: Tag SSA/reg
Rather than putting registers after SSA in the MIR indexing, put them side-by-side, shifted left by 1, using the bottom bit as the SSA/reg select. This will allow us to generate SSA temps in the compiler.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
parent b0626c1f30
commit 9beb3391b5

7 changed files with 28 additions and 18 deletions
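The scheme is simple: both NIR SSA values and NIR registers are shifted left by one into a single MIR namespace, and the bottom bit selects register (1) or SSA (0). A minimal sketch of the encoding follows; the helper names are illustrative only and do not exist in the tree, but the arithmetic mirrors the IS_REG define and the nir_src_index/nir_dest_index changes in the hunks below.

#include <stdbool.h>

#define IS_REG (1)

/* NIR SSA value -> even MIR index */
static inline unsigned
mir_index_from_ssa(unsigned ssa_index)
{
        return (ssa_index << 1) | 0;
}

/* NIR register -> odd MIR index */
static inline unsigned
mir_index_from_reg(unsigned reg_index)
{
        return (reg_index << 1) | IS_REG;
}

/* Recover the NIR-side index and whether it named a register */
static inline unsigned
mir_index_to_nir(unsigned mir_index, bool *is_reg)
{
        *is_reg = mir_index & IS_REG;
        return mir_index >> 1;
}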
@@ -370,6 +370,16 @@ mir_is_alu_bundle(midgard_bundle *bundle)
         return IS_ALU(bundle->tag);
 }
 
+/* Registers/SSA are distinguish in the backend by the bottom-most bit */
+
+#define IS_REG (1)
+
+static inline unsigned
+make_compiler_temp(compiler_context *ctx)
+{
+        return (ctx->func->impl->ssa_alloc + ctx->temp_alloc++) << 1;
+}
+
 /* MIR manipulation */
 
 void mir_rewrite_index(compiler_context *ctx, unsigned old, unsigned new);
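Worked numbers, assuming a hypothetical shader with ssa_alloc = 4 SSA defs and two NIR registers: the SSA defs map to the even MIR indices 0, 2, 4, 6, the registers to the odd indices 1 and 3, and the first make_compiler_temp() result is 8, which is even and past the shifted SSA range, so compiler temps collide with neither class. A standalone check of that arithmetic (plain integers only, no compiler types):

#include <assert.h>

#define IS_REG (1)

int main(void)
{
        unsigned ssa_alloc = 4;
        unsigned temp_alloc = 0;

        /* NIR ssa_3 -> MIR index 6 (even = SSA) */
        unsigned ssa3 = (3 << 1) | 0;

        /* NIR r1 -> MIR index 3 (odd = register) */
        unsigned reg1 = (1 << 1) | IS_REG;

        /* Compiler temps start at ssa_alloc << 1 and stay even, so the
         * first temp lands at MIR index 8. */
        unsigned temp0 = (ssa_alloc + temp_alloc++) << 1;

        assert(!(ssa3 & IS_REG) && ssa3 == 6);
        assert((reg1 & IS_REG) && reg1 == 3);
        assert(!(temp0 & IS_REG) && temp0 == 8);
        return 0;
}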
@@ -177,8 +177,8 @@ quadword_size(int tag)
 #define SSA_UNUSED_1 -2
 
 #define SSA_FIXED_SHIFT 24
-#define SSA_FIXED_REGISTER(reg) ((1 + reg) << SSA_FIXED_SHIFT)
-#define SSA_REG_FROM_FIXED(reg) ((reg >> SSA_FIXED_SHIFT) - 1)
+#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
+#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
 #define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
 
 /* Swizzle support */
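Fixed hardware registers now carry the tag bit as well, which is why SSA_REG_FROM_FIXED masks it off before undoing the shift. A small self-contained check of that round trip, copying the macro definitions from the hunk above into a throwaway main() harness (the harness itself is only illustrative):

#include <assert.h>

#define IS_REG (1)
#define SSA_FIXED_SHIFT 24
#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)

int main(void)
{
        unsigned r5 = SSA_FIXED_REGISTER(5);

        /* Fixed registers sit above SSA_FIXED_MINIMUM and read as
         * "register" under the new bottom-bit test. */
        assert(r5 >= SSA_FIXED_MINIMUM);
        assert(r5 & IS_REG);

        /* The mask in SSA_REG_FROM_FIXED strips the tag bit before
         * undoing the shift, so the round trip recovers 5. */
        assert(SSA_REG_FROM_FIXED(r5) == 5);

        return 0;
}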
@@ -297,10 +297,10 @@ static unsigned
 nir_dest_index(compiler_context *ctx, nir_dest *dst)
 {
         if (dst->is_ssa)
-                return dst->ssa.index;
+                return (dst->ssa.index << 1) | 0;
         else {
                 assert(!dst->reg.indirect);
-                return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+                return (dst->reg.reg->index << 1) | IS_REG;
         }
 }
 
@@ -533,7 +533,9 @@ emit_load_const(compiler_context *ctx, nir_load_const_instr *instr)
 
         float *v = rzalloc_array(NULL, float, 4);
         nir_const_load_to_arr(v, instr, f32);
-        _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
+
+        /* Shifted for SSA, +1 for off-by-one */
+        _mesa_hash_table_u64_insert(ctx->ssa_constants, (def.index << 1) + 1, v);
 }
 
 /* Normally constants are embedded implicitly, but for I/O and such we have to
@@ -555,10 +557,10 @@ static unsigned
 nir_src_index(compiler_context *ctx, nir_src *src)
 {
         if (src->is_ssa)
-                return src->ssa->index;
+                return (src->ssa->index << 1) | 0;
         else {
                 assert(!src->reg.indirect);
-                return ctx->func->impl->ssa_alloc + src->reg.reg->index;
+                return (src->reg.reg->index << 1) | IS_REG;
         }
 }
 
@@ -2003,7 +2005,7 @@ midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
 
                 /* TODO: Registers? */
                 unsigned src = ins->ssa_args.src1;
-                if (src >= ctx->func->impl->ssa_alloc) continue;
+                if (src & IS_REG) continue;
                 assert(!mir_has_multiple_writes(ctx, src));
 
                 /* There might be a source modifier, too */
@@ -41,8 +41,8 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
 
                 if (to >= SSA_FIXED_MINIMUM) continue;
                 if (from >= SSA_FIXED_MINIMUM) continue;
-                if (to >= ctx->func->impl->ssa_alloc) continue;
-                if (from >= ctx->func->impl->ssa_alloc) continue;
+                if (to & IS_REG) continue;
+                if (from & IS_REG) continue;
 
                 /* Constant propagation is not handled here, either */
                 if (ins->ssa_args.inline_constant) continue;
@@ -64,8 +64,8 @@ midgard_opt_combine_projection(compiler_context *ctx, midgard_block *block)
                 unsigned frcp = ins->ssa_args.src1;
                 unsigned to = ins->ssa_args.dest;
 
-                if (frcp >= ctx->func->impl->ssa_alloc) continue;
-                if (to >= ctx->func->impl->ssa_alloc) continue;
+                if (frcp & IS_REG) continue;
+                if (to & IS_REG) continue;
 
                 bool frcp_found = false;
                 unsigned frcp_component = 0;
@@ -150,8 +150,8 @@ midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block)
                 unsigned vary = ins->ssa_args.src0;
                 unsigned to = ins->ssa_args.dest;
 
-                if (vary >= ctx->func->impl->ssa_alloc) continue;
-                if (to >= ctx->func->impl->ssa_alloc) continue;
+                if (vary & IS_REG) continue;
+                if (to & IS_REG) continue;
                 if (!mir_single_use(ctx, vary)) continue;
 
                 /* Check for a varying source. If we find it, we rewrite */
@@ -464,9 +464,7 @@ schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction
                         /* All of r0 has to be written out along with
                          * the branch writeout */
 
-                        unsigned node_count = ctx->func->impl->ssa_alloc + ctx->func->impl->reg_alloc;
-
-                        if (ains->writeout && !can_writeout_fragment(ctx, scheduled, index, node_count)) {
+                        if (ains->writeout && !can_writeout_fragment(ctx, scheduled, index, ctx->temp_count)) {
                                 /* We only work on full moves
                                  * at the beginning. We could
                                  * probably do better */
@@ -72,7 +72,7 @@ midgard_promote_uniforms(compiler_context *ctx, unsigned register_pressure)
                 /* We do need the move for safety for a non-SSA dest, or if
                  * we're being fed into a special class */
 
-                bool needs_move = ins->ssa_args.dest >= ctx->func->impl->ssa_alloc;
+                bool needs_move = ins->ssa_args.dest & IS_REG;
                 needs_move |= mir_special_index(ctx, ins->ssa_args.dest);
 
                 if (needs_move) {