mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-08 19:58:09 +02:00
pan/midgard: Eliminate blank_alu_src
We don't need it in practice, so this is some more cleanup.

Signed-off-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
This commit is contained in:
parent
70072a20e0
commit
c3a46e7644
6 changed files with 22 additions and 36 deletions
|
|
@ -538,18 +538,10 @@ void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block
|
|||
void mir_flip(midgard_instruction *ins);
|
||||
void mir_compute_temp_count(compiler_context *ctx);
|
||||
|
||||
/* MIR goodies */
|
||||
|
||||
static const midgard_vector_alu_src blank_alu_src = {};
|
||||
|
||||
static const midgard_scalar_alu_src blank_scalar_alu_src = {
|
||||
.full = true
|
||||
};
|
||||
|
||||
/* 'Intrinsic' move for aliasing */
|
||||
|
||||
static inline midgard_instruction
|
||||
v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
|
||||
v_mov(unsigned src, unsigned dest)
|
||||
{
|
||||
midgard_instruction ins = {
|
||||
.type = TAG_ALU_4,
|
||||
|
|
@ -561,9 +553,7 @@ v_mov(unsigned src, midgard_vector_alu_src mod, unsigned dest)
|
|||
.op = midgard_alu_op_imov,
|
||||
.reg_mode = midgard_reg_mode_32,
|
||||
.dest_override = midgard_dest_override_none,
|
||||
.outmod = midgard_outmod_int_wrap,
|
||||
.src1 = vector_alu_srco_unsigned(blank_alu_src),
|
||||
.src2 = vector_alu_srco_unsigned(mod)
|
||||
.outmod = midgard_outmod_int_wrap
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -158,14 +158,12 @@ static midgard_vector_alu_src
|
|||
vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
|
||||
bool half, bool sext)
|
||||
{
|
||||
if (!src) return blank_alu_src;
|
||||
|
||||
/* Figure out how many components there are so we can adjust.
|
||||
* Specifically we want to broadcast the last channel so things like
|
||||
* ball2/3 work.
|
||||
*/
|
||||
|
||||
if (broadcast_count) {
|
||||
if (broadcast_count && src) {
|
||||
uint8_t last_component = src->swizzle[broadcast_count - 1];
|
||||
|
||||
for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
|
||||
|
|
@ -191,9 +189,11 @@ vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
|
|||
}
|
||||
|
||||
/* These should have been lowered away */
|
||||
assert(!(src->abs || src->negate));
|
||||
if (src)
|
||||
assert(!(src->abs || src->negate));
|
||||
} else {
|
||||
alu_src.mod = (src->abs << 0) | (src->negate << 1);
|
||||
if (src)
|
||||
alu_src.mod = (src->abs << 0) | (src->negate << 1);
|
||||
}
|
||||
|
||||
return alu_src;
|
||||
|
|
@ -597,7 +597,7 @@ emit_explicit_constant(compiler_context *ctx, unsigned node, unsigned to)
|
|||
void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, node + 1);
|
||||
|
||||
if (constant_value) {
|
||||
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, to);
|
||||
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), to);
|
||||
attach_constants(ctx, &ins, constant_value, node + 1);
|
||||
emit_mir_instruction(ctx, ins);
|
||||
}
|
||||
|
|
@ -1315,7 +1315,7 @@ emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
|
|||
|
||||
if (rt != 0) {
|
||||
/* We'll write to r1.z */
|
||||
rt_move = v_mov(~0, blank_alu_src, SSA_FIXED_REGISTER(1));
|
||||
rt_move = v_mov(~0, SSA_FIXED_REGISTER(1));
|
||||
rt_move.mask = 1 << COMPONENT_Z;
|
||||
rt_move.unit = UNIT_SADD;
|
||||
|
||||
|
|
@ -1439,7 +1439,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
|
|||
/* For blend shaders, load the input color, which is
|
||||
* preloaded to r0 */
|
||||
|
||||
midgard_instruction move = v_mov(SSA_FIXED_REGISTER(0), blank_alu_src, reg);
|
||||
midgard_instruction move = v_mov(SSA_FIXED_REGISTER(0), reg);
|
||||
emit_mir_instruction(ctx, move);
|
||||
schedule_barrier(ctx);
|
||||
} else if (ctx->stage == MESA_SHADER_VERTEX) {
|
||||
|
|
@ -1491,7 +1491,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
|
|||
/* Blend constants are embedded directly in the shader and
|
||||
* patched in, so we use some magic routing */
|
||||
|
||||
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg);
|
||||
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), reg);
|
||||
ins.has_constants = true;
|
||||
ins.has_blend_constant = true;
|
||||
emit_mir_instruction(ctx, ins);
|
||||
|
|
@ -1702,13 +1702,12 @@ emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
|
|||
index = make_compiler_temp(ctx);
|
||||
|
||||
/* mov index, old_index */
|
||||
midgard_instruction mov = v_mov(old_index, blank_alu_src, index);
|
||||
midgard_instruction mov = v_mov(old_index, index);
|
||||
mov.mask = 0x3;
|
||||
emit_mir_instruction(ctx, mov);
|
||||
|
||||
/* mov index.zw, #0 */
|
||||
mov = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT),
|
||||
blank_alu_src, index);
|
||||
mov = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), index);
|
||||
mov.has_constants = true;
|
||||
mov.mask = (1 << COMPONENT_Z) | (1 << COMPONENT_W);
|
||||
emit_mir_instruction(ctx, mov);
|
||||
|
|
@ -1905,7 +1904,7 @@ inline_alu_constants(compiler_context *ctx, midgard_block *block)
|
|||
unsigned scratch = alu->dest;
|
||||
|
||||
if (entry) {
|
||||
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch);
|
||||
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
|
||||
attach_constants(ctx, &ins, entry, alu->src[1] + 1);
|
||||
|
||||
/* Set the source */
|
||||
|
|
|
|||
|
|
@ -49,9 +49,7 @@ midgard_lower_invert(compiler_context *ctx, midgard_block *block)
|
|||
/* TODO: i16 */
|
||||
.reg_mode = midgard_reg_mode_32,
|
||||
.dest_override = midgard_dest_override_none,
|
||||
.outmod = midgard_outmod_int_wrap,
|
||||
.src1 = vector_alu_srco_unsigned(blank_alu_src),
|
||||
.src2 = vector_alu_srco_unsigned(blank_alu_src)
|
||||
.outmod = midgard_outmod_int_wrap
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -474,8 +474,7 @@ mir_lower_special_reads(compiler_context *ctx)
|
|||
unsigned idx = spill_idx++;
|
||||
|
||||
midgard_instruction m = hazard_write ?
|
||||
v_mov(idx, blank_alu_src, i) :
|
||||
v_mov(i, blank_alu_src, idx);
|
||||
v_mov(idx, i) : v_mov(i, idx);
|
||||
|
||||
/* Insert move before each read/write, depending on the
|
||||
* hazard we're trying to account for */
|
||||
|
|
@ -499,7 +498,7 @@ mir_lower_special_reads(compiler_context *ctx)
|
|||
mir_rewrite_index_dst_single(pre_use, i, idx);
|
||||
} else {
|
||||
idx = spill_idx++;
|
||||
m = v_mov(i, blank_alu_src, idx);
|
||||
m = v_mov(i, idx);
|
||||
m.mask = mir_from_bytemask(mir_bytemask_of_read_components(pre_use, i), midgard_reg_mode_32);
|
||||
mir_insert_instruction_before(ctx, pre_use, m);
|
||||
mir_rewrite_index_src_single(pre_use, i, idx);
|
||||
|
|
|
|||
|
|
@ -719,7 +719,7 @@ mir_schedule_comparison(
|
|||
|
||||
/* Otherwise, we insert a move */
|
||||
|
||||
midgard_instruction mov = v_mov(cond, blank_alu_src, cond);
|
||||
midgard_instruction mov = v_mov(cond, cond);
|
||||
mov.mask = vector ? 0xF : 0x1;
|
||||
memcpy(mov.swizzle[1], swizzle, sizeof(mov.swizzle[1]));
|
||||
|
||||
|
|
@ -956,7 +956,7 @@ mir_schedule_alu(
|
|||
/* Finally, add a move if necessary */
|
||||
if (bad_writeout || writeout_mask != 0xF) {
|
||||
unsigned temp = (branch->src[0] == ~0) ? SSA_FIXED_REGISTER(0) : make_compiler_temp(ctx);
|
||||
midgard_instruction mov = v_mov(src, blank_alu_src, temp);
|
||||
midgard_instruction mov = v_mov(src, temp);
|
||||
vmul = mem_dup(&mov, sizeof(midgard_instruction));
|
||||
vmul->unit = UNIT_VMUL;
|
||||
vmul->mask = 0xF ^ writeout_mask;
|
||||
|
|
@ -1265,7 +1265,7 @@ static void mir_spill_register(
|
|||
midgard_instruction st;
|
||||
|
||||
if (is_special_w) {
|
||||
st = v_mov(spill_node, blank_alu_src, spill_slot);
|
||||
st = v_mov(spill_node, spill_slot);
|
||||
st.no_spill = true;
|
||||
} else {
|
||||
ins->dest = SSA_FIXED_REGISTER(26);
|
||||
|
|
@ -1333,7 +1333,7 @@ static void mir_spill_register(
|
|||
|
||||
if (is_special) {
|
||||
/* Move */
|
||||
st = v_mov(spill_node, blank_alu_src, consecutive_index);
|
||||
st = v_mov(spill_node, consecutive_index);
|
||||
st.no_spill = true;
|
||||
} else {
|
||||
/* TLS load */
|
||||
|
|
|
|||
|
|
@ -88,7 +88,7 @@ midgard_promote_uniforms(compiler_context *ctx, unsigned promoted_count)
|
|||
needs_move |= mir_special_index(ctx, ins->dest);
|
||||
|
||||
if (needs_move) {
|
||||
midgard_instruction mov = v_mov(promoted, blank_alu_src, ins->dest);
|
||||
midgard_instruction mov = v_mov(promoted, ins->dest);
|
||||
mov.mask = ins->mask;
|
||||
mir_insert_instruction_before(ctx, ins, mov);
|
||||
} else {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue