aco: move try_optimize_branching_sequence() to postRA optimizations

Totals from 196 (0.25% of 79206) affected shaders: (Navi31)

Instrs: 534343 -> 534438 (+0.02%); split: -0.00%, +0.02%
CodeSize: 2774852 -> 2775420 (+0.02%); split: -0.00%, +0.02%
Latency: 7103512 -> 7103021 (-0.01%); split: -0.01%, +0.00%
InvThroughput: 959477 -> 959447 (-0.00%)
Copies: 42646 -> 42648 (+0.00%)
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32330>
Daniel Schürmann 2024-11-01 09:53:23 +01:00 committed by Marge Bot
parent 95d44c7ce0
commit fcd94a8ca7


@@ -819,6 +819,340 @@ try_convert_fma_to_vop2(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
instr->format = Format::VOP2;
}

bool
instr_overwrites(Instruction* instr, PhysReg reg, unsigned size)
{
for (Definition def : instr->definitions) {
if (def.physReg() + def.size() > reg && reg + size > def.physReg())
return true;
}
if (instr->isPseudo() && instr->pseudo().needs_scratch_reg) {
PhysReg scratch_reg = instr->pseudo().scratch_sgpr;
if (scratch_reg >= reg && reg + size > scratch_reg)
return true;
}
return false;
}
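
The guard above is the standard half-open interval test: [reg, reg + size) clashes with a definition [def, def + def.size()) exactly when each range starts before the other one ends. A minimal standalone sketch of the same check, using plain unsigneds instead of PhysReg (names are illustrative, not from ACO):

#include <cassert>

static bool
ranges_overlap(unsigned a, unsigned a_size, unsigned b, unsigned b_size)
{
   /* Half-open intervals [a, a + a_size) and [b, b + b_size) overlap
    * iff each one starts before the other one ends. */
   return a + a_size > b && b + b_size > a;
}

int
main()
{
   assert(ranges_overlap(0, 2, 1, 1));  /* s[0:1] vs s1: overlap */
   assert(!ranges_overlap(0, 2, 2, 2)); /* s[0:1] vs s[2:3]: disjoint */
   return 0;
}
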
bool
try_insert_saveexec_out_of_loop(pr_opt_ctx& ctx, Block* block, Definition saved_exec,
unsigned saveexec_pos)
{
/* This pattern can be created by try_optimize_branching_sequence:
* BB1: // loop-header
* ... // nothing that clobbers s[0:1] or writes exec
* s[0:1] = p_parallelcopy exec // we will move this
* exec = v_cmpx_...
* p_branch_z exec BB3, BB2
* BB2:
* ...
* p_branch BB3
* BB3:
* exec = p_parallelcopy s[0:1] // exec and s[0:1] contain the same mask
* ... // nothing that clobbers s[0:1] or writes exec
* p_branch_nz scc BB1, BB4
* BB4:
* ...
*
* If we know that the exec copy in the loop header is only needed in the
* first iteration, it can be hoisted into the preheader by adding a phi:
*
* BB1: // loop-header
* s[0:1] = p_linear_phi exec, s[0:1]
*
* which will be lowered to a parallelcopy in the loop preheader.
*/
if (block->linear_preds.size() != 2)
return false;
/* Check if exec is written, or the copy's dst overwritten in the loop header. */
for (unsigned i = 0; i < saveexec_pos; i++) {
if (block->instructions[i]->writes_exec())
return false;
if (instr_overwrites(block->instructions[i].get(), saved_exec.physReg(), saved_exec.size()))
return false;
}
/* The register(s) must already contain the same value as exec in the continue block. */
Block* cont = &ctx.program->blocks[block->linear_preds[1]];
do {
for (int i = cont->instructions.size() - 1; i >= 0; i--) {
Instruction* instr = cont->instructions[i].get();
if (instr->opcode == aco_opcode::p_parallelcopy && instr->definitions.size() == 1 &&
instr->definitions[0].physReg() == exec &&
instr->operands[0].physReg() == saved_exec.physReg()) {
/* Insert after existing phis at the loop header because
* the first phi might contain a valid scratch reg if needed.
*/
auto it = std::find_if(block->instructions.begin(), block->instructions.end(),
[](aco_ptr<Instruction>& phi) { return phi && !is_phi(phi); });
Instruction* phi = create_instruction(aco_opcode::p_linear_phi, Format::PSEUDO, 2, 1);
phi->definitions[0] = saved_exec;
phi->operands[0] = Operand(exec, ctx.program->lane_mask);
phi->operands[1] = instr->operands[0];
block->instructions.emplace(it, phi);
return true;
}
if (instr->writes_exec())
return false;
if (instr_overwrites(instr, saved_exec.physReg(), saved_exec.size()))
return false;
}
} while (cont->linear_preds.size() == 1 && (cont = &ctx.program->blocks[cont->linear_preds[0]]));
return false;
}
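
The phi insertion above relies on the usual "skip the leading phis" idiom: std::find_if locates the first non-phi instruction, and emplacing before that iterator places the new phi after all existing ones. A toy sketch of the idiom with simplified stand-in types (Instr and insert_after_phis are hypothetical, not ACO's):

#include <algorithm>
#include <vector>

struct Instr {
   bool is_phi;
};

static void
insert_after_phis(std::vector<Instr>& block, const Instr& phi)
{
   /* Find the first non-phi; inserting there keeps all phis grouped
    * at the top of the block. */
   auto it = std::find_if(block.begin(), block.end(),
                          [](const Instr& i) { return !i.is_phi; });
   block.insert(it, phi);
}
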
bool
try_optimize_branching_sequence(pr_opt_ctx& ctx, aco_ptr<Instruction>& exec_copy)
{
/* Try to optimize the branching sequence at the end of a block.
*
* We are looking for blocks that look like this:
*
* BB:
* ... instructions ...
* s[N:M] = <exec_val instruction>
* ... other instructions that don't depend on exec ...
* p_logical_end
* exec = <exec_copy instruction> s[N:M]
* p_cbranch exec
*
* The main motivation is to eliminate exec_copy.
* Depending on the context, we try to do the following:
*
* 1. Reassign exec_val to write exec directly
* 2. If possible, eliminate exec_copy
* 3. When exec_copy also saves the old exec mask, insert a
* new copy instruction before exec_val
* 4. Reassign any instruction that used s[N:M] to use exec
*
* This is beneficial for the following reasons:
*
* - Fewer instructions in the block when exec_copy can be eliminated
* - As a result, when exec_val is VOPC this also improves the stalls
* due to SALU waiting for VALU. This works best when we can also
* remove the branching instruction, in which case the stall
* is entirely eliminated.
* - When exec_copy can't be removed, the reassignment may still be
* very slightly beneficial to latency.
*/
if (!exec_copy->writes_exec())
return false;
const aco_opcode and_saveexec = ctx.program->lane_mask == s2 ? aco_opcode::s_and_saveexec_b64
: aco_opcode::s_and_saveexec_b32;
const aco_opcode s_and =
ctx.program->lane_mask == s2 ? aco_opcode::s_and_b64 : aco_opcode::s_and_b32;
if (exec_copy->opcode != and_saveexec && exec_copy->opcode != aco_opcode::p_parallelcopy &&
(exec_copy->opcode != s_and || exec_copy->operands[1].physReg() != exec))
return false;
/* The SCC def of s_and/s_and_saveexec must be unused. */
if (exec_copy->opcode != aco_opcode::p_parallelcopy && !exec_copy->definitions[1].isKill())
return false;
Idx exec_val_idx = last_writer_idx(ctx, exec_copy->operands[0]);
if (!exec_val_idx.found() || exec_val_idx.block != ctx.current_block->index)
return false;
if (is_overwritten_since(ctx, exec, ctx.program->lane_mask, exec_val_idx)) {
// TODO: in case nothing needs the previous exec mask, just remove it
return false;
}
Instruction* exec_val = ctx.get(exec_val_idx);
/* Instructions with multiple definitions are only allowed if they are SALU. */
if (!exec_val->isSALU() && exec_val->definitions.size() > 1)
return false;
const bool vcmpx_exec_only = ctx.program->gfx_level >= GFX10;
/* Check if a suitable v_cmpx opcode exists. */
const aco_opcode v_cmpx_op =
exec_val->isVOPC() ? get_vcmpx(exec_val->opcode) : aco_opcode::num_opcodes;
const bool vopc = v_cmpx_op != aco_opcode::num_opcodes;
/* V_CMPX+DPP returns 0 for reads from disabled lanes, unlike V_CMP+DPP (RDNA3 ISA doc, 7.7) */
if (vopc && exec_val->isDPP())
return false;
/* If s_and_saveexec is used, we'll need to insert a new instruction to save the old exec. */
bool save_original_exec =
exec_copy->opcode == and_saveexec && !exec_copy->definitions[0].isKill();
const Definition exec_wr_def = exec_val->definitions[0];
const Definition exec_copy_def = exec_copy->definitions[0];
/* The copy can be removed when it kills its operand.
* Pre-GFX10, v_cmpx also writes the original destination, making the copy redundant.
*/
const bool can_remove_copy = exec_copy->operands[0].isKill() || (vopc && !vcmpx_exec_only);
/* Always allow reassigning when the value is written by (usable) VOPC.
* Note, VOPC implicitly contains "& exec" because it yields zero on inactive lanes.
* Additionally, when the value is copied as-is, also allow SALU and parallelcopies.
*/
const bool can_reassign =
vopc || (exec_copy->opcode == aco_opcode::p_parallelcopy &&
(exec_val->isSALU() || exec_val->opcode == aco_opcode::p_parallelcopy ||
exec_val->opcode == aco_opcode::p_create_vector));
/* The reassignment is not worth it when both the original exec needs to be copied
* and the new exec copy can't be removed. In this case we'd end up with more instructions.
*/
if (!can_reassign || (save_original_exec && !can_remove_copy))
return false;
/* Ensure that nothing needs a previous exec between exec_val_idx and the current exec write. */
for (unsigned i = exec_val_idx.instr + 1; i < ctx.current_instr_idx; i++) {
Instruction* instr = ctx.current_block->instructions[i].get();
if (instr && needs_exec_mask(instr))
return false;
/* If the successor has phis, copies might have to be inserted at p_logical_end. */
if (instr && instr->opcode == aco_opcode::p_logical_end &&
ctx.current_block->logical_succs.size() == 1)
return false;
}
/* When exec_val and exec_copy are non-adjacent, check whether there are any
* instructions in between (besides p_logical_end) which may inhibit the optimization.
*/
if (save_original_exec) {
if (is_overwritten_since(ctx, exec_copy_def, exec_val_idx))
return false;
unsigned prev_wr_idx = ctx.current_instr_idx;
if (exec_copy->operands[0].physReg() == exec_copy_def.physReg()) {
/* We'd overwrite the saved original exec */
if (vopc && !vcmpx_exec_only)
return false;
/* Other instructions can use exec directly, so only check exec_val instr */
prev_wr_idx = exec_val_idx.instr + 1;
}
/* Make sure that nothing else needs these registers in-between. */
for (unsigned i = exec_val_idx.instr; i < prev_wr_idx; i++) {
if (ctx.current_block->instructions[i]) {
for (const Operand op : ctx.current_block->instructions[i]->operands) {
if (op.physReg() + op.size() > exec_copy_def.physReg() &&
exec_copy_def.physReg() + exec_copy_def.size() > op.physReg())
return false;
}
}
}
}
/* Reassign the instruction to write exec directly. */
if (vopc) {
/* Add one extra definition for exec and copy the VOP3-specific fields if present. */
if (!vcmpx_exec_only) {
if (exec_val->isSDWA()) {
/* This might work but it needs testing and more code to copy the instruction. */
return false;
} else {
Instruction* tmp =
create_instruction(v_cmpx_op, exec_val->format, exec_val->operands.size(),
exec_val->definitions.size() + 1);
std::copy(exec_val->operands.cbegin(), exec_val->operands.cend(),
tmp->operands.begin());
std::copy(exec_val->definitions.cbegin(), exec_val->definitions.cend(),
tmp->definitions.begin());
VALU_instruction& src = exec_val->valu();
VALU_instruction& dst = tmp->valu();
dst.opsel = src.opsel;
dst.omod = src.omod;
dst.clamp = src.clamp;
dst.neg = src.neg;
dst.abs = src.abs;
ctx.current_block->instructions[exec_val_idx.instr].reset(tmp);
exec_val = ctx.get(exec_val_idx);
}
}
/* Set v_cmpx opcode. */
exec_val->opcode = v_cmpx_op;
exec_val->definitions.back() = Definition(exec, ctx.program->lane_mask);
/* Change instruction from VOP3 to plain VOPC when possible. */
if (vcmpx_exec_only && !exec_val->usesModifiers() &&
(exec_val->operands.size() < 2 || exec_val->operands[1].isOfType(RegType::vgpr)))
exec_val->format = Format::VOPC;
} else {
exec_val->definitions[0] = Definition(exec, ctx.program->lane_mask);
}
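/* exec is now written by the instruction that previously wrote the copy's
* source registers, so mirror those entries in the writer-index table.
*/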
for (unsigned i = 0; i < ctx.program->lane_mask.size(); i++)
ctx.instr_idx_by_regs[ctx.current_block->index][exec + i] =
ctx.instr_idx_by_regs[ctx.current_block->index][exec_copy->operands[0].physReg() + i];
/* If there are other instructions (besides p_logical_end) between
* writing the value and copying it to exec, reassign uses
* of the old definition.
*/
Temp exec_temp = exec_copy->operands[0].getTemp();
for (unsigned i = exec_val_idx.instr + 1; i < ctx.current_instr_idx; i++) {
if (ctx.current_block->instructions[i]) {
for (Operand& op : ctx.current_block->instructions[i]->operands) {
if (op.isTemp() && op.getTemp() == exec_temp) {
op = Operand(exec, op.regClass());
ctx.uses[exec_temp.id()]--;
}
}
}
}
if (can_remove_copy) {
/* Remove the copy. */
exec_copy.reset();
ctx.uses[exec_temp.id()]--;
} else {
/* Reassign the copy to write the register of the original value. */
exec_copy.reset(create_instruction(aco_opcode::p_parallelcopy, Format::PSEUDO, 1, 1));
exec_copy->definitions[0] = exec_wr_def;
exec_copy->operands[0] = Operand(exec, ctx.program->lane_mask);
}
if (save_original_exec) {
/* Insert a new instruction that saves the original exec before it is overwritten.
* Do this last, because inserting in the instructions vector may invalidate the exec_val
* reference.
*/
if (ctx.current_block->kind & block_kind_loop_header) {
if (try_insert_saveexec_out_of_loop(ctx, ctx.current_block, exec_copy_def,
exec_val_idx.instr))
return true;
}
Instruction* copy = create_instruction(aco_opcode::p_parallelcopy, Format::PSEUDO, 1, 1);
copy->definitions[0] = exec_copy_def;
copy->operands[0] = Operand(exec, ctx.program->lane_mask);
auto it = std::next(ctx.current_block->instructions.begin(), exec_val_idx.instr);
ctx.current_block->instructions.emplace(it, copy);
/* Fixup indices after inserting an instruction. */
const unsigned current_idx = ctx.current_instr_idx;
for (unsigned i = exec_val_idx.instr; i < current_idx; i++) {
ctx.current_instr_idx = i;
if (ctx.current_block->instructions[i])
save_reg_writes(ctx, ctx.current_block->instructions[i]);
}
ctx.current_instr_idx = current_idx;
return true;
}
return true;
}
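
The profitability gate in the middle of the function condenses to a small predicate; a simplified restatement for illustration (worth_rewriting is a hypothetical helper, not part of the commit):

/* Saving the original exec costs one extra parallelcopy, so it is only
 * acceptable when the exec copy itself can be removed; otherwise the
 * transformed sequence would be longer than the original. */
static bool
worth_rewriting(bool can_reassign, bool save_original_exec, bool can_remove_copy)
{
   return can_reassign && !(save_original_exec && !can_remove_copy);
}
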
void
try_skip_const_branch(pr_opt_ctx& ctx, aco_ptr<Instruction>& branch)
{
@@ -851,6 +1185,8 @@ process_instruction(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
ctx.current_instr_idx++;
return;
}
if (try_optimize_branching_sequence(ctx, instr))
return;
try_apply_branch_vcc(ctx, instr);
@@ -885,8 +1221,10 @@ optimize_postRA(Program* program)
for (auto& block : program->blocks) {
ctx.reset_block(&block);
while (ctx.current_instr_idx < block.instructions.size()) {
aco_ptr<Instruction>& instr = block.instructions[ctx.current_instr_idx];
process_instruction(ctx, instr);
}
try_skip_const_branch(ctx, block.instructions.back());
}
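
The iteration change in optimize_postRA is needed because try_optimize_branching_sequence can now insert instructions into block.instructions (the save-exec copy, or a phi via try_insert_saveexec_out_of_loop); a range-for would keep iterators into that vector across the insertion, which invalidates them. A generic toy illustration of why indices survive insertion while iterators do not (not Mesa code):

#include <cstddef>
#include <vector>

static void
duplicate_positives(std::vector<int>& v)
{
   /* Safe with indices: insert() may reallocate, but i stays meaningful. */
   for (std::size_t i = 0; i < v.size(); i++) {
      if (v[i] > 0) {
         v.insert(v.begin() + i + 1, v[i]); /* duplicate after current */
         i++;                               /* skip the copy we just made */
      }
   }
}
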