mesa/src/intel/compiler/brw_fs_opt.cpp
Ian Romanick 5dfea87623 brw/opt: Always do both kinds of copy propagation before lower_load_payload
shader-db:

All Intel platforms except Skylake had similar results. (Lunar Lake shown)
total instructions in shared programs: 18092932 -> 18092713 (<.01%)
instructions in affected programs: 139290 -> 139071 (-0.16%)
helped: 103
HURT: 18
helped stats (abs) min: 1 max: 8 x̄: 2.43 x̃: 2
helped stats (rel) min: 0.02% max: 9.09% x̄: 0.73% x̃: 0.29%
HURT stats (abs)   min: 1 max: 5 x̄: 1.72 x̃: 1
HURT stats (rel)   min: 0.02% max: 0.55% x̄: 0.10% x̃: 0.08%
95% mean confidence interval for instructions value: -2.17 -1.45
95% mean confidence interval for instructions %-change: -0.83% -0.38%
Instructions are helped.

total cycles in shared programs: 922792268 -> 921495900 (-0.14%)
cycles in affected programs: 400296984 -> 399000616 (-0.32%)
helped: 765
HURT: 635
helped stats (abs) min: 2 max: 77018 x̄: 6739.33 x̃: 60
helped stats (rel) min: <.01% max: 35.59% x̄: 1.98% x̃: 0.32%
HURT stats (abs)   min: 2 max: 88658 x̄: 6077.51 x̃: 152
HURT stats (rel)   min: <.01% max: 51.33% x̄: 2.75% x̃: 0.63%
95% mean confidence interval for cycles value: -1620.41 -231.54
95% mean confidence interval for cycles %-change: -0.10% 0.44%
Inconclusive result (%-change mean confidence interval includes 0).

LOST:   4
GAINED: 3

Skylake
total instructions in shared programs: 18658324 -> 18579715 (-0.42%)
instructions in affected programs: 2089957 -> 2011348 (-3.76%)
helped: 9842
HURT: 23
helped stats (abs) min: 1 max: 24 x̄: 7.99 x̃: 8
helped stats (rel) min: 0.05% max: 40.00% x̄: 5.37% x̃: 4.52%
HURT stats (abs)   min: 1 max: 5 x̄: 1.57 x̃: 1
HURT stats (rel)   min: 0.02% max: 1.28% x̄: 0.36% x̃: 0.24%
95% mean confidence interval for instructions value: -7.98 -7.95
95% mean confidence interval for instructions %-change: -5.43% -5.29%
Instructions are helped.

total cycles in shared programs: 860031654 -> 860237548 (0.02%)
cycles in affected programs: 449175235 -> 449381129 (0.05%)
helped: 7895
HURT: 4416
helped stats (abs) min: 1 max: 14129 x̄: 113.70 x̃: 22
helped stats (rel) min: <.01% max: 40.95% x̄: 1.31% x̃: 0.56%
HURT stats (abs)   min: 1 max: 33397 x̄: 249.89 x̃: 34
HURT stats (rel)   min: <.01% max: 67.47% x̄: 2.65% x̃: 0.65%
95% mean confidence interval for cycles value: 1.46 31.98
95% mean confidence interval for cycles %-change: 0.02% 0.19%
Cycles are HURT.

LOST:   557
GAINED: 900

fossil-db:

Lunar Lake
Totals:
Instrs: 141933621 -> 141884681 (-0.03%); split: -0.03%, +0.00%
Cycle count: 21990657282 -> 21990200212 (-0.00%); split: -0.14%, +0.14%
Spill count: 69754 -> 69732 (-0.03%); split: -0.05%, +0.02%
Fill count: 128559 -> 128521 (-0.03%); split: -0.05%, +0.02%
Scratch Memory Size: 5934080 -> 5925888 (-0.14%)
Max live registers: 48021653 -> 48051253 (+0.06%); split: -0.00%, +0.06%

Totals from 13510 (2.45% of 551410) affected shaders:
Instrs: 19497180 -> 19448240 (-0.25%); split: -0.25%, +0.00%
Cycle count: 2455370202 -> 2454913132 (-0.02%); split: -1.25%, +1.23%
Spill count: 10975 -> 10953 (-0.20%); split: -0.32%, +0.12%
Fill count: 21709 -> 21671 (-0.18%); split: -0.28%, +0.10%
Scratch Memory Size: 674816 -> 666624 (-1.21%)
Max live registers: 2502653 -> 2532253 (+1.18%); split: -0.01%, +1.19%

Meteor Lake and DG2 had similar results. (Meteor Lake shown)
Totals:
Instrs: 152763523 -> 152772716 (+0.01%); split: -0.00%, +0.01%
Cycle count: 17188701887 -> 17187510768 (-0.01%); split: -0.10%, +0.09%
Spill count: 79280 -> 79279 (-0.00%); split: -0.00%, +0.00%
Fill count: 148809 -> 148803 (-0.00%)
Max live registers: 31879240 -> 31879093 (-0.00%); split: -0.00%, +0.00%
Max dispatch width: 5559984 -> 5559712 (-0.00%); split: +0.00%, -0.01%

Totals from 20524 (3.24% of 633183) affected shaders:
Instrs: 20366964 -> 20376157 (+0.05%); split: -0.01%, +0.05%
Cycle count: 2406162382 -> 2404971263 (-0.05%); split: -0.68%, +0.63%
Spill count: 19935 -> 19934 (-0.01%); split: -0.02%, +0.01%
Fill count: 34487 -> 34481 (-0.02%)
Max live registers: 1745598 -> 1745451 (-0.01%); split: -0.01%, +0.01%
Max dispatch width: 117992 -> 117720 (-0.23%); split: +0.03%, -0.26%

Tiger Lake and Ice Lake had similar results. (Tiger Lake shown)
Totals:
Instrs: 150694108 -> 150683859 (-0.01%); split: -0.01%, +0.00%
Cycle count: 15526754059 -> 15529031079 (+0.01%); split: -0.10%, +0.12%
Max live registers: 31791599 -> 31791441 (-0.00%); split: -0.00%, +0.00%
Max dispatch width: 5569488 -> 5569296 (-0.00%); split: +0.00%, -0.01%

Totals from 15000 (2.37% of 632406) affected shaders:
Instrs: 10965577 -> 10955328 (-0.09%); split: -0.11%, +0.02%
Cycle count: 2025347115 -> 2027624135 (+0.11%); split: -0.80%, +0.91%
Max live registers: 983373 -> 983215 (-0.02%); split: -0.02%, +0.00%
Max dispatch width: 83064 -> 82872 (-0.23%); split: +0.12%, -0.35%

Skylake
Totals:
Instrs: 140588784 -> 140413758 (-0.12%); split: -0.13%, +0.00%
Cycle count: 14724286265 -> 14723402393 (-0.01%); split: -0.04%, +0.04%
Fill count: 100130 -> 100129 (-0.00%)
Max live registers: 31418029 -> 31417146 (-0.00%); split: -0.00%, +0.00%
Max dispatch width: 5513400 -> 5535192 (+0.40%); split: +0.89%, -0.49%

Totals from 39733 (6.35% of 625986) affected shaders:
Instrs: 17240737 -> 17065711 (-1.02%); split: -1.02%, +0.01%
Cycle count: 1994668203 -> 1993784331 (-0.04%); split: -0.31%, +0.27%
Fill count: 44481 -> 44480 (-0.00%)
Max live registers: 2766781 -> 2765898 (-0.03%); split: -0.03%, +0.00%
Max dispatch width: 210600 -> 232392 (+10.35%); split: +23.23%, -12.89%

Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32041>
2024-11-08 17:46:45 +00:00


/*
* Copyright © 2010 Intel Corporation
* SPDX-License-Identifier: MIT
*/
#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_fs_builder.h"
using namespace brw;
void
brw_fs_optimize(fs_visitor &s)
{
const nir_shader *nir = s.nir;
s.debug_optimizer(nir, "start", 0, 0);
/* Start by validating the shader we currently have. */
brw_fs_validate(s);
/* Track how many non-SSA registers remain at this point. */
{
const brw::def_analysis &defs = s.def_analysis.require();
s.shader_stats.non_ssa_registers_after_nir =
defs.count() - defs.ssa_count();
}
bool progress = false;
int iteration = 0;
int pass_num = 0;
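/* OPT() runs a single pass: it dumps the IR via debug_optimizer() when the
 * pass reports progress, always re-validates the shader, folds the result
 * into the outer 'progress' flag, and evaluates to whether this particular
 * pass made progress, so it can be used directly in 'if (OPT(...))'.
 */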
#define OPT(pass, ...) ({ \
pass_num++; \
bool this_progress = pass(s, ##__VA_ARGS__); \
\
if (this_progress) \
s.debug_optimizer(nir, #pass, iteration, pass_num); \
\
brw_fs_validate(s); \
\
progress = progress || this_progress; \
this_progress; \
})
s.assign_constant_locations();
OPT(brw_fs_lower_constant_loads);
if (s.compiler->lower_dpas)
OPT(brw_fs_lower_dpas);
OPT(brw_fs_opt_split_virtual_grfs);
/* Before anything else, eliminate dead code. The results of some NIR
* instructions may effectively be calculated twice: once when the
* instruction is encountered, and again when the user of that result is
* encountered. Wipe those away before algebraic optimizations and
* especially copy propagation can mix things up.
*/
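/* A made-up illustration of the pattern: if the same NIR result is
 * re-materialized at its point of use,
 *
 *    add(8) vgrf4, vgrf1, vgrf2    <- emitted when the ALU op was visited
 *    ...
 *    add(8) vgrf7, vgrf1, vgrf2    <- re-emitted for the consumer
 *
 * then the first ADD has no users left and is removed here, before later
 * passes can be confused by the duplicate.
 */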
OPT(brw_fs_opt_dead_code_eliminate);
OPT(brw_fs_opt_remove_extra_rounding_modes);
OPT(brw_fs_opt_eliminate_find_live_channel);
do {
progress = false;
pass_num = 0;
iteration++;
OPT(brw_fs_opt_algebraic);
OPT(brw_fs_opt_cse_defs);
if (!OPT(brw_fs_opt_copy_propagation_defs))
OPT(brw_fs_opt_copy_propagation);
OPT(brw_fs_opt_cmod_propagation);
OPT(brw_fs_opt_dead_code_eliminate);
OPT(brw_fs_opt_saturate_propagation);
OPT(brw_fs_opt_register_coalesce);
OPT(brw_fs_opt_compact_virtual_grfs);
} while (progress);
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_OPT_LOOP);
progress = false;
pass_num = 0;
if (OPT(brw_fs_lower_pack)) {
OPT(brw_fs_opt_register_coalesce);
OPT(brw_fs_opt_dead_code_eliminate);
}
OPT(brw_fs_lower_subgroup_ops);
OPT(brw_fs_lower_csel);
OPT(brw_fs_lower_simd_width);
OPT(brw_fs_lower_barycentrics);
OPT(brw_fs_lower_logical_sends);
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_EARLY_LOWERING);
/* After logical SEND lowering. */
if (!OPT(brw_fs_opt_copy_propagation_defs))
OPT(brw_fs_opt_copy_propagation);
/* Identify trailing zeros in the LOAD_PAYLOAD of sampler messages.
* Do this before splitting SENDs.
*/
if (OPT(brw_fs_opt_zero_samples)) {
if (!OPT(brw_fs_opt_copy_propagation_defs)) {
OPT(brw_fs_opt_copy_propagation);
}
}
OPT(brw_fs_opt_split_sends);
OPT(brw_fs_workaround_nomask_control_flow);
if (progress) {
/* Do both forms of copy propagation because it is important to
* eliminate as many cases of load_payload-of-load_payload as possible.
*/
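/* Purely illustrative sketch of the pattern being targeted:
 *
 *    load_payload vgrf4, vgrf1, vgrf2
 *    load_payload vgrf9, vgrf4, vgrf3
 *
 * In favorable cases copy propagation lets the outer payload refer to
 * vgrf1/vgrf2 directly, so lower_load_payload does not have to expand a
 * payload whose source is itself a payload into extra MOVs.
 */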
OPT(brw_fs_opt_copy_propagation_defs);
OPT(brw_fs_opt_copy_propagation);
/* Run after logical send lowering to give it a chance to CSE the
* LOAD_PAYLOAD instructions created to construct the payloads of
* e.g. texturing messages in cases where it wasn't possible to CSE the
* whole logical instruction.
*/
OPT(brw_fs_opt_cse_defs);
OPT(brw_fs_opt_register_coalesce);
OPT(brw_fs_opt_dead_code_eliminate);
}
OPT(brw_fs_opt_remove_redundant_halts);
if (OPT(brw_fs_lower_load_payload)) {
OPT(brw_fs_opt_split_virtual_grfs);
OPT(brw_fs_opt_register_coalesce);
OPT(brw_fs_lower_simd_width);
OPT(brw_fs_opt_dead_code_eliminate);
}
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_MIDDLE_LOWERING);
OPT(brw_fs_lower_alu_restrictions);
OPT(brw_fs_opt_combine_constants);
if (OPT(brw_fs_lower_integer_multiplication)) {
/* If lower_integer_multiplication made progress, it may have produced
* some 32x32-bit MULs in the process of lowering 64-bit MULs. Run it
* one more time to clean those up if they exist.
*/
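/* Schematic of why a second call helps (not the exact lowering): a 64-bit
 * multiply is built from 32-bit partial products such as lo(a)*lo(b),
 * lo(a)*hi(b), and hi(a)*lo(b), and those 32x32-bit MULs may themselves
 * need lowering on hardware that cannot do a full 32x32 multiply in one
 * instruction.
 */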
OPT(brw_fs_lower_integer_multiplication);
}
OPT(brw_fs_lower_sub_sat);
progress = false;
OPT(brw_fs_lower_derivatives);
OPT(brw_fs_lower_regioning);
/* Try both copy propagation passes. The defs one will likely not be
* able to handle everything at this point.
*/
const bool cp1 = OPT(brw_fs_opt_copy_propagation_defs);
const bool cp2 = OPT(brw_fs_opt_copy_propagation);
if (cp1 || cp2)
OPT(brw_fs_opt_combine_constants);
OPT(brw_fs_opt_dead_code_eliminate);
OPT(brw_fs_opt_register_coalesce);
if (progress)
OPT(brw_fs_lower_simd_width);
OPT(brw_fs_lower_sends_overlapping_payload);
OPT(brw_fs_lower_uniform_pull_constant_loads);
OPT(brw_fs_lower_indirect_mov);
OPT(brw_fs_lower_find_live_channel);
OPT(brw_fs_lower_load_subgroup_invocation);
brw_shader_phase_update(s, BRW_SHADER_PHASE_AFTER_LATE_LOWERING);
}
static unsigned
load_payload_sources_read_for_size(fs_inst *lp, unsigned size_read)
{
assert(lp->opcode == SHADER_OPCODE_LOAD_PAYLOAD);
assert(size_read >= lp->header_size * REG_SIZE);
unsigned i;
unsigned size = lp->header_size * REG_SIZE;
for (i = lp->header_size; size < size_read && i < lp->sources; i++)
size += lp->exec_size * brw_type_size_bytes(lp->src[i].type);
/* The size read must correspond exactly to a prefix of the sources. */
assert(size == size_read);
return i;
}
/**
* Optimize sample messages that have constant zero values for the trailing
* parameters. We can just reduce the message length for these
* instructions instead of reserving a register for it. Trailing parameters
* that aren't sent default to zero anyway. This will cause the dead code
* eliminator to remove the MOV instruction that would otherwise be emitted to
* set up the zero value.
*/
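/* Schematic example (not real output): for a sampler message whose payload
 * ends in zeros,
 *
 *    load_payload vgrf8, <header>, x, y, 0.0f
 *    send ... vgrf8, mlen 4
 *
 * the trailing zero parameter need not be sent, so mlen shrinks to 3 and
 * the MOV that materialized the zero becomes dead.
 */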
bool
brw_fs_opt_zero_samples(fs_visitor &s)
{
bool progress = false;
foreach_block_and_inst(block, fs_inst, send, s.cfg) {
if (send->opcode != SHADER_OPCODE_SEND ||
send->sfid != BRW_SFID_SAMPLER)
continue;
/* Wa_14012688258:
*
* Don't trim zeros at the end of the payload for sample operations
* in cube and cube arrays.
*/
if (send->keep_payload_trailing_zeros)
continue;
/* This pass works on SENDs before splitting. */
if (send->ex_mlen > 0)
continue;
fs_inst *lp = (fs_inst *) send->prev;
if (lp->is_head_sentinel() || lp->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
continue;
/* How much of the payload is actually read by this SEND. */
const unsigned params =
load_payload_sources_read_for_size(lp, send->mlen * REG_SIZE);
/* We don't want to remove the message header or the first parameter.
* Removing the first parameter is not allowed; see the Haswell PRM
* volume 7, page 149:
*
* "Parameter 0 is required except for the sampleinfo message, which
* has no parameter 0"
*/
const unsigned first_param_idx = lp->header_size;
unsigned zero_size = 0;
for (unsigned i = params - 1; i > first_param_idx; i--) {
if (lp->src[i].file != BAD_FILE && !lp->src[i].is_zero())
break;
zero_size += lp->exec_size * brw_type_size_bytes(lp->src[i].type) * lp->dst.stride;
}
/* Round down so that only full registers are considered. */
const unsigned zero_len = ROUND_DOWN_TO(zero_size / REG_SIZE, reg_unit(s.devinfo));
if (zero_len > 0) {
/* Note mlen is in REG_SIZE units. */
send->mlen -= zero_len;
progress = true;
}
}
if (progress)
s.invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);
return progress;
}
/**
* Opportunistically split SEND message payloads.
*
* Gfx9+ supports "split" SEND messages, which take two payloads that are
* implicitly concatenated. If we find a SEND message with a single payload,
* we can split that payload in two. This results in smaller contiguous
* register blocks for us to allocate. But it can help beyond that, too.
*
* We try to split a LOAD_PAYLOAD between sources that change registers.
* For example, a sampler message often contains an x/y/z coordinate that may
* already be in a contiguous VGRF, combined with an LOD, shadow comparator,
* or array index, which comes from elsewhere. In this case, the first few
* sources will be different offsets of the same VGRF, then a later source
* will be a different VGRF. So we split there, possibly eliminating the
* payload concatenation altogether.
*/
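/* Schematic example of the split (illustrative only): a payload whose
 * leading sources come from one VGRF and whose last source comes from
 * another,
 *
 *    load_payload vgrf9, vgrf2+0, vgrf2+1, vgrf5
 *    send ... vgrf9, mlen 3
 *
 * becomes two smaller payloads passed as the SEND's two data sources,
 * roughly mlen 2 plus ex_mlen 1, so the register allocator only needs to
 * find two smaller contiguous blocks.
 */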
bool
brw_fs_opt_split_sends(fs_visitor &s)
{
bool progress = false;
foreach_block_and_inst(block, fs_inst, send, s.cfg) {
if (send->opcode != SHADER_OPCODE_SEND ||
send->mlen <= reg_unit(s.devinfo) || send->ex_mlen > 0 ||
send->src[2].file != VGRF)
continue;
/* Currently don't split sends that reuse a previously used payload. */
fs_inst *lp = (fs_inst *) send->prev;
if (lp->is_head_sentinel() || lp->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
continue;
if (lp->dst.file != send->src[2].file || lp->dst.nr != send->src[2].nr)
continue;
/* Split either after the header (if present), or when consecutive
* sources switch from one VGRF to a different one.
*/
unsigned mid = lp->header_size;
if (mid == 0) {
for (mid = 1; mid < lp->sources; mid++) {
if (lp->src[mid].file == BAD_FILE)
continue;
if (lp->src[0].file != lp->src[mid].file ||
lp->src[0].nr != lp->src[mid].nr)
break;
}
}
/* SEND mlen might be smaller than what LOAD_PAYLOAD provides, so
* find out how many sources from the payload it really needs.
*/
const unsigned end =
load_payload_sources_read_for_size(lp, send->mlen * REG_SIZE);
/* Nothing to split. */
if (end <= mid)
continue;
const fs_builder ibld(&s, block, lp);
fs_inst *lp1 = ibld.LOAD_PAYLOAD(lp->dst, &lp->src[0], mid, lp->header_size);
fs_inst *lp2 = ibld.LOAD_PAYLOAD(lp->dst, &lp->src[mid], end - mid, 0);
assert(lp1->size_written % REG_SIZE == 0);
assert(lp2->size_written % REG_SIZE == 0);
assert((lp1->size_written + lp2->size_written) / REG_SIZE == send->mlen);
lp1->dst = brw_vgrf(s.alloc.allocate(lp1->size_written / REG_SIZE), lp1->dst.type);
lp2->dst = brw_vgrf(s.alloc.allocate(lp2->size_written / REG_SIZE), lp2->dst.type);
send->resize_sources(4);
send->src[2] = lp1->dst;
send->src[3] = lp2->dst;
send->ex_mlen = lp2->size_written / REG_SIZE;
send->mlen -= send->ex_mlen;
progress = true;
}
if (progress)
s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
return progress;
}
/**
* Remove redundant or useless halts.
*
* For example, we can eliminate halts in the following sequence:
*
* halt (redundant with the next halt)
* halt (useless; jumps to the next instruction)
* halt-target
*/
bool
brw_fs_opt_remove_redundant_halts(fs_visitor &s)
{
bool progress = false;
unsigned halt_count = 0;
fs_inst *halt_target = NULL;
bblock_t *halt_target_block = NULL;
foreach_block_and_inst(block, fs_inst, inst, s.cfg) {
if (inst->opcode == BRW_OPCODE_HALT)
halt_count++;
if (inst->opcode == SHADER_OPCODE_HALT_TARGET) {
halt_target = inst;
halt_target_block = block;
break;
}
}
if (!halt_target) {
assert(halt_count == 0);
return false;
}
/* Delete any HALTs immediately before the halt target. */
for (fs_inst *prev = (fs_inst *) halt_target->prev;
!prev->is_head_sentinel() && prev->opcode == BRW_OPCODE_HALT;
prev = (fs_inst *) halt_target->prev) {
prev->remove(halt_target_block);
halt_count--;
progress = true;
}
if (halt_count == 0) {
halt_target->remove(halt_target_block);
progress = true;
}
if (progress)
s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
return progress;
}
/**
* Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
* flow. We could probably do better here with some form of divergence
* analysis.
*/
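/* Schematic illustration: outside control flow, channel 0 is known to be
 * live (given packed dispatch), so
 *
 *    find_live_channel dst
 *
 * degenerates to 'mov dst, 0' with force_writemask_all, and a BROADCAST
 * immediately after it that picks channel 'dst' likewise becomes a plain
 * MOV of component 0 of its source.
 */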
bool
brw_fs_opt_eliminate_find_live_channel(fs_visitor &s)
{
bool progress = false;
unsigned depth = 0;
if (!brw_stage_has_packed_dispatch(s.devinfo, s.stage, s.max_polygons,
s.prog_data)) {
/* The optimization below assumes that channel zero is live on thread
* dispatch, which may not be the case if the fixed function dispatches
* threads sparsely.
*/
return false;
}
foreach_block_and_inst_safe(block, fs_inst, inst, s.cfg) {
switch (inst->opcode) {
case BRW_OPCODE_IF:
case BRW_OPCODE_DO:
depth++;
break;
case BRW_OPCODE_ENDIF:
case BRW_OPCODE_WHILE:
depth--;
break;
case BRW_OPCODE_HALT:
/* This can potentially make control flow non-uniform until the end
* of the program.
*/
goto out;
case SHADER_OPCODE_FIND_LIVE_CHANNEL:
if (depth == 0) {
inst->opcode = BRW_OPCODE_MOV;
inst->src[0] = brw_imm_ud(0u);
inst->sources = 1;
inst->force_writemask_all = true;
progress = true;
/* emit_uniformize() frequently emits FIND_LIVE_CHANNEL paired
* with a BROADCAST. Save some work for opt_copy_propagation
* and opt_algebraic by trivially cleaning up both together.
*/
assert(!inst->next->is_tail_sentinel());
fs_inst *bcast = (fs_inst *) inst->next;
/* Ignore stride when comparing */
if (bcast->opcode == SHADER_OPCODE_BROADCAST &&
inst->dst.file == VGRF &&
inst->dst.file == bcast->src[1].file &&
inst->dst.nr == bcast->src[1].nr &&
inst->dst.offset == bcast->src[1].offset) {
bcast->opcode = BRW_OPCODE_MOV;
if (!is_uniform(bcast->src[0]))
bcast->src[0] = component(bcast->src[0], 0);
bcast->sources = 1;
bcast->force_writemask_all = true;
}
}
break;
default:
break;
}
}
out:
if (progress)
s.invalidate_analysis(DEPENDENCY_INSTRUCTION_DETAIL);
return progress;
}
/**
* Rounding modes for conversion instructions are emitted for each
* conversion, but the rounding mode is really a piece of thread state.
* Once it has been set, it does not need to be set again for subsequent
* conversions that use the same mode.
*
* This is useful for vector/matrix conversions, as setting the mode once
* is enough for the whole vector or matrix.
*/
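/* Schematic example: converting a vec4 with RTE rounding can emit one
 * RND_MODE before every component's conversion,
 *
 *    rnd_mode rtne; cvt ...; rnd_mode rtne; cvt ...; ...
 *
 * but since the mode is thread state, every RND_MODE after the first that
 * does not actually change the mode is removed here.
 */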
bool
brw_fs_opt_remove_extra_rounding_modes(fs_visitor &s)
{
bool progress = false;
unsigned execution_mode = s.nir->info.float_controls_execution_mode;
brw_rnd_mode base_mode = BRW_RND_MODE_UNSPECIFIED;
if ((FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 |
FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 |
FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64) &
execution_mode)
base_mode = BRW_RND_MODE_RTNE;
if ((FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 |
FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 |
FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64) &
execution_mode)
base_mode = BRW_RND_MODE_RTZ;
foreach_block (block, s.cfg) {
brw_rnd_mode prev_mode = base_mode;
foreach_inst_in_block_safe (fs_inst, inst, block) {
if (inst->opcode == SHADER_OPCODE_RND_MODE) {
assert(inst->src[0].file == IMM);
const brw_rnd_mode mode = (brw_rnd_mode) inst->src[0].d;
if (mode == prev_mode) {
inst->remove(block);
progress = true;
} else {
prev_mode = mode;
}
}
}
}
if (progress)
s.invalidate_analysis(DEPENDENCY_INSTRUCTIONS);
return progress;
}