v3d: Add an optimization pass for redundant flags updates.

Our exec masking introduces lots of redundant flags updates, and even
without that there will be cases where NIR comparisons on the same sources
for different reasons may generate the same comparison instruction before
the selection.

total instructions in shared programs: 6492930 -> 6460934 (-0.49%)
total uniforms in shared programs: 2117460 -> 2115106 (-0.11%)
total spills in shared programs: 4983 -> 4987 (0.08%)
total fills in shared programs: 6408 -> 6416 (0.12%)
This commit is contained in:
Eric Anholt 2019-02-22 14:26:26 -08:00
parent 3dd2001993
commit 8f065596d2
5 changed files with 143 additions and 0 deletions

View file

@@ -29,6 +29,7 @@ BROADCOM_FILES = \
compiler/vir_live_variables.c \
compiler/vir_opt_copy_propagate.c \
compiler/vir_opt_dead_code.c \
compiler/vir_opt_redundant_flags.c \
compiler/vir_opt_small_immediates.c \
compiler/vir_register_allocate.c \
compiler/vir_to_qpu.c \

View file

@@ -25,6 +25,7 @@ libbroadcom_compiler_files = files(
'vir_live_variables.c',
'vir_opt_copy_propagate.c',
'vir_opt_dead_code.c',
'vir_opt_redundant_flags.c',
'vir_opt_small_immediates.c',
'vir_register_allocate.c',
'vir_to_qpu.c',

View file

@@ -785,6 +785,7 @@ bool vir_opt_constant_folding(struct v3d_compile *c);
bool vir_opt_copy_propagate(struct v3d_compile *c);
bool vir_opt_dead_code(struct v3d_compile *c);
bool vir_opt_peephole_sf(struct v3d_compile *c);
bool vir_opt_redundant_flags(struct v3d_compile *c);
bool vir_opt_small_immediates(struct v3d_compile *c);
bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);

View file

@@ -1014,6 +1014,7 @@ vir_optimize(struct v3d_compile *c)
bool progress = false;
OPTPASS(vir_opt_copy_propagate);
OPTPASS(vir_opt_redundant_flags);
OPTPASS(vir_opt_dead_code);
OPTPASS(vir_opt_small_immediates);

View file

@@ -0,0 +1,139 @@
/*
* Copyright © 2019 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
* @file vir_opt_redundant_flags.c
*
* This eliminates the APF/MPF flags for redundant flags updates. These are
* often produced by our channel masking in nonuniform control flow.
*/
#include "v3d_compiler.h"
static bool debug;
/*
 * Strips the APF/MPF flags-write fields from an ALU instruction, leaving
 * the rest of the instruction (including its result write) intact.
 * Logs the victim instruction to stderr when debug is enabled.
 */
static void
vir_dce_pf(struct v3d_compile *c, struct qinst *inst)
{
        /* Only ALU instructions carry apf/mpf fields. */
        assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);

        if (debug) {
                fprintf(stderr, "Removing flags write from: ");
                vir_dump_inst(c, inst);
                fprintf(stderr, "\n");
        }

        inst->qpu.flags.apf = V3D_QPU_PF_NONE;
        inst->qpu.flags.mpf = V3D_QPU_PF_NONE;
}
/*
 * Returns true if executing @write may change the value of any source
 * read by @srcs, invalidating a cached flags computation.
 *
 * Temporaries are tracked precisely by index.  Any source that is neither
 * a temp nor a small immediate (i.e. a magic register) is conservatively
 * treated as clobbered by every instruction.
 */
static bool
vir_sources_modified(struct qinst *srcs, struct qinst *write)
{
        int nsrc = vir_get_nsrc(srcs);

        for (int i = 0; i < nsrc; i++) {
                if (srcs->src[i].file == QFILE_TEMP) {
                        if (write->dst.file == QFILE_TEMP &&
                            write->dst.index == srcs->src[i].index) {
                                return true;
                        }
                } else if (srcs->src[i].file != QFILE_SMALL_IMM) {
                        /* Assume magic regs may be modified by basically
                         * anything.
                         */
                        return true;
                }
        }

        return false;
}
/*
 * Returns true if @a and @b would compute identical APF/MPF flags:
 * same sources, same add/mul opcodes and pack/unpack modes, and the
 * same flags-condition fields.
 */
static bool
vir_instr_flags_op_equal(struct qinst *a, struct qinst *b)
{
        int nsrc = vir_get_nsrc(a);

        /* All sources must match file and index exactly. */
        for (int i = 0; i < nsrc; i++) {
                if (a->src[i].file != b->src[i].file)
                        return false;
                if (a->src[i].index != b->src[i].index)
                        return false;
        }

        return (a->qpu.flags.apf == b->qpu.flags.apf &&
                a->qpu.flags.mpf == b->qpu.flags.mpf &&
                a->qpu.alu.add.op == b->qpu.alu.add.op &&
                a->qpu.alu.mul.op == b->qpu.alu.mul.op &&
                a->qpu.alu.add.a_unpack == b->qpu.alu.add.a_unpack &&
                a->qpu.alu.add.b_unpack == b->qpu.alu.add.b_unpack &&
                a->qpu.alu.add.output_pack == b->qpu.alu.add.output_pack &&
                a->qpu.alu.mul.a_unpack == b->qpu.alu.mul.a_unpack &&
                a->qpu.alu.mul.b_unpack == b->qpu.alu.mul.b_unpack &&
                a->qpu.alu.mul.output_pack == b->qpu.alu.mul.output_pack);
}
/*
 * Walks one basic block tracking the most recent instruction whose
 * APF/MPF flags result is still valid, and strips the flags write from
 * any later instruction that would compute identical flags.
 *
 * Returns true if any flags write was removed.
 */
static bool
vir_opt_redundant_flags_block(struct v3d_compile *c, struct qblock *block)
{
        struct qinst *last_flags = NULL;
        bool progress = false;

        vir_for_each_inst(inst, block) {
                /* Give up on non-ALU instructions, and on instructions
                 * with AUF/MUF update-flags conditions, which combine
                 * with prior flags state this pass doesn't model.
                 *
                 * BUG FIX: the second clause previously re-tested .auf,
                 * leaving .muf unchecked, so a MUF-updating instruction
                 * didn't invalidate last_flags and a live flags write
                 * could be incorrectly removed.
                 */
                if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
                    inst->qpu.flags.auf != V3D_QPU_UF_NONE ||
                    inst->qpu.flags.muf != V3D_QPU_UF_NONE) {
                        last_flags = NULL;
                        continue;
                }

                if (inst->qpu.flags.apf != V3D_QPU_PF_NONE ||
                    inst->qpu.flags.mpf != V3D_QPU_PF_NONE) {
                        if (last_flags &&
                            vir_instr_flags_op_equal(inst, last_flags)) {
                                /* Same flags computation as last_flags:
                                 * the write is redundant.
                                 */
                                vir_dce_pf(c, inst);
                                progress = true;
                        } else {
                                last_flags = inst;
                        }
                }

                /* Any write clobbering a source of last_flags means its
                 * flags can no longer be recomputed for comparison.
                 */
                if (last_flags && vir_sources_modified(last_flags, inst)) {
                        last_flags = NULL;
                }
        }

        return progress;
}
/*
 * Pass entry point: runs redundant-flags elimination over every block
 * in the program.  Returns true if any block made progress.
 */
bool
vir_opt_redundant_flags(struct v3d_compile *c)
{
        bool progress = false;

        vir_for_each_block(block, c) {
                if (vir_opt_redundant_flags_block(c, block))
                        progress = true;
        }

        return progress;
}