From 7f61ff7b4d1dfe791264148ff3cbe8b03c76aeca Mon Sep 17 00:00:00 2001
From: Arcady Goldmints-Orlov
Date: Sun, 31 Jan 2021 14:53:55 -0500
Subject: [PATCH] broadcom/compiler: Merge instructions more efficiently
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Instructions are allowed to access up to two rf registers, or one rf
register and a small immediate. This change allows qpu_merge_inst to
take full advantage of this by allowing the merging of two instructions
if they have no more than two different rf registers between them, or
one rf register and one small immediate. qpu_merge_inst rewrites the
instructions as needed to pack everything into raddr_a and raddr_b in
the merged instruction.

shader-db stats:

total instructions in shared programs: 19938769 -> 18929664 (-5.06%)
instructions in affected programs: 17929438 -> 16920333 (-5.63%)
helped: 95008
HURT: 242
helped stats (abs) min: 1 max: 785 x̄: 10.62 x̃: 7
helped stats (rel) min: 0.30% max: 21.25% x̄: 5.37% x̃: 4.98%
HURT stats (abs)   min: 1 max: 2 x̄: 1.10 x̃: 1
HURT stats (rel)   min: 0.30% max: 3.12% x̄: 1.62% x̃: 1.54%
95% mean confidence interval for instructions value: -10.67 -10.52
95% mean confidence interval for instructions %-change: -5.37% -5.33%
Instructions are helped.

total max-temps in shared programs: 3122664 -> 3112446 (-0.33%)
max-temps in affected programs: 124881 -> 114663 (-8.18%)
helped: 5445
HURT: 0
helped stats (abs) min: 1 max: 15 x̄: 1.88 x̃: 1
helped stats (rel) min: 1.49% max: 40.54% x̄: 8.97% x̃: 6.67%
95% mean confidence interval for max-temps value: -1.91 -1.84
95% mean confidence interval for max-temps %-change: -9.12% -8.81%
Max-temps are helped.

total sfu-stalls in shared programs: 38028 -> 41231 (8.42%)
sfu-stalls in affected programs: 6053 -> 9256 (52.92%)
helped: 664
HURT: 3380
helped stats (abs) min: 1 max: 2 x̄: 1.04 x̃: 1
helped stats (rel) min: 9.09% max: 100.00% x̄: 70.81% x̃: 100.00%
HURT stats (abs)   min: 1 max: 4 x̄: 1.15 x̃: 1
HURT stats (rel)   min: 0.00% max: 300.00% x̄: 46.39% x̃: 25.00%
95% mean confidence interval for sfu-stalls value: 0.76 0.82
95% mean confidence interval for sfu-stalls %-change: 25.03% 29.26%
Sfu-stalls are HURT.

total inst-and-stalls in shared programs: 19976797 -> 18970895 (-5.04%)
inst-and-stalls in affected programs: 17963129 -> 16957227 (-5.60%)
helped: 95017
HURT: 245
helped stats (abs) min: 1 max: 785 x̄: 10.59 x̃: 7
helped stats (rel) min: 0.30% max: 21.25% x̄: 5.35% x̃: 4.95%
HURT stats (abs)   min: 1 max: 2 x̄: 1.09 x̃: 1
HURT stats (rel)   min: 0.30% max: 3.12% x̄: 1.61% x̃: 1.54%
95% mean confidence interval for inst-and-stalls value: -10.64 -10.48
95% mean confidence interval for inst-and-stalls %-change: -5.35% -5.31%
Inst-and-stalls are helped.

v2 (Iago):
- moved early return for naddrs > 2 even earlier.
- only update {add,mul}.b mux if instruction has more than one operand.
- don't OR b->raddr_{a,b} if we are not merging add/mul instructions.
- don't initialize packed to 0.
- minor style fixes.
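
To illustrate the merge criterion, here is a rough standalone sketch. The
toy_instr type and the toy_* helpers below are made-up stand-ins, not the
driver's API; the real logic operates on struct v3d_qpu_instr and lives in
qpu_raddrs_used()/qpu_merge_raddrs() in the diff below, which additionally
accepts the case where both instructions carry the same small immediate.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_instr {
        bool reads_a, reads_b;    /* does the instruction read through raddr_a / raddr_b? */
        bool small_imm;           /* raddr_b carries a small immediate, not a register */
        uint8_t raddr_a, raddr_b; /* rf register numbers */
};

/* Portable popcount, standing in for util_bitcount64(). */
static int
toy_popcount64(uint64_t v)
{
        int n = 0;
        for (; v; v &= v - 1)
                n++;
        return n;
}

/* Collect every rf register the two instructions read into one 64-bit mask,
 * mirroring what qpu_raddrs_used() does on real instructions. */
static uint64_t
toy_raddrs_used(const struct toy_instr *a, const struct toy_instr *b)
{
        uint64_t used = 0;
        if (a->reads_a)                  used |= 1ull << a->raddr_a;
        if (a->reads_b && !a->small_imm) used |= 1ull << a->raddr_b;
        if (b->reads_a)                  used |= 1ull << b->raddr_a;
        if (b->reads_b && !b->small_imm) used |= 1ull << b->raddr_b;
        return used;
}

/* A merged instruction only has raddr_a and raddr_b to read from, so at most
 * two distinct registers fit, or a single register when raddr_b has to carry
 * a small immediate. */
static bool
toy_can_merge(const struct toy_instr *a, const struct toy_instr *b)
{
        int naddrs = toy_popcount64(toy_raddrs_used(a, b));

        if (a->small_imm || b->small_imm)
                return naddrs <= 1;
        return naddrs <= 2;
}

int
main(void)
{
        struct toy_instr add = { .reads_a = true, .raddr_a = 3 };
        struct toy_instr mul = { .reads_a = true, .reads_b = true,
                                 .raddr_a = 3, .raddr_b = 7 };

        /* rf3 and rf7: two distinct registers, so the pair is mergeable. */
        printf("%s\n", toy_can_merge(&add, &mul) ? "mergeable" : "not mergeable");
        return 0;
}

The same bitmask is what lets the real code later pick raddr_a and raddr_b
for the merged instruction with ffsll().
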
Reviewed-by: Iago Toral Quiroga
Part-of:
---
 src/broadcom/compiler/qpu_schedule.c | 129 ++++++++++++++++++++++++---
 1 file changed, 115 insertions(+), 14 deletions(-)

diff --git a/src/broadcom/compiler/qpu_schedule.c b/src/broadcom/compiler/qpu_schedule.c
index a637ec56dcf..10ee1a33e30 100644
--- a/src/broadcom/compiler/qpu_schedule.c
+++ b/src/broadcom/compiler/qpu_schedule.c
@@ -687,6 +687,112 @@ qpu_compatible_peripheral_access(const struct v3d_device_info *devinfo,
         return false;
 }
 
+/* Compute a bitmask of which rf registers are used between
+ * the two instructions.
+ */
+static uint64_t
+qpu_raddrs_used(const struct v3d_qpu_instr *a,
+                const struct v3d_qpu_instr *b)
+{
+        assert(a->type == V3D_QPU_INSTR_TYPE_ALU);
+        assert(b->type == V3D_QPU_INSTR_TYPE_ALU);
+
+        uint64_t raddrs_used = 0;
+        if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A))
+                raddrs_used |= (1ll << a->raddr_a);
+        if (!a->sig.small_imm && v3d_qpu_uses_mux(a, V3D_QPU_MUX_B))
+                raddrs_used |= (1ll << a->raddr_b);
+        if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A))
+                raddrs_used |= (1ll << b->raddr_a);
+        if (!b->sig.small_imm && v3d_qpu_uses_mux(b, V3D_QPU_MUX_B))
+                raddrs_used |= (1ll << b->raddr_b);
+
+        return raddrs_used;
+}
+
+/* Take two instructions and attempt to merge their raddr fields
+ * into one merged instruction. Returns false if the two instructions
+ * access more than two different rf registers between them, or more
+ * than one rf register and one small immediate.
+ */
+static bool
+qpu_merge_raddrs(struct v3d_qpu_instr *result,
+                 const struct v3d_qpu_instr *add_instr,
+                 const struct v3d_qpu_instr *mul_instr)
+{
+        uint64_t raddrs_used = qpu_raddrs_used(add_instr, mul_instr);
+        int naddrs = util_bitcount64(raddrs_used);
+
+        if (naddrs > 2)
+                return false;
+
+        if ((add_instr->sig.small_imm || mul_instr->sig.small_imm)) {
+                if (naddrs > 1)
+                        return false;
+
+                if (add_instr->sig.small_imm && mul_instr->sig.small_imm)
+                        if (add_instr->raddr_b != mul_instr->raddr_b)
+                                return false;
+
+                result->sig.small_imm = true;
+                result->raddr_b = add_instr->sig.small_imm ?
+                        add_instr->raddr_b : mul_instr->raddr_b;
+        }
+
+        if (naddrs == 0)
+                return true;
+
+        int raddr_a = ffsll(raddrs_used) - 1;
+        raddrs_used &= ~(1ll << raddr_a);
+        result->raddr_a = raddr_a;
+
+        if (!result->sig.small_imm) {
+                if (v3d_qpu_uses_mux(add_instr, V3D_QPU_MUX_B) &&
+                    raddr_a == add_instr->raddr_b) {
+                        if (add_instr->alu.add.a == V3D_QPU_MUX_B)
+                                result->alu.add.a = V3D_QPU_MUX_A;
+                        if (add_instr->alu.add.b == V3D_QPU_MUX_B &&
+                            v3d_qpu_add_op_num_src(add_instr->alu.add.op) > 1) {
+                                result->alu.add.b = V3D_QPU_MUX_A;
+                        }
+                }
+                if (v3d_qpu_uses_mux(mul_instr, V3D_QPU_MUX_B) &&
+                    raddr_a == mul_instr->raddr_b) {
+                        if (mul_instr->alu.mul.a == V3D_QPU_MUX_B)
+                                result->alu.mul.a = V3D_QPU_MUX_A;
+                        if (mul_instr->alu.mul.b == V3D_QPU_MUX_B &&
+                            v3d_qpu_mul_op_num_src(mul_instr->alu.mul.op) > 1) {
+                                result->alu.mul.b = V3D_QPU_MUX_A;
+                        }
+                }
+        }
+        if (!raddrs_used)
+                return true;
+
+        int raddr_b = ffsll(raddrs_used) - 1;
+        result->raddr_b = raddr_b;
+        if (v3d_qpu_uses_mux(add_instr, V3D_QPU_MUX_A) &&
+            raddr_b == add_instr->raddr_a) {
+                if (add_instr->alu.add.a == V3D_QPU_MUX_A)
+                        result->alu.add.a = V3D_QPU_MUX_B;
+                if (add_instr->alu.add.b == V3D_QPU_MUX_A &&
+                    v3d_qpu_add_op_num_src(add_instr->alu.add.op) > 1) {
+                        result->alu.add.b = V3D_QPU_MUX_B;
+                }
+        }
+        if (v3d_qpu_uses_mux(mul_instr, V3D_QPU_MUX_A) &&
+            raddr_b == mul_instr->raddr_a) {
+                if (mul_instr->alu.mul.a == V3D_QPU_MUX_A)
+                        result->alu.mul.a = V3D_QPU_MUX_B;
+                if (mul_instr->alu.mul.b == V3D_QPU_MUX_A &&
+                    v3d_qpu_add_op_num_src(mul_instr->alu.mul.op) > 1) {
+                        result->alu.mul.b = V3D_QPU_MUX_B;
+                }
+        }
+
+        return true;
+}
+
 static bool
 qpu_merge_inst(const struct v3d_device_info *devinfo,
                struct v3d_qpu_instr *result,
@@ -702,6 +808,7 @@ qpu_merge_inst(const struct v3d_device_info *devinfo,
                 return false;
 
         struct v3d_qpu_instr merge = *a;
+        const struct v3d_qpu_instr *add_instr = NULL, *mul_instr = NULL;
 
         if (b->alu.add.op != V3D_QPU_A_NOP) {
                 if (a->alu.add.op != V3D_QPU_A_NOP)
@@ -711,6 +818,9 @@ qpu_merge_inst(const struct v3d_device_info *devinfo,
                 merge.flags.ac = b->flags.ac;
                 merge.flags.apf = b->flags.apf;
                 merge.flags.auf = b->flags.auf;
+
+                add_instr = b;
+                mul_instr = a;
         }
 
         if (b->alu.mul.op != V3D_QPU_M_NOP) {
@@ -721,23 +831,14 @@ qpu_merge_inst(const struct v3d_device_info *devinfo,
                 merge.flags.mc = b->flags.mc;
                 merge.flags.mpf = b->flags.mpf;
                 merge.flags.muf = b->flags.muf;
+
+                mul_instr = b;
+                add_instr = a;
         }
 
-        if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
-                if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
-                    a->raddr_a != b->raddr_a) {
+        if (add_instr && mul_instr &&
+            !qpu_merge_raddrs(&merge, add_instr, mul_instr)) {
                         return false;
-                }
-                merge.raddr_a = b->raddr_a;
-        }
-
-        if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
-                if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
-                    (a->raddr_b != b->raddr_b ||
-                     a->sig.small_imm != b->sig.small_imm)) {
-                        return false;
-                }
-                merge.raddr_b = b->raddr_b;
         }
 
         merge.sig.thrsw |= b->sig.thrsw;
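
The trickiest part of qpu_merge_raddrs() above is the mux remapping: once
the distinct registers have been packed into the merged instruction's
raddr_a/raddr_b slots, an operand whose mux pointed at the other slot in its
original instruction has to be flipped so it still reads the same register.
The following is a minimal sketch of that idea only; the toy_* names are
hypothetical stand-ins, not the driver's API.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for V3D_QPU_MUX_A/B and a single ALU operand. */
enum toy_mux { TOY_MUX_A, TOY_MUX_B, TOY_MUX_R0 /* accumulators, ... */ };

struct toy_operand {
        enum toy_mux mux;         /* which source the operand selected in its
                                   * original, unmerged instruction */
        uint8_t raddr_a, raddr_b; /* that instruction's raddr fields */
};

/* Return the mux the operand must use in the merged instruction so that it
 * keeps reading the same rf register after raddr_a/raddr_b were repacked. */
static enum toy_mux
toy_remap_mux(const struct toy_operand *op,
              uint8_t merged_raddr_a, uint8_t merged_raddr_b)
{
        if (op->mux != TOY_MUX_A && op->mux != TOY_MUX_B)
                return op->mux;   /* accumulator sources need no remapping */

        uint8_t reg = (op->mux == TOY_MUX_A) ? op->raddr_a : op->raddr_b;
        if (reg == merged_raddr_a)
                return TOY_MUX_A;
        assert(reg == merged_raddr_b);
        return TOY_MUX_B;
}

The real code additionally guards the second operand with
v3d_qpu_{add,mul}_op_num_src() so that single-source ops do not get a
meaningless .b mux rewritten, as noted in the v2 changes.
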