/*
 * Copyright © 2020 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file
 *
 * Removes unused components of SSA defs.
 *
 * Due to various optimization passes (or frontend implementations,
 * particularly prog_to_nir), we may have instructions generating vectors
 * whose components don't get read by any instruction.
 *
 * For memory loads, while it can be tricky to eliminate unused low components
 * or channels in the middle of a writemask (you might need to increment some
 * offset from a load_uniform, for example), it is trivial to just drop the
 * trailing components.
 *
 * For vector ALU instructions and load_consts that are only used by other ALU
 * instructions, this pass eliminates arbitrary channels as well as duplicate
 * channels, and reswizzles the uses.
 *
 * This pass is probably only of use to vector backends -- scalar backends
 * typically get unused def channel trimming by scalarizing and dead code
 * elimination.
 */
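
/*
 * As a rough illustration (simplified NIR, not taken from a real shader),
 * where ssa_1's .zw and ssa_2's .zw are never read, the pass turns
 *
 *    vec4 ssa_1 = load_ubo(...)
 *    vec4 ssa_2 = fadd ssa_1.xyyx, ssa_3.xxxx
 *    vec1 ssa_4 = fdot2 ssa_2.xy, ssa_5.xy
 *
 * into
 *
 *    vec2 ssa_1 = load_ubo(...)
 *    vec2 ssa_2 = fadd ssa_1.xy, ssa_3.xx
 *    vec1 ssa_4 = fdot2 ssa_2.xy, ssa_5.xy
 *
 * by trimming the trailing load channels and shrinking the per-component
 * fadd; whenever surviving channels move, the uses are reswizzled to match.
 */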

#include "util/u_math.h"
#include "nir.h"
#include "nir_builder.h"

/*
 * Round up a vector size to a vector size that's valid in NIR. At present, NIR
 * supports only vec2-5, vec8, and vec16. Attempting to generate other sizes
 * will fail validation.
 */
static unsigned
round_up_components(unsigned n)
{
   return (n > 5) ? util_next_power_of_two(n) : n;
}
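
/*
 * For example (informal): round_up_components(3) == 3 and
 * round_up_components(5) == 5 are already valid sizes, while
 * round_up_components(6) == 8 and round_up_components(9) == 16 get bumped up
 * to the next power of two.
 */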

static bool
shrink_dest_to_read_mask(nir_def *def)
{
   /* early out if there's nothing to do. */
   if (def->num_components == 1)
      return false;

   /* don't remove any channels if used by an intrinsic */
   nir_foreach_use(use_src, def) {
      if (use_src->parent_instr->type == nir_instr_type_intrinsic)
         return false;
   }

   unsigned mask = nir_def_components_read(def);
   int last_bit = util_last_bit(mask);

   /* If nothing was read, leave it up to DCE. */
   if (!mask)
      return false;

   unsigned rounded = round_up_components(last_bit);
   assert(rounded <= def->num_components);
   last_bit = rounded;

   if (def->num_components > last_bit) {
      def->num_components = last_bit;
      return true;
   }

   return false;
}
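
/*
 * Illustrative sketch: for a vec4 def where only .x and .z are read, the read
 * mask is 0b0101, util_last_bit() gives 3, and the def is shrunk to a vec3.
 * Only trailing components are dropped here; the unused .y channel in the
 * middle is left alone.
 */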

static bool
shrink_intrinsic_to_non_sparse(nir_intrinsic_instr *instr)
{
   unsigned mask = nir_def_components_read(&instr->def);
   int last_bit = util_last_bit(mask);

   /* If the sparse component is used, do nothing. */
   if (last_bit == instr->def.num_components)
      return false;

   instr->def.num_components -= 1;
   instr->num_components = instr->def.num_components;

   /* Switch to the non-sparse intrinsic. */
   switch (instr->intrinsic) {
   case nir_intrinsic_image_sparse_load:
      instr->intrinsic = nir_intrinsic_image_load;
      break;
   case nir_intrinsic_bindless_image_sparse_load:
      instr->intrinsic = nir_intrinsic_bindless_image_load;
      break;
   case nir_intrinsic_image_deref_sparse_load:
      instr->intrinsic = nir_intrinsic_image_deref_load;
      break;
   default:
      break;
   }

   return true;
}
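
/*
 * For instance (a sketch, not from a real shader): a vec5 image_sparse_load
 * whose last component -- the residency code -- is never read gets demoted to
 * a plain vec4 image_load, so the backend doesn't have to emit the sparse
 * variant for nothing.
 */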

static void
reswizzle_alu_uses(nir_def *def, uint8_t *reswizzle)
{
   nir_foreach_use(use_src, def) {
      /* all uses must be ALU instructions */
      assert(use_src->parent_instr->type == nir_instr_type_alu);
      nir_alu_src *alu_src = (nir_alu_src *)use_src;

      /* reswizzle ALU sources */
      for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
         alu_src->swizzle[i] = reswizzle[alu_src->swizzle[i]];
   }
}
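
/*
 * The reswizzle table maps old channel indices to new ones. As a sketch: if
 * only channels 0 and 2 of a vec4 survive, the caller passes
 * reswizzle = { 0, 0, 1, 0 }, and a use that previously read .z is rewritten
 * to read .y; entries for dead channels are never meaningfully read.
 */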

static bool
is_only_used_by_alu(nir_def *def)
{
   nir_foreach_use(use_src, def) {
      if (use_src->parent_instr->type != nir_instr_type_alu)
         return false;
   }

   return true;
}

static bool
opt_shrink_vector(nir_builder *b, nir_alu_instr *instr)
{
   nir_def *def = &instr->def;
   unsigned mask = nir_def_components_read(def);

   /* If nothing was read, leave it up to DCE. */
   if (mask == 0)
      return false;

   /* don't remove any channels if used by non-ALU */
   if (!is_only_used_by_alu(def))
      return false;

   uint8_t reswizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
   nir_scalar srcs[NIR_MAX_VEC_COMPONENTS] = { 0 };
   unsigned num_components = 0;
   for (unsigned i = 0; i < def->num_components; i++) {
      if (!((mask >> i) & 0x1))
         continue;

      nir_scalar scalar = nir_get_scalar(instr->src[i].src.ssa, instr->src[i].swizzle[0]);

      /* Try to reuse a component with the same value */
      unsigned j;
      for (j = 0; j < num_components; j++) {
         if (scalar.def == srcs[j].def && scalar.comp == srcs[j].comp) {
            reswizzle[i] = j;
            break;
         }
      }

      /* Otherwise, just append the value */
      if (j == num_components) {
         srcs[num_components] = scalar;
         reswizzle[i] = num_components++;
      }
   }

   /* return if no component was removed */
   if (num_components == def->num_components)
      return false;

   /* create new vecN and replace uses */
   nir_def *new_vec = nir_vec_scalars(b, srcs, num_components);
   nir_def_rewrite_uses(def, new_vec);
   reswizzle_alu_uses(new_vec, reswizzle);

   return true;
}
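
/*
 * A rough example: for vec4 ssa_2 = vec4(ssa_0.x, ssa_1.y, ssa_0.x, ssa_1.z)
 * where only .x and .z are ever read, both surviving channels are the same
 * scalar ssa_0.x, so a single-component replacement is built, every reader's
 * .z is reswizzled to .x, and the original vec4 is left for DCE.
 */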

static bool
opt_shrink_vectors_alu(nir_builder *b, nir_alu_instr *instr)
{
   nir_def *def = &instr->def;

   /* Nothing to shrink */
   if (def->num_components == 1)
      return false;

   switch (instr->op) {
   /* don't use nir_op_is_vec() as not all vector sizes are supported. */
   case nir_op_vec4:
   case nir_op_vec3:
   case nir_op_vec2:
      return opt_shrink_vector(b, instr);
   default:
      if (nir_op_infos[instr->op].output_size != 0)
         return false;
      break;
   }

   /* don't remove any channels if used by non-ALU */
   if (!is_only_used_by_alu(def))
      return false;

   unsigned mask = nir_def_components_read(def);
   /* return if there is nothing to do */
   if (mask == 0)
      return false;

   uint8_t reswizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
   unsigned num_components = 0;
   bool progress = false;
   for (unsigned i = 0; i < def->num_components; i++) {
      /* skip unused components */
      if (!((mask >> i) & 0x1))
         continue;

      /* Try to reuse a component with the same swizzles */
      unsigned j;
      for (j = 0; j < num_components; j++) {
         bool duplicate_channel = true;
         for (unsigned k = 0; k < nir_op_infos[instr->op].num_inputs; k++) {
            if (nir_op_infos[instr->op].input_sizes[k] != 0 ||
                instr->src[k].swizzle[i] != instr->src[k].swizzle[j]) {
               duplicate_channel = false;
               break;
            }
         }

         if (duplicate_channel) {
            reswizzle[i] = j;
            progress = true;
            break;
         }
      }

      /* Otherwise, just append the value */
      if (j == num_components) {
         for (int k = 0; k < nir_op_infos[instr->op].num_inputs; k++) {
            instr->src[k].swizzle[num_components] = instr->src[k].swizzle[i];
         }
         if (i != num_components)
            progress = true;
         reswizzle[i] = num_components++;
      }
   }

   /* update uses */
   if (progress)
      reswizzle_alu_uses(def, reswizzle);

   unsigned rounded = round_up_components(num_components);
   assert(rounded <= def->num_components);
   if (rounded < def->num_components)
      progress = true;

   /* update dest */
   def->num_components = rounded;

   return progress;
}
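
/*
 * A small sketch of the duplicate-channel case: in
 * vec3 ssa_2 = fmul ssa_0.xyx, ssa_1.zwz, channel 2 reads exactly the same
 * source swizzles as channel 0, so the instruction is shrunk to
 * vec2 ssa_2 = fmul ssa_0.xy, ssa_1.zw and readers of ssa_2.z are reswizzled
 * to ssa_2.x.
 */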

static bool
opt_shrink_vectors_intrinsic(nir_builder *b, nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_input_vertex:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_push_constant:
   case nir_intrinsic_load_constant:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_load_kernel_input:
   case nir_intrinsic_load_scratch: {
      /* Must be a vectorized intrinsic that we can resize. */
      assert(instr->num_components != 0);

      /* Trim the dest to the used channels */
      if (!shrink_dest_to_read_mask(&instr->def))
         return false;

      instr->num_components = instr->def.num_components;
      return true;
   }
   case nir_intrinsic_image_sparse_load:
   case nir_intrinsic_bindless_image_sparse_load:
   case nir_intrinsic_image_deref_sparse_load:
      return shrink_intrinsic_to_non_sparse(instr);
   default:
      return false;
   }
}

static bool
opt_shrink_vectors_tex(nir_builder *b, nir_tex_instr *tex)
{
   if (!tex->is_sparse)
      return false;

   unsigned mask = nir_def_components_read(&tex->def);
   int last_bit = util_last_bit(mask);

   /* If the sparse component is used, do nothing. */
   if (last_bit == tex->def.num_components)
      return false;

   tex->def.num_components -= 1;
   tex->is_sparse = false;

   return true;
}

static bool
opt_shrink_vectors_load_const(nir_load_const_instr *instr)
{
   nir_def *def = &instr->def;

   /* early out if there's nothing to do. */
   if (def->num_components == 1)
      return false;

   /* don't remove any channels if used by non-ALU */
   if (!is_only_used_by_alu(def))
      return false;

   unsigned mask = nir_def_components_read(def);

   /* If nothing was read, leave it up to DCE. */
   if (!mask)
      return false;

   uint8_t reswizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
   unsigned num_components = 0;
   bool progress = false;
   for (unsigned i = 0; i < def->num_components; i++) {
      if (!((mask >> i) & 0x1))
         continue;

      /* Try to reuse a component with the same constant */
      unsigned j;
      for (j = 0; j < num_components; j++) {
         if (instr->value[i].u64 == instr->value[j].u64) {
            reswizzle[i] = j;
            progress = true;
            break;
         }
      }

      /* Otherwise, just append the value */
      if (j == num_components) {
         instr->value[num_components] = instr->value[i];
         if (i != num_components)
            progress = true;
         reswizzle[i] = num_components++;
      }
   }

   if (progress)
      reswizzle_alu_uses(def, reswizzle);

   unsigned rounded = round_up_components(num_components);
   assert(rounded <= def->num_components);
   if (rounded < def->num_components)
      progress = true;

   def->num_components = rounded;

   return progress;
}
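
/*
 * For example (informal): a load_const of vec4 (1.0, 2.0, 1.0, 0.0) whose
 * channels are all read by ALU instructions keeps only the distinct values
 * (1.0, 2.0, 0.0) as a vec3; readers of .z are reswizzled to .x and readers
 * of .w to .z.
 */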

static bool
opt_shrink_vectors_ssa_undef(nir_undef_instr *instr)
{
   return shrink_dest_to_read_mask(&instr->def);
}

static bool
opt_shrink_vectors_phi(nir_builder *b, nir_phi_instr *instr)
{
   nir_def *def = &instr->def;

   /* early out if there's nothing to do. */
   if (def->num_components == 1)
      return false;

   /* Ignore large vectors for now. */
   if (def->num_components > 4)
      return false;

   /* Check the uses. */
   nir_component_mask_t mask = 0;
   nir_foreach_use(src, def) {
      if (src->parent_instr->type != nir_instr_type_alu)
         return false;

      nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);

      nir_alu_src *alu_src = exec_node_data(nir_alu_src, src, src);
      int src_idx = alu_src - &alu->src[0];
      nir_component_mask_t src_read_mask = nir_alu_instr_src_read_mask(alu, src_idx);

      nir_def *alu_def = &alu->def;

      /* We don't mark the channels used if the only reader is the original phi.
       * This can happen in the case of loops.
       */
      nir_foreach_use(alu_use_src, alu_def) {
         if (alu_use_src->parent_instr != &instr->instr) {
            mask |= src_read_mask;
         }
      }

      /* However, even if the instruction only points back at the phi, we still
       * need to check that the swizzles are trivial.
       */
      if (nir_op_is_vec(alu->op)) {
         if (src_idx != alu->src[src_idx].swizzle[0]) {
            mask |= src_read_mask;
         }
      } else if (!nir_alu_src_is_trivial_ssa(alu, src_idx)) {
         mask |= src_read_mask;
      }
   }

   /* DCE will handle this. */
   if (mask == 0)
      return false;

   /* Nothing to shrink? */
   if (BITFIELD_MASK(def->num_components) == mask)
      return false;

   /* Set up the reswizzles. */
   unsigned num_components = 0;
   uint8_t reswizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
   uint8_t src_reswizzle[NIR_MAX_VEC_COMPONENTS] = { 0 };
   for (unsigned i = 0; i < def->num_components; i++) {
      if (!((mask >> i) & 0x1))
         continue;
      src_reswizzle[num_components] = i;
      reswizzle[i] = num_components++;
   }

   /* Shrink the phi, this part is simple. */
   def->num_components = num_components;

   /* We can't swizzle phi sources directly, so just insert extra movs with
    * the correct swizzle and let the other parts of nir_opt_shrink_vectors do
    * their job on the original source instructions. If the original source
    * was used only in the phi, the movs will disappear later after copy
    * propagation.
    */
   nir_foreach_phi_src(phi_src, instr) {
      b->cursor = nir_after_instr_and_phis(phi_src->src.ssa->parent_instr);

      nir_alu_src alu_src = {
         .src = nir_src_for_ssa(phi_src->src.ssa)
      };

      for (unsigned i = 0; i < num_components; i++)
         alu_src.swizzle[i] = src_reswizzle[i];
      nir_def *mov = nir_mov_alu(b, alu_src, num_components);

      nir_src_rewrite(&phi_src->src, mov);
   }
   b->cursor = nir_before_instr(&instr->instr);

   /* Reswizzle readers. */
   reswizzle_alu_uses(def, reswizzle);

   return true;
}
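
/*
 * Sketch of the phi case: for a vec4 phi whose ALU readers only ever use .xy,
 * the phi def is shrunk to a vec2, a mov with an .xy swizzle is inserted
 * after each phi source so later iterations of the pass (plus copy
 * propagation) can shrink those sources too, and the readers are reswizzled
 * to the new channel layout.
 */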

static bool
opt_shrink_vectors_instr(nir_builder *b, nir_instr *instr)
{
   b->cursor = nir_before_instr(instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      return opt_shrink_vectors_alu(b, nir_instr_as_alu(instr));

   case nir_instr_type_tex:
      return opt_shrink_vectors_tex(b, nir_instr_as_tex(instr));

   case nir_instr_type_intrinsic:
      return opt_shrink_vectors_intrinsic(b, nir_instr_as_intrinsic(instr));

   case nir_instr_type_load_const:
      return opt_shrink_vectors_load_const(nir_instr_as_load_const(instr));

   case nir_instr_type_undef:
      return opt_shrink_vectors_ssa_undef(nir_instr_as_undef(instr));

   case nir_instr_type_phi:
      return opt_shrink_vectors_phi(b, nir_instr_as_phi(instr));

   default:
      return false;
   }

   return true;
}

bool
nir_opt_shrink_vectors(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function_impl(impl, shader) {
      nir_builder b = nir_builder_create(impl);

      nir_foreach_block_reverse(block, impl) {
         nir_foreach_instr_reverse(instr, block) {
            progress |= opt_shrink_vectors_instr(&b, instr);
         }
      }

      if (progress) {
         nir_metadata_preserve(impl,
                               nir_metadata_block_index |
                               nir_metadata_dominance);
      } else {
         nir_metadata_preserve(impl, nir_metadata_all);
      }
   }

   return progress;
}
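
/*
 * Typical usage is up to the driver, but as a rough sketch a vector backend
 * would run this pass in its optimization loop alongside copy propagation and
 * DCE, so that the movs inserted for shrunken phis and the now-dead original
 * vectors get cleaned up:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_shrink_vectors(shader);
 *       progress |= nir_copy_prop(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);
 */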