nir/alu_to_scalar: Use the new NIR lowering framework

Reviewed-by: Eric Anholt <eric@anholt.net>
Jason Ekstrand 2019-07-11 15:20:39 -05:00
parent eb768b0a09
commit c1cffa4249

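The shape of the change is mechanical: each case in lower_alu_instr_scalar() that used to call nir_ssa_def_rewrite_uses() and nir_instr_remove() by hand now simply returns the replacement nir_ssa_def, and the per-function iteration boilerplate disappears. Below is a minimal sketch of the callback pattern, pieced together from the calls visible in this diff; filter_cb, lower_cb, and my_pass are placeholder names, not part of the commit.

/* Filter callback: pick out the instructions the pass wants to lower.
 * The real filter in this commit is inst_is_vector_alu(). */
static bool
filter_cb(const nir_instr *instr, const void *cb_data)
{
   return instr->type == nir_instr_type_alu;
}

/* Lowering callback: build the replacement value with the builder and
 * return it; returning NULL means "leave this instruction alone".
 * Judging by the calls deleted in this diff, the framework then
 * rewrites all uses of the old destination with the returned def and
 * removes the original instruction. */
static nir_ssa_def *
lower_cb(nir_builder *b, nir_instr *instr, void *cb_data)
{
   nir_alu_instr *alu = nir_instr_as_alu(instr);
   if (alu->op != nir_op_pack_half_2x16)
      return NULL;

   /* Same lowering as the pack_half_2x16 case in the diff below. */
   nir_ssa_def *src = nir_ssa_for_alu_src(b, alu, 0);
   return nir_pack_half_2x16_split(b, nir_channel(b, src, 0),
                                      nir_channel(b, src, 1));
}

static bool
my_pass(nir_shader *shader)
{
   return nir_shader_lower_instructions(shader, filter_cb, lower_cb,
                                        NULL);
}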

@@ -30,6 +30,23 @@
  * arguments with individual per-channel operations.
  */
 
+static bool
+inst_is_vector_alu(const nir_instr *instr, const void *_state)
+{
+   if (instr->type != nir_instr_type_alu)
+      return false;
+
+   nir_alu_instr *alu = nir_instr_as_alu(instr);
+
+   /* There is no ALU instruction which has a scalar destination, scalar
+    * src[0], and some other vector source.
+    */
+   assert(alu->dest.dest.is_ssa);
+   assert(alu->src[0].src.is_ssa);
+   return alu->dest.dest.ssa.num_components > 1 ||
+          alu->src[0].src.ssa->num_components > 1;
+}
+
 static void
 nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
                       unsigned bit_size)
@@ -39,7 +56,7 @@ nir_alu_ssa_dest_init(nir_alu_instr *alu, unsigned num_components,
    alu->dest.write_mask = (1 << num_components) - 1;
 }
 
-static void
+static nir_ssa_def *
 lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
                 nir_builder *builder)
 {
@@ -68,14 +85,14 @@ lower_reduction(nir_alu_instr *alu, nir_op chan_op, nir_op merge_op,
       }
    }
 
-   assert(alu->dest.write_mask == 1);
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(last));
-   nir_instr_remove(&alu->instr);
+   return last;
 }
 
-static bool
-lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_set)
+static nir_ssa_def *
+lower_alu_instr_scalar(nir_builder *b, nir_instr *instr, void *_state)
 {
+   BITSET_WORD *lower_set = _state;
+   nir_alu_instr *alu = nir_instr_as_alu(instr);
    unsigned num_src = nir_op_infos[alu->op].num_inputs;
    unsigned i, chan;
@@ -86,14 +103,13 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
    b->exact = alu->exact;
 
    if (lower_set && !BITSET_TEST(lower_set, alu->op))
-      return false;
+      return NULL;
 
 #define LOWER_REDUCTION(name, chan, merge) \
    case name##2: \
    case name##3: \
    case name##4: \
-      lower_reduction(alu, chan, merge, b); \
-      return true;
+      return lower_reduction(alu, chan, merge, b); \
 
    switch (alu->op) {
    case nir_op_vec4:
@@ -104,21 +120,16 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       /* We don't need to scalarize these ops, they're the ones generated to
        * group up outputs into a value that can be SSAed.
        */
-      return false;
+      return NULL;
 
-   case nir_op_pack_half_2x16:
+   case nir_op_pack_half_2x16: {
       if (!b->shader->options->lower_pack_half_2x16)
-         return false;
+         return NULL;
 
       nir_ssa_def *src_vec2 = nir_ssa_for_alu_src(b, alu, 0);
-
-      nir_ssa_def *val =
-         nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
-                                     nir_channel(b, src_vec2, 1));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      return true;
+      return nir_pack_half_2x16_split(b, nir_channel(b, src_vec2, 0),
+                                         nir_channel(b, src_vec2, 1));
+   }
 
    case nir_op_unpack_unorm_4x8:
    case nir_op_unpack_snorm_4x8:
@@ -127,22 +138,15 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       /* There is no scalar version of these ops, unless we were to break it
       * down to bitshifts and math (which is definitely not intended).
       */
-      return false;
+      return NULL;
 
    case nir_op_unpack_half_2x16: {
       if (!b->shader->options->lower_unpack_half_2x16)
-         return false;
+         return NULL;
 
       nir_ssa_def *packed = nir_ssa_for_alu_src(b, alu, 0);
-
-      nir_ssa_def *comps[2];
-      comps[0] = nir_unpack_half_2x16_split_x(b, packed);
-      comps[1] = nir_unpack_half_2x16_split_y(b, packed);
-      nir_ssa_def *vec = nir_vec(b, comps, 2);
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
-      nir_instr_remove(&alu->instr);
-      return true;
+      return nir_vec2(b, nir_unpack_half_2x16_split_x(b, packed),
+                         nir_unpack_half_2x16_split_y(b, packed));
    }
 
    case nir_op_pack_uvec2_to_uint: {
@@ -151,13 +155,9 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       nir_ssa_def *word = nir_extract_u16(b, nir_ssa_for_alu_src(b, alu, 0),
                                              nir_imm_int(b, 0));
 
-      nir_ssa_def *val =
-         nir_ior(b, nir_ishl(b, nir_channel(b, word, 1), nir_imm_int(b, 16)),
-                    nir_channel(b, word, 0));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      break;
+      return nir_ior(b, nir_ishl(b, nir_channel(b, word, 1),
+                                    nir_imm_int(b, 16)),
+                        nir_channel(b, word, 0));
    }
 
    case nir_op_pack_uvec4_to_uint: {
@@ -166,15 +166,13 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       nir_ssa_def *byte = nir_extract_u8(b, nir_ssa_for_alu_src(b, alu, 0),
                                             nir_imm_int(b, 0));
 
-      nir_ssa_def *val =
-         nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3), nir_imm_int(b, 24)),
-                               nir_ishl(b, nir_channel(b, byte, 2), nir_imm_int(b, 16))),
-                    nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1), nir_imm_int(b, 8)),
-                               nir_channel(b, byte, 0)));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      break;
+      return nir_ior(b, nir_ior(b, nir_ishl(b, nir_channel(b, byte, 3),
+                                               nir_imm_int(b, 24)),
+                                   nir_ishl(b, nir_channel(b, byte, 2),
+                                               nir_imm_int(b, 16))),
+                        nir_ior(b, nir_ishl(b, nir_channel(b, byte, 1),
+                                               nir_imm_int(b, 8)),
+                                   nir_channel(b, byte, 0)));
    }
 
    case nir_op_fdph: {
@@ -188,17 +186,13 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       }
       sum[3] = nir_channel(b, src1_vec, 3);
 
-      nir_ssa_def *val = nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
-                                     nir_fadd(b, sum[2], sum[3]));
-
-      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(val));
-      nir_instr_remove(&alu->instr);
-      return true;
+      return nir_fadd(b, nir_fadd(b, sum[0], sum[1]),
+                         nir_fadd(b, sum[2], sum[3]));
    }
 
    case nir_op_unpack_64_2x32:
    case nir_op_unpack_32_2x16:
-      return false;
+      return NULL;
 
       LOWER_REDUCTION(nir_op_fdot, nir_op_fmul, nir_op_fadd);
       LOWER_REDUCTION(nir_op_ball_fequal, nir_op_feq, nir_op_iand);
@@ -217,7 +211,7 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
    }
 
    if (alu->dest.dest.ssa.num_components == 1)
-      return false;
+      return NULL;
 
    unsigned num_components = alu->dest.dest.ssa.num_components;
    nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS] = { NULL };
@@ -248,47 +242,14 @@ lower_alu_instr_scalar(nir_alu_instr *alu, nir_builder *b, BITSET_WORD *lower_se
       nir_builder_instr_insert(b, &lower->instr);
    }
 
-   nir_ssa_def *vec = nir_vec(b, comps, num_components);
-
-   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(vec));
-
-   nir_instr_remove(&alu->instr);
-   return true;
-}
-
-static bool
-nir_lower_alu_to_scalar_impl(nir_function_impl *impl, BITSET_WORD *lower_set)
-{
-   nir_builder builder;
-   nir_builder_init(&builder, impl);
-   bool progress = false;
-
-   nir_foreach_block(block, impl) {
-      nir_foreach_instr_safe(instr, block) {
-         if (instr->type == nir_instr_type_alu) {
-            progress = lower_alu_instr_scalar(nir_instr_as_alu(instr),
-                                              &builder,
-                                              lower_set) || progress;
-         }
-      }
-   }
-
-   nir_metadata_preserve(impl, nir_metadata_block_index |
-                               nir_metadata_dominance);
-
-   return progress;
+   return nir_vec(b, comps, num_components);
 }
 
 bool
 nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set)
 {
-   bool progress = false;
-
-   nir_foreach_function(function, shader) {
-      if (function->impl)
-         progress = nir_lower_alu_to_scalar_impl(function->impl,
-                                                 lower_set) || progress;
-   }
-
-   return progress;
+   return nir_shader_lower_instructions(shader,
+                                        inst_is_vector_alu,
+                                        lower_alu_instr_scalar,
+                                        lower_set);
 }
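The payoff is in the last hunk: progress tracking, the nir_foreach_function()/nir_foreach_block() loops, and the nir_metadata_preserve() call all move into the shared framework, and the pass shrinks to a single nir_shader_lower_instructions() call. The external interface is unchanged; a hypothetical caller that only wants dot products scalarized could still pass a lower_set (BITSET_DECLARE and BITSET_SET are the usual util/bitset.h helpers, and the set is consulted via BITSET_TEST in lower_alu_instr_scalar() above):

   /* Scalarize only fdot4; leave other vector ALU ops intact. */
   BITSET_DECLARE(lower_set, nir_num_opcodes) = {0};
   BITSET_SET(lower_set, nir_op_fdot4);

   bool progress = nir_lower_alu_to_scalar(shader, lower_set);

Passing NULL for lower_set scalarizes every vector ALU instruction, as before.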