nir/lower_io_to_scalar: add support for ubo/ssbo and shared io

Reviewed-by: Rhys Perry <pendingchaos02@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13485>
This commit is contained in:
Mike Blumenkrantz 2021-10-20 11:03:44 -04:00 committed by Marge Bot
parent 60cb471805
commit 16f838576c

View file

@ -64,6 +64,53 @@ lower_load_input_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
nir_instr_remove(&intr->instr);
}
/* Split a vector load_ubo/load_ssbo/load_shared into one scalar load per
 * component and recombine the results into a vec that replaces all uses of
 * the original destination.
 */
static void
lower_load_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   assert(intr->dest.is_ssa);

   nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
   /* load_shared carries its offset in src[0]; ubo/ssbo loads carry a
    * buffer index in src[0] and the offset in src[1].
    */
   const unsigned offset_src =
      intr->intrinsic == nir_intrinsic_load_shared ? 0 : 1;
   nir_ssa_def *orig_offset = intr->src[offset_src].ssa;
   const unsigned bytes_per_comp = intr->dest.ssa.bit_size / 8;

   for (unsigned comp = 0; comp < intr->num_components; comp++) {
      nir_intrinsic_instr *scalar =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      scalar->num_components = 1;
      nir_ssa_dest_init(&scalar->instr, &scalar->dest,
                        1, intr->dest.ssa.bit_size, NULL);

      /* Rebase the alignment info for this component's extra byte offset. */
      nir_intrinsic_set_align_offset(scalar,
                                     (nir_intrinsic_align_offset(intr) +
                                      comp * bytes_per_comp) %
                                     nir_intrinsic_align_mul(intr));
      nir_intrinsic_set_align_mul(scalar, nir_intrinsic_align_mul(intr));

      /* Forward whichever optional indices this intrinsic carries. */
      if (nir_intrinsic_has_access(intr))
         nir_intrinsic_set_access(scalar, nir_intrinsic_access(intr));
      if (nir_intrinsic_has_range(intr))
         nir_intrinsic_set_range(scalar, nir_intrinsic_range(intr));
      if (nir_intrinsic_has_range_base(intr))
         nir_intrinsic_set_range_base(scalar, nir_intrinsic_range_base(intr));
      if (nir_intrinsic_has_base(intr))
         nir_intrinsic_set_base(scalar, nir_intrinsic_base(intr));

      /* Copy every source except the offset (always the last source for
       * these intrinsics), then point the offset at this component.
       */
      for (unsigned s = 0; s + 1 < nir_intrinsic_infos[intr->intrinsic].num_srcs; s++)
         nir_src_copy(&scalar->src[s], &intr->src[s]);
      scalar->src[offset_src] =
         nir_src_for_ssa(nir_iadd_imm(b, orig_offset, comp * bytes_per_comp));

      nir_builder_instr_insert(b, &scalar->instr);

      comps[comp] = &scalar->dest.ssa;
   }

   nir_ssa_def_rewrite_uses(&intr->dest.ssa,
                            nir_vec(b, comps, intr->num_components));
   nir_instr_remove(&intr->instr);
}
static void
lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
@ -96,6 +143,46 @@ lower_store_output_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
nir_instr_remove(&intr->instr);
}
/* Split a vector store_ssbo/store_shared into one scalar store per written
 * component, bumping the byte offset for each, then remove the original.
 */
static void
lower_store_to_scalar(nir_builder *b, nir_intrinsic_instr *intr)
{
   b->cursor = nir_before_instr(&intr->instr);

   nir_ssa_def *value = nir_ssa_for_src(b, intr->src[0], intr->num_components);
   /* store_shared: src[0]=value, src[1]=offset;
    * store_ssbo:   src[0]=value, src[1]=buffer index, src[2]=offset.
    */
   const unsigned offset_src =
      intr->intrinsic == nir_intrinsic_store_shared ? 1 : 2;
   nir_ssa_def *orig_offset = intr->src[offset_src].ssa;
   const unsigned bytes_per_comp = value->bit_size / 8;

   /* Walk the write mask rather than num_components so stores whose mask
    * was already split (sparse components) are handled too.
    */
   u_foreach_bit(comp, nir_intrinsic_write_mask(intr)) {
      nir_intrinsic_instr *scalar =
         nir_intrinsic_instr_create(b->shader, intr->intrinsic);
      scalar->num_components = 1;
      nir_intrinsic_set_write_mask(scalar, 0x1);

      /* Rebase the alignment info for this component's extra byte offset. */
      nir_intrinsic_set_align_offset(scalar,
                                     (nir_intrinsic_align_offset(intr) +
                                      comp * bytes_per_comp) %
                                     nir_intrinsic_align_mul(intr));
      nir_intrinsic_set_align_mul(scalar, nir_intrinsic_align_mul(intr));

      /* Forward whichever optional indices this intrinsic carries. */
      if (nir_intrinsic_has_access(intr))
         nir_intrinsic_set_access(scalar, nir_intrinsic_access(intr));
      if (nir_intrinsic_has_base(intr))
         nir_intrinsic_set_base(scalar, nir_intrinsic_base(intr));

      /* src[0] is the (now scalar) value being stored. */
      scalar->src[0] = nir_src_for_ssa(nir_channel(b, value, comp));

      /* Copy the remaining sources except the offset (always the last
       * source), then point the offset at this component.
       */
      for (unsigned s = 1; s + 1 < nir_intrinsic_infos[intr->intrinsic].num_srcs; s++)
         nir_src_copy(&scalar->src[s], &intr->src[s]);
      scalar->src[offset_src] =
         nir_src_for_ssa(nir_iadd_imm(b, orig_offset, comp * bytes_per_comp));

      nir_builder_instr_insert(b, &scalar->instr);
   }

   nir_instr_remove(&intr->instr);
}
static bool
nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
{
@ -115,12 +202,25 @@ nir_lower_io_to_scalar_instr(nir_builder *b, nir_instr *instr, void *data)
return true;
}
if ((intr->intrinsic == nir_intrinsic_load_ubo && (mask & nir_var_mem_ubo)) ||
(intr->intrinsic == nir_intrinsic_load_ssbo && (mask & nir_var_mem_ssbo)) ||
(intr->intrinsic == nir_intrinsic_load_shared && (mask & nir_var_mem_shared))) {
lower_load_to_scalar(b, intr);
return true;
}
if (intr->intrinsic == nir_intrinsic_store_output &&
mask & nir_var_shader_out) {
lower_store_output_to_scalar(b, intr);
return true;
}
if ((intr->intrinsic == nir_intrinsic_store_ssbo && (mask & nir_var_mem_ssbo)) ||
(intr->intrinsic == nir_intrinsic_store_shared && (mask & nir_var_mem_shared))) {
lower_store_to_scalar(b, intr);
return true;
}
return false;
}