ac/nir/ngg: Carve out NGG streamout code.

We're going to want to call it from a different file too.

Reviewed-by: Marek Olšák <marek.olsak@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33218>
Author: Timur Kristóf, 2025-01-25 04:36:23 +01:00 (committed by Marge Bot)
parent 1d8f46e00c
commit d2ff3f9fa0
3 changed files with 478 additions and 457 deletions


@@ -194,6 +194,27 @@ ac_nir_create_output_phis(nir_builder *b,
const uint64_t outputs_written_16bit,
ac_nir_prerast_out *out);
void
ac_nir_ngg_build_streamout_buffer_info(nir_builder *b,
nir_xfb_info *info,
enum amd_gfx_level gfx_level,
bool has_xfb_prim_query,
bool use_gfx12_xfb_intrinsic,
nir_def *scratch_base,
nir_def *tid_in_tg,
nir_def *gen_prim[4],
nir_def *so_buffer_ret[4],
nir_def *buffer_offsets_ret[4],
nir_def *emit_prim_ret[4]);
void
ac_nir_ngg_build_streamout_vertex(nir_builder *b, nir_xfb_info *info,
unsigned stream, nir_def *so_buffer[4],
nir_def *buffer_offsets[4],
unsigned vertex_index, nir_def *vtx_lds_addr,
ac_nir_prerast_out *pr_out,
bool skip_primitive_id);
#ifdef __cplusplus
}
#endif
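
For reference, a minimal sketch of how a caller is expected to use the two newly exported helpers, based on the nogs call site further down in this commit (variable names are illustrative, not part of the header):

  /* Once per workgroup: compute the streamout buffer descriptors, the global buffer
   * offsets and the per-stream emitted-primitive counts. gen_prim[] holds the number
   * of generated primitives per stream and must be provided by the caller.
   */
  nir_def *tid_in_tg = nir_load_local_invocation_index(b);
  nir_def *so_buffer[4] = {0}, *buffer_offsets[4] = {0}, *emit_prim[4] = {0};
  ac_nir_ngg_build_streamout_buffer_info(b, info, gfx_level, has_xfb_prim_query,
                                         use_gfx12_xfb_intrinsic, lds_scratch_base, tid_in_tg,
                                         gen_prim, so_buffer, buffer_offsets, emit_prim);

  /* Then, for each vertex of an emitted primitive, copy its outputs from LDS
   * to the streamout buffers.
   */
  ac_nir_ngg_build_streamout_vertex(b, info, stream, so_buffer, buffer_offsets,
                                    vertex_index, vtx_lds_addr, pr_out, skip_primitive_id);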


@@ -1729,459 +1729,6 @@ ngg_nogs_store_xfb_outputs_to_lds(nir_builder *b, lower_ngg_nogs_state *s)
}
}
static nir_def *
write_values_to_lanes(nir_builder *b, nir_def **values, unsigned lane_mask)
{
nir_def *lanes = nir_imm_int(b, 0);
u_foreach_bit(i, lane_mask) {
lanes = nir_write_invocation_amd(b, lanes, values[i], nir_imm_int(b, i));
}
return lanes;
}
static nir_def *
read_values_from_4_lanes(nir_builder *b, nir_def *values, unsigned lane_mask)
{
nir_def *undef = nir_undef(b, 1, 32);
nir_def *per_lane[4] = {undef, undef, undef, undef};
u_foreach_bit(i, lane_mask) {
per_lane[i] = nir_read_invocation(b, values, nir_imm_int(b, i));
}
return nir_vec(b, per_lane, 4);
}
static void
ngg_build_streamout_buffer_info(nir_builder *b,
nir_xfb_info *info,
enum amd_gfx_level gfx_level,
bool has_xfb_prim_query,
bool use_gfx12_xfb_intrinsic,
nir_def *scratch_base,
nir_def *tid_in_tg,
nir_def *gen_prim[4],
nir_def *so_buffer_ret[4],
nir_def *buffer_offsets_ret[4],
nir_def *emit_prim_ret[4])
{
nir_def *prim_stride[4] = {0};
nir_def *undef = nir_undef(b, 1, 32);
/* For radeonsi, which passes this value as an argument when this is a VS. Streamout needs
* the accurate number of vertices per primitive to write the correct amount of data to the buffer.
*/
nir_def *num_vert_per_prim = nir_load_num_vertices_per_primitive_amd(b);
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (!(info->buffers_written & BITFIELD_BIT(buffer)))
continue;
assert(info->buffers[buffer].stride);
prim_stride[buffer] =
nir_imul_imm(b, num_vert_per_prim, info->buffers[buffer].stride);
so_buffer_ret[buffer] = nir_load_streamout_buffer_amd(b, .base = buffer);
}
nir_if *if_invocation_0 = nir_push_if(b, nir_ieq_imm(b, tid_in_tg, 0));
{
nir_def *any_buffer_valid = nir_imm_false(b);
nir_def *workgroup_buffer_sizes[4];
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (info->buffers_written & BITFIELD_BIT(buffer)) {
nir_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
/* In radeonsi, we may not know at compile time whether a feedback buffer has been
* bound, so we have to check the buffer size at runtime and skip the GDS update for
* unbound buffers. Otherwise, a previous draw compiled with streamout but without a
* bound feedback buffer would still update GDS and corrupt the current draw's streamout.
*/
nir_def *buffer_valid = nir_ine_imm(b, buffer_size, 0);
nir_def *inc_buffer_size =
nir_imul(b, gen_prim[info->buffer_to_stream[buffer]], prim_stride[buffer]);
workgroup_buffer_sizes[buffer] =
nir_bcsel(b, buffer_valid, inc_buffer_size, nir_imm_int(b, 0));
any_buffer_valid = nir_ior(b, any_buffer_valid, buffer_valid);
} else
workgroup_buffer_sizes[buffer] = undef;
}
nir_def *buffer_offsets = NULL, *xfb_state_address = NULL, *xfb_voffset = NULL;
/* Get the current global offset of each buffer and increase it by the
* workgroup's buffer size. This is an ordered operation sorted by
* ordered_id; each buffer's info is in a channel of a vec4.
*/
if (gfx_level >= GFX12) {
nir_pop_if(b, if_invocation_0);
for (unsigned buffer = 0; buffer < 4; buffer++)
workgroup_buffer_sizes[buffer] = nir_if_phi(b, workgroup_buffer_sizes[buffer], undef);
any_buffer_valid = nir_if_phi(b, any_buffer_valid, nir_undef(b, 1, 1));
/* These must be set after nir_pop_if and phis. */
xfb_state_address = nir_load_xfb_state_address_gfx12_amd(b);
xfb_voffset = nir_imul_imm(b, tid_in_tg, 8);
nir_if *if_4lanes = nir_push_if(b, nir_iand(b, any_buffer_valid, nir_ult_imm(b, tid_in_tg, 4)));
{
/* Move workgroup buffer sizes from SGPRs to the first 4 lanes. */
nir_def *workgroup_buffer_size_per_lane =
write_values_to_lanes(b, workgroup_buffer_sizes, info->buffers_written);
nir_def *ordered_id = nir_load_ordered_id_amd(b);
/* The atomic value for the 4 lanes is:
* lane 0: uvec2(ordered_id, workgroup_buffer_size0)
* lane 1: uvec2(ordered_id, workgroup_buffer_size1)
* lane 2: uvec2(ordered_id, workgroup_buffer_size2)
* lane 3: uvec2(ordered_id, workgroup_buffer_size3)
*/
nir_def *atomic_src = nir_pack_64_2x32_split(b, ordered_id,
workgroup_buffer_size_per_lane);
/* The memory layout of the xfb state is:
* struct {
* unsigned ordered_id;
* unsigned dwords_written0;
* unsigned ordered_id;
* unsigned dwords_written1;
* unsigned ordered_id;
* unsigned dwords_written2;
* unsigned ordered_id;
* unsigned dwords_written3;
* };
*
* Notes:
* - global_atomic_ordered_add_b64 is semantically a 64-bit atomic, requiring 8-byte
* address alignment, even though it operates on a pair of 32-bit values.
* - The whole structure is updated at once by issuing the atomic from 4 lanes
* with 8-byte address increments.
* - The whole structure should be entirely within one 64B block of memory
* for performance. (the address bits above 64B should not differ between lanes)
*/
nir_def *buffer_offset_per_lane;
/* The gfx12 intrinsic inserts hand-written assembly producing better code than current
* LLVM.
*/
if (use_gfx12_xfb_intrinsic) {
buffer_offset_per_lane =
nir_ordered_add_loop_gfx12_amd(b, xfb_state_address, xfb_voffset, ordered_id,
atomic_src);
/* Move the buffer offsets from the 4 lanes to lane 0. */
buffer_offsets = read_values_from_4_lanes(b, buffer_offset_per_lane, info->buffers_written);
} else {
/* The NIR version of the above using nir_atomic_op_ordered_add_gfx12_amd. */
enum { NUM_ATOMICS_IN_FLIGHT = 6 };
nir_variable *result_ring[NUM_ATOMICS_IN_FLIGHT] = {0};
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT; i++)
result_ring[i] = nir_local_variable_create(b->impl, glsl_uint64_t_type(), "result");
/* Issue the first N-1 atomics. The shader must not wait because we want them to be
* pipelined. It will only wait for the oldest atomic in the NIR loop.
*/
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT - 1; i++) {
nir_store_var(b, result_ring[i],
nir_global_atomic_amd(b, 64, xfb_state_address, atomic_src, xfb_voffset,
.atomic_op = nir_atomic_op_ordered_add_gfx12_amd), 0x1);
ac_nir_sleep(b, 24);
}
nir_variable *buffer_offsets_var =
nir_local_variable_create(b->impl, glsl_vec4_type(), "buffer_offset_per_lane");
nir_loop *loop = nir_push_loop(b);
{
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT; i++) {
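/* Ring indexing: read_index walks from the oldest outstanding atomic; issue_index trails it
* by one slot, so the first unrolled iteration issues the last free ring slot and later
* iterations re-issue the slot that was checked (and not yet ready) in the previous iteration.
*/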
int issue_index = (NUM_ATOMICS_IN_FLIGHT - 1 + i) % NUM_ATOMICS_IN_FLIGHT;
int read_index = i;
/* Issue (or repeat) the atomic. */
nir_store_var(b, result_ring[issue_index],
nir_global_atomic_amd(b, 64, xfb_state_address, atomic_src, xfb_voffset,
.atomic_op = nir_atomic_op_ordered_add_gfx12_amd), 0x1);
/* Break if the oldest atomic succeeded in incrementing the offsets. */
nir_def *oldest_result = nir_load_var(b, result_ring[read_index]);
nir_def *loaded_ordered_id = nir_unpack_64_2x32_split_x(b, oldest_result);
/* Debug: Write the vec4 into a shader log ring buffer. */
#if 0
nir_def *loaded_dwords_written = nir_unpack_64_2x32_split_y(b, oldest_result);
ac_nir_store_debug_log_amd(b, nir_vec4(b, nir_u2u32(b, xfb_state_address),
ordered_id, loaded_ordered_id,
loaded_dwords_written));
#endif
nir_def *continue_if = nir_ieq(b, loaded_ordered_id, ordered_id);
continue_if = nir_inot(b, nir_vote_any(b, 1, continue_if));
nir_push_if(b, continue_if);
}
nir_jump(b, nir_jump_continue);
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT; i++) {
int read_index = NUM_ATOMICS_IN_FLIGHT - 1 - i;
nir_push_else(b, NULL);
{
nir_def *result = nir_load_var(b, result_ring[read_index]);
buffer_offset_per_lane = nir_unpack_64_2x32_split_y(b, result);
buffer_offsets = read_values_from_4_lanes(b, buffer_offset_per_lane, info->buffers_written);
nir_store_var(b, buffer_offsets_var, buffer_offsets, info->buffers_written);
}
nir_pop_if(b, NULL);
}
nir_jump(b, nir_jump_break);
}
nir_pop_loop(b, loop);
buffer_offsets = nir_load_var(b, buffer_offsets_var);
}
}
nir_pop_if(b, if_4lanes);
buffer_offsets = nir_if_phi(b, buffer_offsets, nir_undef(b, 4, 32));
if_invocation_0 = nir_push_if(b, nir_ieq_imm(b, tid_in_tg, 0));
} else {
nir_def *ordered_id = nir_load_ordered_id_amd(b);
buffer_offsets =
nir_ordered_xfb_counter_add_gfx11_amd(b, ordered_id,
nir_vec(b, workgroup_buffer_sizes, 4),
/* mask of buffers to update */
.write_mask = info->buffers_written);
}
nir_def *emit_prim[4];
memcpy(emit_prim, gen_prim, 4 * sizeof(nir_def *));
nir_def *any_overflow = nir_imm_false(b);
nir_def *overflow_amount[4] = {undef, undef, undef, undef};
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (!(info->buffers_written & BITFIELD_BIT(buffer)))
continue;
nir_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
/* Only consider overflow for valid feedback buffers because
* otherwise the ordered operation above (GDS atomic return) might
* return non-zero offsets for invalid buffers.
*/
nir_def *buffer_valid = nir_ine_imm(b, buffer_size, 0);
nir_def *buffer_offset = nir_channel(b, buffer_offsets, buffer);
buffer_offset = nir_bcsel(b, buffer_valid, buffer_offset, nir_imm_int(b, 0));
nir_def *remain_size = nir_isub(b, buffer_size, buffer_offset);
nir_def *remain_prim = nir_idiv(b, remain_size, prim_stride[buffer]);
nir_def *overflow = nir_ilt(b, buffer_size, buffer_offset);
any_overflow = nir_ior(b, any_overflow, overflow);
overflow_amount[buffer] = nir_imax(b, nir_imm_int(b, 0),
nir_isub(b, buffer_offset, buffer_size));
unsigned stream = info->buffer_to_stream[buffer];
/* If a previous workgroup overflowed, we can't emit any primitives. */
emit_prim[stream] = nir_bcsel(
b, overflow, nir_imm_int(b, 0),
/* We can emit some of the primitives, limited by the smallest buffer. */
nir_imin(b, emit_prim[stream], remain_prim));
/* Save to LDS so it can be accessed by other waves in this workgroup. */
nir_store_shared(b, buffer_offset, scratch_base, .base = buffer * 4);
}
/* We have to fix up the streamout offsets if we overflowed because they determine
* the vertex count for DrawTransformFeedback.
*/
if (gfx_level >= GFX12) {
nir_pop_if(b, if_invocation_0);
any_overflow = nir_if_phi(b, any_overflow, nir_undef(b, 1, 1));
for (unsigned buffer = 0; buffer < 4; buffer++)
overflow_amount[buffer] = nir_if_phi(b, overflow_amount[buffer], undef);
for (unsigned stream = 0; stream < 4; stream++) {
if (emit_prim[stream])
emit_prim[stream] = nir_if_phi(b, emit_prim[stream], undef);
}
nir_if *if_any_overflow_4_lanes =
nir_push_if(b, nir_iand(b, any_overflow, nir_ult_imm(b, tid_in_tg, 4)));
{
/* Move overflow amounts from SGPRs to the first 4 lanes. */
nir_def *overflow_amount_per_lane =
write_values_to_lanes(b, overflow_amount, info->buffers_written);
nir_global_atomic_amd(b, 32, xfb_state_address, nir_ineg(b, overflow_amount_per_lane),
xfb_voffset, .base = 4, .atomic_op = nir_atomic_op_iadd);
}
nir_pop_if(b, if_any_overflow_4_lanes);
if_invocation_0 = nir_push_if(b, nir_ieq_imm(b, tid_in_tg, 0));
} else {
nir_if *if_any_overflow = nir_push_if(b, any_overflow);
nir_xfb_counter_sub_gfx11_amd(b, nir_vec(b, overflow_amount, 4),
/* mask of buffers to update */
.write_mask = info->buffers_written);
nir_pop_if(b, if_any_overflow);
}
/* Save to LDS so it can be accessed by other waves in this workgroup. */
for (unsigned stream = 0; stream < 4; stream++) {
if (!(info->streams_written & BITFIELD_BIT(stream)))
continue;
nir_store_shared(b, emit_prim[stream], scratch_base, .base = 16 + stream * 4);
}
/* Update shader query. */
if (has_xfb_prim_query) {
nir_if *if_shader_query = nir_push_if(b, nir_load_prim_xfb_query_enabled_amd(b));
{
for (unsigned stream = 0; stream < 4; stream++) {
if (info->streams_written & BITFIELD_BIT(stream))
nir_atomic_add_xfb_prim_count_amd(b, emit_prim[stream], .stream_id = stream);
}
}
nir_pop_if(b, if_shader_query);
}
}
nir_pop_if(b, if_invocation_0);
nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
/* Fetch the per-buffer offsets in all waves. */
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (!(info->buffers_written & BITFIELD_BIT(buffer)))
continue;
buffer_offsets_ret[buffer] =
nir_load_shared(b, 1, 32, scratch_base, .base = buffer * 4);
}
/* Fetch the per-stream emit prim in all waves. */
for (unsigned stream = 0; stream < 4; stream++) {
if (!(info->streams_written & BITFIELD_BIT(stream)))
continue;
emit_prim_ret[stream] =
nir_load_shared(b, 1, 32, scratch_base, .base = 16 + stream * 4);
}
}
static void
ngg_build_streamout_vertex(nir_builder *b, nir_xfb_info *info,
unsigned stream, nir_def *so_buffer[4],
nir_def *buffer_offsets[4],
unsigned vertex_index, nir_def *vtx_lds_addr,
ac_nir_prerast_out *pr_out,
bool skip_primitive_id)
{
unsigned vertex_offset[NIR_MAX_XFB_BUFFERS] = {0};
u_foreach_bit(buffer, info->buffers_written) {
/* We use imm_offset for the vertex offset within a primitive, and GFX11 only supports
* 12-bit unsigned imm_offset. (GFX12 supports 24-bit signed imm_offset)
*/
assert(info->buffers[buffer].stride * 3 < 4096);
vertex_offset[buffer] = vertex_index * info->buffers[buffer].stride;
}
nir_def *zero = nir_imm_int(b, 0);
unsigned num_values = 0, store_offset = 0, store_buffer_index = 0;
nir_def *values[4];
for (unsigned i = 0; i < info->output_count; i++) {
nir_xfb_output_info *out = info->outputs + i;
if (!out->component_mask || info->buffer_to_stream[out->buffer] != stream)
continue;
unsigned base;
if (out->location >= VARYING_SLOT_VAR0_16BIT) {
base =
util_bitcount64(b->shader->info.outputs_written) +
util_bitcount(b->shader->info.outputs_written_16bit &
BITFIELD_MASK(out->location - VARYING_SLOT_VAR0_16BIT));
} else {
uint64_t outputs_written = b->shader->info.outputs_written;
if (skip_primitive_id)
outputs_written &= ~VARYING_BIT_PRIMITIVE_ID;
base =
util_bitcount64(outputs_written &
BITFIELD64_MASK(out->location));
}
unsigned offset = (base * 4 + out->component_offset) * 4;
unsigned count = util_bitcount(out->component_mask);
assert(u_bit_consecutive(out->component_offset, count) == out->component_mask);
nir_def *out_data =
nir_load_shared(b, count, 32, vtx_lds_addr, .base = offset);
for (unsigned comp = 0; comp < count; comp++) {
nir_def *data = nir_channel(b, out_data, comp);
/* Convert 16-bit outputs to 32-bit.
*
* OpenGL ES puts 16-bit medium-precision varyings in VARYING_SLOT_VAR0_16BIT.
* We need to convert them to 32-bit for streamout.
*
* Vulkan does not allow 8/16-bit varyings for streamout.
*/
if (out->location >= VARYING_SLOT_VAR0_16BIT) {
unsigned index = out->location - VARYING_SLOT_VAR0_16BIT;
unsigned c = out->component_offset + comp;
nir_def *v;
nir_alu_type t;
if (out->high_16bits) {
v = nir_unpack_32_2x16_split_y(b, data);
t = pr_out->types_16bit_hi[index][c];
} else {
v = nir_unpack_32_2x16_split_x(b, data);
t = pr_out->types_16bit_lo[index][c];
}
t = nir_alu_type_get_base_type(t);
data = nir_convert_to_bit_size(b, v, t, 32);
}
const unsigned store_comp_offset = out->offset + comp * 4;
const bool has_hole = store_offset + num_values * 4 != store_comp_offset;
/* Flush the gathered components to memory (as a vec4 store or smaller) when the vec4 is full, the buffer changes, or there is a hole. */
if (num_values && (num_values == 4 || store_buffer_index != out->buffer || has_hole)) {
nir_store_buffer_amd(b, nir_vec(b, values, num_values), so_buffer[store_buffer_index],
buffer_offsets[store_buffer_index], zero, zero,
.base = vertex_offset[store_buffer_index] + store_offset,
.access = ACCESS_NON_TEMPORAL);
num_values = 0;
}
/* Initialize the buffer index and offset if we are beginning a new vec4 store. */
if (num_values == 0) {
store_buffer_index = out->buffer;
store_offset = store_comp_offset;
}
values[num_values++] = data;
}
}
if (num_values) {
/* Flush the remaining components to memory (as a store of up to a vec4). */
nir_store_buffer_amd(b, nir_vec(b, values, num_values), so_buffer[store_buffer_index],
buffer_offsets[store_buffer_index], zero, zero,
.base = vertex_offset[store_buffer_index] + store_offset,
.access = ACCESS_NON_TEMPORAL);
}
}
static void
ngg_nogs_build_streamout(nir_builder *b, lower_ngg_nogs_state *s)
{
@@ -2196,7 +1743,7 @@ ngg_nogs_build_streamout(nir_builder *b, lower_ngg_nogs_state *s)
nir_def *buffer_offsets[4] = {0};
nir_def *so_buffer[4] = {0};
nir_def *tid_in_tg = nir_load_local_invocation_index(b);
-ngg_build_streamout_buffer_info(b, info, s->options->gfx_level, s->options->has_xfb_prim_query,
+ac_nir_ngg_build_streamout_buffer_info(b, info, s->options->gfx_level, s->options->has_xfb_prim_query,
s->options->use_gfx12_xfb_intrinsic, lds_scratch_base, tid_in_tg,
gen_prim_per_stream,
so_buffer, buffer_offsets,
@@ -2221,7 +1768,7 @@ ngg_nogs_build_streamout(nir_builder *b, lower_ngg_nogs_state *s)
{
nir_def *vtx_lds_idx = nir_load_var(b, s->gs_vtx_indices_vars[i]);
nir_def *vtx_lds_addr = pervertex_lds_addr(b, vtx_lds_idx, vtx_lds_stride);
-ngg_build_streamout_vertex(b, info, 0, so_buffer, buffer_offsets, i,
+ac_nir_ngg_build_streamout_vertex(b, info, 0, so_buffer, buffer_offsets, i,
vtx_lds_addr, &s->out, s->skip_primitive_id);
}
nir_pop_if(b, if_valid_vertex);
@@ -3319,7 +2866,7 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
nir_def *emit_prim[4] = {0};
nir_def *buffer_offsets[4] = {0};
nir_def *so_buffer[4] = {0};
-ngg_build_streamout_buffer_info(b, info, s->options->gfx_level, s->options->has_xfb_prim_query,
+ac_nir_ngg_build_streamout_buffer_info(b, info, s->options->gfx_level, s->options->has_xfb_prim_query,
s->options->use_gfx12_xfb_intrinsic, s->lds_addr_gs_scratch, tid_in_tg,
gen_prim, so_buffer, buffer_offsets, emit_prim);
@@ -3349,7 +2896,7 @@ ngg_gs_build_streamout(nir_builder *b, lower_ngg_gs_state *s)
/* Write all vertices of this primitive to streamout buffer. */
for (unsigned i = 0; i < s->num_vertices_per_primitive; i++) {
-ngg_build_streamout_vertex(b, info, stream, so_buffer,
+ac_nir_ngg_build_streamout_vertex(b, info, stream, so_buffer,
stream_buffer_offsets, i,
exported_vtx_lds_addr[i],
&s->out, false);


@@ -945,3 +945,456 @@ ac_nir_create_output_phis(nir_builder *b,
}
}
}
static nir_def *
write_values_to_lanes(nir_builder *b, nir_def **values, unsigned lane_mask)
{
nir_def *lanes = nir_imm_int(b, 0);
u_foreach_bit(i, lane_mask) {
lanes = nir_write_invocation_amd(b, lanes, values[i], nir_imm_int(b, i));
}
return lanes;
}
static nir_def *
read_values_from_4_lanes(nir_builder *b, nir_def *values, unsigned lane_mask)
{
nir_def *undef = nir_undef(b, 1, 32);
nir_def *per_lane[4] = {undef, undef, undef, undef};
u_foreach_bit(i, lane_mask) {
per_lane[i] = nir_read_invocation(b, values, nir_imm_int(b, i));
}
return nir_vec(b, per_lane, 4);
}
void
ac_nir_ngg_build_streamout_buffer_info(nir_builder *b,
nir_xfb_info *info,
enum amd_gfx_level gfx_level,
bool has_xfb_prim_query,
bool use_gfx12_xfb_intrinsic,
nir_def *scratch_base,
nir_def *tid_in_tg,
nir_def *gen_prim[4],
nir_def *so_buffer_ret[4],
nir_def *buffer_offsets_ret[4],
nir_def *emit_prim_ret[4])
{
nir_def *prim_stride[4] = {0};
nir_def *undef = nir_undef(b, 1, 32);
/* For radeonsi, which passes this value as an argument when this is a VS. Streamout needs
* the accurate number of vertices per primitive to write the correct amount of data to the buffer.
*/
nir_def *num_vert_per_prim = nir_load_num_vertices_per_primitive_amd(b);
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (!(info->buffers_written & BITFIELD_BIT(buffer)))
continue;
assert(info->buffers[buffer].stride);
prim_stride[buffer] =
nir_imul_imm(b, num_vert_per_prim, info->buffers[buffer].stride);
so_buffer_ret[buffer] = nir_load_streamout_buffer_amd(b, .base = buffer);
}
nir_if *if_invocation_0 = nir_push_if(b, nir_ieq_imm(b, tid_in_tg, 0));
{
nir_def *any_buffer_valid = nir_imm_false(b);
nir_def *workgroup_buffer_sizes[4];
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (info->buffers_written & BITFIELD_BIT(buffer)) {
nir_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
/* In radeonsi, we may not know at compile time whether a feedback buffer has been
* bound, so we have to check the buffer size at runtime and skip the GDS update for
* unbound buffers. Otherwise, a previous draw compiled with streamout but without a
* bound feedback buffer would still update GDS and corrupt the current draw's streamout.
*/
nir_def *buffer_valid = nir_ine_imm(b, buffer_size, 0);
nir_def *inc_buffer_size =
nir_imul(b, gen_prim[info->buffer_to_stream[buffer]], prim_stride[buffer]);
workgroup_buffer_sizes[buffer] =
nir_bcsel(b, buffer_valid, inc_buffer_size, nir_imm_int(b, 0));
any_buffer_valid = nir_ior(b, any_buffer_valid, buffer_valid);
} else
workgroup_buffer_sizes[buffer] = undef;
}
nir_def *buffer_offsets = NULL, *xfb_state_address = NULL, *xfb_voffset = NULL;
/* Get the current global offset of each buffer and increase it by the
* workgroup's buffer size. This is an ordered operation sorted by
* ordered_id; each buffer's info is in a channel of a vec4.
*/
if (gfx_level >= GFX12) {
nir_pop_if(b, if_invocation_0);
for (unsigned buffer = 0; buffer < 4; buffer++)
workgroup_buffer_sizes[buffer] = nir_if_phi(b, workgroup_buffer_sizes[buffer], undef);
any_buffer_valid = nir_if_phi(b, any_buffer_valid, nir_undef(b, 1, 1));
/* These must be set after nir_pop_if and phis. */
xfb_state_address = nir_load_xfb_state_address_gfx12_amd(b);
xfb_voffset = nir_imul_imm(b, tid_in_tg, 8);
nir_if *if_4lanes = nir_push_if(b, nir_iand(b, any_buffer_valid, nir_ult_imm(b, tid_in_tg, 4)));
{
/* Move workgroup buffer sizes from SGPRs to the first 4 lanes. */
nir_def *workgroup_buffer_size_per_lane =
write_values_to_lanes(b, workgroup_buffer_sizes, info->buffers_written);
nir_def *ordered_id = nir_load_ordered_id_amd(b);
/* The atomic value for the 4 lanes is:
* lane 0: uvec2(ordered_id, workgroup_buffer_size0)
* lane 1: uvec2(ordered_id, workgroup_buffer_size1)
* lane 2: uvec2(ordered_id, workgroup_buffer_size2)
* lane 3: uvec2(ordered_id, workgroup_buffer_size3)
*/
nir_def *atomic_src = nir_pack_64_2x32_split(b, ordered_id,
workgroup_buffer_size_per_lane);
/* The memory layout of the xfb state is:
* struct {
* unsigned ordered_id;
* unsigned dwords_written0;
* unsigned ordered_id;
* unsigned dwords_written1;
* unsigned ordered_id;
* unsigned dwords_written2;
* unsigned ordered_id;
* unsigned dwords_written3;
* };
*
* Notes:
* - global_atomic_ordered_add_b64 is semantically a 64-bit atomic, requiring 8-byte
* address alignment, even though it operates on a pair of 32-bit values.
* - The whole structure is updated at once by issuing the atomic from 4 lanes
* with 8-byte address increments.
* - The whole structure should be entirely within one 64B block of memory
* for performance. (the address bits above 64B should not differ between lanes)
*/
nir_def *buffer_offset_per_lane;
/* The gfx12 intrinsic inserts hand-written assembly producing better code than current
* LLVM.
*/
if (use_gfx12_xfb_intrinsic) {
buffer_offset_per_lane =
nir_ordered_add_loop_gfx12_amd(b, xfb_state_address, xfb_voffset, ordered_id,
atomic_src);
/* Move the buffer offsets from the 4 lanes to lane 0. */
buffer_offsets = read_values_from_4_lanes(b, buffer_offset_per_lane, info->buffers_written);
} else {
/* The NIR version of the above using nir_atomic_op_ordered_add_gfx12_amd. */
enum { NUM_ATOMICS_IN_FLIGHT = 6 };
nir_variable *result_ring[NUM_ATOMICS_IN_FLIGHT] = {0};
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT; i++)
result_ring[i] = nir_local_variable_create(b->impl, glsl_uint64_t_type(), "result");
/* Issue the first N-1 atomics. The shader must not wait because we want them to be
* pipelined. It will only wait for the oldest atomic in the NIR loop.
*/
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT - 1; i++) {
nir_store_var(b, result_ring[i],
nir_global_atomic_amd(b, 64, xfb_state_address, atomic_src, xfb_voffset,
.atomic_op = nir_atomic_op_ordered_add_gfx12_amd), 0x1);
ac_nir_sleep(b, 24);
}
nir_variable *buffer_offsets_var =
nir_local_variable_create(b->impl, glsl_vec4_type(), "buffer_offset_per_lane");
nir_loop *loop = nir_push_loop(b);
{
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT; i++) {
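/* Ring indexing: read_index walks from the oldest outstanding atomic; issue_index trails it
* by one slot, so the first unrolled iteration issues the last free ring slot and later
* iterations re-issue the slot that was checked (and not yet ready) in the previous iteration.
*/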
int issue_index = (NUM_ATOMICS_IN_FLIGHT - 1 + i) % NUM_ATOMICS_IN_FLIGHT;
int read_index = i;
/* Issue (or repeat) the atomic. */
nir_store_var(b, result_ring[issue_index],
nir_global_atomic_amd(b, 64, xfb_state_address, atomic_src, xfb_voffset,
.atomic_op = nir_atomic_op_ordered_add_gfx12_amd), 0x1);
/* Break if the oldest atomic succeeded in incrementing the offsets. */
nir_def *oldest_result = nir_load_var(b, result_ring[read_index]);
nir_def *loaded_ordered_id = nir_unpack_64_2x32_split_x(b, oldest_result);
/* Debug: Write the vec4 into a shader log ring buffer. */
#if 0
nir_def *loaded_dwords_written = nir_unpack_64_2x32_split_y(b, oldest_result);
ac_nir_store_debug_log_amd(b, nir_vec4(b, nir_u2u32(b, xfb_state_address),
ordered_id, loaded_ordered_id,
loaded_dwords_written));
#endif
nir_def *continue_if = nir_ieq(b, loaded_ordered_id, ordered_id);
continue_if = nir_inot(b, nir_vote_any(b, 1, continue_if));
nir_push_if(b, continue_if);
}
nir_jump(b, nir_jump_continue);
for (unsigned i = 0; i < NUM_ATOMICS_IN_FLIGHT; i++) {
int read_index = NUM_ATOMICS_IN_FLIGHT - 1 - i;
nir_push_else(b, NULL);
{
nir_def *result = nir_load_var(b, result_ring[read_index]);
buffer_offset_per_lane = nir_unpack_64_2x32_split_y(b, result);
buffer_offsets = read_values_from_4_lanes(b, buffer_offset_per_lane, info->buffers_written);
nir_store_var(b, buffer_offsets_var, buffer_offsets, info->buffers_written);
}
nir_pop_if(b, NULL);
}
nir_jump(b, nir_jump_break);
}
nir_pop_loop(b, loop);
buffer_offsets = nir_load_var(b, buffer_offsets_var);
}
}
nir_pop_if(b, if_4lanes);
buffer_offsets = nir_if_phi(b, buffer_offsets, nir_undef(b, 4, 32));
if_invocation_0 = nir_push_if(b, nir_ieq_imm(b, tid_in_tg, 0));
} else {
nir_def *ordered_id = nir_load_ordered_id_amd(b);
buffer_offsets =
nir_ordered_xfb_counter_add_gfx11_amd(b, ordered_id,
nir_vec(b, workgroup_buffer_sizes, 4),
/* mask of buffers to update */
.write_mask = info->buffers_written);
}
nir_def *emit_prim[4];
memcpy(emit_prim, gen_prim, 4 * sizeof(nir_def *));
nir_def *any_overflow = nir_imm_false(b);
nir_def *overflow_amount[4] = {undef, undef, undef, undef};
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (!(info->buffers_written & BITFIELD_BIT(buffer)))
continue;
nir_def *buffer_size = nir_channel(b, so_buffer_ret[buffer], 2);
/* Only consider overflow for valid feedback buffers because
* otherwise the ordered operation above (GDS atomic return) might
* return non-zero offsets for invalid buffers.
*/
nir_def *buffer_valid = nir_ine_imm(b, buffer_size, 0);
nir_def *buffer_offset = nir_channel(b, buffer_offsets, buffer);
buffer_offset = nir_bcsel(b, buffer_valid, buffer_offset, nir_imm_int(b, 0));
nir_def *remain_size = nir_isub(b, buffer_size, buffer_offset);
nir_def *remain_prim = nir_idiv(b, remain_size, prim_stride[buffer]);
nir_def *overflow = nir_ilt(b, buffer_size, buffer_offset);
any_overflow = nir_ior(b, any_overflow, overflow);
overflow_amount[buffer] = nir_imax(b, nir_imm_int(b, 0),
nir_isub(b, buffer_offset, buffer_size));
unsigned stream = info->buffer_to_stream[buffer];
/* If a previous workgroup overflowed, we can't emit any primitives. */
emit_prim[stream] = nir_bcsel(
b, overflow, nir_imm_int(b, 0),
/* We can emit some of the primitives, limited by the smallest buffer. */
nir_imin(b, emit_prim[stream], remain_prim));
/* Save to LDS so it can be accessed by other waves in this workgroup. */
nir_store_shared(b, buffer_offset, scratch_base, .base = buffer * 4);
}
/* We have to fix up the streamout offsets if we overflowed because they determine
* the vertex count for DrawTransformFeedback.
*/
if (gfx_level >= GFX12) {
nir_pop_if(b, if_invocation_0);
any_overflow = nir_if_phi(b, any_overflow, nir_undef(b, 1, 1));
for (unsigned buffer = 0; buffer < 4; buffer++)
overflow_amount[buffer] = nir_if_phi(b, overflow_amount[buffer], undef);
for (unsigned stream = 0; stream < 4; stream++) {
if (emit_prim[stream])
emit_prim[stream] = nir_if_phi(b, emit_prim[stream], undef);
}
nir_if *if_any_overflow_4_lanes =
nir_push_if(b, nir_iand(b, any_overflow, nir_ult_imm(b, tid_in_tg, 4)));
{
/* Move overflow amounts from SGPRs to the first 4 lanes. */
nir_def *overflow_amount_per_lane =
write_values_to_lanes(b, overflow_amount, info->buffers_written);
nir_global_atomic_amd(b, 32, xfb_state_address, nir_ineg(b, overflow_amount_per_lane),
xfb_voffset, .base = 4, .atomic_op = nir_atomic_op_iadd);
}
nir_pop_if(b, if_any_overflow_4_lanes);
if_invocation_0 = nir_push_if(b, nir_ieq_imm(b, tid_in_tg, 0));
} else {
nir_if *if_any_overflow = nir_push_if(b, any_overflow);
nir_xfb_counter_sub_gfx11_amd(b, nir_vec(b, overflow_amount, 4),
/* mask of buffers to update */
.write_mask = info->buffers_written);
nir_pop_if(b, if_any_overflow);
}
/* Save to LDS so it can be accessed by other waves in this workgroup. */
for (unsigned stream = 0; stream < 4; stream++) {
if (!(info->streams_written & BITFIELD_BIT(stream)))
continue;
nir_store_shared(b, emit_prim[stream], scratch_base, .base = 16 + stream * 4);
}
/* Update shader query. */
if (has_xfb_prim_query) {
nir_if *if_shader_query = nir_push_if(b, nir_load_prim_xfb_query_enabled_amd(b));
{
for (unsigned stream = 0; stream < 4; stream++) {
if (info->streams_written & BITFIELD_BIT(stream))
nir_atomic_add_xfb_prim_count_amd(b, emit_prim[stream], .stream_id = stream);
}
}
nir_pop_if(b, if_shader_query);
}
}
nir_pop_if(b, if_invocation_0);
nir_barrier(b, .execution_scope = SCOPE_WORKGROUP,
.memory_scope = SCOPE_WORKGROUP,
.memory_semantics = NIR_MEMORY_ACQ_REL,
.memory_modes = nir_var_mem_shared);
/* Fetch the per-buffer offsets in all waves. */
for (unsigned buffer = 0; buffer < 4; buffer++) {
if (!(info->buffers_written & BITFIELD_BIT(buffer)))
continue;
buffer_offsets_ret[buffer] =
nir_load_shared(b, 1, 32, scratch_base, .base = buffer * 4);
}
/* Fetch the per-stream emit prim in all waves. */
for (unsigned stream = 0; stream < 4; stream++) {
if (!(info->streams_written & BITFIELD_BIT(stream)))
continue;
emit_prim_ret[stream] =
nir_load_shared(b, 1, 32, scratch_base, .base = 16 + stream * 4);
}
}
void
ac_nir_ngg_build_streamout_vertex(nir_builder *b, nir_xfb_info *info,
unsigned stream, nir_def *so_buffer[4],
nir_def *buffer_offsets[4],
unsigned vertex_index, nir_def *vtx_lds_addr,
ac_nir_prerast_out *pr_out,
bool skip_primitive_id)
{
unsigned vertex_offset[NIR_MAX_XFB_BUFFERS] = {0};
u_foreach_bit(buffer, info->buffers_written) {
/* We use imm_offset for the vertex offset within a primitive, and GFX11 only supports
* 12-bit unsigned imm_offset. (GFX12 supports 24-bit signed imm_offset)
*/
assert(info->buffers[buffer].stride * 3 < 4096);
vertex_offset[buffer] = vertex_index * info->buffers[buffer].stride;
}
nir_def *zero = nir_imm_int(b, 0);
unsigned num_values = 0, store_offset = 0, store_buffer_index = 0;
nir_def *values[4];
for (unsigned i = 0; i < info->output_count; i++) {
nir_xfb_output_info *out = info->outputs + i;
if (!out->component_mask || info->buffer_to_stream[out->buffer] != stream)
continue;
unsigned base;
if (out->location >= VARYING_SLOT_VAR0_16BIT) {
base =
util_bitcount64(b->shader->info.outputs_written) +
util_bitcount(b->shader->info.outputs_written_16bit &
BITFIELD_MASK(out->location - VARYING_SLOT_VAR0_16BIT));
} else {
uint64_t outputs_written = b->shader->info.outputs_written;
if (skip_primitive_id)
outputs_written &= ~VARYING_BIT_PRIMITIVE_ID;
base =
util_bitcount64(outputs_written &
BITFIELD64_MASK(out->location));
}
unsigned offset = (base * 4 + out->component_offset) * 4;
unsigned count = util_bitcount(out->component_mask);
assert(u_bit_consecutive(out->component_offset, count) == out->component_mask);
nir_def *out_data =
nir_load_shared(b, count, 32, vtx_lds_addr, .base = offset);
for (unsigned comp = 0; comp < count; comp++) {
nir_def *data = nir_channel(b, out_data, comp);
/* Convert 16-bit outputs to 32-bit.
*
* OpenGL ES puts 16-bit medium-precision varyings in VARYING_SLOT_VAR0_16BIT.
* We need to convert them to 32-bit for streamout.
*
* Vulkan does not allow 8/16-bit varyings for streamout.
*/
if (out->location >= VARYING_SLOT_VAR0_16BIT) {
unsigned index = out->location - VARYING_SLOT_VAR0_16BIT;
unsigned c = out->component_offset + comp;
nir_def *v;
nir_alu_type t;
if (out->high_16bits) {
v = nir_unpack_32_2x16_split_y(b, data);
t = pr_out->types_16bit_hi[index][c];
} else {
v = nir_unpack_32_2x16_split_x(b, data);
t = pr_out->types_16bit_lo[index][c];
}
t = nir_alu_type_get_base_type(t);
data = nir_convert_to_bit_size(b, v, t, 32);
}
const unsigned store_comp_offset = out->offset + comp * 4;
const bool has_hole = store_offset + num_values * 4 != store_comp_offset;
/* Flush the gathered components to memory (as a vec4 store or smaller) when the vec4 is full, the buffer changes, or there is a hole. */
if (num_values && (num_values == 4 || store_buffer_index != out->buffer || has_hole)) {
nir_store_buffer_amd(b, nir_vec(b, values, num_values), so_buffer[store_buffer_index],
buffer_offsets[store_buffer_index], zero, zero,
.base = vertex_offset[store_buffer_index] + store_offset,
.access = ACCESS_NON_TEMPORAL);
num_values = 0;
}
/* Initialize the buffer index and offset if we are beginning a new vec4 store. */
if (num_values == 0) {
store_buffer_index = out->buffer;
store_offset = store_comp_offset;
}
values[num_values++] = data;
}
}
if (num_values) {
/* Flush the remaining components to memory (as a store of up to a vec4). */
nir_store_buffer_amd(b, nir_vec(b, values, num_values), so_buffer[store_buffer_index],
buffer_offsets[store_buffer_index], zero, zero,
.base = vertex_offset[store_buffer_index] + store_offset,
.access = ACCESS_NON_TEMPORAL);
}
}