radv: rename indirect_descriptor_sets to indirect_descriptors

With descriptor heaps, the driver will also have to emit indirect
descriptor heaps in some cases.

Rename a couple of things to make them more generic.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/37786>
This commit is contained in:
Samuel Pitoiset 2025-10-09 12:40:13 +02:00 committed by Marge Bot
parent 0ff1ce4ac5
commit 609ae4e647
15 changed files with 40 additions and 40 deletions

View file

@ -47,7 +47,7 @@ static nir_def *
load_desc_ptr(nir_builder *b, apply_layout_state *state, unsigned set)
{
const struct radv_userdata_locations *user_sgprs_locs = &state->info->user_sgprs_locs;
if (user_sgprs_locs->shader_data[AC_UD_INDIRECT_DESCRIPTOR_SETS].sgpr_idx != -1) {
if (user_sgprs_locs->shader_data[AC_UD_INDIRECT_DESCRIPTORS].sgpr_idx != -1) {
nir_def *addr = get_scalar_arg(b, 1, state->args->descriptor_sets[0]);
addr = convert_pointer_to_64_bit(b, state, addr);
return ac_nir_load_smem(b, 1, addr, nir_imm_int(b, set * 4), 4, 0);

View file

@ -1794,15 +1794,15 @@ radv_emit_descriptors_per_stage(const struct radv_device *device, struct radv_cm
const struct radv_shader *shader, const struct radv_descriptor_state *descriptors_state)
{
const struct radv_physical_device *pdev = radv_device_physical(device);
const uint32_t indirect_descriptor_sets_offset = radv_get_user_sgpr_loc(shader, AC_UD_INDIRECT_DESCRIPTOR_SETS);
const uint32_t indirect_descriptors_offset = radv_get_user_sgpr_loc(shader, AC_UD_INDIRECT_DESCRIPTORS);
if (indirect_descriptor_sets_offset) {
if (indirect_descriptors_offset) {
radeon_begin(cs);
if (pdev->info.gfx_level >= GFX12) {
gfx12_push_32bit_pointer(indirect_descriptor_sets_offset, descriptors_state->indirect_descriptor_sets_va,
gfx12_push_32bit_pointer(indirect_descriptors_offset, descriptors_state->indirect_descriptor_sets_va,
&pdev->info);
} else {
radeon_emit_32bit_pointer(indirect_descriptor_sets_offset, descriptors_state->indirect_descriptor_sets_va,
radeon_emit_32bit_pointer(indirect_descriptors_offset, descriptors_state->indirect_descriptor_sets_va,
&pdev->info);
}
radeon_end();
@ -6252,7 +6252,7 @@ radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer, VkShaderStageFlags st
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
struct radv_cmd_stream *cs = cmd_buffer->cs;
if (descriptors_state->need_indirect_descriptor_sets)
if (descriptors_state->need_indirect_descriptors)
radv_upload_indirect_descriptor_sets(cmd_buffer, descriptors_state);
ASSERTED unsigned cdw_max = radeon_check_space(device->ws, cs->b, MAX_SETS * MESA_VULKAN_SHADER_STAGES * 4);
@ -8673,8 +8673,8 @@ radv_CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipeline
pipeline->need_push_constants_upload;
cmd_buffer->push_constant_state[vk_to_bind_point(pipelineBindPoint)].dynamic_offset_count =
pipeline->dynamic_offset_count;
cmd_buffer->descriptors[vk_to_bind_point(pipelineBindPoint)].need_indirect_descriptor_sets =
pipeline->need_indirect_descriptor_sets;
cmd_buffer->descriptors[vk_to_bind_point(pipelineBindPoint)].need_indirect_descriptors =
pipeline->need_indirect_descriptors;
}
VKAPI_ATTR void VKAPI_CALL
@ -12289,7 +12289,7 @@ radv_bind_graphics_shaders(struct radv_cmd_buffer *cmd_buffer)
struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
const struct radv_physical_device *pdev = radv_device_physical(device);
uint32_t push_constant_size = 0, dynamic_offset_count = 0;
bool need_indirect_descriptor_sets = false;
bool need_indirect_descriptors = false;
bool need_push_constants_upload = false;
for (unsigned s = 0; s <= MESA_SHADER_MESH; s++) {
@ -12323,7 +12323,7 @@ radv_bind_graphics_shaders(struct radv_cmd_buffer *cmd_buffer)
continue;
/* Compute push constants/indirect descriptors state. */
need_indirect_descriptor_sets |= radv_shader_need_indirect_descriptor_sets(shader);
need_indirect_descriptors |= radv_shader_need_indirect_descriptors(shader);
need_push_constants_upload |= radv_shader_need_push_constants_upload(shader);
push_constant_size += shader_obj->push_constant_size;
dynamic_offset_count += shader_obj->dynamic_offset_count;
@ -12359,7 +12359,7 @@ radv_bind_graphics_shaders(struct radv_cmd_buffer *cmd_buffer)
radv_get_descriptors_state(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
struct radv_push_constant_state *pc_state = &cmd_buffer->push_constant_state[VK_PIPELINE_BIND_POINT_GRAPHICS];
descriptors_state->need_indirect_descriptor_sets = need_indirect_descriptor_sets;
descriptors_state->need_indirect_descriptors = need_indirect_descriptors;
pc_state->need_upload = need_push_constants_upload;
pc_state->size = push_constant_size;
pc_state->dynamic_offset_count = dynamic_offset_count;
@ -15333,7 +15333,7 @@ radv_bind_compute_shader(struct radv_cmd_buffer *cmd_buffer, struct radv_shader_
radv_get_descriptors_state(cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE);
struct radv_push_constant_state *pc_state = &cmd_buffer->push_constant_state[VK_PIPELINE_BIND_POINT_COMPUTE];
descriptors_state->need_indirect_descriptor_sets = radv_shader_need_indirect_descriptor_sets(shader);
descriptors_state->need_indirect_descriptors = radv_shader_need_indirect_descriptors(shader);
pc_state->need_upload = radv_shader_need_push_constants_upload(shader);
pc_state->size = shader_obj->push_constant_size;
pc_state->dynamic_offset_count = shader_obj->dynamic_offset_count;

View file

@ -245,7 +245,7 @@ struct radv_descriptor_state {
struct radv_push_descriptor_set push_set;
uint32_t dynamic_buffers[4 * MAX_DYNAMIC_BUFFERS];
uint64_t descriptor_buffers[MAX_SETS];
bool need_indirect_descriptor_sets;
bool need_indirect_descriptors;
uint64_t indirect_descriptor_sets_va;
};

View file

@ -279,8 +279,8 @@ radv_get_sequence_size_compute(const struct radv_indirect_command_layout *layout
/* precomputed CS size */
*cmd_size += ies->cs_num_dw * 4;
if (ies->uses_indirect_desc_sets_sgpr) {
/* PKT3_SET_SH_REG for indirect descriptor sets pointer */
if (ies->uses_indirect_descriptors_sgpr) {
/* PKT3_SET_SH_REG for indirect descriptors pointer */
*cmd_size += 3 * 4;
}
@ -762,7 +762,7 @@ struct radv_dgc_params {
/* IES info */
uint64_t ies_addr;
uint32_t ies_stride;
uint32_t indirect_desc_sets_va;
uint32_t indirect_descriptors_va;
/* For conditional rendering on ACE. */
uint8_t predicating;
@ -2500,17 +2500,17 @@ dgc_emit_draw_mesh_tasks_with_count_ace(struct dgc_cmdbuf *ace_cs, nir_def *stre
* Indirect execution set
*/
static void
dgc_emit_indirect_sets(struct dgc_cmdbuf *cs)
dgc_emit_indirect_descriptors(struct dgc_cmdbuf *cs)
{
nir_builder *b = cs->b;
nir_def *indirect_desc_sets_sgpr = load_shader_metadata32(cs, indirect_desc_sets_sgpr);
nir_push_if(b, nir_ine_imm(b, indirect_desc_sets_sgpr, 0));
nir_def *indirect_descriptors_sgpr = load_shader_metadata32(cs, indirect_descriptors_sgpr);
nir_push_if(b, nir_ine_imm(b, indirect_descriptors_sgpr, 0));
{
dgc_cs_begin(cs);
dgc_cs_emit_imm(PKT3(PKT3_SET_SH_REG, 1, 0));
dgc_cs_emit(indirect_desc_sets_sgpr);
dgc_cs_emit(load_param32(b, indirect_desc_sets_va));
dgc_cs_emit(indirect_descriptors_sgpr);
dgc_cs_emit(load_param32(b, indirect_descriptors_va));
dgc_cs_end();
}
nir_pop_if(b, NULL);
@ -2545,7 +2545,7 @@ dgc_emit_ies(struct dgc_cmdbuf *cs)
}
nir_pop_loop(b, NULL);
dgc_emit_indirect_sets(cs);
dgc_emit_indirect_descriptors(cs);
}
/**
@ -3070,7 +3070,7 @@ radv_prepare_dgc_compute(struct radv_cmd_buffer *cmd_buffer, const VkGeneratedCo
radv_upload_indirect_descriptor_sets(cmd_buffer, descriptors_state);
params->ies_stride = ies->stride;
params->indirect_desc_sets_va = descriptors_state->indirect_descriptor_sets_va;
params->indirect_descriptors_va = descriptors_state->indirect_descriptor_sets_va;
} else {
const VkGeneratedCommandsPipelineInfoEXT *pipeline_info =
vk_find_struct_const(pGeneratedCommandsInfo->pNext, GENERATED_COMMANDS_PIPELINE_INFO_EXT);
@ -3412,7 +3412,7 @@ radv_update_ies_shader(struct radv_device *device, struct radv_indirect_executio
set->cs_num_dw = MAX2(set->cs_num_dw, cs.b->cdw);
set->uses_grid_base_sgpr |= md.grid_base_sgpr;
set->uses_upload_sgpr |= !!(md.push_const_sgpr & 0xffff);
set->uses_indirect_desc_sets_sgpr |= md.indirect_desc_sets_sgpr;
set->uses_indirect_descriptors_sgpr |= md.indirect_descriptors_sgpr;
set->push_constant_size = MAX2(set->push_constant_size, shader->info.push_constant_size);
set->compute_scratch_size_per_wave = MAX2(set->compute_scratch_size_per_wave, shader->config.scratch_bytes_per_wave);
set->compute_scratch_waves = MAX2(set->compute_scratch_waves, radv_get_max_scratch_waves(device, shader));

View file

@ -43,7 +43,7 @@ struct radv_indirect_execution_set {
uint32_t cs_num_dw;
bool uses_grid_base_sgpr;
bool uses_upload_sgpr;
bool uses_indirect_desc_sets_sgpr;
bool uses_indirect_descriptors_sgpr;
uint16_t push_constant_size;
uint32_t compute_scratch_size_per_wave;

View file

@ -47,7 +47,7 @@ struct radv_pipeline {
struct vk_pipeline_cache_object *cache_object;
bool is_internal;
bool need_indirect_descriptor_sets;
bool need_indirect_descriptors;
bool need_push_constants_upload;
struct radv_shader *shaders[MESA_VULKAN_SHADER_STAGES];
struct radv_shader *gs_copy_shader;

View file

@ -74,14 +74,14 @@ radv_get_compute_shader_metadata(const struct radv_device *device, const struct
metadata->push_const_sgpr = upload_sgpr | (inline_sgpr << 16);
metadata->inline_push_const_mask = cs->info.inline_push_constant_mask;
metadata->indirect_desc_sets_sgpr = radv_get_user_sgpr(cs, AC_UD_INDIRECT_DESCRIPTOR_SETS);
metadata->indirect_descriptors_sgpr = radv_get_user_sgpr(cs, AC_UD_INDIRECT_DESCRIPTORS);
}
void
radv_compute_pipeline_init(struct radv_compute_pipeline *pipeline, const struct radv_pipeline_layout *layout,
struct radv_shader *shader)
{
pipeline->base.need_indirect_descriptor_sets |= radv_shader_need_indirect_descriptor_sets(shader);
pipeline->base.need_indirect_descriptors |= radv_shader_need_indirect_descriptors(shader);
pipeline->base.need_push_constants_upload |= radv_shader_need_push_constants_upload(shader);
pipeline->base.push_constant_size = layout->push_constant_size;

View file

@ -28,7 +28,7 @@ struct radv_compute_pipeline_metadata {
uint32_t grid_base_sgpr;
uint32_t push_const_sgpr;
uint64_t inline_push_const_mask;
uint32_t indirect_desc_sets_sgpr;
uint32_t indirect_descriptors_sgpr;
};
uint32_t radv_get_compute_resource_limits(const struct radv_physical_device *pdev, const struct radv_shader_info *info);

View file

@ -3332,8 +3332,8 @@ radv_pipeline_init_shader_stages_state(const struct radv_device *device, struct
bool shader_exists = !!pipeline->base.shaders[i];
if (shader_exists || i < MESA_SHADER_COMPUTE) {
if (shader_exists) {
pipeline->base.need_indirect_descriptor_sets |=
radv_shader_need_indirect_descriptor_sets(pipeline->base.shaders[i]);
pipeline->base.need_indirect_descriptors |=
radv_shader_need_indirect_descriptors(pipeline->base.shaders[i]);
pipeline->base.need_push_constants_upload |=
radv_shader_need_push_constants_upload(pipeline->base.shaders[i]);
}

View file

@ -3385,7 +3385,7 @@ radv_create_rt_prolog(struct radv_device *device)
struct radv_shader_info info = {0};
info.stage = MESA_SHADER_COMPUTE;
info.loads_push_constants = true;
info.force_indirect_desc_sets = true;
info.force_indirect_descriptors = true;
info.wave_size = pdev->rt_wave_size;
info.workgroup_size = info.wave_size;
info.user_data_0 = R_00B900_COMPUTE_USER_DATA_0;

View file

@ -710,9 +710,9 @@ uint32_t radv_get_user_sgpr_loc(const struct radv_shader *shader, int idx);
uint32_t radv_get_user_sgpr(const struct radv_shader *shader, int idx);
static inline bool
radv_shader_need_indirect_descriptor_sets(const struct radv_shader *shader)
radv_shader_need_indirect_descriptors(const struct radv_shader *shader)
{
const struct radv_userdata_info *loc = radv_get_user_sgpr_info(shader, AC_UD_INDIRECT_DESCRIPTOR_SETS);
const struct radv_userdata_info *loc = radv_get_user_sgpr_info(shader, AC_UD_INDIRECT_DESCRIPTORS);
return loc->sgpr_idx != -1;
}

View file

@ -95,7 +95,7 @@ declare_global_input_sgprs(const enum amd_gfx_level gfx_level, const struct radv
add_descriptor_set(args, i);
}
} else {
add_ud_arg(args, 1, AC_ARG_CONST_ADDR, &args->descriptor_sets[0], AC_UD_INDIRECT_DESCRIPTOR_SETS);
add_ud_arg(args, 1, AC_ARG_CONST_ADDR, &args->descriptor_sets[0], AC_UD_INDIRECT_DESCRIPTORS);
}
if (info->merged_shader_compiled_separately ||
@ -319,7 +319,7 @@ void
radv_declare_rt_shader_args(enum amd_gfx_level gfx_level, struct radv_shader_args *args)
{
add_ud_arg(args, 2, AC_ARG_CONST_ADDR, &args->ac.rt.uniform_shader_addr, AC_UD_SCRATCH_RING_OFFSETS);
add_ud_arg(args, 1, AC_ARG_CONST_ADDR, &args->descriptor_sets[0], AC_UD_INDIRECT_DESCRIPTOR_SETS);
add_ud_arg(args, 1, AC_ARG_CONST_ADDR, &args->descriptor_sets[0], AC_UD_INDIRECT_DESCRIPTORS);
ac_add_arg(&args->ac, AC_ARG_SGPR, 1, AC_ARG_CONST_ADDR, &args->ac.push_constants);
ac_add_arg(&args->ac, AC_ARG_SGPR, 2, AC_ARG_CONST_ADDR, &args->ac.rt.sbt_descriptors);
ac_add_arg(&args->ac, AC_ARG_SGPR, 1, AC_ARG_CONST_ADDR, &args->ac.rt.traversal_shader_addr);
@ -903,7 +903,7 @@ radv_declare_shader_args(const struct radv_device *device, const struct radv_gra
uint32_t num_desc_set = util_bitcount(info->desc_set_used_mask);
if (info->force_indirect_desc_sets || remaining_sgprs < num_desc_set) {
if (info->force_indirect_descriptors || remaining_sgprs < num_desc_set) {
user_sgpr_info.indirect_all_descriptor_sets = true;
user_sgpr_info.remaining_sgprs--;
} else {

View file

@ -18,7 +18,7 @@ enum radv_ud_index {
AC_UD_SCRATCH_RING_OFFSETS = 0,
AC_UD_PUSH_CONSTANTS = 1,
AC_UD_INLINE_PUSH_CONSTANTS = 2,
AC_UD_INDIRECT_DESCRIPTOR_SETS = 3,
AC_UD_INDIRECT_DESCRIPTORS = 3,
AC_UD_VIEW_INDEX = 4,
AC_UD_STREAMOUT_BUFFERS = 5,
AC_UD_STREAMOUT_STATE = 6,

View file

@ -1096,7 +1096,7 @@ radv_nir_shader_info_pass(struct radv_device *device, const struct nir_shader *n
info->user_data_0 = radv_get_user_data_0(device, info);
info->merged_shader_compiled_separately = radv_is_merged_shader_compiled_separately(device, info);
info->force_indirect_desc_sets = info->merged_shader_compiled_separately || stage_key->indirect_bindable;
info->force_indirect_descriptors = info->merged_shader_compiled_separately || stage_key->indirect_bindable;
switch (nir->info.stage) {
case MESA_SHADER_COMPUTE:

View file

@ -110,7 +110,7 @@ struct radv_shader_info {
bool inputs_linked;
bool outputs_linked;
bool merged_shader_compiled_separately; /* GFX9+ */
bool force_indirect_desc_sets;
bool force_indirect_descriptors;
uint64_t gs_inputs_read; /* Mask of GS inputs read (only used by linked ES) */
unsigned nir_shared_size; /* Only used by LLVM. */