anv: add dynamic buffer offsets support with independent sets

With independent sets, we cannot compute an immediate value for the
index at which to read anv_push_constants::dynamic_offsets to get the
offset of a dynamic buffer, because the pipeline layout may not have
all the descriptor set layouts when we compile the shader.

To solve that issue, we insert a layer of indirection.

This reworks the dynamic buffer offset storage into a 2D array in
anv_cmd_pipeline_state:

   dynamic_offsets[MAX_SETS][MAX_DYNAMIC_BUFFERS]

When the pipeline or the dynamic buffer offsets are updated, we
flatten that array into the
anv_push_constants::dynamic_offsets[MAX_DYNAMIC_BUFFERS] array.
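
Conceptually, the flattening amounts to the following (a minimal
sketch with a hypothetical helper; in the patch itself the copies
happen in anv_cmd_buffer_bind_descriptor_set() and
anv_CmdBindPipeline()):

   /* Hypothetical helper, for illustration only: copy each set's
    * offsets into the flat push constant array at that set's base
    * index within the pipeline layout.
    */
   static void
   flatten_dynamic_offsets(const struct anv_pipeline_sets_layout *layout,
                           const struct anv_cmd_pipeline_state *pipe_state,
                           struct anv_push_constants *push)
   {
      for (uint32_t s = 0; s < layout->num_sets; s++) {
         if (layout->set[s].layout == NULL)
            continue;
         memcpy(&push->dynamic_offsets[layout->set[s].dynamic_offset_start],
                pipe_state->dynamic_offsets[s].offsets,
                sizeof(uint32_t) *
                layout->set[s].layout->dynamic_offset_count);
      }
   }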

For shaders compiled with independent sets, the bottom 6 bits of
element X in anv_push_constants::desc_sets[] are used to specify the
base offset into anv_push_constants::dynamic_offsets[] for set X.

The computation in the shader is now something like:

  base_dyn_buffer_set_idx = anv_push_constants::desc_sets[set_idx] & 0x3f
  dyn_buffer_offset = anv_push_constants::dynamic_offsets[base_dyn_buffer_set_idx + dynamic_buffer_idx]
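
On the CPU side, the packing relies on descriptor set addresses being
ANV_UBO_ALIGNMENT (64 byte) aligned, which leaves the bottom 6 bits of
the address free. A minimal sketch using the masks added by this patch
(set_idx, addr and dynamic_buffer_idx are placeholders):

   /* Pack: set address in bits [6:63], dynamic offset base in bits [0:5]. */
   push->desc_sets[set_idx] =
      (anv_address_physical(addr) & ANV_DESCRIPTOR_SET_ADDRESS_MASK) |
      (layout->set[set_idx].dynamic_offset_start &
       ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK);

   /* Unpack, which is what the lowered shader code does: */
   uint32_t base = (uint32_t)(push->desc_sets[set_idx] &
                              ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK);
   uint32_t offset = push->dynamic_offsets[base + dynamic_buffer_idx];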

Faith suggested instead using a different push constant buffer with
dynamic_offsets prepared for each stage when using independent sets,
but this approach feels easier to understand. There is also room for
optimization: for a given set X, if all the set layouts in the range
[0, X] are known, the indirection can still be avoided. And separate
push constant allocations per stage would have a CPU cost.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Emma Anholt <emma@anholt.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/15637>
Lionel Landwerlin, 2022-04-06 18:12:02 +03:00, committed by Marge Bot
commit 0b8a2de2a1 (parent 16c7c37718)
11 changed files with 384 additions and 100 deletions

@@ -434,7 +434,8 @@ visit_intrinsic(nir_shader *shader, nir_intrinsic_instr *instr)
case nir_intrinsic_image_load_raw_intel:
case nir_intrinsic_get_ubo_size:
case nir_intrinsic_load_ssbo_address:
case nir_intrinsic_load_desc_set_address_intel: {
case nir_intrinsic_load_desc_set_address_intel:
case nir_intrinsic_load_desc_set_dynamic_index_intel: {
unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
for (unsigned i = 0; i < num_srcs; i++) {
if (instr->src[i].ssa->divergent) {

@@ -1741,6 +1741,11 @@ intrinsic("load_reloc_const_intel", dest_comp=1, bit_sizes=[32],
intrinsic("load_desc_set_address_intel", dest_comp=1, bit_sizes=[64],
src_comp=[1], flags=[CAN_ELIMINATE, CAN_REORDER])
# Base offset for a given set in the flattened array of dynamic offsets
# src[0] = { set }
intrinsic("load_desc_set_dynamic_index_intel", dest_comp=1, bit_sizes=[32],
src_comp=[1], flags=[CAN_ELIMINATE, CAN_REORDER])
# OpSubgroupBlockReadINTEL and OpSubgroupBlockWriteINTEL from SPV_INTEL_subgroups.
intrinsic("load_deref_block_intel", dest_comp=0, src_comp=[-1],
indices=[ACCESS], flags=[CAN_ELIMINATE])

@@ -405,6 +405,32 @@ void anv_CmdBindPipeline(
state = &cmd_buffer->state.gfx.base;
stages = gfx_pipeline->base.active_stages;
/* When the pipeline is using independent sets and dynamic buffers, this
* will trigger an update of the dynamic offset base indices packed into
* anv_push_constants::desc_sets.
*/
struct anv_push_constants *push =
&cmd_buffer->state.gfx.base.push_constants;
struct anv_pipeline_sets_layout *layout = &gfx_pipeline->base.base.layout;
if (layout->independent_sets && layout->num_dynamic_buffers > 0) {
bool modified = false;
for (uint32_t s = 0; s < layout->num_sets; s++) {
if (layout->set[s].layout == NULL)
continue;
assert(layout->set[s].dynamic_offset_start < MAX_DYNAMIC_BUFFERS);
if (layout->set[s].layout->dynamic_offset_count > 0 &&
(push->desc_sets[s] & ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK) != layout->set[s].dynamic_offset_start) {
push->desc_sets[s] &= ~ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK;
push->desc_sets[s] |= (layout->set[s].dynamic_offset_start &
ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK);
modified = true;
}
}
if (modified)
cmd_buffer->state.push_constants_dirty |= stages;
}
break;
}
@@ -438,7 +464,7 @@ void anv_CmdBindPipeline(
static void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
VkPipelineBindPoint bind_point,
struct anv_pipeline_layout *layout,
struct anv_pipeline_sets_layout *layout,
uint32_t set_index,
struct anv_descriptor_set *set,
uint32_t *dynamic_offset_count,
@@ -516,7 +542,9 @@ anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
struct anv_push_constants *push = &pipe_state->push_constants;
struct anv_address addr = anv_descriptor_set_address(set);
push->desc_sets[set_index] = anv_address_physical(addr);
push->desc_sets[set_index] &= ~ANV_DESCRIPTOR_SET_ADDRESS_MASK;
push->desc_sets[set_index] |= (anv_address_physical(addr) &
ANV_DESCRIPTOR_SET_ADDRESS_MASK);
if (addr.bo) {
anv_reloc_list_add_bo(cmd_buffer->batch.relocs,
@@ -536,6 +564,11 @@ anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
uint32_t *push_offsets =
&push->dynamic_offsets[dynamic_offset_start];
memcpy(pipe_state->dynamic_offsets[set_index].offsets,
*dynamic_offsets,
sizeof(uint32_t) * MIN2(*dynamic_offset_count,
set_layout->dynamic_offset_count));
/* Assert that everything is in range */
assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
@@ -543,7 +576,8 @@ anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
for (uint32_t i = 0; i < set_layout->dynamic_offset_count; i++) {
if (push_offsets[i] != (*dynamic_offsets)[i]) {
push_offsets[i] = (*dynamic_offsets)[i];
pipe_state->dynamic_offsets[set_index].offsets[i] =
push_offsets[i] = (*dynamic_offsets)[i];
/* dynamic_offset_stages[] elements could contain blanket
* values like VK_SHADER_STAGE_ALL, so limit this to the
* binding point's bits.
@@ -575,12 +609,15 @@ void anv_CmdBindDescriptorSets(
const uint32_t* pDynamicOffsets)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _layout);
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
assert(firstSet + descriptorSetCount <= MAX_SETS);
for (uint32_t i = 0; i < descriptorSetCount; i++) {
ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
if (set == NULL)
continue;
anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
layout, firstSet + i, set,
&dynamicOffsetCount,
@@ -728,8 +765,8 @@ struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
const struct intel_device_info *devinfo = cmd_buffer->device->info;
struct anv_push_constants *data =
&cmd_buffer->state.compute.base.push_constants;
struct anv_cmd_pipeline_state *pipe_state = &cmd_buffer->state.compute.base;
struct anv_push_constants *data = &pipe_state->push_constants;
struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
@@ -906,7 +943,8 @@ void anv_CmdPushDescriptorSetKHR(
const VkWriteDescriptorSet* pDescriptorWrites)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _layout);
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
assert(_set < MAX_SETS);
@@ -1003,7 +1041,8 @@ void anv_CmdPushDescriptorSetWithTemplateKHR(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
VK_FROM_HANDLE(vk_descriptor_update_template, template,
descriptorUpdateTemplate);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _layout);
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
assert(_set < MAX_PUSH_DESCRIPTORS);

@@ -647,6 +647,12 @@ set_layout_buffer_view_count(const struct anv_descriptor_set_layout *set_layout,
return set_layout->buffer_view_count - shrink;
}
static bool
anv_descriptor_set_layout_empty(const struct anv_descriptor_set_layout *set_layout)
{
return set_layout->binding_count == 0;
}
uint32_t
anv_descriptor_set_layout_descriptor_buffer_size(const struct anv_descriptor_set_layout *set_layout,
uint32_t var_desc_count)
@@ -740,6 +746,83 @@ sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
* just multiple descriptor set layouts pasted together
*/
void
anv_pipeline_sets_layout_init(struct anv_pipeline_sets_layout *layout,
struct anv_device *device,
bool independent_sets)
{
memset(layout, 0, sizeof(*layout));
layout->device = device;
layout->independent_sets = independent_sets;
}
void
anv_pipeline_sets_layout_add(struct anv_pipeline_sets_layout *layout,
uint32_t set_idx,
struct anv_descriptor_set_layout *set_layout)
{
if (layout->set[set_idx].layout)
return;
/* Workaround for internal CTS issue 3584 */
if (layout->independent_sets && anv_descriptor_set_layout_empty(set_layout))
return;
layout->num_sets = MAX2(set_idx + 1, layout->num_sets);
layout->set[set_idx].layout =
anv_descriptor_set_layout_ref(set_layout);
layout->set[set_idx].dynamic_offset_start = layout->num_dynamic_buffers;
layout->num_dynamic_buffers += set_layout->dynamic_offset_count;
assert(layout->num_dynamic_buffers < MAX_DYNAMIC_BUFFERS);
}
void
anv_pipeline_sets_layout_hash(struct anv_pipeline_sets_layout *layout)
{
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
for (unsigned s = 0; s < layout->num_sets; s++) {
if (!layout->set[s].layout)
continue;
sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
_mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
sizeof(layout->set[s].dynamic_offset_start));
}
_mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
_mesa_sha1_final(&ctx, layout->sha1);
}
void
anv_pipeline_sets_layout_fini(struct anv_pipeline_sets_layout *layout)
{
for (unsigned s = 0; s < layout->num_sets; s++) {
if (!layout->set[s].layout)
continue;
anv_descriptor_set_layout_unref(layout->device, layout->set[s].layout);
}
}
void
anv_pipeline_sets_layout_print(const struct anv_pipeline_sets_layout *layout)
{
fprintf(stderr, "layout: dyn_count=%u sets=%u ind=%u\n",
layout->num_dynamic_buffers,
layout->num_sets,
layout->independent_sets);
for (unsigned s = 0; s < layout->num_sets; s++) {
if (!layout->set[s].layout)
continue;
fprintf(stderr, " set%i: dyn_start=%u flags=0x%x\n",
s, layout->set[s].dynamic_offset_start, layout->set[s].layout->flags);
}
}
VkResult anv_CreatePipelineLayout(
VkDevice _device,
const VkPipelineLayoutCreateInfo* pCreateInfo,
@@ -756,30 +839,28 @@ VkResult anv_CreatePipelineLayout(
if (layout == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
layout->num_sets = pCreateInfo->setLayoutCount;
unsigned dynamic_offset_count = 0;
anv_pipeline_sets_layout_init(&layout->sets_layout, device,
pCreateInfo->flags & VK_PIPELINE_LAYOUT_CREATE_INDEPENDENT_SETS_BIT_EXT);
for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout,
pCreateInfo->pSetLayouts[set]);
layout->set[set].layout = set_layout;
anv_descriptor_set_layout_ref(set_layout);
layout->set[set].dynamic_offset_start = dynamic_offset_count;
dynamic_offset_count += set_layout->dynamic_offset_count;
}
assert(dynamic_offset_count < MAX_DYNAMIC_BUFFERS);
/* VUID-VkPipelineLayoutCreateInfo-graphicsPipelineLibrary-06753
*
* "If graphicsPipelineLibrary is not enabled, elements of
* pSetLayouts must be valid VkDescriptorSetLayout objects"
*
* As a result of supporting graphicsPipelineLibrary, we need to allow
* null descriptor set layouts.
*/
if (set_layout == NULL)
continue;
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
for (unsigned s = 0; s < layout->num_sets; s++) {
sha1_update_descriptor_set_layout(&ctx, layout->set[s].layout);
_mesa_sha1_update(&ctx, &layout->set[s].dynamic_offset_start,
sizeof(layout->set[s].dynamic_offset_start));
anv_pipeline_sets_layout_add(&layout->sets_layout, set, set_layout);
}
_mesa_sha1_update(&ctx, &layout->num_sets, sizeof(layout->num_sets));
_mesa_sha1_final(&ctx, layout->sha1);
anv_pipeline_sets_layout_hash(&layout->sets_layout);
*pPipelineLayout = anv_pipeline_layout_to_handle(layout);
@@ -792,15 +873,14 @@ void anv_DestroyPipelineLayout(
const VkAllocationCallbacks* pAllocator)
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _pipelineLayout);
if (!pipeline_layout)
if (!layout)
return;
for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
anv_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);
anv_pipeline_sets_layout_fini(&layout->sets_layout);
vk_object_free(&device->vk, pAllocator, pipeline_layout);
vk_object_free(&device->vk, pAllocator, layout);
}
/*

@@ -40,7 +40,7 @@ bool anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
bool use_primitive_replication);
bool anv_nir_lower_ycbcr_textures(nir_shader *shader,
const struct anv_pipeline_layout *layout);
const struct anv_pipeline_sets_layout *layout);
static inline nir_address_format
anv_nir_ssbo_addr_format(const struct anv_physical_device *pdevice,
@@ -67,7 +67,8 @@ bool anv_nir_lower_ubo_loads(nir_shader *shader);
void anv_nir_apply_pipeline_layout(nir_shader *shader,
const struct anv_physical_device *pdevice,
bool robust_buffer_access,
const struct anv_pipeline_layout *layout,
bool independent_sets,
const struct anv_pipeline_sets_layout *layout,
struct anv_pipeline_bind_map *map);
void anv_nir_compute_push_layout(nir_shader *nir,
@@ -83,14 +84,14 @@ void anv_nir_validate_push_layout(struct brw_stage_prog_data *prog_data,
bool anv_nir_add_base_work_group_id(nir_shader *shader);
uint32_t anv_nir_compute_used_push_descriptors(nir_shader *shader,
const struct anv_pipeline_layout *layout);
const struct anv_pipeline_sets_layout *layout);
bool anv_nir_loads_push_desc_buffer(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_sets_layout *layout,
const struct anv_pipeline_bind_map *bind_map);
uint32_t anv_nir_push_desc_ubo_fully_promoted(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_sets_layout *layout,
const struct anv_pipeline_bind_map *bind_map);
#ifdef __cplusplus

@@ -39,7 +39,7 @@
struct apply_pipeline_layout_state {
const struct anv_physical_device *pdevice;
const struct anv_pipeline_layout *layout;
const struct anv_pipeline_sets_layout *layout;
bool add_bounds_checks;
nir_address_format desc_addr_format;
nir_address_format ssbo_addr_format;
@@ -50,6 +50,7 @@ struct apply_pipeline_layout_state {
bool uses_constants;
bool has_dynamic_buffers;
bool has_independent_sets;
uint8_t constants_offset;
struct {
bool desc_buffer_used;
@@ -89,6 +90,9 @@ add_binding(struct apply_pipeline_layout_state *state,
const struct anv_descriptor_set_binding_layout *bind_layout =
&state->layout->set[set].layout->binding[binding];
assert(set < state->layout->num_sets);
assert(binding < state->layout->set[set].layout->binding_count);
if (state->set[set].use_count[binding] < UINT8_MAX)
state->set[set].use_count[binding]++;
@@ -331,16 +335,30 @@ build_res_index(nir_builder *b, uint32_t set, uint32_t binding,
}
assert(bind_layout->dynamic_offset_index < MAX_DYNAMIC_BUFFERS);
uint32_t dynamic_offset_index = 0xff; /* No dynamic offset */
nir_ssa_def *dynamic_offset_index;
if (bind_layout->dynamic_offset_index >= 0) {
dynamic_offset_index =
state->layout->set[set].dynamic_offset_start +
bind_layout->dynamic_offset_index;
if (state->has_independent_sets) {
nir_ssa_def *dynamic_offset_start =
nir_load_desc_set_dynamic_index_intel(b, nir_imm_int(b, set));
dynamic_offset_index =
nir_iadd_imm(b, dynamic_offset_start,
bind_layout->dynamic_offset_index);
} else {
dynamic_offset_index =
nir_imm_int(b,
state->layout->set[set].dynamic_offset_start +
bind_layout->dynamic_offset_index);
}
} else {
dynamic_offset_index = nir_imm_int(b, 0xff); /* No dynamic offset */
}
const uint32_t packed = (bind_layout->descriptor_stride << 16 ) | (set_idx << 8) | dynamic_offset_index;
nir_ssa_def *packed =
nir_ior_imm(b,
dynamic_offset_index,
(bind_layout->descriptor_stride << 16 ) | (set_idx << 8));
return nir_vec4(b, nir_imm_int(b, packed),
return nir_vec4(b, packed,
nir_imm_int(b, bind_layout->descriptor_offset),
nir_imm_int(b, array_size - 1),
array_index);
@@ -1278,15 +1296,49 @@ compare_binding_infos(const void *_a, const void *_b)
return a->binding - b->binding;
}
#ifndef NDEBUG
static void
anv_validate_pipeline_layout(const struct anv_pipeline_sets_layout *layout,
nir_shader *shader)
{
nir_foreach_function(function, shader) {
if (!function->impl)
continue;
nir_foreach_block(block, function->impl) {
nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
if (intrin->intrinsic != nir_intrinsic_vulkan_resource_index)
continue;
unsigned set = nir_intrinsic_desc_set(intrin);
assert(layout->set[set].layout);
}
}
}
}
#endif
void
anv_nir_apply_pipeline_layout(nir_shader *shader,
const struct anv_physical_device *pdevice,
bool robust_buffer_access,
const struct anv_pipeline_layout *layout,
bool independent_sets,
const struct anv_pipeline_sets_layout *layout,
struct anv_pipeline_bind_map *map)
{
void *mem_ctx = ralloc_context(NULL);
#ifndef NDEBUG
/* We should not have any reference to a descriptor set that is not
* given through the pipeline layout (layout->set[set].layout == NULL).
*/
anv_validate_pipeline_layout(layout, shader);
#endif
struct apply_pipeline_layout_state state = {
.pdevice = pdevice,
.layout = layout,
@@ -1298,9 +1350,13 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
.ssbo_addr_format = anv_nir_ssbo_addr_format(pdevice, robust_buffer_access),
.ubo_addr_format = anv_nir_ubo_addr_format(pdevice, robust_buffer_access),
.lowered_instrs = _mesa_pointer_set_create(mem_ctx),
.has_independent_sets = independent_sets,
};
for (unsigned s = 0; s < layout->num_sets; s++) {
if (!layout->set[s].layout)
continue;
const unsigned count = layout->set[s].layout->binding_count;
state.set[s].use_count = rzalloc_array(mem_ctx, uint8_t, count);
state.set[s].surface_offsets = rzalloc_array(mem_ctx, uint8_t, count);
@@ -1328,6 +1384,9 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
unsigned used_binding_count = 0;
for (uint32_t set = 0; set < layout->num_sets; set++) {
struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
if (!set_layout)
continue;
for (unsigned b = 0; b < set_layout->binding_count; b++) {
if (state.set[set].use_count[b] == 0)
continue;
@@ -1341,6 +1400,9 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
used_binding_count = 0;
for (uint32_t set = 0; set < layout->num_sets; set++) {
const struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
if (!set_layout)
continue;
for (unsigned b = 0; b < set_layout->binding_count; b++) {
if (state.set[set].use_count[b] == 0)
continue;
@@ -1380,6 +1442,7 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
for (unsigned i = 0; i < used_binding_count; i++) {
unsigned set = infos[i].set, b = infos[i].binding;
assert(layout->set[set].layout);
const struct anv_descriptor_set_binding_layout *binding =
&layout->set[set].layout->binding[b];
@@ -1421,7 +1484,6 @@ anv_nir_apply_pipeline_layout(nir_shader *shader,
.binding = b,
.index = binding->descriptor_index + i,
.dynamic_offset_index =
layout->set[set].dynamic_offset_start +
binding->dynamic_offset_index + i,
};
}

@@ -68,6 +68,7 @@ anv_nir_compute_push_layout(nir_shader *nir,
}
case nir_intrinsic_load_desc_set_address_intel:
case nir_intrinsic_load_desc_set_dynamic_index_intel:
push_start = MIN2(push_start,
offsetof(struct anv_push_constants, desc_sets));
push_end = MAX2(push_end, push_start +
@@ -171,6 +172,22 @@ anv_nir_compute_push_layout(nir_shader *nir,
.base = offsetof(struct anv_push_constants, desc_sets),
.range = sizeof_field(struct anv_push_constants, desc_sets),
.dest_type = nir_type_uint64);
pc_load = nir_iand_imm(b, pc_load, ANV_DESCRIPTOR_SET_ADDRESS_MASK);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, pc_load);
break;
}
case nir_intrinsic_load_desc_set_dynamic_index_intel: {
b->cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *pc_load = nir_load_uniform(b, 1, 64,
nir_imul_imm(b, intrin->src[0].ssa, sizeof(uint64_t)),
.base = offsetof(struct anv_push_constants, desc_sets),
.range = sizeof_field(struct anv_push_constants, desc_sets),
.dest_type = nir_type_uint64);
pc_load = nir_i2i32(
b,
nir_iand_imm(
b, pc_load, ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK));
nir_ssa_def_rewrite_uses(&intrin->dest.ssa, pc_load);
break;
}

@@ -24,7 +24,7 @@
#include "anv_nir.h"
const struct anv_descriptor_set_layout *
anv_pipeline_layout_get_push_set(const struct anv_pipeline_layout *layout,
anv_pipeline_layout_get_push_set(const struct anv_pipeline_sets_layout *layout,
uint8_t *set_idx)
{
for (unsigned s = 0; s < ARRAY_SIZE(layout->set); s++) {
@@ -51,7 +51,7 @@ anv_pipeline_layout_get_push_set(const struct anv_pipeline_layout *layout,
*/
uint32_t
anv_nir_compute_used_push_descriptors(nir_shader *shader,
const struct anv_pipeline_layout *layout)
const struct anv_pipeline_sets_layout *layout)
{
uint8_t push_set;
const struct anv_descriptor_set_layout *push_set_layout =
@@ -108,7 +108,7 @@ anv_nir_compute_used_push_descriptors(nir_shader *shader,
*/
bool
anv_nir_loads_push_desc_buffer(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_sets_layout *layout,
const struct anv_pipeline_bind_map *bind_map)
{
uint8_t push_set;
@@ -157,7 +157,7 @@ anv_nir_loads_push_desc_buffer(nir_shader *nir,
*/
uint32_t
anv_nir_push_desc_ubo_fully_promoted(nir_shader *nir,
const struct anv_pipeline_layout *layout,
const struct anv_pipeline_sets_layout *layout,
const struct anv_pipeline_bind_map *bind_map)
{
uint8_t push_set;

@@ -279,14 +279,40 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
util_dynarray_init(&pipeline->executables, pipeline->mem_ctx);
anv_pipeline_sets_layout_init(&pipeline->layout, device,
false /* independent_sets */);
return VK_SUCCESS;
}
static void
anv_pipeline_init_layout(struct anv_pipeline *pipeline,
struct anv_pipeline_layout *pipeline_layout)
{
if (pipeline_layout) {
struct anv_pipeline_sets_layout *layout = &pipeline_layout->sets_layout;
for (uint32_t s = 0; s < layout->num_sets; s++) {
if (layout->set[s].layout == NULL)
continue;
anv_pipeline_sets_layout_add(&pipeline->layout, s,
layout->set[s].layout);
}
}
anv_pipeline_sets_layout_hash(&pipeline->layout);
assert(!pipeline_layout ||
!memcmp(pipeline->layout.sha1,
pipeline_layout->sets_layout.sha1,
sizeof(pipeline_layout->sets_layout.sha1)));
}
void
anv_pipeline_finish(struct anv_pipeline *pipeline,
struct anv_device *device,
const VkAllocationCallbacks *pAllocator)
{
anv_pipeline_sets_layout_fini(&pipeline->layout);
anv_reloc_list_finish(&pipeline->batch_relocs,
pAllocator ? pAllocator : &device->vk.alloc);
ralloc_free(pipeline->mem_ctx);
@@ -606,7 +632,6 @@ struct anv_pipeline_stage {
static void
anv_pipeline_hash_graphics(struct anv_graphics_base_pipeline *pipeline,
struct anv_pipeline_layout *layout,
struct anv_pipeline_stage *stages,
uint32_t view_mask,
unsigned char *sha1_out)
@@ -616,8 +641,8 @@ anv_pipeline_hash_graphics(struct anv_graphics_base_pipeline *pipeline,
_mesa_sha1_update(&ctx, &view_mask, sizeof(view_mask));
if (layout)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
_mesa_sha1_update(&ctx, pipeline->base.layout.sha1,
sizeof(pipeline->base.layout.sha1));
const struct anv_device *device = pipeline->base.device;
@@ -642,15 +667,14 @@ anv_pipeline_hash_graphics(struct anv_graphics_base_pipeline *pipeline,
static void
anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
struct anv_pipeline_layout *layout,
struct anv_pipeline_stage *stage,
unsigned char *sha1_out)
{
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
if (layout)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
_mesa_sha1_update(&ctx, pipeline->base.layout.sha1,
sizeof(pipeline->base.layout.sha1));
const struct anv_device *device = pipeline->base.device;
@@ -669,15 +693,14 @@ anv_pipeline_hash_compute(struct anv_compute_pipeline *pipeline,
static void
anv_pipeline_hash_ray_tracing_shader(struct anv_ray_tracing_pipeline *pipeline,
struct anv_pipeline_layout *layout,
struct anv_pipeline_stage *stage,
unsigned char *sha1_out)
{
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
if (layout != NULL)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
_mesa_sha1_update(&ctx, pipeline->base.layout.sha1,
sizeof(pipeline->base.layout.sha1));
const bool rba = pipeline->base.device->vk.enabled_features.robustBufferAccess;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
@@ -690,7 +713,6 @@ anv_pipeline_hash_ray_tracing_shader(struct anv_ray_tracing_pipeline *pipeline,
static void
anv_pipeline_hash_ray_tracing_combined_shader(struct anv_ray_tracing_pipeline *pipeline,
struct anv_pipeline_layout *layout,
struct anv_pipeline_stage *intersection,
struct anv_pipeline_stage *any_hit,
unsigned char *sha1_out)
@@ -698,8 +720,8 @@ anv_pipeline_hash_ray_tracing_combined_shader(struct anv_ray_tracing_pipeline *p
struct mesa_sha1 ctx;
_mesa_sha1_init(&ctx);
if (layout != NULL)
_mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
_mesa_sha1_update(&ctx, pipeline->base.layout.sha1,
sizeof(pipeline->base.layout.sha1));
const bool rba = pipeline->base.device->vk.enabled_features.robustBufferAccess;
_mesa_sha1_update(&ctx, &rba, sizeof(rba));
@@ -743,15 +765,15 @@ anv_pipeline_stage_get_nir(struct anv_pipeline *pipeline,
}
static const struct vk_ycbcr_conversion_state *
lookup_ycbcr_conversion(const void *_pipeline_layout, uint32_t set,
lookup_ycbcr_conversion(const void *_sets_layout, uint32_t set,
uint32_t binding, uint32_t array_index)
{
const struct anv_pipeline_layout *pipeline_layout = _pipeline_layout;
const struct anv_pipeline_sets_layout *sets_layout = _sets_layout;
assert(set < MAX_SETS);
assert(binding < pipeline_layout->set[set].layout->binding_count);
assert(binding < sets_layout->set[set].layout->binding_count);
const struct anv_descriptor_set_binding_layout *bind_layout =
&pipeline_layout->set[set].layout->binding[binding];
&sets_layout->set[set].layout->binding[binding];
if (bind_layout->immutable_samplers == NULL)
return NULL;
@@ -780,12 +802,12 @@ static void
anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
void *mem_ctx,
struct anv_pipeline_stage *stage,
struct anv_pipeline_layout *layout,
uint32_t view_mask,
bool use_primitive_replication)
{
const struct anv_physical_device *pdevice = pipeline->device->physical;
const struct brw_compiler *compiler = pdevice->compiler;
struct anv_pipeline_sets_layout *layout = &pipeline->layout;
struct brw_stage_prog_data *prog_data = &stage->prog_data.base;
nir_shader *nir = stage->nir;
@@ -839,6 +861,7 @@ anv_pipeline_lower_nir(struct anv_pipeline *pipeline,
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
NIR_PASS_V(nir, anv_nir_apply_pipeline_layout,
pdevice, pipeline->device->vk.enabled_features.robustBufferAccess,
layout->independent_sets,
layout, &stage->bind_map);
NIR_PASS(_, nir, nir_lower_explicit_io, nir_var_mem_ubo,
@@ -1686,7 +1709,6 @@ anv_graphics_pipeline_compile(struct anv_graphics_base_pipeline *pipeline,
const VkGraphicsPipelineCreateInfo *info,
const struct vk_graphics_pipeline_state *state)
{
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
VkResult result;
VkPipelineCreationFeedbackEXT pipeline_feedback = {
@@ -1707,7 +1729,7 @@ anv_graphics_pipeline_compile(struct anv_graphics_base_pipeline *pipeline,
anv_graphics_pipeline_init_keys(pipeline, state, stages);
unsigned char sha1[20];
anv_pipeline_hash_graphics(pipeline, layout, stages, state->rp->view_mask, sha1);
anv_pipeline_hash_graphics(pipeline, stages, state->rp->view_mask, sha1);
for (unsigned s = 0; s < ARRAY_SIZE(stages); s++) {
if (!stages[s].info)
@@ -1815,7 +1837,7 @@ anv_graphics_pipeline_compile(struct anv_graphics_base_pipeline *pipeline,
int64_t stage_start = os_time_get_nano();
anv_pipeline_lower_nir(&pipeline->base, pipeline_ctx, stage, layout,
anv_pipeline_lower_nir(&pipeline->base, pipeline_ctx, stage,
state->rp->view_mask, use_primitive_replication);
struct shader_info *cur_info = &stage->nir->info;
@@ -2025,12 +2047,10 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
populate_cs_prog_key(device, device->vk.enabled_features.robustBufferAccess, &stage.key.cs);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
const bool skip_cache_lookup =
(pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
anv_pipeline_hash_compute(pipeline, layout, &stage, stage.cache_key.sha1);
anv_pipeline_hash_compute(pipeline, &stage, stage.cache_key.sha1);
bool cache_hit = false;
if (!skip_cache_lookup) {
@@ -2068,7 +2088,7 @@ anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
anv_pipeline_nir_preprocess(&pipeline->base, stage.nir);
anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage, layout,
anv_pipeline_lower_nir(&pipeline->base, mem_ctx, &stage,
0 /* view_mask */,
false /* use_primitive_replication */);
@@ -2169,6 +2189,9 @@ anv_compute_pipeline_create(struct anv_device *device,
return result;
}
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
anv_pipeline_init_layout(&pipeline->base, pipeline_layout);
anv_batch_set_storage(&pipeline->base.batch, ANV_NULL_ADDRESS,
pipeline->batch_data, sizeof(pipeline->batch_data));
@@ -2295,6 +2318,9 @@ anv_graphics_pipeline_init(struct anv_graphics_pipeline *pipeline,
pipeline->view_mask = state->rp->view_mask;
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
anv_pipeline_init_layout(&pipeline->base.base, pipeline_layout);
result = anv_graphics_pipeline_compile(&pipeline->base, cache, pCreateInfo, state);
if (result != VK_SUCCESS) {
anv_pipeline_finish(&pipeline->base.base, device, alloc);
@@ -2593,8 +2619,6 @@ anv_pipeline_init_ray_tracing_stages(struct anv_ray_tracing_pipeline *pipeline,
const VkRayTracingPipelineCreateInfoKHR *info,
void *pipeline_ctx)
{
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
/* Create enough stage entries for all shader modules plus potential
* combinations in the groups.
*/
@@ -2630,7 +2654,7 @@ anv_pipeline_init_ray_tracing_stages(struct anv_ray_tracing_pipeline *pipeline,
vk_pipeline_hash_shader_stage(sinfo, NULL, stages[i].shader_sha1);
if (stages[i].stage != MESA_SHADER_INTERSECTION) {
anv_pipeline_hash_ray_tracing_shader(pipeline, layout, &stages[i],
anv_pipeline_hash_ray_tracing_shader(pipeline, &stages[i],
stages[i].cache_key.sha1);
}
@@ -2652,12 +2676,11 @@ anv_pipeline_init_ray_tracing_stages(struct anv_ray_tracing_pipeline *pipeline,
if (any_hit_idx != VK_SHADER_UNUSED_KHR) {
assert(any_hit_idx < info->stageCount);
anv_pipeline_hash_ray_tracing_combined_shader(pipeline,
layout,
&stages[intersection_idx],
&stages[any_hit_idx],
stages[intersection_idx].cache_key.sha1);
} else {
anv_pipeline_hash_ray_tracing_shader(pipeline, layout,
anv_pipeline_hash_ray_tracing_shader(pipeline,
&stages[intersection_idx],
stages[intersection_idx].cache_key.sha1);
}
@@ -2729,8 +2752,6 @@ anv_pipeline_compile_ray_tracing(struct anv_ray_tracing_pipeline *pipeline,
struct anv_pipeline_stage *stages =
anv_pipeline_init_ray_tracing_stages(pipeline, info, pipeline_ctx);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);
const bool skip_cache_lookup =
(pipeline->base.flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR);
@@ -2765,7 +2786,7 @@ anv_pipeline_compile_ray_tracing(struct anv_ray_tracing_pipeline *pipeline,
anv_pipeline_nir_preprocess(&pipeline->base, stages[i].nir);
anv_pipeline_lower_nir(&pipeline->base, pipeline_ctx, &stages[i],
layout, 0 /* view_mask */,
0 /* view_mask */,
false /* use_primitive_replication */);
stages[i].feedback.duration += os_time_get_nano() - stage_start;
@@ -3065,6 +3086,9 @@ anv_ray_tracing_pipeline_init(struct anv_ray_tracing_pipeline *pipeline,
util_dynarray_init(&pipeline->shaders, pipeline->base.mem_ctx);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, pCreateInfo->layout);
anv_pipeline_init_layout(&pipeline->base, pipeline_layout);
result = anv_pipeline_compile_ray_tracing(pipeline, cache, pCreateInfo);
if (result != VK_SUCCESS)
goto fail;

@@ -1675,7 +1675,9 @@ struct anv_descriptor_set_binding_layout {
/* Index into the flattened descriptor set */
uint32_t descriptor_index;
/* Index into the dynamic state array for a dynamic buffer */
/* Index into the dynamic state array for a dynamic buffer, relative to the
* set.
*/
int16_t dynamic_offset_index;
/* Index into the descriptor set buffer views */
@@ -1737,11 +1739,13 @@ struct anv_descriptor_set_layout {
void anv_descriptor_set_layout_destroy(struct anv_device *device,
struct anv_descriptor_set_layout *layout);
static inline void
static inline struct anv_descriptor_set_layout *
anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
{
assert(layout && layout->ref_cnt >= 1);
p_atomic_inc(&layout->ref_cnt);
return layout;
}
static inline void
@@ -1982,7 +1986,13 @@ struct anv_pipeline_binding {
/** Plane in the binding index for images */
uint8_t plane;
/** Dynamic offset index (for dynamic UBOs and SSBOs) */
/** Input attachment index (relative to the subpass) */
uint8_t input_attachment_index;
/** Dynamic offset index
*
* For dynamic UBOs and SSBOs, relative to set.
*/
uint8_t dynamic_offset_index;
};
@@ -1997,7 +2007,7 @@ struct anv_push_range {
/** Descriptor set index */
uint8_t set;
/** Dynamic offset index (for dynamic UBOs) */
/** Dynamic offset index (for dynamic UBOs), relative to set. */
uint8_t dynamic_offset_index;
/** Start offset in units of 32B */
@@ -2007,8 +2017,8 @@ struct anv_push_range {
uint8_t length;
};
struct anv_pipeline_layout {
struct vk_object_base base;
struct anv_pipeline_sets_layout {
struct anv_device *device;
struct {
struct anv_descriptor_set_layout *layout;
@@ -2016,12 +2026,35 @@ struct anv_pipeline_layout {
} set[MAX_SETS];
uint32_t num_sets;
uint32_t num_dynamic_buffers;
bool independent_sets;
unsigned char sha1[20];
};
void anv_pipeline_sets_layout_init(struct anv_pipeline_sets_layout *layout,
struct anv_device *device,
bool independent_sets);
void anv_pipeline_sets_layout_fini(struct anv_pipeline_sets_layout *layout);
void anv_pipeline_sets_layout_add(struct anv_pipeline_sets_layout *layout,
uint32_t set_idx,
struct anv_descriptor_set_layout *set_layout);
void anv_pipeline_sets_layout_hash(struct anv_pipeline_sets_layout *layout);
void anv_pipeline_sets_layout_print(const struct anv_pipeline_sets_layout *layout);
struct anv_pipeline_layout {
struct vk_object_base base;
struct anv_pipeline_sets_layout sets_layout;
};
const struct anv_descriptor_set_layout *
anv_pipeline_layout_get_push_set(const struct anv_pipeline_layout *layout,
anv_pipeline_layout_get_push_set(const struct anv_pipeline_sets_layout *layout,
uint8_t *desc_idx);
struct anv_buffer {
@@ -2394,7 +2427,14 @@ struct anv_push_constants {
/** Ray query globals (RT_DISPATCH_GLOBALS) */
uint64_t ray_query_globals;
/* Base addresses for descriptor sets */
#define ANV_DESCRIPTOR_SET_DYNAMIC_INDEX_MASK ((uint64_t)ANV_UBO_ALIGNMENT - 1)
#define ANV_DESCRIPTOR_SET_ADDRESS_MASK (~(uint64_t)(ANV_UBO_ALIGNMENT - 1))
/**
* Bits [0:5]: base dynamic offset index into dynamic_offsets[] for the set
*
* Bits [6:63]: descriptor set address
*/
uint64_t desc_sets[MAX_SETS];
struct {
@@ -2524,6 +2564,21 @@ struct anv_cmd_pipeline_state {
/* Push constant state allocated when flushing push constants. */
struct anv_state push_constants_state;
/**
* Dynamic buffer offsets.
*
* We have a maximum of MAX_DYNAMIC_BUFFERS per pipeline, but with
* independent sets we cannot know in advance how many will be used in
* total. As a result we need to store the maximum possible number per set.
*
* Those values are written into anv_push_constants::dynamic_offsets at
* flush time, once we have the pipeline with the final
* anv_pipeline_sets_layout.
*/
struct {
uint32_t offsets[MAX_DYNAMIC_BUFFERS];
} dynamic_offsets[MAX_SETS];
};
/** State tracking for graphics pipeline
@@ -3118,6 +3173,9 @@ struct anv_pipeline {
*/
VkShaderStageFlags use_push_descriptor_buffer;
/* Layout of the sets used by the pipeline. */
struct anv_pipeline_sets_layout layout;
struct util_dynarray executables;
const struct intel_l3_config * l3_config;

@@ -2041,10 +2041,6 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
if (bt_state->map == NULL)
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
/* Note that we always keep all user-allocated memory objects resident. */
struct anv_push_constants *push = &pipe_state->push_constants;
for (uint32_t s = 0; s < map->surface_count; s++) {
struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
@@ -2239,7 +2235,8 @@ emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
if (desc->buffer) {
/* Compute the offset within the buffer */
uint32_t dynamic_offset =
push->dynamic_offsets[binding->dynamic_offset_index];
pipe_state->dynamic_offsets[
binding->set].offsets[binding->dynamic_offset_index];
uint64_t offset = desc->offset + dynamic_offset;
/* Clamp to the buffer size */
offset = MIN2(offset, desc->buffer->vk.size);
@@ -2547,10 +2544,10 @@ get_push_range_address(struct anv_cmd_buffer *cmd_buffer,
} else {
assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
if (desc->buffer) {
const struct anv_push_constants *push =
&gfx_state->base.push_constants;
const struct anv_cmd_pipeline_state *pipe_state = &gfx_state->base;
uint32_t dynamic_offset =
push->dynamic_offsets[range->dynamic_offset_index];
pipe_state->dynamic_offsets[
range->set].offsets[range->dynamic_offset_index];
return anv_address_add(desc->buffer->address,
desc->offset + dynamic_offset);
}
@@ -2620,10 +2617,10 @@ get_push_range_bound_size(struct anv_cmd_buffer *cmd_buffer,
assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
/* Compute the offset within the buffer */
const struct anv_push_constants *push =
&gfx_state->base.push_constants;
const struct anv_cmd_pipeline_state *pipe_state = &gfx_state->base;
uint32_t dynamic_offset =
push->dynamic_offsets[range->dynamic_offset_index];
pipe_state->dynamic_offsets[
range->set].offsets[range->dynamic_offset_index];
uint64_t offset = desc->offset + dynamic_offset;
/* Clamp to the buffer size */
offset = MIN2(offset, desc->buffer->vk.size);