panvk: Interleave UBOs with multiple descriptor sets

The original intention was to put all the non-dynamic UBOs first
followed by all the dynamic ones.  However, we got the calculations
wrong and, once you went above one descriptor set, things started
stomping on each other.

Also, the whole strategy is a bit busted.  Vulkan pipeline layout
compatibility rules say that it's ok to create a pipeline with one
layout and then bind with another so long as the bottom N descriptor set
layouts match and the pipeline uses at most N descriptor sets.  This means
that, while it's safe to have each subsequent set add onto a given pool
of descriptors, if you're going to combine two of those pools, you need
to be careful that the position of descriptors in set N only depends on
the layouts of sets M <= N.  The easy way to do this is to interleave:
emit the UBOs for set 0, then the dynamic UBOs for set 0, then the UBOs
for set 1, then the dynamic UBOs for set 1, etc.

Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16276>
This commit is contained in:
Jason Ekstrand 2022-04-29 10:38:36 -05:00 committed by Marge Bot
parent 6d15d65e19
commit e265583ee1
3 changed files with 60 additions and 30 deletions

View file

@ -497,6 +497,37 @@ panvk_pipeline_layout_ref(struct panvk_pipeline_layout *layout)
return layout;
}
/* Return the index of the first hardware UBO slot used by the given
 * descriptor set.  Sets are laid out back-to-back after the built-in
 * UBOs, with each set's non-dynamic UBOs immediately followed by its
 * dynamic UBOs.  Pass is_dynamic=true to get the start of the dynamic
 * range, which sits num_ubos slots past the non-dynamic start.
 */
static unsigned
panvk_pipeline_layout_ubo_start(const struct panvk_pipeline_layout *layout,
                                unsigned set, bool is_dynamic)
{
   const unsigned dyn_skip =
      is_dynamic ? layout->sets[set].layout->num_ubos : 0;

   return PANVK_NUM_BUILTIN_UBOS +
          layout->sets[set].ubo_offset +
          layout->sets[set].dyn_ubo_offset +
          dyn_skip;
}
static unsigned
panvk_pipeline_layout_ubo_index(const struct panvk_pipeline_layout *layout,
unsigned set, unsigned binding,
unsigned array_index)
{
struct panvk_descriptor_set_binding_layout *binding_layout =
&layout->sets[set].layout->bindings[binding];
const bool is_dynamic =
binding_layout->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
const uint32_t ubo_idx = is_dynamic ? binding_layout->dyn_ubo_idx :
binding_layout->ubo_idx;
return panvk_pipeline_layout_ubo_start(layout, set, is_dynamic) +
ubo_idx + array_index;
}
struct panvk_desc_pool_counters {
unsigned samplers;
unsigned combined_image_samplers;

View file

@ -401,35 +401,41 @@ panvk_per_arch(emit_ubos)(const struct panvk_pipeline *pipeline,
memset(&ubos[PANVK_PUSH_CONST_UBO_INDEX], 0, sizeof(*ubos));
}
for (unsigned i = 0; i < ARRAY_SIZE(state->sets); i++) {
for (unsigned s = 0; s < pipeline->layout->num_sets; s++) {
const struct panvk_descriptor_set_layout *set_layout =
pipeline->layout->sets[i].layout;
const struct panvk_descriptor_set *set = state->sets[i];
unsigned offset = PANVK_NUM_BUILTIN_UBOS +
pipeline->layout->sets[i].ubo_offset;
pipeline->layout->sets[s].layout;
const struct panvk_descriptor_set *set = state->sets[s];
if (!set_layout)
continue;
unsigned ubo_start =
panvk_pipeline_layout_ubo_start(pipeline->layout, s, false);
if (!set) {
memset(&ubos[offset], 0, set_layout->num_ubos * sizeof(*ubos));
unsigned all_ubos = set_layout->num_ubos + set_layout->num_dyn_ubos;
memset(&ubos[ubo_start], 0, all_ubos * sizeof(*ubos));
} else {
memcpy(&ubos[offset], set->ubos, set_layout->num_ubos * sizeof(*ubos));
memcpy(&ubos[ubo_start], set->ubos,
set_layout->num_ubos * sizeof(*ubos));
unsigned dyn_ubo_start =
panvk_pipeline_layout_ubo_start(pipeline->layout, s, true);
for (unsigned i = 0; i < set_layout->num_dyn_ubos; i++) {
const struct panvk_buffer_desc *bdesc =
&state->dyn.ubos[pipeline->layout->sets[s].dyn_ubo_offset + i];
mali_ptr address = panvk_buffer_gpu_ptr(bdesc->buffer,
bdesc->offset);
size_t size = panvk_buffer_range(bdesc->buffer,
bdesc->offset, bdesc->size);
if (size) {
panvk_per_arch(emit_ubo)(address, size,
&ubos[dyn_ubo_start + i]);
} else {
memset(&ubos[dyn_ubo_start + i], 0, sizeof(*ubos));
}
}
}
}
unsigned offset = PANVK_NUM_BUILTIN_UBOS + pipeline->layout->num_ubos;
for (unsigned i = 0; i < pipeline->layout->num_dyn_ubos; i++) {
const struct panvk_buffer_desc *bdesc = &state->dyn.ubos[i];
mali_ptr address = panvk_buffer_gpu_ptr(bdesc->buffer, bdesc->offset);
size_t size = panvk_buffer_range(bdesc->buffer,
bdesc->offset, bdesc->size);
if (size)
panvk_per_arch(emit_ubo)(address, size, &ubos[offset + i]);
else
memset(&ubos[offset + i], 0, sizeof(*ubos));
}
}
void

View file

@ -135,15 +135,8 @@ lower_vulkan_resource_index(nir_builder *b, nir_intrinsic_instr *intr,
switch (binding_layout->type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
base = PANVK_NUM_BUILTIN_UBOS +
ctx->layout->sets[set].ubo_offset +
binding_layout->ubo_idx;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
base = PANVK_NUM_BUILTIN_UBOS +
ctx->layout->sets[set].dyn_ubo_offset +
ctx->layout->num_ubos +
binding_layout->dyn_ubo_idx;
base = panvk_pipeline_layout_ubo_index(ctx->layout, set, binding, 0);
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
base = binding_layout->ssbo_idx + ctx->layout->sets[set].ssbo_offset;