tu: add reference counting for descriptor set layouts

The spec states that descriptor set layouts can be destroyed at almost
any time:

   "VkDescriptorSetLayout objects may be accessed by commands that operate
    on descriptor sets allocated using that layout, and those descriptor
    sets must not be updated with vkUpdateDescriptorSets after the descriptor
    set layout has been destroyed. Otherwise, a VkDescriptorSetLayout object
    passed as a parameter to create another object is not further accessed
    by that object after the duration of the command it is passed into."

Copied mostly from ANV.
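
As a rough illustration of the lifetime model this adopts, here is a minimal,
self-contained sketch (not turnip code; plain C11 atomics stand in for Mesa's
p_atomic helpers and all names are made up): the layout is created with one
reference owned by the application's handle, each consumer (descriptor set,
pipeline layout, push set) takes its own reference, vkDestroyDescriptorSetLayout
only drops the creation reference, and the object is freed once the last user
lets go.

   #include <assert.h>
   #include <stdatomic.h>
   #include <stdio.h>
   #include <stdlib.h>

   struct layout {
      atomic_uint ref_cnt;
   };

   static struct layout *
   layout_create(void)
   {
      struct layout *l = calloc(1, sizeof(*l));
      atomic_init(&l->ref_cnt, 1);   /* reference owned by the app's handle */
      return l;
   }

   static void
   layout_ref(struct layout *l)      /* a set/pipeline layout starts using it */
   {
      assert(atomic_load(&l->ref_cnt) >= 1);
      atomic_fetch_add(&l->ref_cnt, 1);
   }

   static void
   layout_unref(struct layout *l)    /* one user goes away */
   {
      assert(atomic_load(&l->ref_cnt) >= 1);
      if (atomic_fetch_sub(&l->ref_cnt, 1) == 1) {
         printf("last reference gone, freeing layout\n");
         free(l);
      }
   }

   int
   main(void)
   {
      struct layout *l = layout_create();
      layout_ref(l);    /* vkAllocateDescriptorSets: the set references the layout */
      layout_unref(l);  /* vkDestroyDescriptorSetLayout: only the app's reference is dropped */
      layout_unref(l);  /* vkFreeDescriptorSets / pool reset: now the layout is freed */
      return 0;
   }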

Gitlab: https://gitlab.freedesktop.org/mesa/mesa/-/issues/5893

Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Reviewed-by: Hyunjun Ko <zzoon@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14622>
Danylo Piliaiev 2022-01-19 17:10:17 +02:00 committed by Marge Bot
parent 0513ff6564
commit 1b513f4958
4 changed files with 78 additions and 6 deletions

@@ -1468,6 +1468,12 @@ tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
    u_trace_fini(&cmd_buffer->trace);
+   for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
+      if (cmd_buffer->descriptors[i].push_set.layout)
+         tu_descriptor_set_layout_unref(cmd_buffer->device,
+                                        cmd_buffer->descriptors[i].push_set.layout);
+   }
    vk_command_buffer_finish(&cmd_buffer->vk);
    vk_free2(&cmd_buffer->device->vk.alloc, &cmd_buffer->pool->alloc,
             cmd_buffer);
@@ -1488,6 +1494,9 @@ tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
    for (unsigned i = 0; i < MAX_BIND_POINTS; i++) {
       memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));
+      if (cmd_buffer->descriptors[i].push_set.layout)
+         tu_descriptor_set_layout_unref(cmd_buffer->device,
+                                        cmd_buffer->descriptors[i].push_set.layout);
       memset(&cmd_buffer->descriptors[i].push_set, 0, sizeof(cmd_buffer->descriptors[i].push_set));
       cmd_buffer->descriptors[i].push_set.base.type = VK_OBJECT_TYPE_DESCRIPTOR_SET;
    }
@@ -1912,7 +1921,13 @@ tu_CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer,
    if (set->layout == layout)
       memcpy(set_mem.map, set->mapped_ptr, layout->size);
-   set->layout = layout;
+   if (set->layout != layout) {
+      if (set->layout)
+         tu_descriptor_set_layout_unref(cmd->device, set->layout);
+      tu_descriptor_set_layout_ref(layout);
+      set->layout = layout;
+   }
    set->mapped_ptr = set_mem.map;
    set->va = set_mem.iova;
@@ -1951,7 +1966,13 @@ tu_CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
    if (set->layout == layout)
       memcpy(set_mem.map, set->mapped_ptr, layout->size);
-   set->layout = layout;
+   if (set->layout != layout) {
+      if (set->layout)
+         tu_descriptor_set_layout_unref(cmd->device, set->layout);
+      tu_descriptor_set_layout_ref(layout);
+      set->layout = layout;
+   }
    set->mapped_ptr = set_mem.map;
    set->va = set_mem.iova;

@@ -149,7 +149,7 @@ tu_CreateDescriptorSetLayout(
       immutable_sampler_count * sizeof(struct tu_sampler) +
       ycbcr_sampler_count * sizeof(struct tu_sampler_ycbcr_conversion);
-   set_layout = vk_object_zalloc(&device->vk, pAllocator, size,
+   set_layout = vk_object_zalloc(&device->vk, NULL, size,
                                  VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);
    if (!set_layout)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -169,6 +169,7 @@ tu_CreateDescriptorSetLayout(
          return vk_error(device, result);
       }
+   set_layout->ref_cnt = 1;
    set_layout->binding_count = num_bindings;
    set_layout->shader_stages = 0;
    set_layout->has_immutable_samplers = false;
@@ -277,7 +278,15 @@ tu_DestroyDescriptorSetLayout(VkDevice _device,
    if (!set_layout)
       return;
-   vk_object_free(&device->vk, pAllocator, set_layout);
+   tu_descriptor_set_layout_unref(device, set_layout);
 }
+void
+tu_descriptor_set_layout_destroy(struct tu_device *device,
+                                 struct tu_descriptor_set_layout *layout)
+{
+   assert(layout->ref_cnt == 0);
+   vk_object_free(&device->vk, NULL, layout);
+}
 VKAPI_ATTR void VKAPI_CALL
@@ -399,6 +408,8 @@ tu_CreatePipelineLayout(VkDevice _device,
       TU_FROM_HANDLE(tu_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
       layout->set[set].layout = set_layout;
+      tu_descriptor_set_layout_ref(set_layout);
       layout->set[set].dynamic_offset_start = dynamic_offset_count;
       dynamic_offset_count += set_layout->dynamic_offset_count;
    }
@@ -429,6 +440,9 @@ tu_DestroyPipelineLayout(VkDevice _device,
    if (!pipeline_layout)
       return;
+   for (uint32_t i = 0; i < pipeline_layout->num_sets; i++)
+      tu_descriptor_set_layout_unref(device, pipeline_layout->set[i].layout);
    vk_object_free(&device->vk, pAllocator, pipeline_layout);
 }
@@ -437,7 +451,7 @@ tu_DestroyPipelineLayout(VkDevice _device,
 static VkResult
 tu_descriptor_set_create(struct tu_device *device,
                          struct tu_descriptor_pool *pool,
-                         const struct tu_descriptor_set_layout *layout,
+                         struct tu_descriptor_set_layout *layout,
                          const uint32_t *variable_count,
                          struct tu_descriptor_set **out_set)
 {
@@ -547,6 +561,8 @@ tu_descriptor_set_create(struct tu_device *device,
       }
    }
+   tu_descriptor_set_layout_ref(layout);
    *out_set = set;
    return VK_SUCCESS;
 }
@@ -685,6 +701,10 @@ tu_DestroyDescriptorPool(VkDevice _device,
    if (!pool)
       return;
+   for(int i = 0; i < pool->entry_count; ++i) {
+      tu_descriptor_set_layout_unref(device, pool->entries[i].set->layout);
+   }
    if (!pool->host_memory_base) {
       for(int i = 0; i < pool->entry_count; ++i) {
          tu_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
@@ -709,6 +729,10 @@ tu_ResetDescriptorPool(VkDevice _device,
    TU_FROM_HANDLE(tu_device, device, _device);
    TU_FROM_HANDLE(tu_descriptor_pool, pool, descriptorPool);
+   for(int i = 0; i < pool->entry_count; ++i) {
+      tu_descriptor_set_layout_unref(device, pool->entries[i].set->layout);
+   }
    if (!pool->host_memory_base) {
       for(int i = 0; i < pool->entry_count; ++i) {
          tu_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
@@ -782,6 +806,9 @@ tu_FreeDescriptorSets(VkDevice _device,
    for (uint32_t i = 0; i < count; i++) {
       TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
+      if (set)
+         tu_descriptor_set_layout_unref(device, set->layout);
       if (set && !pool->host_memory_base)
          tu_descriptor_set_destroy(device, pool, set, true);
    }

@@ -65,6 +65,9 @@ struct tu_descriptor_set_layout
 {
    struct vk_object_base base;
+   /* Descriptor set layouts can be destroyed at almost any time */
+   uint32_t ref_cnt;
    /* The create flags for this descriptor set layout */
    VkDescriptorSetLayoutCreateFlags flags;
@@ -92,6 +95,27 @@ struct tu_descriptor_set_layout
    struct tu_descriptor_set_binding_layout binding[0];
 };
+struct tu_device;
+void tu_descriptor_set_layout_destroy(struct tu_device *device,
+                                      struct tu_descriptor_set_layout *layout);
+static inline void
+tu_descriptor_set_layout_ref(struct tu_descriptor_set_layout *layout)
+{
+   assert(layout && layout->ref_cnt >= 1);
+   p_atomic_inc(&layout->ref_cnt);
+}
+static inline void
+tu_descriptor_set_layout_unref(struct tu_device *device,
+                               struct tu_descriptor_set_layout *layout)
+{
+   assert(layout && layout->ref_cnt >= 1);
+   if (p_atomic_dec_zero(&layout->ref_cnt))
+      tu_descriptor_set_layout_destroy(device, layout);
+}
 struct tu_pipeline_layout
 {
    struct vk_object_base base;

@@ -658,7 +658,7 @@ struct tu_descriptor_set
 {
    struct vk_object_base base;
-   const struct tu_descriptor_set_layout *layout;
+   struct tu_descriptor_set_layout *layout;
    struct tu_descriptor_pool *pool;
    uint32_t size;