radv: move descriptor pool implementation to radv_descriptor_pool.c/h

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/35732>
This commit is contained in:
Samuel Pitoiset 2025-06-25 09:08:56 +02:00 committed by Marge Bot
parent ac16b8c439
commit 8493331eb1
6 changed files with 305 additions and 258 deletions

View file

@@ -112,6 +112,8 @@ libradv_files = files(
'radv_device.h',
'radv_device_memory.c',
'radv_device_memory.h',
'radv_descriptor_pool.c',
'radv_descriptor_pool.h',
'radv_descriptor_set.c',
'radv_descriptor_set.h',
'radv_dgc.c',

View file

@@ -0,0 +1,242 @@
/*
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
*
* SPDX-License-Identifier: MIT
*/
#include "radv_descriptor_pool.h"
#include "radv_buffer.h"
#include "radv_descriptor_set.h"
#include "radv_device.h"
#include "radv_entrypoints.h"
#include "radv_physical_device.h"
#include "radv_rmv.h"
#include "vk_log.h"
/* Free every resource owned by a descriptor pool.
 *
 * Two ownership modes exist (see radv_create_descriptor_pool): when
 * host_memory_base is NULL the pool was created with
 * VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT and tracks
 * heap-allocated sets via entries[], each of which must be destroyed
 * individually; otherwise set storage lives in the pool's embedded host
 * arena (sets[]) and only the layout references and VK object bases need
 * tearing down — the arena is freed together with the pool below.
 */
static void
radv_destroy_descriptor_pool(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
                             struct radv_descriptor_pool *pool)
{
   if (!pool->host_memory_base) {
      /* Heap-allocated sets: full per-set destroy. free_bo=false because
       * the whole backing BO is released in one go below. */
      for (uint32_t i = 0; i < pool->entry_count; ++i) {
         radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
      }
   } else {
      /* Arena-allocated sets: their memory is part of the pool allocation,
       * so only drop the layout reference and finish the object base. */
      for (uint32_t i = 0; i < pool->entry_count; ++i) {
         vk_descriptor_set_layout_unref(&device->vk, &pool->sets[i]->header.layout->vk);
         vk_object_base_finish(&pool->sets[i]->header.base);
      }
   }

   if (pool->bo)
      radv_bo_destroy(device, &pool->base, pool->bo);
   if (pool->host_bo)
      vk_free2(&device->vk.alloc, pAllocator, pool->host_bo);

   /* Log the destruction for RMV while the handle is still valid, i.e.
    * before the object base is finished. */
   radv_rmv_log_resource_destroy(device, (uint64_t)radv_descriptor_pool_to_handle(pool));
   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}
/* Worker for vkCreateDescriptorPool.
 *
 * Computes the GPU descriptor memory (bo_size) and host memory (size)
 * needed for everything the pool may ever hold, then allocates the pool
 * object (optionally with an embedded arena for set storage) and the
 * descriptor BO — or a plain host buffer for HOST_ONLY pools.
 */
static VkResult
radv_create_descriptor_pool(struct radv_device *device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_descriptor_pool *pool;
   /* Host allocation size: pool struct plus trailing per-set storage. */
   uint64_t size = sizeof(struct radv_descriptor_pool);
   /* GPU descriptor bytes, buffer-descriptor count, dynamic-range count. */
   uint64_t bo_size = 0, bo_count = 0, range_count = 0;

   const VkMutableDescriptorTypeCreateInfoEXT *mutable_info =
      vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);

   vk_foreach_struct_const (ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO: {
         const VkDescriptorPoolInlineUniformBlockCreateInfo *info =
            (const VkDescriptorPoolInlineUniformBlockCreateInfo *)ext;
         /* the sizes are 4 aligned, and we need to align to at
          * most 32, which needs at most 28 bytes extra per
          * binding. */
         bo_size += 28llu * info->maxInlineUniformBlockBindings;
         break;
      }
      default:
         break;
      }
   }

   /* Accumulate per-type requirements over the requested pool sizes. */
   uint64_t num_16byte_descriptors = 0;
   for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
      bo_count += radv_descriptor_type_buffer_count(pCreateInfo->pPoolSizes[i].type) *
                  pCreateInfo->pPoolSizes[i].descriptorCount;

      switch (pCreateInfo->pPoolSizes[i].type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         /* Dynamic descriptors consume a radv_descriptor_range in host
          * memory rather than BO space. */
         range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
         bo_size += RADV_BUFFER_DESC_SIZE * pCreateInfo->pPoolSizes[i].descriptorCount;
         num_16byte_descriptors += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         bo_size += RADV_STORAGE_IMAGE_DESC_SIZE * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         bo_size += radv_get_sampled_image_desc_size(pdev) * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
         /* Per spec, if a mutable descriptor type list is provided for the pool entry, we
          * allocate enough memory to hold any subset of that list.
          * If there is no mutable descriptor type list available,
          * we must allocate enough for any supported mutable descriptor type, i.e. 64 bytes if
          * FMASK is used.
          */
         if (mutable_info && i < mutable_info->mutableDescriptorTypeListCount) {
            uint64_t mutable_size, mutable_alignment;
            if (radv_mutable_descriptor_type_size_alignment(device, &mutable_info->pMutableDescriptorTypeLists[i],
                                                            &mutable_size, &mutable_alignment)) {
               /* 32 as we may need to align for images */
               mutable_size = align(mutable_size, 32);
               bo_size += mutable_size * pCreateInfo->pPoolSizes[i].descriptorCount;
               if (mutable_size < 32)
                  num_16byte_descriptors += pCreateInfo->pPoolSizes[i].descriptorCount;
            }
         } else {
            const uint32_t max_desc_size = pdev->use_fmask ? 64 : 32;
            bo_size += max_desc_size * pCreateInfo->pPoolSizes[i].descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         bo_size += RADV_COMBINED_IMAGE_SAMPLER_DESC_SIZE * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
         /* For inline uniform blocks descriptorCount is a byte size, per
          * the Vulkan spec, so it maps 1:1 to BO bytes. */
         bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      default:
         break;
      }
   }

   if (num_16byte_descriptors) {
      /* Reserve space to align before image descriptors. Our layout code ensures at most one gap
       * per set. */
      bo_size += 16 * MIN2(num_16byte_descriptors, pCreateInfo->maxSets);
   }

   uint64_t sets_size = 0;

   if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
      /* Sets cannot be freed individually: carve all per-set host storage
       * out of a single arena appended to the pool allocation. */
      size += pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
      size += sizeof(struct radeon_winsys_bo *) * bo_count;
      size += sizeof(struct radv_descriptor_range) * range_count;

      sets_size = sizeof(struct radv_descriptor_set *) * pCreateInfo->maxSets;
      size += sets_size;
   } else {
      /* Freeable sets are heap-allocated; the pool only tracks entries[]. */
      size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
   }

   pool = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Only the fixed-size header needs clearing; the trailing storage is
    * initialized as sets get allocated. */
   memset(pool, 0, sizeof(*pool));

   vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL);

   if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
      /* The sets[] pointer array comes first; the arena follows it. */
      pool->host_memory_base = (uint8_t *)pool + sizeof(struct radv_descriptor_pool) + sets_size;
      pool->host_memory_ptr = pool->host_memory_base;
      pool->host_memory_end = (uint8_t *)pool + size;
   }

   if (bo_size) {
      if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT)) {
         enum radeon_bo_flag flags = RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT;

         if (radv_device_should_clear_vram(device))
            flags |= RADEON_FLAG_ZERO_VRAM;

         VkResult result = radv_bo_create(device, &pool->base, bo_size, 32, RADEON_DOMAIN_VRAM, flags,
                                          RADV_BO_PRIORITY_DESCRIPTOR, 0, false, &pool->bo);
         if (result != VK_SUCCESS) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device, result);
         }
         pool->mapped_ptr = (uint8_t *)radv_buffer_map(device->ws, pool->bo);
         if (!pool->mapped_ptr) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
         }
      } else {
         /* Host-only pools never reach the GPU: back them with host memory
          * instead of a BO. */
         pool->host_bo = vk_alloc2(&device->vk.alloc, pAllocator, bo_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!pool->host_bo) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
         }
         pool->mapped_ptr = pool->host_bo;
      }
   }
   pool->size = bo_size;
   pool->max_entry_count = pCreateInfo->maxSets;

   *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
   radv_rmv_log_descriptor_pool_create(device, pCreateInfo, *pDescriptorPool);
   return VK_SUCCESS;
}
/* Public vkCreateDescriptorPool entry point: unwrap the device handle and
 * delegate all work to the internal creation helper. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateDescriptorPool(VkDevice _device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool)
{
   VK_FROM_HANDLE(radv_device, device, _device);

   const VkResult result = radv_create_descriptor_pool(device, pCreateInfo, pAllocator, pDescriptorPool);
   return result;
}
/* Public vkDestroyDescriptorPool entry point. Destroying VK_NULL_HANDLE is
 * a no-op, as the Vulkan spec requires. */
VKAPI_ATTR void VKAPI_CALL
radv_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool, const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   VK_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

   if (pool != NULL)
      radv_destroy_descriptor_pool(device, pAllocator, pool);
}
/* Public vkResetDescriptorPool entry point: destroy every live set, then
 * rewind the pool's allocation cursors so its memory can be reused. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_ResetDescriptorPool(VkDevice _device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   VK_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

   /* Pools without a host arena (FREE_DESCRIPTOR_SET_BIT) track sets via
    * entries[] and need a full per-set destroy; arena pools only release
    * the layout reference and object base — the set storage is reclaimed
    * by rewinding the arena cursor below. */
   const bool uses_arena = pool->host_memory_base != NULL;

   for (uint32_t idx = 0; idx < pool->entry_count; idx++) {
      if (!uses_arena) {
         radv_descriptor_set_destroy(device, pool, pool->entries[idx].set, false);
      } else {
         struct radv_descriptor_set *set = pool->sets[idx];
         vk_descriptor_set_layout_unref(&device->vk, &set->header.layout->vk);
         vk_object_base_finish(&set->header.base);
      }
   }

   pool->entry_count = 0;
   pool->current_offset = 0;
   pool->host_memory_ptr = pool->host_memory_base;

   return VK_SUCCESS;
}

View file

@@ -0,0 +1,45 @@
/*
* Copyright © 2016 Bas Nieuwenhuizen
*
* SPDX-License-Identifier: MIT
*/
#ifndef RADV_DESCRIPTOR_POOL_H
#define RADV_DESCRIPTOR_POOL_H

#include "vk_object.h"

#include <vulkan/vulkan.h>

struct radv_descriptor_set;

/* Bookkeeping for one set allocated from a pool created with
 * VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT (sets are
 * heap-allocated in that mode). */
struct radv_descriptor_pool_entry {
   uint32_t offset; /* presumably the set's byte offset in the pool's descriptor memory — confirm against set allocation code */
   uint32_t size;   /* presumably the set's descriptor memory size in bytes — confirm likewise */
   struct radv_descriptor_set *set;
};

/* Backing object for VkDescriptorPool. Descriptor memory lives either in a
 * GPU BO (bo/mapped_ptr) or, for HOST_ONLY pools, in host memory (host_bo). */
struct radv_descriptor_pool {
   struct vk_object_base base;
   struct radeon_winsys_bo *bo; /* GPU descriptor memory; NULL for host-only pools */
   uint8_t *host_bo;            /* host backing used instead of a BO for HOST_ONLY pools */
   uint8_t *mapped_ptr;         /* CPU pointer to descriptor memory (BO map or host_bo) */
   uint64_t current_offset;     /* bump cursor into descriptor memory */
   uint64_t size;               /* total descriptor memory size in bytes */

   /* Embedded arena for set storage when sets are not individually
    * freeable; host_memory_base is NULL otherwise. */
   uint8_t *host_memory_base;
   uint8_t *host_memory_ptr; /* bump cursor within the arena */
   uint8_t *host_memory_end;

   uint32_t entry_count;     /* number of live sets */
   uint32_t max_entry_count; /* maxSets from the create info */

   union {
      /* Arena mode: pointers to the sets carved out of host memory. */
      struct radv_descriptor_set *sets[0];
      /* FREE_DESCRIPTOR_SET_BIT mode: per-set bookkeeping records. */
      struct radv_descriptor_pool_entry entries[0];
   };
};

VK_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_pool, base, VkDescriptorPool, VK_OBJECT_TYPE_DESCRIPTOR_POOL)

#endif /* RADV_DESCRIPTOR_POOL_H */

View file

@@ -13,6 +13,7 @@
#include "radv_buffer.h"
#include "radv_buffer_view.h"
#include "radv_cmd_buffer.h"
#include "radv_descriptor_pool.h"
#include "radv_descriptor_set.h"
#include "radv_entrypoints.h"
#include "radv_image.h"
@@ -30,7 +31,7 @@
static_assert(RADV_SAMPLER_DESC_SIZE == 16 && RADV_BUFFER_DESC_SIZE == 16 && RADV_ACCEL_STRUCT_DESC_SIZE == 16,
"Sampler/buffer/acceleration structure descriptor sizes must match.");
static unsigned
unsigned
radv_descriptor_type_buffer_count(VkDescriptorType type)
{
switch (type) {
@@ -72,7 +73,7 @@ radv_descriptor_alignment(VkDescriptorType type)
}
}
static bool
bool
radv_mutable_descriptor_type_size_alignment(const struct radv_device *device,
const VkMutableDescriptorTypeListEXT *list, uint64_t *out_size,
uint64_t *out_align)
@@ -588,7 +589,7 @@ radv_descriptor_set_create(struct radv_device *device, struct radv_descriptor_po
return VK_SUCCESS;
}
static void
void
radv_descriptor_set_destroy(struct radv_device *device, struct radv_descriptor_pool *pool,
struct radv_descriptor_set *set, bool free_bo)
{
@@ -609,232 +610,6 @@ radv_descriptor_set_destroy(struct radv_device *device, struct radv_descriptor_p
vk_free2(&device->vk.alloc, NULL, set);
}
/* Pre-move copy (removed by this commit; now lives in
 * radv_descriptor_pool.c). Frees every resource owned by a pool: entries[]
 * sets get a full destroy, arena sets only drop their layout ref and
 * object base before the pool allocation itself is freed. */
static void
radv_destroy_descriptor_pool(struct radv_device *device, const VkAllocationCallbacks *pAllocator,
                             struct radv_descriptor_pool *pool)
{
   if (!pool->host_memory_base) {
      for (uint32_t i = 0; i < pool->entry_count; ++i) {
         radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
      }
   } else {
      for (uint32_t i = 0; i < pool->entry_count; ++i) {
         vk_descriptor_set_layout_unref(&device->vk, &pool->sets[i]->header.layout->vk);
         vk_object_base_finish(&pool->sets[i]->header.base);
      }
   }

   if (pool->bo)
      radv_bo_destroy(device, &pool->base, pool->bo);
   if (pool->host_bo)
      vk_free2(&device->vk.alloc, pAllocator, pool->host_bo);

   /* RMV logging must precede finishing the object base. */
   radv_rmv_log_resource_destroy(device, (uint64_t)radv_descriptor_pool_to_handle(pool));
   vk_object_base_finish(&pool->base);
   vk_free2(&device->vk.alloc, pAllocator, pool);
}
/* Pre-move copy (removed by this commit; now lives in
 * radv_descriptor_pool.c). Sizes all requested descriptors, allocates the
 * pool object (optionally with an embedded set arena) and its descriptor
 * memory (GPU BO, or host memory for HOST_ONLY pools). */
static VkResult
radv_create_descriptor_pool(struct radv_device *device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   struct radv_descriptor_pool *pool;
   /* Host allocation size: pool struct plus trailing per-set storage. */
   uint64_t size = sizeof(struct radv_descriptor_pool);
   /* GPU descriptor bytes, buffer-descriptor count, dynamic-range count. */
   uint64_t bo_size = 0, bo_count = 0, range_count = 0;

   const VkMutableDescriptorTypeCreateInfoEXT *mutable_info =
      vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_EXT);

   vk_foreach_struct_const (ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO: {
         const VkDescriptorPoolInlineUniformBlockCreateInfo *info =
            (const VkDescriptorPoolInlineUniformBlockCreateInfo *)ext;
         /* the sizes are 4 aligned, and we need to align to at
          * most 32, which needs at most 28 bytes extra per
          * binding. */
         bo_size += 28llu * info->maxInlineUniformBlockBindings;
         break;
      }
      default:
         break;
      }
   }

   /* Accumulate per-type requirements over the requested pool sizes. */
   uint64_t num_16byte_descriptors = 0;
   for (unsigned i = 0; i < pCreateInfo->poolSizeCount; ++i) {
      bo_count += radv_descriptor_type_buffer_count(pCreateInfo->pPoolSizes[i].type) *
                  pCreateInfo->pPoolSizes[i].descriptorCount;

      switch (pCreateInfo->pPoolSizes[i].type) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         /* Dynamic descriptors consume a host-side range, not BO space. */
         range_count += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR:
         bo_size += RADV_BUFFER_DESC_SIZE * pCreateInfo->pPoolSizes[i].descriptorCount;
         num_16byte_descriptors += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
         bo_size += RADV_STORAGE_IMAGE_DESC_SIZE * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         bo_size += radv_get_sampled_image_desc_size(pdev) * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_MUTABLE_EXT:
         /* Per spec, if a mutable descriptor type list is provided for the pool entry, we
          * allocate enough memory to hold any subset of that list.
          * If there is no mutable descriptor type list available,
          * we must allocate enough for any supported mutable descriptor type, i.e. 64 bytes if
          * FMASK is used.
          */
         if (mutable_info && i < mutable_info->mutableDescriptorTypeListCount) {
            uint64_t mutable_size, mutable_alignment;
            if (radv_mutable_descriptor_type_size_alignment(device, &mutable_info->pMutableDescriptorTypeLists[i],
                                                            &mutable_size, &mutable_alignment)) {
               /* 32 as we may need to align for images */
               mutable_size = align(mutable_size, 32);
               bo_size += mutable_size * pCreateInfo->pPoolSizes[i].descriptorCount;
               if (mutable_size < 32)
                  num_16byte_descriptors += pCreateInfo->pPoolSizes[i].descriptorCount;
            }
         } else {
            const uint32_t max_desc_size = pdev->use_fmask ? 64 : 32;
            bo_size += max_desc_size * pCreateInfo->pPoolSizes[i].descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         bo_size += RADV_COMBINED_IMAGE_SAMPLER_DESC_SIZE * pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
         /* descriptorCount is a byte size for inline uniform blocks, per
          * the Vulkan spec. */
         bo_size += pCreateInfo->pPoolSizes[i].descriptorCount;
         break;
      default:
         break;
      }
   }

   if (num_16byte_descriptors) {
      /* Reserve space to align before image descriptors. Our layout code ensures at most one gap
       * per set. */
      bo_size += 16 * MIN2(num_16byte_descriptors, pCreateInfo->maxSets);
   }

   uint64_t sets_size = 0;

   if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
      /* Non-freeable sets are carved out of one arena appended to the
       * pool allocation. */
      size += pCreateInfo->maxSets * sizeof(struct radv_descriptor_set);
      size += sizeof(struct radeon_winsys_bo *) * bo_count;
      size += sizeof(struct radv_descriptor_range) * range_count;

      sets_size = sizeof(struct radv_descriptor_set *) * pCreateInfo->maxSets;
      size += sets_size;
   } else {
      /* Freeable sets are heap-allocated; the pool only tracks entries[]. */
      size += sizeof(struct radv_descriptor_pool_entry) * pCreateInfo->maxSets;
   }

   pool = vk_alloc2(&device->vk.alloc, pAllocator, size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!pool)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* Only the fixed-size header needs clearing. */
   memset(pool, 0, sizeof(*pool));

   vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL);

   if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)) {
      /* The sets[] pointer array comes first; the arena follows it. */
      pool->host_memory_base = (uint8_t *)pool + sizeof(struct radv_descriptor_pool) + sets_size;
      pool->host_memory_ptr = pool->host_memory_base;
      pool->host_memory_end = (uint8_t *)pool + size;
   }

   if (bo_size) {
      if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT)) {
         enum radeon_bo_flag flags = RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT;

         if (radv_device_should_clear_vram(device))
            flags |= RADEON_FLAG_ZERO_VRAM;

         VkResult result = radv_bo_create(device, &pool->base, bo_size, 32, RADEON_DOMAIN_VRAM, flags,
                                          RADV_BO_PRIORITY_DESCRIPTOR, 0, false, &pool->bo);
         if (result != VK_SUCCESS) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device, result);
         }
         pool->mapped_ptr = (uint8_t *)radv_buffer_map(device->ws, pool->bo);
         if (!pool->mapped_ptr) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
         }
      } else {
         /* Host-only pools use host memory instead of a BO. */
         pool->host_bo = vk_alloc2(&device->vk.alloc, pAllocator, bo_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (!pool->host_bo) {
            radv_destroy_descriptor_pool(device, pAllocator, pool);
            return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
         }
         pool->mapped_ptr = pool->host_bo;
      }
   }
   pool->size = bo_size;
   pool->max_entry_count = pCreateInfo->maxSets;

   *pDescriptorPool = radv_descriptor_pool_to_handle(pool);
   radv_rmv_log_descriptor_pool_create(device, pCreateInfo, *pDescriptorPool);
   return VK_SUCCESS;
}
/* Pre-move copy (removed by this commit). Unwraps the device handle and
 * delegates to the internal pool-creation helper. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_CreateDescriptorPool(VkDevice _device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   return radv_create_descriptor_pool(device, pCreateInfo, pAllocator, pDescriptorPool);
}
/* Pre-move copy (removed by this commit). NULL pool is a no-op, per the
 * Vulkan spec. */
VKAPI_ATTR void VKAPI_CALL
radv_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool, const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   VK_FROM_HANDLE(radv_descriptor_pool, pool, _pool);

   if (!pool)
      return;

   radv_destroy_descriptor_pool(device, pAllocator, pool);
}
/* Pre-move copy (removed by this commit). Destroys all live sets (full
 * destroy for entries[] pools, layout-unref + base-finish for arena pools)
 * and rewinds the pool's allocation cursors for reuse. */
VKAPI_ATTR VkResult VKAPI_CALL
radv_ResetDescriptorPool(VkDevice _device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
{
   VK_FROM_HANDLE(radv_device, device, _device);
   VK_FROM_HANDLE(radv_descriptor_pool, pool, descriptorPool);

   if (!pool->host_memory_base) {
      for (uint32_t i = 0; i < pool->entry_count; ++i) {
         radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
      }
   } else {
      for (uint32_t i = 0; i < pool->entry_count; ++i) {
         vk_descriptor_set_layout_unref(&device->vk, &pool->sets[i]->header.layout->vk);
         vk_object_base_finish(&pool->sets[i]->header.base);
      }
   }

   /* Reset cursors; descriptor memory and arena are reused as-is. */
   pool->entry_count = 0;
   pool->current_offset = 0;
   pool->host_memory_ptr = pool->host_memory_base;

   return VK_SUCCESS;
}
VKAPI_ATTR VkResult VKAPI_CALL
radv_AllocateDescriptorSets(VkDevice _device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets)

View file

@@ -107,35 +107,6 @@ struct radv_push_descriptor_set {
uint32_t capacity;
};
/* Pre-move copies (removed by this commit; now declared in
 * radv_descriptor_pool.h). */

/* Bookkeeping for one set allocated from a pool created with
 * VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. */
struct radv_descriptor_pool_entry {
   uint32_t offset; /* presumably the set's byte offset in the pool's descriptor memory — confirm against set allocation code */
   uint32_t size;
   struct radv_descriptor_set *set;
};

/* Backing object for VkDescriptorPool. */
struct radv_descriptor_pool {
   struct vk_object_base base;
   struct radeon_winsys_bo *bo; /* GPU descriptor memory; NULL for host-only pools */
   uint8_t *host_bo;            /* host backing used instead of a BO for HOST_ONLY pools */
   uint8_t *mapped_ptr;         /* CPU pointer to descriptor memory */
   uint64_t current_offset;     /* bump cursor into descriptor memory */
   uint64_t size;               /* total descriptor memory size in bytes */

   /* Embedded arena for set storage when sets are not individually
    * freeable; host_memory_base is NULL otherwise. */
   uint8_t *host_memory_base;
   uint8_t *host_memory_ptr;
   uint8_t *host_memory_end;

   uint32_t entry_count;     /* number of live sets */
   uint32_t max_entry_count; /* maxSets from the create info */

   union {
      /* Arena mode: pointers to the sets. */
      struct radv_descriptor_set *sets[0];
      /* FREE_DESCRIPTOR_SET_BIT mode: per-set bookkeeping. */
      struct radv_descriptor_pool_entry entries[0];
   };
};

VK_DEFINE_NONDISP_HANDLE_CASTS(radv_descriptor_pool, base, VkDescriptorPool, VK_OBJECT_TYPE_DESCRIPTOR_POOL)
struct radv_descriptor_update_template_entry {
VkDescriptorType descriptor_type;
@@ -205,4 +176,15 @@ void radv_cmd_update_descriptor_set_with_template(struct radv_device *device, st
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData);
struct radv_descriptor_pool;

/* Destroy a set allocated from POOL. NOTE(review): free_bo appears to
 * control whether the set's BO range is returned to the pool — pool
 * teardown passes false; confirm in the definition. */
void radv_descriptor_set_destroy(struct radv_device *device, struct radv_descriptor_pool *pool,
                                 struct radv_descriptor_set *set, bool free_bo);

/* Number of buffer descriptors one descriptor of the given type consumes
 * (used by pool sizing). */
unsigned radv_descriptor_type_buffer_count(VkDescriptorType type);

/* Compute the size/alignment able to hold every descriptor type in a
 * mutable descriptor type list; returns true on success (callers treat a
 * false return as "skip the entry"). */
bool radv_mutable_descriptor_type_size_alignment(const struct radv_device *device,
                                                 const VkMutableDescriptorTypeListEXT *list, uint64_t *out_size,
                                                 uint64_t *out_align);
#endif /* RADV_DESCRIPTOR_SET_H */

View file

@@ -14,6 +14,7 @@
#include <stdlib.h>
#include "ac_gpu_info.h"
#include "radv_buffer.h"
#include "radv_descriptor_pool.h"
#include "radv_descriptor_set.h"
#include "radv_device_memory.h"
#include "radv_event.h"