Track bitset when creating descriptor pool
Signed-off-by: Dawn Han <dawnhan@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17505>
This commit is contained in:
parent d5f116feb5
commit de5879447b

2 changed files with 157 additions and 25 deletions
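In short: the pool now keeps one vn_descriptor_pool_state_mutable entry per distinct set of descriptor types that a VK_DESCRIPTOR_TYPE_MUTABLE_VALVE pool size can hold, with each set encoded as a bitset indexed by vn_descriptor_type_index(). A binding can then be charged against an entry whenever the entry's bitset is a superset of the binding's, which the new code tests with BITSET_AND followed by BITSET_EQUAL. A minimal sketch of that superset test, using a simplified single-word stand-in rather than Mesa's util/bitset.h (the enum values are hypothetical, for illustration only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Single-word stand-in for Mesa's util/bitset.h sets; assumes the
 * number of descriptor types fits in 64 bits. */
typedef uint64_t typeset;

/* An entry can serve a binding when its set covers the binding's set:
 * (entry & binding) == binding, i.e. BITSET_AND then BITSET_EQUAL. */
static bool
typeset_covers(typeset entry_types, typeset binding_types)
{
   return (entry_types & binding_types) == binding_types;
}

int
main(void)
{
   /* Hypothetical type indices, for illustration only. */
   enum { SAMPLED_IMAGE, STORAGE_IMAGE, UNIFORM_BUFFER };

   const typeset entry = (typeset)1 << SAMPLED_IMAGE |
                         (typeset)1 << STORAGE_IMAGE |
                         (typeset)1 << UNIFORM_BUFFER;
   const typeset binding = (typeset)1 << SAMPLED_IMAGE |
                           (typeset)1 << STORAGE_IMAGE;

   printf("%d\n", typeset_covers(entry, binding)); /* 1: superset */
   printf("%d\n", typeset_covers(binding, entry)); /* 0: missing a type */
   return 0;
}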
@@ -304,17 +304,32 @@ vn_CreateDescriptorPool(VkDevice device,
       vk_find_struct_const(pCreateInfo->pNext,
                            DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO);

-   struct vn_descriptor_pool *pool =
-      vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN,
-                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-   if (!pool)
+   uint32_t mutable_states_count = 0;
+   for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
+      const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];
+      if (pool_size->type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE)
+         mutable_states_count++;
+   }
+   struct vn_descriptor_pool *pool;
+   struct vn_descriptor_pool_state_mutable *mutable_states;
+
+   VK_MULTIALLOC(ma);
+   vk_multialloc_add(&ma, &pool, __typeof__(*pool), 1);
+   vk_multialloc_add(&ma, &mutable_states, __typeof__(*mutable_states),
+                     mutable_states_count);
+
+   if (!vk_multialloc_zalloc(&ma, alloc, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
       return vn_error(dev->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

    vn_object_base_init(&pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL,
                        &dev->base);

    pool->allocator = *alloc;
+   pool->mutable_states = mutable_states;
+
+   const VkMutableDescriptorTypeCreateInfoVALVE *mutable_descriptor_info =
+      vk_find_struct_const(pCreateInfo->pNext,
+                           MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);

    /* Without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, the set
     * allocation must not fail due to a fragmented pool per spec. In this
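The hunk above swaps the plain vk_zalloc of the pool for the vk_multialloc pattern, which lays out the pool struct and its variable-length array of mutable states in one zeroed allocation that a single vk_free releases. A rough standalone equivalent of the layout trick, using plain calloc rather than Mesa's vk_multialloc helpers:

#include <stdint.h>
#include <stdlib.h>

struct mutable_state {
   uint32_t max;
   uint32_t used;
};

struct pool {
   uint32_t mutable_states_count;
   struct mutable_state *mutable_states;
};

/* One zeroed allocation holding the pool followed by its array, so a
 * single free() releases both. The array's alignment is no stricter
 * than the struct's here; vk_multialloc handles mixed alignments in
 * the general case. */
static struct pool *
pool_create(uint32_t mutable_states_count)
{
   struct pool *pool =
      calloc(1, sizeof(*pool) +
                   mutable_states_count * sizeof(struct mutable_state));
   if (!pool)
      return NULL;

   pool->mutable_states_count = mutable_states_count;
   pool->mutable_states = (struct mutable_state *)(pool + 1);
   return pool;
}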
@@ -330,19 +345,56 @@ vn_CreateDescriptorPool(VkDevice device,
    if (iub_info)
       pool->max.iub_binding_count = iub_info->maxInlineUniformBlockBindings;

+   uint32_t next_mutable_state = 0;
    for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
       const VkDescriptorPoolSize *pool_size = &pCreateInfo->pPoolSizes[i];
       const uint32_t type_index = vn_descriptor_type_index(pool_size->type);

       assert(type_index < VN_NUM_DESCRIPTOR_TYPES);

-      pool->max.descriptor_counts[type_index] += pool_size->descriptorCount;
+      if (type_index == VN_DESCRIPTOR_TYPE_MUTABLE_VALVE) {
+         struct vn_descriptor_pool_state_mutable *mutable_state = NULL;
+         BITSET_DECLARE(mutable_types, VN_NUM_DESCRIPTOR_TYPES);
+         if (!mutable_descriptor_info ||
+             i >= mutable_descriptor_info->mutableDescriptorTypeListCount) {
+            BITSET_ONES(mutable_types);
+         } else {
+            const VkMutableDescriptorTypeListVALVE *list =
+               &mutable_descriptor_info->pMutableDescriptorTypeLists[i];
+
+            for (uint32_t j = 0; j < list->descriptorTypeCount; j++) {
+               BITSET_SET(mutable_types, vn_descriptor_type_index(
+                                            list->pDescriptorTypes[j]));
+            }
+         }
+         for (uint32_t j = 0; j < next_mutable_state; j++) {
+            if (BITSET_EQUAL(mutable_types, pool->mutable_states[j].types)) {
+               mutable_state = &pool->mutable_states[j];
+               break;
+            }
+         }
+
+         if (!mutable_state) {
+            /* The application must ensure that partial overlap does not
+             * exist in pPoolSizes, so this entry must have a disjoint set
+             * of types.
+             */
+            mutable_state = &pool->mutable_states[next_mutable_state++];
+            BITSET_COPY(mutable_state->types, mutable_types);
+         }
+
+         mutable_state->max += pool_size->descriptorCount;
+      } else {
+         pool->max.descriptor_counts[type_index] +=
+            pool_size->descriptorCount;
+      }

       assert((pCreateInfo->pPoolSizes[i].type !=
               VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) ||
              iub_info);
    }

+   pool->mutable_states_count = next_mutable_state;
    list_inithead(&pool->descriptor_sets);

    VkDescriptorPool pool_handle = vn_descriptor_pool_to_handle(pool);
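For a sense of the input this loop consumes: an application using VK_VALVE_mutable_descriptor_type pairs each MUTABLE_VALVE pool size with a type list by index, roughly as below (illustrative values only). Because the application must keep such type sets identical or disjoint, never partially overlapping, the loop above can dedupe entries by plain bitset equality:

#include <vulkan/vulkan.h>

/* Illustrative only: two MUTABLE_VALVE pool sizes, each paired by
 * index with a disjoint type list. */
static const VkDescriptorType list0_types[] = {
   VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
   VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
};
static const VkDescriptorType list1_types[] = {
   VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
};
static const VkMutableDescriptorTypeListVALVE lists[] = {
   { 2, list0_types },
   { 1, list1_types },
};
static const VkMutableDescriptorTypeCreateInfoVALVE mutable_info = {
   .sType = VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE,
   .mutableDescriptorTypeListCount = 2,
   .pMutableDescriptorTypeLists = lists,
};
static const VkDescriptorPoolSize pool_sizes[] = {
   { VK_DESCRIPTOR_TYPE_MUTABLE_VALVE, 16 },
   { VK_DESCRIPTOR_TYPE_MUTABLE_VALVE, 8 },
};
static const VkDescriptorPoolCreateInfo create_info = {
   .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
   .pNext = &mutable_info,
   .maxSets = 24,
   .poolSizeCount = 2,
   .pPoolSizes = pool_sizes,
};
/* vkCreateDescriptorPool(device, &create_info, NULL, &pool) would then
 * record two vn_descriptor_pool_state_mutable entries with disjoint
 * bitsets, max 16 and 8. */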
@@ -385,6 +437,50 @@ vn_DestroyDescriptorPool(VkDevice device,
    vk_free(alloc, pool);
 }

+static struct vn_descriptor_pool_state_mutable *
+vn_find_mutable_state_for_binding(
+   const struct vn_descriptor_pool *pool,
+   const struct vn_descriptor_set_layout_binding *binding)
+{
+   for (uint32_t i = 0; i < pool->mutable_states_count; i++) {
+      struct vn_descriptor_pool_state_mutable *mutable_state =
+         &pool->mutable_states[i];
+      BITSET_DECLARE(shared_types, VN_NUM_DESCRIPTOR_TYPES);
+      BITSET_AND(shared_types, mutable_state->types,
+                 binding->mutable_descriptor_types);
+
+      if (BITSET_EQUAL(shared_types, binding->mutable_descriptor_types)) {
+         /* The application must ensure that partial overlap does not
+          * exist in pPoolSizes, so there should be at most one
+          * matching pool entry for a binding.
+          */
+         return mutable_state;
+      }
+   }
+   return NULL;
+}
+
+static void
+vn_pool_restore_mutable_states(struct vn_descriptor_pool *pool,
+                               const struct vn_descriptor_set_layout *layout,
+                               uint32_t max_binding_index,
+                               uint32_t last_binding_descriptor_count)
+{
+   for (uint32_t i = 0; i <= max_binding_index; i++) {
+      if (layout->bindings[i].type != VK_DESCRIPTOR_TYPE_MUTABLE_VALVE)
+         continue;
+
+      const uint32_t count = i == layout->last_binding
+                                ? last_binding_descriptor_count
+                                : layout->bindings[i].count;
+
+      struct vn_descriptor_pool_state_mutable *mutable_state =
+         vn_find_mutable_state_for_binding(pool, &layout->bindings[i]);
+      assert(mutable_state && mutable_state->used >= count);
+      mutable_state->used -= count;
+   }
+}
+
 static bool
 vn_descriptor_pool_alloc_descriptors(
    struct vn_descriptor_pool *pool,
@@ -404,11 +500,13 @@ vn_descriptor_pool_alloc_descriptors(

    ++pool->used.set_count;

-   for (uint32_t i = 0; i <= layout->last_binding; i++) {
-      const VkDescriptorType type = layout->bindings[i].type;
-      const uint32_t count = i == layout->last_binding
+   uint32_t binding_index = 0;
+
+   while (binding_index <= layout->last_binding) {
+      const VkDescriptorType type = layout->bindings[binding_index].type;
+      const uint32_t count = binding_index == layout->last_binding
                                 ? last_binding_descriptor_count
-                                : layout->bindings[i].count;
+                                : layout->bindings[binding_index].count;

       /* Allocation may fail if a call to vkAllocateDescriptorSets would cause
        * the total number of inline uniform block bindings allocated from the
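Note why the for loop becomes a while with binding_index hoisted out of the loop: the index has to outlive the loop so the failure path added in the next hunk can pass binding_index - 1 to vn_pool_restore_mutable_states and unwind exactly the bindings charged so far.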
@@ -417,25 +515,43 @@ vn_descriptor_pool_alloc_descriptors(
        * used to create the descriptor pool.
        */
       if (type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
-         if (++pool->used.iub_binding_count > pool->max.iub_binding_count) {
-            /* restore pool state before this allocation */
-            pool->used = recovery;
-            return false;
-         }
+         if (++pool->used.iub_binding_count > pool->max.iub_binding_count)
+            goto fail;
       }

-      const uint32_t type_index = vn_descriptor_type_index(type);
-      pool->used.descriptor_counts[type_index] += count;
+      if (type == VK_DESCRIPTOR_TYPE_MUTABLE_VALVE) {
+         /* A mutable descriptor can be allocated if the following hold:
+          * - vn_descriptor_pool_state_mutable::types is a superset
+          * - vn_descriptor_pool_state_mutable::{max - used} is enough
+          */
+         struct vn_descriptor_pool_state_mutable *mutable_state =
+            vn_find_mutable_state_for_binding(
+               pool, &layout->bindings[binding_index]);

-      if (pool->used.descriptor_counts[type_index] >
-          pool->max.descriptor_counts[type_index]) {
-         /* restore pool state before this allocation */
-         pool->used = recovery;
-         return false;
+         if (mutable_state &&
+             mutable_state->max - mutable_state->used >= count) {
+            mutable_state->used += count;
+         } else
+            goto fail;
+      } else {
+         const uint32_t type_index = vn_descriptor_type_index(type);
+         pool->used.descriptor_counts[type_index] += count;
+
+         if (pool->used.descriptor_counts[type_index] >
+             pool->max.descriptor_counts[type_index])
+            goto fail;
       }
+      binding_index++;
    }

    return true;
+
+fail:
+   /* restore pool state before this allocation */
+   pool->used = recovery;
+   vn_pool_restore_mutable_states(pool, layout, binding_index - 1,
+                                  last_binding_descriptor_count);
+   return false;
 }

 static void
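The fail: label turns allocation into a small transaction: the flat counters in pool->used are restored wholesale from the recovery snapshot, while the per-mutable-state used charges, which live outside pool->used, are unwound entry by entry up to the binding that failed. A condensed sketch of the two rollback styles, with hypothetical names rather than the driver's types:

#include <stdbool.h>
#include <stdint.h>

#define N_TYPES 8

struct counters {
   uint32_t per_type[N_TYPES];
};

struct mutable_entry {
   uint32_t max;
   uint32_t used;
};

/* Charge one descriptor per entry (types[i] must be < N_TYPES); on any
 * failure, restore the flat counters from the snapshot and undo the
 * per-entry charges made so far, mirroring the recovery +
 * vn_pool_restore_mutable_states split above. */
static bool
reserve(struct counters *used, const struct counters *limit,
        struct mutable_entry *entries, const uint32_t *types, uint32_t count)
{
   const struct counters snapshot = *used;

   for (uint32_t i = 0; i < count; i++) {
      if (entries[i].used >= entries[i].max ||
          ++used->per_type[types[i]] > limit->per_type[types[i]]) {
         *used = snapshot;          /* wholesale rollback of flat counters */
         for (uint32_t j = 0; j < i; j++)
            entries[j].used--;      /* entry-by-entry rollback */
         return false;
      }
      entries[i].used++;
   }
   return true;
}

The free path in the next hunk mirrors this split: vn_pool_restore_mutable_states releases every mutable binding of the layout, and only the non-mutable types are subtracted from pool->used.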
@@ -447,16 +563,22 @@ vn_descriptor_pool_free_descriptors(
    if (!pool->async_set_allocation)
       return;

+   vn_pool_restore_mutable_states(pool, layout, layout->last_binding,
+                                  last_binding_descriptor_count);
+
    for (uint32_t i = 0; i <= layout->last_binding; i++) {
       const uint32_t count = i == layout->last_binding
                                 ? last_binding_descriptor_count
                                 : layout->bindings[i].count;

-      pool->used.descriptor_counts[vn_descriptor_type_index(
-         layout->bindings[i].type)] -= count;
+      if (layout->bindings[i].type != VK_DESCRIPTOR_TYPE_MUTABLE_VALVE) {
+         pool->used.descriptor_counts[vn_descriptor_type_index(
+            layout->bindings[i].type)] -= count;

-      if (layout->bindings[i].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
-         --pool->used.iub_binding_count;
+         if (layout->bindings[i].type ==
+             VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK)
+            --pool->used.iub_binding_count;
+      }
    }

    --pool->used.set_count;
@@ -768,6 +890,7 @@ vn_update_descriptor_sets_parse_writes(uint32_t write_count,
          write->pTexelBufferView = NULL;
          break;
       case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK:
+      case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE:
       default:
          write->pImageInfo = NULL;
          write->pBufferInfo = NULL;
@@ -66,6 +66,12 @@ struct vn_descriptor_pool_state {
    uint32_t descriptor_counts[VN_NUM_DESCRIPTOR_TYPES];
 };

+struct vn_descriptor_pool_state_mutable {
+   uint32_t max;
+   uint32_t used;
+   BITSET_DECLARE(types, VN_NUM_DESCRIPTOR_TYPES);
+};
+
 struct vn_descriptor_pool {
    struct vn_object_base base;

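For reference, BITSET_DECLARE(types, VN_NUM_DESCRIPTOR_TYPES) amounts to a fixed array of words wide enough to give each descriptor type one bit. A sketch of the equivalent declaration, assuming util/bitset.h's 32-bit words and a stand-in type count:

#include <stdint.h>

/* Roughly what BITSET_DECLARE(name, N) expands to in Mesa's
 * util/bitset.h: an array of unsigned words providing at least N bits. */
#define WORD_BITS 32
#define BITSET_WORDS(bits) (((bits) + WORD_BITS - 1) / WORD_BITS)

#define N_TYPES 16 /* stand-in for VN_NUM_DESCRIPTOR_TYPES */

struct state_mutable_sketch {
   uint32_t max;
   uint32_t used;
   uint32_t types[BITSET_WORDS(N_TYPES)]; /* one 32-bit word suffices here */
};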
@@ -75,6 +81,9 @@ struct vn_descriptor_pool {
    struct vn_descriptor_pool_state used;

    struct list_head descriptor_sets;
+
+   uint32_t mutable_states_count;
+   struct vn_descriptor_pool_state_mutable *mutable_states;
 };
 VK_DEFINE_NONDISP_HANDLE_CASTS(vn_descriptor_pool,
                                base.base,