mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2025-12-23 22:00:13 +01:00
zink: rework descriptor pool overflow
Previously this would just destroy and recreate pools, but it is much smarter to instead store the pools and reuse them when needed. Reviewed-by: Dave Airlie <airlied@redhat.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18065>
This commit is contained in:
parent
fb9f110113
commit
cf7c17a7af
2 changed files with 139 additions and 74 deletions
|
|
@ -578,6 +578,14 @@ pool_destroy(struct zink_screen *screen, struct zink_descriptor_pool *pool)
|
|||
ralloc_free(pool);
|
||||
}
|
||||
|
||||
static void
|
||||
multi_pool_destroy(struct zink_screen *screen, struct zink_descriptor_pool_multi *mpool)
|
||||
{
|
||||
if (mpool->pool)
|
||||
pool_destroy(screen, mpool->pool);
|
||||
ralloc_free(mpool);
|
||||
}
|
||||
|
||||
static VkDescriptorPool
|
||||
create_pool(struct zink_screen *screen, unsigned num_type_sizes, const VkDescriptorPoolSize *sizes, unsigned flags)
|
||||
{
|
||||
|
|
@ -600,37 +608,59 @@ static struct zink_descriptor_pool *
|
|||
get_descriptor_pool(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, struct zink_batch_state *bs, bool is_compute);
|
||||
|
||||
static bool
|
||||
set_pool(struct zink_batch_state *bs, struct zink_program *pg, struct zink_descriptor_pool *pool, enum zink_descriptor_type type)
|
||||
set_pool(struct zink_batch_state *bs, struct zink_program *pg, struct zink_descriptor_pool_multi *mpool, enum zink_descriptor_type type)
|
||||
{
|
||||
assert(type != ZINK_DESCRIPTOR_TYPES);
|
||||
assert(mpool);
|
||||
const struct zink_descriptor_pool_key *pool_key = pg->dd.pool_key[type];
|
||||
if (pool) {
|
||||
size_t size = bs->dd.pools[type].capacity;
|
||||
if (!util_dynarray_resize(&bs->dd.pools[type], struct zink_descriptor_pool*, pool_key->id + 1))
|
||||
return false;
|
||||
if (size != bs->dd.pools[type].capacity) {
|
||||
uint8_t *data = bs->dd.pools[type].data;
|
||||
memset(data + size, 0, bs->dd.pools[type].capacity - size);
|
||||
}
|
||||
bs->dd.pool_size[type] = MAX2(bs->dd.pool_size[type], pool_key->id + 1);
|
||||
size_t size = bs->dd.pools[type].capacity;
|
||||
if (!util_dynarray_resize(&bs->dd.pools[type], struct zink_descriptor_pool*, pool_key->id + 1))
|
||||
return false;
|
||||
if (size != bs->dd.pools[type].capacity) {
|
||||
uint8_t *data = bs->dd.pools[type].data;
|
||||
memset(data + size, 0, bs->dd.pools[type].capacity - size);
|
||||
}
|
||||
struct zink_descriptor_pool **ppool = util_dynarray_element(&bs->dd.pools[type], struct zink_descriptor_pool*, pool_key->id);
|
||||
*ppool = pool;
|
||||
bs->dd.pool_size[type] = MAX2(bs->dd.pool_size[type], pool_key->id + 1);
|
||||
struct zink_descriptor_pool_multi **mppool = util_dynarray_element(&bs->dd.pools[type], struct zink_descriptor_pool_multi*, pool_key->id);
|
||||
*mppool = mpool;
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct zink_descriptor_pool *
|
||||
check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct zink_program *pg,
|
||||
alloc_new_pool(struct zink_screen *screen, struct zink_descriptor_pool_multi *mpool)
|
||||
{
|
||||
struct zink_descriptor_pool *pool = rzalloc(mpool, struct zink_descriptor_pool);
|
||||
if (!pool)
|
||||
return NULL;
|
||||
const unsigned num_type_sizes = mpool->pool_key->sizes[1].descriptorCount ? 2 : 1;
|
||||
pool->pool = create_pool(screen, num_type_sizes, mpool->pool_key->sizes, 0);
|
||||
if (!pool->pool) {
|
||||
ralloc_free(pool);
|
||||
return NULL;
|
||||
}
|
||||
return pool;
|
||||
}
|
||||
|
||||
static struct zink_descriptor_pool *
|
||||
check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool_multi *mpool, struct zink_program *pg,
|
||||
enum zink_descriptor_type type, struct zink_batch_state *bs, bool is_compute)
|
||||
{
|
||||
struct zink_screen *screen = zink_screen(ctx->base.screen);
|
||||
if (!mpool->pool) {
|
||||
if (util_dynarray_contains(&mpool->overflowed_pools[!mpool->overflow_idx], struct zink_descriptor_pool*))
|
||||
mpool->pool = util_dynarray_pop(&mpool->overflowed_pools[!mpool->overflow_idx], struct zink_descriptor_pool*);
|
||||
else
|
||||
mpool->pool = alloc_new_pool(screen, mpool);
|
||||
}
|
||||
struct zink_descriptor_pool *pool = mpool->pool;
|
||||
/* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
|
||||
if (pool->set_idx == pool->sets_alloc) {
|
||||
unsigned sets_to_alloc = MIN2(MIN2(MAX2(pool->sets_alloc * 10, 10), MAX_LAZY_DESCRIPTORS) - pool->sets_alloc, 100);
|
||||
if (!sets_to_alloc) {
|
||||
/* overflowed pool: queue for deletion on next reset */
|
||||
util_dynarray_append(&bs->dd.overflowed_pools, struct zink_descriptor_pool*, pool);
|
||||
set_pool(bs, pg, NULL, type);
|
||||
/* overflowed pool: store for reuse */
|
||||
pool->set_idx = 0;
|
||||
util_dynarray_append(&mpool->overflowed_pools[mpool->overflow_idx], struct zink_descriptor_pool*, pool);
|
||||
mpool->pool = NULL;
|
||||
return get_descriptor_pool(ctx, pg, type, bs, is_compute);
|
||||
}
|
||||
if (!zink_descriptor_util_alloc_sets(screen, pg->dsl[type + 1],
|
||||
|
|
@ -659,18 +689,25 @@ create_push_pool(struct zink_screen *screen, struct zink_batch_state *bs, bool i
|
|||
}
|
||||
|
||||
static struct zink_descriptor_pool *
|
||||
check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct zink_batch_state *bs, bool is_compute)
|
||||
check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool_multi *mpool, struct zink_batch_state *bs, bool is_compute)
|
||||
{
|
||||
struct zink_screen *screen = zink_screen(ctx->base.screen);
|
||||
struct zink_descriptor_pool *pool = mpool->pool;
|
||||
/* allocate up to $current * 10, e.g., 10 -> 100 or 100 -> 1000 */
|
||||
if (pool->set_idx == pool->sets_alloc || unlikely(ctx->dd.has_fbfetch != bs->dd.has_fbfetch)) {
|
||||
unsigned sets_to_alloc = MIN2(MIN2(MAX2(pool->sets_alloc * 10, 10), MAX_LAZY_DESCRIPTORS) - pool->sets_alloc, 100);
|
||||
if (!sets_to_alloc || unlikely(ctx->dd.has_fbfetch != bs->dd.has_fbfetch)) {
|
||||
/* overflowed pool: queue for deletion on next reset */
|
||||
util_dynarray_append(&bs->dd.overflowed_pools, struct zink_descriptor_pool*, pool);
|
||||
bs->dd.push_pool[is_compute] = create_push_pool(screen, bs, is_compute, ctx->dd.has_fbfetch);
|
||||
/* overflowed pool: store for reuse */
|
||||
pool->set_idx = 0;
|
||||
util_dynarray_append(&mpool->overflowed_pools[mpool->overflow_idx], struct zink_descriptor_pool*, pool);
|
||||
if (util_dynarray_contains(&mpool->overflowed_pools[!mpool->overflow_idx], struct zink_descriptor_pool*))
|
||||
bs->dd.push_pool[is_compute].pool = util_dynarray_pop(&mpool->overflowed_pools[!mpool->overflow_idx], struct zink_descriptor_pool*);
|
||||
else
|
||||
bs->dd.push_pool[is_compute].pool = create_push_pool(screen, bs, is_compute, ctx->dd.has_fbfetch);
|
||||
if (unlikely(ctx->dd.has_fbfetch != bs->dd.has_fbfetch))
|
||||
mpool->reinit_overflow = true;
|
||||
bs->dd.has_fbfetch = ctx->dd.has_fbfetch;
|
||||
return check_push_pool_alloc(ctx, bs->dd.push_pool[is_compute], bs, is_compute);
|
||||
return check_push_pool_alloc(ctx, &bs->dd.push_pool[is_compute], bs, is_compute);
|
||||
}
|
||||
if (!zink_descriptor_util_alloc_sets(screen, ctx->dd.push_dsl[is_compute]->layout,
|
||||
pool->pool, &pool->sets[pool->sets_alloc], sets_to_alloc)) {
|
||||
|
|
@ -687,27 +724,23 @@ get_descriptor_pool(struct zink_context *ctx, struct zink_program *pg, enum zink
|
|||
{
|
||||
struct zink_screen *screen = zink_screen(ctx->base.screen);
|
||||
const struct zink_descriptor_pool_key *pool_key = pg->dd.pool_key[type];
|
||||
struct zink_descriptor_pool **ppool = bs->dd.pool_size[type] > pool_key->id ?
|
||||
util_dynarray_element(&bs->dd.pools[type], struct zink_descriptor_pool *, pool_key->id) :
|
||||
struct zink_descriptor_pool_multi **mppool = bs->dd.pool_size[type] > pool_key->id ?
|
||||
util_dynarray_element(&bs->dd.pools[type], struct zink_descriptor_pool_multi *, pool_key->id) :
|
||||
NULL;
|
||||
if (ppool && *ppool)
|
||||
return check_pool_alloc(ctx, *ppool, pg, type, bs, is_compute);
|
||||
struct zink_descriptor_pool *pool = rzalloc(bs, struct zink_descriptor_pool);
|
||||
if (!pool)
|
||||
if (mppool && *mppool)
|
||||
return check_pool_alloc(ctx, *mppool, pg, type, bs, is_compute);
|
||||
struct zink_descriptor_pool_multi *mpool = rzalloc(bs, struct zink_descriptor_pool_multi);
|
||||
if (!mpool)
|
||||
return NULL;
|
||||
const unsigned num_type_sizes = pool_key->sizes[1].descriptorCount ? 2 : 1;
|
||||
pool->pool = create_pool(screen, num_type_sizes, pool_key->sizes, 0);
|
||||
pool->pool_key = pool_key;
|
||||
if (!pool->pool) {
|
||||
ralloc_free(pool);
|
||||
return NULL;
|
||||
}
|
||||
if (!set_pool(bs, pg, pool, type)) {
|
||||
pool_destroy(screen, pool);
|
||||
util_dynarray_init(&mpool->overflowed_pools[0], mpool);
|
||||
util_dynarray_init(&mpool->overflowed_pools[1], mpool);
|
||||
mpool->pool_key = pool_key;
|
||||
if (!set_pool(bs, pg, mpool, type)) {
|
||||
multi_pool_destroy(screen, mpool);
|
||||
return NULL;
|
||||
}
|
||||
assert(pool_key->id < bs->dd.pool_size[type]);
|
||||
return check_pool_alloc(ctx, pool, pg, type, bs, is_compute);
|
||||
return check_pool_alloc(ctx, mpool, pg, type, bs, is_compute);
|
||||
}
|
||||
|
||||
ALWAYS_INLINE static VkDescriptorSet
|
||||
|
|
@ -820,7 +853,7 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
|
|||
(ctx->dd.push_state_changed[is_compute] || batch_changed);
|
||||
VkDescriptorSet push_set = VK_NULL_HANDLE;
|
||||
if (need_push && !have_KHR_push_descriptor) {
|
||||
struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bs->dd.push_pool[pg->is_compute], bs, pg->is_compute);
|
||||
struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, &bs->dd.push_pool[pg->is_compute], bs, pg->is_compute);
|
||||
push_set = get_descriptor_set(pool);
|
||||
if (!push_set)
|
||||
mesa_loge("ZINK: failed to get push descriptor set! prepare to crash!");
|
||||
|
|
@ -874,49 +907,74 @@ zink_context_invalidate_descriptor_state(struct zink_context *ctx, gl_shader_sta
|
|||
}
|
||||
}
|
||||
|
||||
static void
|
||||
clear_multi_pool_overflow(struct zink_screen *screen, struct util_dynarray *overflowed_pools)
|
||||
{
|
||||
while (util_dynarray_num_elements(overflowed_pools, struct zink_descriptor_pool*)) {
|
||||
struct zink_descriptor_pool *pool = util_dynarray_pop(overflowed_pools, struct zink_descriptor_pool*);
|
||||
pool_destroy(screen, pool);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
deinit_multi_pool_overflow(struct zink_screen *screen, struct zink_descriptor_pool_multi *mpool)
|
||||
{
|
||||
for (unsigned i = 0; i < 2; i++) {
|
||||
clear_multi_pool_overflow(screen, &mpool->overflowed_pools[i]);
|
||||
util_dynarray_fini(&mpool->overflowed_pools[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
zink_batch_descriptor_deinit(struct zink_screen *screen, struct zink_batch_state *bs)
|
||||
{
|
||||
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
|
||||
while (util_dynarray_contains(&bs->dd.pools[i], struct zink_descriptor_pool *)) {
|
||||
struct zink_descriptor_pool *pool = util_dynarray_pop(&bs->dd.pools[i], struct zink_descriptor_pool *);
|
||||
if (pool)
|
||||
VKSCR(DestroyDescriptorPool)(screen->dev, pool->pool, NULL);
|
||||
while (util_dynarray_contains(&bs->dd.pools[i], struct zink_descriptor_pool_multi *)) {
|
||||
struct zink_descriptor_pool_multi *mpool = util_dynarray_pop(&bs->dd.pools[i], struct zink_descriptor_pool_multi *);
|
||||
if (mpool) {
|
||||
deinit_multi_pool_overflow(screen, mpool);
|
||||
multi_pool_destroy(screen, mpool);
|
||||
}
|
||||
}
|
||||
util_dynarray_fini(&bs->dd.pools[i]);
|
||||
}
|
||||
if (bs->dd.push_pool[0])
|
||||
VKSCR(DestroyDescriptorPool)(screen->dev, bs->dd.push_pool[0]->pool, NULL);
|
||||
if (bs->dd.push_pool[1])
|
||||
VKSCR(DestroyDescriptorPool)(screen->dev, bs->dd.push_pool[1]->pool, NULL);
|
||||
for (unsigned i = 0; i < 2; i++) {
|
||||
if (bs->dd.push_pool[0].pool)
|
||||
pool_destroy(screen, bs->dd.push_pool[i].pool);
|
||||
deinit_multi_pool_overflow(screen, &bs->dd.push_pool[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
zink_batch_descriptor_reset(struct zink_screen *screen, struct zink_batch_state *bs)
|
||||
{
|
||||
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
|
||||
struct zink_descriptor_pool **pools = bs->dd.pools[i].data;
|
||||
unsigned count = util_dynarray_num_elements(&bs->dd.pools[i], struct zink_descriptor_pool *);
|
||||
struct zink_descriptor_pool_multi **mpools = bs->dd.pools[i].data;
|
||||
unsigned count = util_dynarray_num_elements(&bs->dd.pools[i], struct zink_descriptor_pool_multi *);
|
||||
for (unsigned j = 0; j < count; j++) {
|
||||
struct zink_descriptor_pool *pool = pools[j];
|
||||
if (!pool)
|
||||
struct zink_descriptor_pool_multi *mpool = mpools[j];
|
||||
if (!mpool)
|
||||
continue;
|
||||
if (pool->pool_key->use_count)
|
||||
pool->set_idx = 0;
|
||||
if (mpool->pool->set_idx)
|
||||
mpool->overflow_idx = !mpool->overflow_idx;
|
||||
if (mpool->pool_key->use_count)
|
||||
mpool->pool->set_idx = 0;
|
||||
else {
|
||||
pool_destroy(screen, pool);
|
||||
pools[j] = NULL;
|
||||
multi_pool_destroy(screen, mpool);
|
||||
mpools[j] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
for (unsigned i = 0; i < 2; i++) {
|
||||
bs->dd.pg[i] = NULL;
|
||||
if (bs->dd.push_pool[i])
|
||||
bs->dd.push_pool[i]->set_idx = 0;
|
||||
}
|
||||
while (util_dynarray_num_elements(&bs->dd.overflowed_pools, struct zink_descriptor_pool*)) {
|
||||
struct zink_descriptor_pool *pool = util_dynarray_pop(&bs->dd.overflowed_pools, struct zink_descriptor_pool*);
|
||||
pool_destroy(screen, pool);
|
||||
if (bs->dd.push_pool[i].reinit_overflow) {
|
||||
/* these don't match current fbfetch usage and can never be used again */
|
||||
clear_multi_pool_overflow(screen, &bs->dd.push_pool[i].overflowed_pools[bs->dd.push_pool[i].overflow_idx]);
|
||||
} else if (bs->dd.push_pool[i].pool && bs->dd.push_pool[i].pool->set_idx) {
|
||||
bs->dd.push_pool[i].overflow_idx = !bs->dd.push_pool[i].overflow_idx;
|
||||
}
|
||||
if (bs->dd.push_pool[i].pool)
|
||||
bs->dd.push_pool[i].pool->set_idx = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -925,10 +983,12 @@ zink_batch_descriptor_init(struct zink_screen *screen, struct zink_batch_state *
|
|||
{
|
||||
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
|
||||
util_dynarray_init(&bs->dd.pools[i], bs);
|
||||
util_dynarray_init(&bs->dd.overflowed_pools, bs);
|
||||
if (!screen->info.have_KHR_push_descriptor) {
|
||||
bs->dd.push_pool[0] = create_push_pool(screen, bs, false, false);
|
||||
bs->dd.push_pool[1] = create_push_pool(screen, bs, true, false);
|
||||
for (unsigned i = 0; i < 2; i++) {
|
||||
bs->dd.push_pool[i].pool = create_push_pool(screen, bs, i, false);
|
||||
util_dynarray_init(&bs->dd.push_pool[i].overflowed_pools[0], bs);
|
||||
util_dynarray_init(&bs->dd.push_pool[i].overflowed_pools[1], bs);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -340,12 +340,26 @@ struct zink_program_descriptor_data {
|
|||
VkDescriptorUpdateTemplate templates[ZINK_DESCRIPTOR_TYPES + 1];
|
||||
};
|
||||
|
||||
struct zink_descriptor_pool {
|
||||
unsigned set_idx;
|
||||
unsigned sets_alloc;
|
||||
VkDescriptorPool pool;
|
||||
VkDescriptorSet sets[MAX_LAZY_DESCRIPTORS];
|
||||
};
|
||||
|
||||
struct zink_descriptor_pool_multi {
|
||||
bool reinit_overflow;
|
||||
unsigned overflow_idx;
|
||||
struct util_dynarray overflowed_pools[2];
|
||||
struct zink_descriptor_pool *pool;
|
||||
const struct zink_descriptor_pool_key *pool_key;
|
||||
};
|
||||
|
||||
struct zink_batch_descriptor_data {
|
||||
bool has_fbfetch;
|
||||
struct util_dynarray overflowed_pools;
|
||||
unsigned pool_size[ZINK_DESCRIPTOR_TYPES];
|
||||
struct util_dynarray pools[ZINK_DESCRIPTOR_TYPES];
|
||||
struct zink_descriptor_pool *push_pool[2];
|
||||
struct zink_descriptor_pool_multi push_pool[2];
|
||||
struct zink_program *pg[2]; //gfx, compute
|
||||
uint32_t compat_id[2];
|
||||
VkDescriptorSetLayout dsl[2][ZINK_DESCRIPTOR_TYPES];
|
||||
|
|
@ -353,15 +367,6 @@ struct zink_batch_descriptor_data {
|
|||
unsigned push_usage[2];
|
||||
};
|
||||
|
||||
struct zink_descriptor_pool {
|
||||
unsigned set_idx;
|
||||
unsigned sets_alloc;
|
||||
VkDescriptorPool pool;
|
||||
VkDescriptorSet sets[MAX_LAZY_DESCRIPTORS];
|
||||
const struct zink_descriptor_pool_key *pool_key;
|
||||
};
|
||||
|
||||
|
||||
/** batch types */
|
||||
/* zink_batch_usage concepts:
|
||||
* - batch "usage" is an indicator of when and how a BO was accessed
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue