zink: match lazy descriptor set layout in cache mode
now cached descriptors also use a push set for ubo0, also reusing the
ZINK_DESCRIPTOR_TYPES value for this set for enum purposes

Acked-by: Hoe Hao Cheng <haochengho12907@gmail.com>
Acked-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/11188>
This commit is contained in:
parent ab7ea7acfa
commit d2f04604d6

4 changed files with 378 additions and 244 deletions
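A minimal standalone sketch of the set numbering this commit settles on (only the enum ordering below is zink's; the rest is illustrative, not code from the tree): the push set occupies Vulkan set 0 in both descriptor modes, every cached descriptor type binds at its enum value + 1, and ZINK_DESCRIPTOR_TYPES doubles as the bookkeeping array slot for the push set.

/* standalone model of the set indexing used after this change */
#include <stdio.h>

enum zink_descriptor_type {
   ZINK_DESCRIPTOR_TYPE_UBO,
   ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW,
   ZINK_DESCRIPTOR_TYPE_SSBO,
   ZINK_DESCRIPTOR_TYPE_IMAGE,
   ZINK_DESCRIPTOR_TYPES, /* push set reuses this slot for enum purposes */
};

int main(void)
{
   /* Vulkan set 0 is always the push set (ubo0 of each stage)... */
   printf("push set -> vulkan set 0, tracked at array index %d\n",
          ZINK_DESCRIPTOR_TYPES);
   /* ...and every cached descriptor type binds at its type index + 1 */
   for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++)
      printf("type %d -> vulkan set %d\n", h, h + 1);
   return 0;
}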
@@ -894,19 +894,13 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
       const struct glsl_type *type = glsl_without_array(var->type);
       if (var->data.mode == nir_var_mem_ubo) {
          ztype = ZINK_DESCRIPTOR_TYPE_UBO;
-         if (screen->lazy_descriptors) {
-            /* buffer 0 is a push descriptor */
-            var->data.descriptor_set = !!var->data.driver_location;
-            var->data.binding = !var->data.driver_location ? nir->info.stage :
-                                zink_binding(nir->info.stage,
-                                             VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
-                                             var->data.driver_location);
-         } else {
-            var->data.descriptor_set = ztype;
-            var->data.binding = zink_binding(nir->info.stage,
-                                             VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
-                                             var->data.driver_location);
-         }
+         /* buffer 0 is a push descriptor */
+         var->data.descriptor_set = !!var->data.driver_location;
+         var->data.binding = !var->data.driver_location ? nir->info.stage :
+                             zink_binding(nir->info.stage,
+                                          VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+                                          var->data.driver_location);
          assert(var->data.driver_location || var->data.binding < 10);
          VkDescriptorType vktype = !var->data.driver_location ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
          int binding = var->data.binding;
@@ -918,9 +912,7 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
          ret->num_bindings[ztype]++;
       } else if (var->data.mode == nir_var_mem_ssbo) {
          ztype = ZINK_DESCRIPTOR_TYPE_SSBO;
-         var->data.descriptor_set = ztype;
-         if (screen->lazy_descriptors)
-            var->data.descriptor_set++;
+         var->data.descriptor_set = ztype + 1;
          var->data.binding = zink_binding(nir->info.stage,
                                           VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
                                           var->data.driver_location);
@@ -938,9 +930,7 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
             ret->num_texel_buffers++;
          ztype = zink_desc_type_from_vktype(vktype);
          var->data.driver_location = var->data.binding;
-         var->data.descriptor_set = ztype;
-         if (screen->lazy_descriptors)
-            var->data.descriptor_set++;
+         var->data.descriptor_set = ztype + 1;
          var->data.binding = zink_binding(nir->info.stage, vktype, var->data.driver_location);
          ret->bindings[ztype][ret->num_bindings[ztype]].index = var->data.driver_location;
          ret->bindings[ztype][ret->num_bindings[ztype]].binding = var->data.binding;
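To restate the UBO rule from the hunks above as a runnable toy (fake_zink_binding and all values are invented for the demo): ubo0 of each stage goes to the push set with the stage index as its binding and a dynamic descriptor type; every other UBO lands in the cached UBO set through the real zink_binding() helper.

#include <stdio.h>
#include <stdbool.h>

/* illustrative stand-in; zink derives real bindings from NIR + VkDescriptorType */
static unsigned fake_zink_binding(unsigned stage, unsigned driver_location)
{
   return stage * 10 + driver_location; /* arbitrary spacing for the demo */
}

int main(void)
{
   unsigned stage = 2; /* some graphics stage */
   for (unsigned driver_location = 0; driver_location < 3; driver_location++) {
      bool push = driver_location == 0;
      unsigned set = push ? 0 : 1; /* !!driver_location in the diff */
      unsigned binding = push ? stage : fake_zink_binding(stage, driver_location);
      printf("ubo%u -> set %u binding %u (%s)\n", driver_location, set, binding,
             push ? "dynamic ubo, push set" : "plain ubo, cached set");
   }
   return 0;
}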
@@ -80,11 +80,25 @@ struct zink_descriptor_data {
    struct zink_descriptor_state gfx_descriptor_states[ZINK_SHADER_COUNT]; // keep incremental hashes here
    struct zink_descriptor_state descriptor_states[2]; // gfx, compute
    struct hash_table *descriptor_pools[ZINK_DESCRIPTOR_TYPES];
+
+   struct zink_descriptor_pool *push_pool[2]; //gfx, compute
+   VkDescriptorSetLayout push_dsl[2]; //gfx, compute
+   uint8_t last_push_usage[2];
+   bool push_valid[2];
+   uint32_t push_state[2];
+   bool gfx_push_valid[ZINK_SHADER_COUNT];
+   uint32_t gfx_push_state[ZINK_SHADER_COUNT];
+   struct zink_descriptor_set *last_set[2];
+
+   struct zink_descriptor_pool *dummy_pool;
+   VkDescriptorSetLayout dummy_dsl;
+   VkDescriptorSet dummy_set;
 };
 
 struct zink_program_descriptor_data {
    struct zink_descriptor_pool *pool[ZINK_DESCRIPTOR_TYPES];
    struct zink_descriptor_set *last_set[ZINK_DESCRIPTOR_TYPES];
+   uint8_t push_usage;
 };
 
 struct zink_batch_descriptor_data {
@@ -351,19 +365,18 @@ zink_descriptor_util_layout_get(struct zink_context *ctx, enum zink_descriptor_t
                           VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
       key.bindings = &null_binding;
    }
-   if (type == ZINK_DESCRIPTOR_TYPES) {//push layout
-      return descriptor_layout_create(screen, type, key.bindings, MAX2(num_bindings, 1));
-   }
 
-   hash = hash_descriptor_layout(&key);
-   struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->desc_set_layouts[type], hash, &key);
-   if (he) {
-      *layout_key = (void*)he->key;
+   if (type != ZINK_DESCRIPTOR_TYPES) {
+      hash = hash_descriptor_layout(&key);
+      struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->desc_set_layouts[type], hash, &key);
+      if (he) {
+         *layout_key = (void*)he->key;
 #if VK_USE_64_BIT_PTR_DEFINES == 1
-      return (VkDescriptorSetLayout)he->data;
+         return (VkDescriptorSetLayout)he->data;
 #else
-      return *((VkDescriptorSetLayout*)he->data);
+         return *((VkDescriptorSetLayout*)he->data);
 #endif
+      }
    }
 
    VkDescriptorSetLayout dsl = descriptor_layout_create(screen, type, key.bindings, MAX2(num_bindings, 1));
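The lookup-or-create pattern guarded above — hash the key once, then reuse that hash for both the search and the later insert — in a self-contained toy (FNV-1a and the fixed-size probing table stand in for mesa's hash table; none of this is zink code):

#include <stdio.h>
#include <stdint.h>

#define SLOTS 16
static struct { uint32_t hash; int used; int value; } table[SLOTS];

static uint32_t hash_key(const char *key)
{
   uint32_t h = 2166136261u; /* FNV-1a, standing in for the mesa hash */
   for (; *key; key++)
      h = (h ^ (uint8_t)*key) * 16777619u;
   return h;
}

static int *search_pre_hashed(uint32_t hash)
{
   for (unsigned i = 0; i < SLOTS; i++) {
      unsigned s = (hash + i) % SLOTS;
      if (table[s].used && table[s].hash == hash)
         return &table[s].value;
   }
   return NULL;
}

static void insert_pre_hashed(uint32_t hash, int value)
{
   for (unsigned i = 0; i < SLOTS; i++) {
      unsigned s = (hash + i) % SLOTS;
      if (!table[s].used) {
         table[s].used = 1;
         table[s].hash = hash;
         table[s].value = value;
         return;
      }
   }
}

int main(void)
{
   uint32_t hash = hash_key("layout-key");
   int *found = search_pre_hashed(hash);
   if (!found) {
      insert_pre_hashed(hash, 42); /* create once, cache for next time */
      found = search_pre_hashed(hash);
   }
   printf("cached layout = %d\n", *found);
   return 0;
}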
@@ -380,15 +393,18 @@ zink_descriptor_util_layout_get(struct zink_context *ctx, enum zink_descriptor_t
       return VK_NULL_HANDLE;
    }
    memcpy(k->bindings, key.bindings, bindings_size);
 
+   if (type != ZINK_DESCRIPTOR_TYPES) {
 #if VK_USE_64_BIT_PTR_DEFINES == 1
-   _mesa_hash_table_insert_pre_hashed(&ctx->desc_set_layouts[type], hash, k, dsl);
+      _mesa_hash_table_insert_pre_hashed(&ctx->desc_set_layouts[type], hash, k, dsl);
 #else
-   {
-      VkDescriptorSetLayout *dsl_p = ralloc(NULL, VkDescriptorSetLayout);
-      *dsl_p = dsl;
-      _mesa_hash_table_insert_pre_hashed(&ctx->desc_set_layouts[type], hash, k, dsl_p);
-   }
+      {
+         VkDescriptorSetLayout *dsl_p = ralloc(NULL, VkDescriptorSetLayout);
+         *dsl_p = dsl;
+         _mesa_hash_table_insert_pre_hashed(&ctx->desc_set_layouts[type], hash, k, dsl_p);
+      }
 #endif
+   }
    *layout_key = k;
    return dsl;
 }
@@ -396,21 +412,43 @@ zink_descriptor_util_layout_get(struct zink_context *ctx, enum zink_descriptor_t
 bool
 zink_descriptor_util_push_layouts_get(struct zink_context *ctx, VkDescriptorSetLayout *dsls, struct zink_descriptor_layout_key **layout_keys)
 {
+   struct zink_screen *screen = zink_screen(ctx->base.screen);
    VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES];
    for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
       bindings[i].binding = tgsi_processor_to_shader_stage(i);
-      bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+      bindings[i].descriptorType = screen->lazy_descriptors ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
       bindings[i].descriptorCount = 1;
       bindings[i].stageFlags = zink_shader_stage(i);
       bindings[i].pImmutableSamplers = NULL;
    }
-   struct zink_screen *screen = zink_screen(ctx->base.screen);
    enum zink_descriptor_type dsl_type = screen->lazy_descriptors ? ZINK_DESCRIPTOR_TYPES : ZINK_DESCRIPTOR_TYPE_UBO;
    dsls[0] = zink_descriptor_util_layout_get(ctx, dsl_type, bindings, ZINK_SHADER_COUNT, &layout_keys[0]);
    dsls[1] = zink_descriptor_util_layout_get(ctx, dsl_type, &bindings[PIPE_SHADER_COMPUTE], 1, &layout_keys[1]);
    return dsls[0] && dsls[1];
 }
 
+void
+zink_descriptor_util_init_null_set(struct zink_context *ctx, VkDescriptorSet desc_set)
+{
+   struct zink_screen *screen = zink_screen(ctx->base.screen);
+   VkDescriptorBufferInfo push_info;
+   VkWriteDescriptorSet push_wd;
+   push_wd.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+   push_wd.pNext = NULL;
+   push_wd.dstBinding = 0;
+   push_wd.dstArrayElement = 0;
+   push_wd.descriptorCount = 1;
+   push_wd.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+   push_wd.dstSet = desc_set;
+   push_wd.pBufferInfo = &push_info;
+   push_info.buffer = screen->info.rb2_feats.nullDescriptor ?
+                      VK_NULL_HANDLE :
+                      zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
+   push_info.offset = 0;
+   push_info.range = VK_WHOLE_SIZE;
+   vkUpdateDescriptorSets(screen->dev, 1, &push_wd, 0, NULL);
+}
+
 static uint32_t
 hash_descriptor_pool(const void *key)
 {
@@ -438,18 +476,21 @@ descriptor_pool_get(struct zink_context *ctx, enum zink_descriptor_type type,
                     struct zink_descriptor_layout_key *layout_key, VkDescriptorPoolSize *sizes, unsigned num_type_sizes)
 {
+   uint32_t hash = 0;
-   struct zink_descriptor_pool_key key = {
-      .layout = layout_key,
-      .num_type_sizes = num_type_sizes,
-      .sizes = sizes,
-   };
+   if (type != ZINK_DESCRIPTOR_TYPES) {
+      struct zink_descriptor_pool_key key = {
+         .layout = layout_key,
+         .num_type_sizes = num_type_sizes,
+         .sizes = sizes,
+      };
 
-   hash = hash_descriptor_pool(&key);
-   struct hash_entry *he = _mesa_hash_table_search_pre_hashed(ctx->dd->descriptor_pools[type], hash, &key);
-   if (he)
-      return (void*)he->data;
+      hash = hash_descriptor_pool(&key);
+      struct hash_entry *he = _mesa_hash_table_search_pre_hashed(ctx->dd->descriptor_pools[type], hash, &key);
+      if (he)
+         return (void*)he->data;
+   }
    struct zink_descriptor_pool *pool = descriptor_pool_create(zink_screen(ctx->base.screen), type, layout_key, sizes, num_type_sizes);
-   _mesa_hash_table_insert_pre_hashed(ctx->dd->descriptor_pools[type], hash, &pool->key, pool);
+   if (type != ZINK_DESCRIPTOR_TYPES)
+      _mesa_hash_table_insert_pre_hashed(ctx->dd->descriptor_pools[type], hash, &pool->key, pool);
    return pool;
 }
|
@ -483,9 +524,11 @@ zink_descriptor_util_alloc_sets(struct zink_screen *screen, VkDescriptorSetLayou
|
|||
}
|
||||
|
||||
static struct zink_descriptor_set *
|
||||
allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used, bool is_compute)
|
||||
allocate_desc_set(struct zink_context *ctx, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used, bool is_compute)
|
||||
{
|
||||
struct zink_descriptor_pool *pool = pg->dd->pool[type];
|
||||
struct zink_screen *screen = zink_screen(ctx->base.screen);
|
||||
bool push_set = type == ZINK_DESCRIPTOR_TYPES;
|
||||
struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pg->dd->pool[type];
|
||||
#define DESC_BUCKET_FACTOR 10
|
||||
unsigned bucket_size = pool->key.layout->num_descriptors ? DESC_BUCKET_FACTOR : 1;
|
||||
if (pool->key.layout->num_descriptors) {
|
||||
|
|
@@ -493,7 +536,7 @@ allocate_desc_set(struct zink_screen *screen, struct zink
          bucket_size = desc_factor;
    }
    VkDescriptorSet desc_set[bucket_size];
-   if (!zink_descriptor_util_alloc_sets(screen, pg->dsl[type], pool->descpool, desc_set, bucket_size))
+   if (!zink_descriptor_util_alloc_sets(screen, push_set ? ctx->dd->push_dsl[is_compute] : pg->dsl[type + 1], pool->descpool, desc_set, bucket_size))
       return VK_NULL_HANDLE;
 
    struct zink_descriptor_set *alloc = ralloc_array(pool, struct zink_descriptor_set, bucket_size);
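A sketch of the bucket sizing visible above, assuming the elided middle of the hunk grows desc_factor multiplicatively (that loop body is not shown here, so treat its exact shape as an assumption):

#include <stdio.h>

#define DESC_BUCKET_FACTOR 10

int main(void)
{
   unsigned descs_used = 347;
   unsigned bucket_size = DESC_BUCKET_FACTOR;
   /* assumed shape of the growth loop feeding VkDescriptorSet desc_set[bucket_size] */
   for (unsigned desc_factor = DESC_BUCKET_FACTOR; desc_factor < descs_used;
        desc_factor *= DESC_BUCKET_FACTOR)
      bucket_size = desc_factor;
   printf("descs_used=%u -> allocate %u sets this round\n", descs_used, bucket_size);
   return 0;
}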
@@ -532,12 +575,25 @@ allocate_desc_set(struct zink_screen *screen, struct zink
 
 static void
 populate_zds_key(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute,
-                 struct zink_descriptor_state_key *key) {
+                 struct zink_descriptor_state_key *key, uint32_t push_usage)
+{
    if (is_compute) {
       for (unsigned i = 1; i < ZINK_SHADER_COUNT; i++)
          key->exists[i] = false;
       key->exists[0] = true;
-      key->state[0] = ctx->dd->descriptor_states[is_compute].state[type];
+      if (type == ZINK_DESCRIPTOR_TYPES)
+         key->state[0] = ctx->dd->push_state[is_compute];
+      else
+         key->state[0] = ctx->dd->descriptor_states[is_compute].state[type];
+   } else if (type == ZINK_DESCRIPTOR_TYPES) {
+      /* gfx only */
+      for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
+         if (push_usage & BITFIELD_BIT(i)) {
+            key->exists[i] = true;
+            key->state[i] = ctx->dd->gfx_push_state[i];
+         } else
+            key->exists[i] = false;
+      }
    } else {
       for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
          key->exists[i] = ctx->dd->gfx_descriptor_states[i].valid[type];
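The gfx push-set branch of populate_zds_key() above, modeled as a standalone program (ZINK_SHADER_COUNT and BITFIELD_BIT are redefined locally to mirror mesa's macros; the state values are made up):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ZINK_SHADER_COUNT 5
#define BITFIELD_BIT(b) (1u << (b))

struct key { bool exists[ZINK_SHADER_COUNT]; uint32_t state[ZINK_SHADER_COUNT]; };

int main(void)
{
   uint32_t gfx_push_state[ZINK_SHADER_COUNT] = {0x11, 0x22, 0x33, 0x44, 0x55};
   uint32_t push_usage = BITFIELD_BIT(0) | BITFIELD_BIT(4); /* two stages use ubo0, say */
   struct key key = {0};
   /* only stages named in push_usage contribute to the set's cache key */
   for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
      if (push_usage & BITFIELD_BIT(i)) {
         key.exists[i] = true;
         key.state[i] = gfx_push_state[i];
      } else {
         key.exists[i] = false;
      }
   }
   for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++)
      printf("stage %u: exists=%d state=0x%x\n", i, key.exists[i], key.state[i]);
   return 0;
}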
@@ -565,122 +621,111 @@ zink_descriptor_set_get(struct zink_context *ctx,
 {
    *cache_hit = false;
    struct zink_descriptor_set *zds;
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
    struct zink_batch *batch = &ctx->batch;
-   struct zink_descriptor_pool *pool = pg->dd->pool[type];
+   bool push_set = type == ZINK_DESCRIPTOR_TYPES;
+   struct zink_descriptor_pool *pool = push_set ? ctx->dd->push_pool[is_compute] : pg->dd->pool[type];
    unsigned descs_used = 1;
-   assert(type < ZINK_DESCRIPTOR_TYPES);
-   uint32_t hash = pool->key.layout->num_descriptors ? ctx->dd->descriptor_states[is_compute].state[type] : 0;
+   assert(type <= ZINK_DESCRIPTOR_TYPES);
+
+   assert(pool->key.layout->num_descriptors);
+   uint32_t hash = push_set ? ctx->dd->push_state[is_compute] :
+                              ctx->dd->descriptor_states[is_compute].state[type];
 
    struct zink_descriptor_state_key key;
-   populate_zds_key(ctx, type, is_compute, &key);
+   populate_zds_key(ctx, type, is_compute, &key, pg->dd->push_usage);
 
    simple_mtx_lock(&pool->mtx);
-   if (pg->dd->last_set[type] && pg->dd->last_set[type]->hash == hash &&
-       desc_state_equal(&pg->dd->last_set[type]->key, &key)) {
-      zds = pg->dd->last_set[type];
+   struct zink_descriptor_set *last_set = push_set ? ctx->dd->last_set[is_compute] : pg->dd->last_set[type];
+   if (last_set && last_set->hash == hash && desc_state_equal(&last_set->key, &key)) {
+      zds = last_set;
       *cache_hit = !zds->invalid;
-      if (pool->key.layout->num_descriptors) {
-         if (zds->recycled) {
-            struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
-            if (he)
-               _mesa_hash_table_remove(pool->free_desc_sets, he);
-            zds->recycled = false;
-         }
-         if (zds->invalid) {
-            if (zink_batch_usage_exists(&zds->batch_uses))
-               punt_invalid_set(zds, NULL);
-            else
-               /* this set is guaranteed to be in pool->alloc_desc_sets */
-               goto skip_hash_tables;
-            zds = NULL;
-         }
+      if (zds->recycled) {
+         struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
+         if (he)
+            _mesa_hash_table_remove(pool->free_desc_sets, he);
+         zds->recycled = false;
+      }
+      if (zds->invalid) {
+         if (zink_batch_usage_exists(&zds->batch_uses))
+            punt_invalid_set(zds, NULL);
+         else
+            /* this set is guaranteed to be in pool->alloc_desc_sets */
+            goto skip_hash_tables;
+         zds = NULL;
       }
       if (zds)
          goto out;
    }
 
-   if (pool->key.layout->num_descriptors) {
-      struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, hash, &key);
-      bool recycled = false, punted = false;
-      if (he) {
-         zds = (void*)he->data;
-         if (zds->invalid && zink_batch_usage_exists(&zds->batch_uses)) {
-            punt_invalid_set(zds, he);
-            zds = NULL;
-            punted = true;
-         }
-      }
-      if (!he) {
-         he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
-         recycled = true;
-      }
-      if (he && !punted) {
-         zds = (void*)he->data;
-         *cache_hit = !zds->invalid;
-         if (recycled) {
-            /* need to migrate this entry back to the in-use hash */
-            _mesa_hash_table_remove(pool->free_desc_sets, he);
-            goto out;
-         }
-      }
-skip_hash_tables:
-      if (util_dynarray_num_elements(&pool->alloc_desc_sets, struct zink_descriptor_set *)) {
-         /* grab one off the allocated array */
-         zds = util_dynarray_pop(&pool->alloc_desc_sets, struct zink_descriptor_set *);
-         goto out;
-      }
-
-      if (_mesa_hash_table_num_entries(pool->free_desc_sets)) {
-         /* try for an invalidated set first */
-         unsigned count = 0;
-         hash_table_foreach(pool->free_desc_sets, he) {
-            struct zink_descriptor_set *tmp = he->data;
-            if ((count++ >= 100 && tmp->reference.count == 1) || get_invalidated_desc_set(he->data)) {
-               zds = tmp;
-               assert(p_atomic_read(&zds->reference.count) == 1);
-               descriptor_set_invalidate(zds);
-               _mesa_hash_table_remove(pool->free_desc_sets, he);
-               goto out;
-            }
-         }
-      }
-
-      if (pool->num_sets_allocated + pool->key.layout->num_descriptors > ZINK_DEFAULT_MAX_DESCS) {
-         simple_mtx_unlock(&pool->mtx);
-         zink_fence_wait(&ctx->base);
-         zink_batch_reference_program(batch, pg);
-         return zink_descriptor_set_get(ctx, type, is_compute, cache_hit);
-      }
-   } else {
-      if (pg->dd->last_set[type] && !pg->dd->last_set[type]->hash) {
-         zds = pg->dd->last_set[type];
-         *cache_hit = true;
-         goto quick_out;
-      }
-   }
-
-   zds = allocate_desc_set(screen, pg, type, descs_used, is_compute);
+   struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pool->desc_sets, hash, &key);
+   bool recycled = false, punted = false;
+   if (he) {
+      zds = (void*)he->data;
+      if (zds->invalid && zink_batch_usage_exists(&zds->batch_uses)) {
+         punt_invalid_set(zds, he);
+         zds = NULL;
+         punted = true;
+      }
+   }
+   if (!he) {
+      he = _mesa_hash_table_search_pre_hashed(pool->free_desc_sets, hash, &key);
+      recycled = true;
+   }
+   if (he && !punted) {
+      zds = (void*)he->data;
+      *cache_hit = !zds->invalid;
+      if (recycled) {
+         /* need to migrate this entry back to the in-use hash */
+         _mesa_hash_table_remove(pool->free_desc_sets, he);
+         goto out;
+      }
+      goto quick_out;
+   }
+skip_hash_tables:
+   if (util_dynarray_num_elements(&pool->alloc_desc_sets, struct zink_descriptor_set *)) {
+      /* grab one off the allocated array */
+      zds = util_dynarray_pop(&pool->alloc_desc_sets, struct zink_descriptor_set *);
+      goto out;
+   }
+
+   if (_mesa_hash_table_num_entries(pool->free_desc_sets)) {
+      /* try for an invalidated set first */
+      unsigned count = 0;
+      hash_table_foreach(pool->free_desc_sets, he) {
+         struct zink_descriptor_set *tmp = he->data;
+         if ((count++ >= 100 && tmp->reference.count == 1) || get_invalidated_desc_set(he->data)) {
+            zds = tmp;
+            assert(p_atomic_read(&zds->reference.count) == 1);
+            descriptor_set_invalidate(zds);
+            _mesa_hash_table_remove(pool->free_desc_sets, he);
+            goto out;
+         }
+      }
+   }
+
+   if (pool->num_sets_allocated + pool->key.layout->num_descriptors > ZINK_DEFAULT_MAX_DESCS) {
+      simple_mtx_unlock(&pool->mtx);
+      zink_fence_wait(&ctx->base);
+      zink_batch_reference_program(batch, pg);
+      return zink_descriptor_set_get(ctx, type, is_compute, cache_hit);
+   }
+
+   zds = allocate_desc_set(ctx, pg, type, descs_used, is_compute);
 out:
    zds->hash = hash;
-   populate_zds_key(ctx, type, is_compute, &zds->key);
+   populate_zds_key(ctx, type, is_compute, &zds->key, pg->dd->push_usage);
    zds->recycled = false;
-   if (pool->key.layout->num_descriptors)
-      _mesa_hash_table_insert_pre_hashed(pool->desc_sets, hash, &zds->key, zds);
-   else {
-      /* we can safely apply the null set to all the slots which will need it here */
-      for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
-         if (pg->dd->pool[i] && !pg->dd->pool[i]->key.layout->num_descriptors)
-            pg->dd->last_set[i] = zds;
-      }
-   }
+   _mesa_hash_table_insert_pre_hashed(pool->desc_sets, hash, &zds->key, zds);
 quick_out:
    zds->punted = zds->invalid = false;
    if (batch_add_desc_set(batch, zds)) {
       batch->state->descs_used += pool->key.layout->num_descriptors;
    }
-   pg->dd->last_set[type] = zds;
+   if (push_set)
+      ctx->dd->last_set[is_compute] = zds;
+   else
+      pg->dd->last_set[type] = zds;
    simple_mtx_unlock(&pool->mtx);
 
    return zds;
@@ -788,6 +833,7 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
 {
    VkDescriptorSetLayoutBinding bindings[ZINK_DESCRIPTOR_TYPES][PIPE_SHADER_TYPES * 32];
    unsigned num_bindings[ZINK_DESCRIPTOR_TYPES] = {0};
+   uint8_t push_usage = 0;
 
    VkDescriptorPoolSize sizes[6] = {0};
    int type_map[12];
@@ -804,11 +850,16 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
       struct zink_shader *shader = stages[i];
       if (!shader)
          continue;
+      enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
 
       VkShaderStageFlagBits stage_flags = zink_shader_stage(pipe_shader_type_from_mesa(shader->nir->info.stage));
       for (int j = 0; j < ZINK_DESCRIPTOR_TYPES; j++) {
          for (int k = 0; k < shader->num_bindings[j]; k++) {
             assert(num_bindings[j] < ARRAY_SIZE(bindings[j]));
+            if (shader->bindings[j][k].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
+               push_usage |= BITFIELD_BIT(stage);
+               continue;
+            }
             bindings[j][num_bindings[j]].binding = shader->bindings[j][k].binding;
             bindings[j][num_bindings[j]].descriptorType = shader->bindings[j][k].type;
             bindings[j][num_bindings[j]].descriptorCount = shader->bindings[j][k].size;
@@ -828,16 +879,20 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
       total_descs += num_bindings[i];
    }
 
+   if (total_descs || push_usage) {
+      pg->dd = rzalloc(pg, struct zink_program_descriptor_data);
+      if (!pg->dd)
+         return false;
+
+      pg->dd->push_usage = push_usage;
+      pg->dsl[pg->num_dsl++] = push_usage ? ctx->dd->push_dsl[pg->is_compute] : ctx->dd->dummy_dsl;
+   }
    if (!total_descs) {
       pg->layout = zink_pipeline_layout_create(zink_screen(ctx->base.screen), pg);
       return !!pg->layout;
    }
 
-   pg->dd = rzalloc(pg, struct zink_program_descriptor_data);
+   if (!pg->dd)
+      pg->dd = rzalloc(pg, struct zink_program_descriptor_data);
    if (!pg->dd)
       return false;
 
    for (int i = 0; i < num_types; i++)
      sizes[i].descriptorCount *= ZINK_DEFAULT_MAX_DESCS;
@@ -847,14 +902,8 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
       if (!num_bindings[i]) {
          if (!found_descriptors)
            continue;
-         pg->dsl[i] = zink_descriptor_util_layout_get(ctx, i, NULL, 0, &layout_key[i]);
-         if (!pg->dsl[i])
-            return false;
-         VkDescriptorPoolSize null_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_DEFAULT_MAX_DESCS};
-         struct zink_descriptor_pool *pool = descriptor_pool_get(ctx, i, layout_key[i], &null_size, 1);
-         if (!pool)
-            return false;
-         zink_descriptor_pool_reference(zink_screen(ctx->base.screen), &pg->dd->pool[i], pool);
+         pg->dsl[i + 1] = ctx->dd->dummy_dsl;
+         /* pool is null here for detection during update */
          pg->num_dsl++;
         continue;
       }
@@ -868,10 +917,6 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
          type_sizes[num_type_sizes] = sizes[type_map[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER]];
          num_type_sizes++;
       }
-      if (type_map[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] != -1) {
-         type_sizes[num_type_sizes] = sizes[type_map[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC]];
-         num_type_sizes++;
-      }
       break;
    case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
       if (type_map[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER] != -1) {
@@ -900,8 +945,8 @@ zink_descriptor_program_init(struct zink_context *ctx, struct zink_program *pg)
          }
          break;
       }
-      pg->dsl[i] = zink_descriptor_util_layout_get(ctx, i, bindings[i], num_bindings[i], &layout_key[i]);
-      if (!pg->dsl[i])
+      pg->dsl[i + 1] = zink_descriptor_util_layout_get(ctx, i, bindings[i], num_bindings[i], &layout_key[i]);
+      if (!pg->dsl[i + 1])
         return false;
       struct zink_descriptor_pool *pool = descriptor_pool_get(ctx, i, layout_key[i], type_sizes, num_type_sizes);
       if (!pool)
@@ -926,13 +971,15 @@ zink_descriptor_program_deinit(struct zink_screen *screen, struct zink_program *
 static void
 zink_descriptor_pool_deinit(struct zink_context *ctx)
 {
+   struct zink_screen *screen = zink_screen(ctx->base.screen);
    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
       hash_table_foreach(ctx->dd->descriptor_pools[i], entry) {
          struct zink_descriptor_pool *pool = (void*)entry->data;
-         zink_descriptor_pool_reference(zink_screen(ctx->base.screen), &pool, NULL);
+         zink_descriptor_pool_reference(screen, &pool, NULL);
       }
       _mesa_hash_table_destroy(ctx->dd->descriptor_pools[i], NULL);
    }
+   zink_descriptor_pool_reference(screen, &ctx->dd->dummy_pool, NULL);
 }
 
 static bool
@@ -943,6 +990,31 @@ zink_descriptor_pool_init(struct zink_context *ctx)
       if (!ctx->dd->descriptor_pools[i])
         return false;
    }
+   struct zink_descriptor_layout_key *layout_keys[2];
+   struct zink_screen *screen = zink_screen(ctx->base.screen);
+   if (!zink_descriptor_util_push_layouts_get(ctx, ctx->dd->push_dsl, layout_keys))
+      return false;
+   VkDescriptorPoolSize sizes;
+   sizes.type = screen->lazy_descriptors ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
+   sizes.descriptorCount = ZINK_SHADER_COUNT * ZINK_DEFAULT_MAX_DESCS;
+   ctx->dd->push_pool[0] = descriptor_pool_get(ctx, 0, layout_keys[0], &sizes, 1);
+   sizes.descriptorCount = ZINK_DEFAULT_MAX_DESCS;
+   ctx->dd->push_pool[1] = descriptor_pool_get(ctx, 0, layout_keys[1], &sizes, 1);
+   if (!ctx->dd->push_pool[0] || !ctx->dd->push_pool[1])
+      return false;
+
+   ctx->dd->dummy_dsl = zink_descriptor_util_layout_get(ctx, 0, NULL, 0, &layout_keys[0]);
+   if (!ctx->dd->dummy_dsl)
+      return false;
+   VkDescriptorPoolSize null_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1};
+   ctx->dd->dummy_pool = descriptor_pool_create(screen, 0, layout_keys[0], &null_size, 1);
+   if (!ctx->dd->dummy_pool)
+      return false;
+   zink_descriptor_util_alloc_sets(screen, ctx->dd->dummy_dsl,
+                                   ctx->dd->dummy_pool->descpool, &ctx->dd->dummy_set, 1);
+   if (!ctx->dd->dummy_set)
+      return false;
+   zink_descriptor_util_init_null_set(ctx, ctx->dd->dummy_set);
    return true;
 }
 
@@ -1020,18 +1092,79 @@ init_write_descriptor(struct zink_shader *shader, struct zink_descriptor_set *zd
 {
    wd->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    wd->pNext = NULL;
-   wd->dstBinding = shader->bindings[type][idx].binding;
+   wd->dstBinding = shader ? shader->bindings[type][idx].binding : idx;
    wd->dstArrayElement = 0;
-   wd->descriptorCount = shader->bindings[type][idx].size;
-   wd->descriptorType = shader->bindings[type][idx].type;
+   wd->descriptorCount = shader ? shader->bindings[type][idx].size : 1;
+   wd->descriptorType = shader ? shader->bindings[type][idx].type : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    wd->dstSet = zds->desc_set;
    return num_wds + 1;
 }
 
+static unsigned
+update_push_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
+                            bool is_compute, bool cache_hit, uint32_t *dynamic_offsets)
+{
+   struct zink_screen *screen = zink_screen(ctx->base.screen);
+   VkWriteDescriptorSet wds[ZINK_SHADER_COUNT];
+   VkDescriptorBufferInfo buffer_infos[ZINK_SHADER_COUNT];
+   unsigned num_buffer_info = 0;
+   struct zink_shader **stages;
+   struct {
+      uint32_t binding;
+      uint32_t offset;
+   } dynamic_buffers[ZINK_SHADER_COUNT];
+
+   unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
+   if (is_compute)
+      stages = &ctx->curr_compute->shader;
+   else
+      stages = &ctx->gfx_stages[0];
+
+   VkDescriptorBufferInfo null_info;
+   null_info.buffer = screen->info.rb2_feats.nullDescriptor ?
+                      VK_NULL_HANDLE : zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
+   null_info.offset = 0;
+   null_info.range = VK_WHOLE_SIZE;
+
+   for (int i = 0; i < num_stages; i++) {
+      struct zink_shader *shader = stages[i];
+      enum pipe_shader_type pstage = shader ? pipe_shader_type_from_mesa(shader->nir->info.stage) : i;
+      struct zink_resource *res = zink_get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_UBO, pstage, 0);
+
+      dynamic_buffers[i].binding = tgsi_processor_to_shader_stage(pstage);
+      dynamic_buffers[i].offset = ctx->ubos[pstage][0].buffer_offset;
+      if (cache_hit)
+         continue;
+      init_write_descriptor(NULL, zds, ZINK_DESCRIPTOR_TYPE_UBO, tgsi_processor_to_shader_stage(pstage), &wds[i], 0);
+      desc_set_res_add(zds, res, i, cache_hit);
+      if (shader && res) {
+         buffer_infos[num_buffer_info].buffer = res->obj->buffer;
+         buffer_infos[num_buffer_info].offset = 0;
+         buffer_infos[num_buffer_info].range = ctx->ubos[pstage][0].buffer_size;
+         wds[i].pBufferInfo = &buffer_infos[num_buffer_info++];
+      } else {
+         /* dump in a null buffer */
+         wds[i].pBufferInfo = &null_info;
+      }
+   }
+   /* Values are taken from pDynamicOffsets in an order such that all entries for set N come before set N+1;
+    * within a set, entries are ordered by the binding numbers in the descriptor set layouts
+    * - vkCmdBindDescriptorSets spec
+    *
+    * because of this, we have to sort all the dynamic offsets by their associated binding to ensure they
+    * match what the driver expects
+    */
+   qsort(dynamic_buffers, num_stages, sizeof(uint32_t) * 2, cmp_dynamic_offset_binding);
+   for (int i = 0; i < num_stages; i++)
+      dynamic_offsets[i] = dynamic_buffers[i].offset;
+
+   write_descriptors(ctx, num_stages, wds, cache_hit);
+   return num_stages;
+}
+
 static void
 update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
-                       bool is_compute, bool cache_hit,
-                       uint32_t *dynamic_offsets, unsigned *dynamic_offset_idx)
+                       bool is_compute, bool cache_hit)
 {
    struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
    struct zink_screen *screen = zink_screen(ctx->base.screen);
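The ordering rule quoted in the comment above, as a runnable toy. The body of cmp_dynamic_offset_binding is assumed from its name and from the sizeof(uint32_t) * 2 element size the diff passes to qsort:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct dyn { uint32_t binding; uint32_t offset; };

static int cmp_dynamic_offset_binding(const void *a, const void *b)
{
   const struct dyn *da = a, *db = b;
   return (da->binding > db->binding) - (da->binding < db->binding);
}

int main(void)
{
   /* stage iteration order is not binding order, hence the sort */
   struct dyn dynamic_buffers[] = {{3, 256}, {0, 0}, {4, 64}, {1, 128}};
   unsigned n = sizeof(dynamic_buffers) / sizeof(dynamic_buffers[0]);
   uint32_t dynamic_offsets[8];
   qsort(dynamic_buffers, n, sizeof(uint32_t) * 2, cmp_dynamic_offset_binding);
   for (unsigned i = 0; i < n; i++) {
      dynamic_offsets[i] = dynamic_buffers[i].offset;
      printf("binding %u -> pDynamicOffsets[%u] = %u\n",
             dynamic_buffers[i].binding, i, dynamic_offsets[i]);
   }
   return 0;
}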
@@ -1043,11 +1176,6 @@ update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds
    unsigned num_buffer_info = 0;
    unsigned num_resources = 0;
    struct zink_shader **stages;
-   struct {
-      uint32_t binding;
-      uint32_t offset;
-   } dynamic_buffers[PIPE_MAX_CONSTANT_BUFFERS];
-   unsigned dynamic_offset_count = 0;
 
    unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
    if (is_compute)
@@ -1055,7 +1183,7 @@ update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds
    else
       stages = &ctx->gfx_stages[0];
 
-   for (int i = 0; i < num_stages; i++) {
+   for (int i = 0; !cache_hit && i < num_stages; i++) {
       struct zink_shader *shader = stages[i];
       if (!shader)
         continue;
@@ -1063,8 +1191,10 @@ update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds
 
       for (int j = 0; j < shader->num_bindings[ZINK_DESCRIPTOR_TYPE_UBO]; j++) {
          int index = shader->bindings[ZINK_DESCRIPTOR_TYPE_UBO][j].index;
-         assert(shader->bindings[ZINK_DESCRIPTOR_TYPE_UBO][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
-                shader->bindings[ZINK_DESCRIPTOR_TYPE_UBO][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
+         /* skip push descriptors for general ubo set */
+         if (!index)
+            continue;
+         assert(shader->bindings[ZINK_DESCRIPTOR_TYPE_UBO][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
          assert(ctx->ubos[stage][index].buffer_size <= screen->info.props.limits.maxUniformBufferRange);
          struct zink_resource *res = zink_resource(ctx->ubos[stage][index].buffer);
          assert(!res || ctx->ubos[stage][index].buffer_size > 0);
@@ -1076,13 +1206,7 @@ update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds
                                              (screen->info.rb2_feats.nullDescriptor ?
                                               VK_NULL_HANDLE :
                                               zink_resource(ctx->dummy_vertex_buffer)->obj->buffer);
-         if (shader->bindings[ZINK_DESCRIPTOR_TYPE_UBO][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
-            buffer_infos[num_buffer_info].offset = 0;
-            /* we're storing this to qsort later */
-            dynamic_buffers[dynamic_offset_count].binding = shader->bindings[ZINK_DESCRIPTOR_TYPE_UBO][j].binding;
-            dynamic_buffers[dynamic_offset_count++].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
-         } else
-            buffer_infos[num_buffer_info].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
+         buffer_infos[num_buffer_info].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
          buffer_infos[num_buffer_info].range = res ? ctx->ubos[stage][index].buffer_size : VK_WHOLE_SIZE;
          wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
          ++num_buffer_info;
@@ -1090,18 +1214,6 @@ update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds
          num_wds = init_write_descriptor(shader, zds, ZINK_DESCRIPTOR_TYPE_UBO, j, &wds[num_wds], num_wds);
       }
    }
-   /* Values are taken from pDynamicOffsets in an order such that all entries for set N come before set N+1;
-    * within a set, entries are ordered by the binding numbers in the descriptor set layouts
-    * - vkCmdBindDescriptorSets spec
-    *
-    * because of this, we have to sort all the dynamic offsets by their associated binding to ensure they
-    * match what the driver expects
-    */
-   if (dynamic_offset_count > 1)
-      qsort(dynamic_buffers, dynamic_offset_count, sizeof(uint32_t) * 2, cmp_dynamic_offset_binding);
-   for (int i = 0; i < dynamic_offset_count; i++)
-      dynamic_offsets[i] = dynamic_buffers[i].offset;
-   *dynamic_offset_idx = dynamic_offset_count;
 
    write_descriptors(ctx, num_wds, wds, cache_hit);
 }
@@ -1355,21 +1467,37 @@ update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *z
 }
 
 static void
-zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute);
+zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg);
 
 void
 zink_descriptors_update(struct zink_context *ctx, bool is_compute)
 {
    struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
 
-   zink_context_update_descriptor_states(ctx, is_compute);
-   bool cache_hit[ZINK_DESCRIPTOR_TYPES];
-   struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES];
+   zink_context_update_descriptor_states(ctx, pg);
+   bool cache_hit[ZINK_DESCRIPTOR_TYPES + 1];
+   VkDescriptorSet sets[ZINK_DESCRIPTOR_TYPES + 1];
+   struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES + 1];
+   /* push set is indexed in vulkan as 0 but isn't in the general pool array */
+   if (pg->dd->push_usage)
+      zds[ZINK_DESCRIPTOR_TYPES] = zink_descriptor_set_get(ctx, ZINK_DESCRIPTOR_TYPES, is_compute, &cache_hit[ZINK_DESCRIPTOR_TYPES]);
+   else {
+      zds[ZINK_DESCRIPTOR_TYPES] = NULL;
+      cache_hit[ZINK_DESCRIPTOR_TYPES] = false;
+   }
+   sets[0] = zds[ZINK_DESCRIPTOR_TYPES] ? zds[ZINK_DESCRIPTOR_TYPES]->desc_set : ctx->dd->dummy_set;
    for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
-      if (pg->dd->pool[h])
-         zds[h] = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit[h]);
-      else
+      if (pg->dsl[h + 1]) {
+         /* null set has null pool */
+         if (pg->dd->pool[h])
+            zds[h] = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit[h]);
+         else
+            zds[h] = NULL;
+         /* reuse dummy set for bind */
+         sets[h + 1] = zds[h] ? zds[h]->desc_set : ctx->dd->dummy_set;
+      } else {
          zds[h] = NULL;
+      }
    }
    struct zink_batch *batch = &ctx->batch;
    zink_batch_reference_program(batch, pg);
@@ -1377,10 +1505,13 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
    uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
    unsigned dynamic_offset_idx = 0;
 
+   if (pg->dd->push_usage) // push set
+      dynamic_offset_idx = update_push_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPES],
+                                                       is_compute, cache_hit[ZINK_DESCRIPTOR_TYPES], dynamic_offsets);
+
    if (zds[ZINK_DESCRIPTOR_TYPE_UBO])
       update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
-                             is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO],
-                             dynamic_offsets, &dynamic_offset_idx);
+                             is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO]);
    if (zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW])
       update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
                                  is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW]);
@@ -1391,13 +1522,9 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
       update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
                                is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_IMAGE]);
 
-   for (unsigned h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
-      if (zds[h]) {
-         vkCmdBindDescriptorSets(batch->state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
-                                 pg->layout, zds[h]->pool->type, 1, &zds[h]->desc_set,
-                                 zds[h]->pool->type == ZINK_DESCRIPTOR_TYPE_UBO ? dynamic_offset_idx : 0, dynamic_offsets);
-      }
-   }
+   vkCmdBindDescriptorSets(batch->state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
+                           pg->layout, 0, pg->num_dsl, sets,
+                           dynamic_offset_idx, dynamic_offsets);
 }
 
 void
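A model of the single-bind strategy above (strings stand in for VkDescriptorSet handles; none of this is zink code): every slot of sets[] gets either a real set or the dummy set, so one vkCmdBindDescriptorSets call can cover sets 0 through num_dsl - 1 without gaps.

#include <stdio.h>

#define ZINK_DESCRIPTOR_TYPES 4

int main(void)
{
   const char *dummy_set = "dummy";
   const char *zds[ZINK_DESCRIPTOR_TYPES + 1] = {
      NULL, "samplers", NULL, NULL, /* per-type sets, some absent */
      "push-ubo0",                  /* push set lives at index ZINK_DESCRIPTOR_TYPES */
   };
   const char *sets[ZINK_DESCRIPTOR_TYPES + 1];
   sets[0] = zds[ZINK_DESCRIPTOR_TYPES] ? zds[ZINK_DESCRIPTOR_TYPES] : dummy_set;
   for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++)
      sets[h + 1] = zds[h] ? zds[h] : dummy_set;
   for (int h = 0; h < ZINK_DESCRIPTOR_TYPES + 1; h++)
      printf("vulkan set %d -> %s\n", h, sets[h]);
   return 0;
}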
@@ -1454,7 +1581,7 @@ zink_get_resource_for_descriptor(struct zink_context *ctx, enum zink_descriptor_
 }
 
 static uint32_t
-calc_descriptor_state_hash_ubo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
+calc_descriptor_state_hash_ubo(struct zink_context *ctx, enum pipe_shader_type shader, int idx, uint32_t hash, bool need_offset)
 {
    struct zink_resource *res = zink_get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_UBO, shader, idx);
    struct zink_resource_object *obj = res ? res->obj : NULL;
@@ -1462,7 +1589,7 @@ calc_descriptor_state_hash_ubo(struct zink_context *ctx, struct zink_shader *zs,
    void *hash_data = &ctx->ubos[shader][idx].buffer_size;
    size_t data_size = sizeof(unsigned);
    hash = XXH32(hash_data, data_size, hash);
-   if (zs->bindings[ZINK_DESCRIPTOR_TYPE_UBO][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
+   if (need_offset)
       hash = XXH32(&ctx->ubos[shader][idx].buffer_offset, sizeof(unsigned), hash);
    return hash;
 }
@@ -1552,10 +1679,14 @@ update_descriptor_stage_state(struct zink_context *ctx, enum pipe_shader_type sh
 
    uint32_t hash = 0;
    for (int i = 0; i < zs->num_bindings[type]; i++) {
+      /* skip push set members */
+      if (zs->bindings[type][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
+         continue;
+
       int idx = zs->bindings[type][i].index;
       switch (type) {
       case ZINK_DESCRIPTOR_TYPE_UBO:
-         hash = calc_descriptor_state_hash_ubo(ctx, zs, shader, i, idx, hash);
+         hash = calc_descriptor_state_hash_ubo(ctx, shader, idx, hash, true);
          break;
       case ZINK_DESCRIPTOR_TYPE_SSBO:
          hash = calc_descriptor_state_hash_ssbo(ctx, zs, shader, i, idx, hash);
@@ -1622,17 +1753,50 @@ update_descriptor_state(struct zink_context *ctx, enum zink_descriptor_type type
 }
 
 static void
-zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute)
+zink_context_update_descriptor_states(struct zink_context *ctx, struct zink_program *pg)
 {
+   if (pg->dd->push_usage && (!ctx->dd->push_valid[pg->is_compute] ||
+                              pg->dd->push_usage != ctx->dd->last_push_usage[pg->is_compute])) {
+      uint32_t hash = 0;
+      if (pg->is_compute) {
+         hash = calc_descriptor_state_hash_ubo(ctx, PIPE_SHADER_COMPUTE, 0, 0, false);
+      } else {
+         bool first = true;
+         u_foreach_bit(stage, pg->dd->push_usage) {
+            if (!ctx->dd->gfx_push_valid[stage]) {
+               ctx->dd->gfx_push_state[stage] = calc_descriptor_state_hash_ubo(ctx, stage, 0, 0, false);
+               ctx->dd->gfx_push_valid[stage] = true;
+            }
+            if (first)
+               hash = ctx->dd->gfx_push_state[stage];
+            else
+               hash = XXH32(&ctx->dd->gfx_push_state[stage], sizeof(uint32_t), hash);
+            first = false;
+         }
+      }
+      ctx->dd->push_state[pg->is_compute] = hash;
+      ctx->dd->push_valid[pg->is_compute] = true;
+      ctx->dd->last_push_usage[pg->is_compute] = pg->dd->push_usage;
+   }
    for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
-      if (!ctx->dd->descriptor_states[is_compute].valid[i])
-         update_descriptor_state(ctx, i, is_compute);
+      if (!ctx->dd->descriptor_states[pg->is_compute].valid[i])
+         update_descriptor_state(ctx, i, pg->is_compute);
    }
 }
 
 void
 zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type, unsigned start, unsigned count)
 {
+   if (type == ZINK_DESCRIPTOR_TYPE_UBO && !start) {
+      /* ubo 0 is the push set */
+      ctx->dd->push_state[shader == PIPE_SHADER_COMPUTE] = 0;
+      ctx->dd->push_valid[shader == PIPE_SHADER_COMPUTE] = false;
+      if (shader != PIPE_SHADER_COMPUTE) {
+         ctx->dd->gfx_push_state[shader] = 0;
+         ctx->dd->gfx_push_valid[shader] = false;
+      }
+      return;
+   }
    if (shader != PIPE_SHADER_COMPUTE) {
       ctx->dd->gfx_descriptor_states[shader].valid[type] = false;
       ctx->dd->gfx_descriptor_states[shader].state[type] = 0;
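The push-state hash chaining above, modeled standalone: the first participating stage seeds the hash and later stages fold in through XXH32. Here fake_xxh32 is a deterministic stand-in mixer, not the real XXH32 from xxhash:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_xxh32(const void *data, size_t len, uint32_t seed)
{
   /* NOT the real XXH32, just a stand-in for the demo */
   const uint8_t *p = data;
   uint32_t h = seed ^ 0x9e3779b9u;
   for (size_t i = 0; i < len; i++)
      h = (h ^ p[i]) * 0x01000193u;
   return h;
}

int main(void)
{
   uint32_t gfx_push_state[] = {0xaaa, 0xbbb, 0xccc};
   uint32_t hash = 0;
   int first = 1;
   for (unsigned stage = 0; stage < 3; stage++) {
      if (first)
         hash = gfx_push_state[stage]; /* first stage seeds the hash */
      else
         hash = fake_xxh32(&gfx_push_state[stage], sizeof(uint32_t), hash);
      first = 0;
   }
   printf("combined push state hash: 0x%08x\n", hash);
   return 0;
}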
@@ -108,6 +108,8 @@ zink_descriptor_util_layout_get(struct zink_context *ctx, enum zink_descriptor_t
                                 struct zink_descriptor_layout_key **layout_key);
 bool
 zink_descriptor_util_push_layouts_get(struct zink_context *ctx, VkDescriptorSetLayout *dsls, struct zink_descriptor_layout_key **layout_keys);
+void
+zink_descriptor_util_init_null_set(struct zink_context *ctx, VkDescriptorSet desc_set);
 struct zink_resource *
 zink_get_resource_for_descriptor(struct zink_context *ctx, enum zink_descriptor_type type, enum pipe_shader_type shader, int idx);
@@ -589,7 +589,7 @@ zink_batch_descriptor_init_lazy(struct zink_screen *screen, struct zink_batch_st
    if (!screen->info.have_KHR_push_descriptor) {
       VkDescriptorPoolSize sizes;
       sizes.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-      sizes.descriptorCount = ZINK_SHADER_COUNT * ZINK_DEFAULT_MAX_DESCS;
+      sizes.descriptorCount = ZINK_SHADER_COUNT * ZINK_DEFAULT_MAX_DESCS;
       bs->dd->push_pool[0] = rzalloc(bs, struct zink_descriptor_pool);
       bs->dd->push_pool[0]->pool = create_pool(screen, 1, &sizes, 0);
       sizes.descriptorCount = ZINK_DEFAULT_MAX_DESCS;
@@ -606,7 +606,6 @@ zink_descriptors_init_lazy(struct zink_context *ctx)
    if (!ctx->dd)
       return false;
 
-   VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES];
    for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
       VkDescriptorUpdateTemplateEntry *entry = &ctx->dd->push_entries[i];
       entry->dstBinding = tgsi_processor_to_shader_stage(i);
@@ -614,44 +613,23 @@ zink_descriptors_init_lazy(struct zink_context *ctx)
       entry->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
       entry->offset = offsetof(struct zink_context, di.ubos[i][0]);
       entry->stride = sizeof(VkDescriptorBufferInfo);
-
-      bindings[i].binding = tgsi_processor_to_shader_stage(i);
-      bindings[i].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-      bindings[i].descriptorCount = 1;
-      bindings[i].stageFlags = zink_shader_stage(i);
-      bindings[i].pImmutableSamplers = NULL;
    }
    struct zink_screen *screen = zink_screen(ctx->base.screen);
-   struct zink_descriptor_layout_key *layout_key;
-   enum zink_descriptor_type dsl_type = screen->info.have_KHR_push_descriptor ? ZINK_DESCRIPTOR_TYPES : ZINK_DESCRIPTOR_TYPE_UBO;
-   ctx->dd->push_dsl[0] = zink_descriptor_util_layout_get(ctx, dsl_type, bindings, ZINK_SHADER_COUNT, &layout_key);
-   ctx->dd->push_dsl[1] = zink_descriptor_util_layout_get(ctx, dsl_type, &bindings[PIPE_SHADER_COMPUTE], 1, &layout_key);
-   if (!ctx->dd->push_dsl[0] || !ctx->dd->push_dsl[1])
+   struct zink_descriptor_layout_key *layout_keys[2];
+   if (!zink_descriptor_util_push_layouts_get(ctx, ctx->dd->push_dsl, layout_keys))
       return false;
 
-   ctx->dd->dummy_dsl = zink_descriptor_util_layout_get(ctx, 2, bindings, 1, &layout_key);
+   ctx->dd->dummy_dsl = zink_descriptor_util_layout_get(ctx, 0, NULL, 0, &layout_keys[0]);
    if (!ctx->dd->dummy_dsl)
       return false;
    VkDescriptorPoolSize null_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1};
    ctx->dd->dummy_pool = create_pool(screen, 1, &null_size, 0);
    zink_descriptor_util_alloc_sets(screen, ctx->dd->dummy_dsl,
                                    ctx->dd->dummy_pool, &ctx->dd->dummy_set, 1);
-   VkDescriptorBufferInfo push_info;
-   VkWriteDescriptorSet push_wd;
-   push_wd.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
-   push_wd.pNext = NULL;
-   push_wd.dstBinding = 0;
-   push_wd.dstArrayElement = 0;
-   push_wd.descriptorCount = 1;
-   push_wd.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-   push_wd.dstSet = ctx->dd->dummy_set;
-   push_wd.pBufferInfo = &push_info;
-   push_info.buffer = screen->info.rb2_feats.nullDescriptor ?
-                      VK_NULL_HANDLE :
-                      zink_resource(ctx->dummy_vertex_buffer)->obj->buffer;
-   push_info.offset = 0;
-   push_info.range = VK_WHOLE_SIZE;
-   vkUpdateDescriptorSets(screen->dev, 1, &push_wd, 0, NULL);
+   zink_descriptor_util_init_null_set(ctx, ctx->dd->dummy_set);
 
    printf("ZINK: USING EXPERIMENTAL LAZY DESCRIPTORS\n");
-   return !!ctx->dd->dummy_dsl;
+   return true;
 }
 
 void