zink: split descriptor sets based on usage

this uses multiple descriptor sets so that we can perform more incremental
updates and get more value out of our cache by only invalidating one
descriptor type's state at a time

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9348>
Mike Blumenkrantz 2020-10-01 15:22:45 -04:00 committed by Marge Bot
parent c5ce20e97a
commit d4304ab0d2
8 changed files with 696 additions and 401 deletions
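The mechanics, condensed: each resource class now gets its own descriptor set index, so changing (say) a sampler view only invalidates and re-hashes the sampler-view set while the UBO, SSBO, and image sets keep their cached VkDescriptorSets. A minimal sketch of the scheme, using the enum this commit adds (the bind_sets() wrapper is illustrative, not part of the patch):

#include <vulkan/vulkan.h>

enum zink_descriptor_type {
   ZINK_DESCRIPTOR_TYPE_UBO,
   ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW,
   ZINK_DESCRIPTOR_TYPE_SSBO,
   ZINK_DESCRIPTOR_TYPE_IMAGE,
   ZINK_DESCRIPTOR_TYPES,
};

/* sketch: bind one set per usage class; the set index equals the
 * descriptor type, matching the vkCmdBindDescriptorSets(..., h, 1,
 * &zds[h]->desc_set, ...) calls added to update_descriptors() below */
static void
bind_sets(VkCommandBuffer cmdbuf, VkPipelineLayout layout,
          const VkDescriptorSet sets[ZINK_DESCRIPTOR_TYPES])
{
   for (unsigned h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
      if (sets[h] != VK_NULL_HANDLE)
         vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                 layout, h /* firstSet */, 1, &sets[h],
                                 0, NULL);
   }
}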

src/gallium/drivers/zink/nir_to_spirv/nir_to_spirv.c

@@ -854,7 +854,7 @@ emit_image(struct ntv_context *ctx, struct nir_variable *var)
assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
spirv_builder_emit_descriptor_set(&ctx->builder, var_id, 0);
spirv_builder_emit_descriptor_set(&ctx->builder, var_id, is_sampler ? ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW : ZINK_DESCRIPTOR_TYPE_IMAGE);
int binding = zink_binding(ctx->stage,
is_sampler ? zink_sampler_type(type) : zink_image_type(type),
var->data.binding);
@@ -976,7 +976,7 @@ emit_bo(struct ntv_context *ctx, struct nir_variable *var)
assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
spirv_builder_emit_descriptor_set(&ctx->builder, var_id, 0);
spirv_builder_emit_descriptor_set(&ctx->builder, var_id, ssbo ? ZINK_DESCRIPTOR_TYPE_SSBO : ZINK_DESCRIPTOR_TYPE_UBO);
int binding = zink_binding(ctx->stage,
ssbo ? VK_DESCRIPTOR_TYPE_STORAGE_BUFFER : VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
var->data.binding + i);
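Both hunks above make the same substitution: the hardcoded DescriptorSet 0 decoration becomes the variable's zink descriptor type. The selection logic, pulled out into a hypothetical helper for clarity (the numeric values follow the enum ordering added in zink_compiler.h below):

#include <stdbool.h>

/* hypothetical helper mirroring the two hunks: resolve the SPIR-V
 * DescriptorSet index from the variable's resource class */
static unsigned
descriptor_set_index(bool is_sampler, bool is_image, bool ssbo)
{
   if (is_sampler)
      return ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW; /* 1 */
   if (is_image)
      return ZINK_DESCRIPTOR_TYPE_IMAGE;        /* 3 */
   return ssbo ? ZINK_DESCRIPTOR_TYPE_SSBO      /* 2 */
               : ZINK_DESCRIPTOR_TYPE_UBO;      /* 0 */
}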

src/gallium/drivers/zink/zink_batch.c

@@ -52,16 +52,16 @@ zink_reset_batch(struct zink_context *ctx, struct zink_batch *batch)
hash_table_foreach(batch->programs, entry) {
struct zink_program *pg = (struct zink_program*)entry->key;
struct hash_table *desc_sets = (struct hash_table*)entry->data;
hash_table_foreach(desc_sets, sentry) {
struct zink_descriptor_set *zds = (void*)sentry->data;
struct set *desc_sets = (struct set*)entry->data;
set_foreach(desc_sets, sentry) {
struct zink_descriptor_set *zds = (void*)sentry->key;
/* reset descriptor pools when no batch is using this program to avoid
* having some inactive program hogging a billion descriptors
*/
pipe_reference(&zds->reference, NULL);
zink_program_recycle_desc_set(pg, sentry->hash, zds);
zink_program_recycle_desc_set(pg, zds);
}
_mesa_hash_table_destroy(desc_sets, NULL);
_mesa_set_destroy(desc_sets, NULL);
if (batch->batch_id == ZINK_COMPUTE_BATCH_ID) {
struct zink_compute_program *comp = (struct zink_compute_program*)entry->key;
zink_compute_program_reference(screen, &comp, NULL);
@@ -228,21 +228,21 @@ zink_batch_reference_program(struct zink_batch *batch,
{
struct hash_entry *entry = _mesa_hash_table_search(batch->programs, pg);
if (!entry) {
entry = _mesa_hash_table_insert(batch->programs, pg, _mesa_hash_table_create(batch->programs, NULL, _mesa_key_pointer_equal));
entry = _mesa_hash_table_insert(batch->programs, pg, _mesa_pointer_set_create(batch->programs));
pipe_reference(NULL, &pg->reference);
}
batch->has_work = true;
}
bool
zink_batch_add_desc_set(struct zink_batch *batch, struct zink_program *pg, uint32_t hash, struct zink_descriptor_set *zds)
zink_batch_add_desc_set(struct zink_batch *batch, struct zink_program *pg, struct zink_descriptor_set *zds)
{
struct hash_entry *entry = _mesa_hash_table_search(batch->programs, pg);
assert(entry);
struct hash_table *desc_sets = (void*)entry->data;
if (!_mesa_hash_table_search_pre_hashed(desc_sets, hash, (void*)(uintptr_t)hash)) {
struct set *desc_sets = (void*)entry->data;
if (!_mesa_set_search(desc_sets, zds)) {
pipe_reference(NULL, &zds->reference);
_mesa_hash_table_insert_pre_hashed(desc_sets, hash, (void*)(uintptr_t)hash, zds);
_mesa_set_add(desc_sets, zds);
return true;
}
return false;
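Keying the batch's tracking structure on the set pointer (a util/set) rather than on the 32-bit descriptor hash means two distinct descriptor states that happen to collide can no longer alias each other within a batch. The dedup pattern, isolated using the real _mesa_set_* API from src/util/set.h:

#include <stdbool.h>
#include "util/set.h"

/* sketch: reference a descriptor set on a batch exactly once; returns
 * true on first use so the caller knows to take a pipe_reference */
static bool
batch_track_desc_set(struct set *desc_sets, void *zds)
{
   if (_mesa_set_search(desc_sets, zds))
      return false;
   _mesa_set_add(desc_sets, zds);
   return true;
}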

src/gallium/drivers/zink/zink_batch.h

@@ -97,5 +97,5 @@ zink_batch_reference_surface(struct zink_batch *batch,
struct zink_surface *surface);
bool
zink_batch_add_desc_set(struct zink_batch *batch, struct zink_program *pg, uint32_t hash, struct zink_descriptor_set *zds);
zink_batch_add_desc_set(struct zink_batch *batch, struct zink_program *pg, struct zink_descriptor_set *zds);
#endif

src/gallium/drivers/zink/zink_compiler.c

@@ -609,7 +609,6 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
fprintf(stderr, "---8<---\n");
}
ret->num_bindings = 0;
uint32_t cur_ubo = 0;
/* UBO buffers are zero-indexed, but buffer 0 is always the one created by nir_lower_uniforms_to_ubo,
* which means there is no buffer 0 if there are no uniforms
@@ -623,12 +622,14 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
if (_nir_shader_variable_has_mode(var, nir_var_uniform |
nir_var_mem_ubo |
nir_var_mem_ssbo)) {
enum zink_descriptor_type ztype;
if (var->data.mode == nir_var_mem_ubo) {
/* ignore variables being accessed if they aren't the base of the UBO */
bool ubo_array = glsl_type_is_array(var->type) && glsl_type_is_interface(glsl_without_array(var->type));
if (var->data.location && !ubo_array && var->type != var->interface_type)
continue;
var->data.binding = cur_ubo;
ztype = ZINK_DESCRIPTOR_TYPE_UBO;
/* if this is a ubo array, create a binding point for each array member:
*
"For uniform blocks declared as arrays, each individual array element
@@ -642,11 +643,12 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
int binding = zink_binding(nir->info.stage,
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
cur_ubo++);
ret->bindings[ret->num_bindings].index = ubo_index++;
ret->bindings[ret->num_bindings].binding = binding;
ret->bindings[ret->num_bindings].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ret->bindings[ret->num_bindings].size = 1;
ret->num_bindings++;
ret->bindings[ztype][ret->num_bindings[ztype]].index = ubo_index++;
ret->bindings[ztype][ret->num_bindings[ztype]].binding = binding;
ret->bindings[ztype][ret->num_bindings[ztype]].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ret->bindings[ztype][ret->num_bindings[ztype]].size = 1;
ret->ubos_used |= (1 << ret->bindings[ztype][ret->num_bindings[ztype]].index);
ret->num_bindings[ztype]++;
}
} else if (var->data.mode == nir_var_mem_ssbo) {
/* same-ish mechanics as ubos */
@@ -656,35 +658,38 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
if (!var->data.explicit_binding) {
var->data.binding = ssbo_array_index;
}
ztype = ZINK_DESCRIPTOR_TYPE_SSBO;
for (unsigned i = 0; i < (bo_array ? glsl_get_aoa_size(var->type) : 1); i++) {
int binding = zink_binding(nir->info.stage,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
var->data.binding + i);
if (strcmp(glsl_get_type_name(var->interface_type), "counters"))
ret->bindings[ret->num_bindings].index = ssbo_array_index++;
ret->bindings[ztype][ret->num_bindings[ztype]].index = ssbo_array_index++;
else
ret->bindings[ret->num_bindings].index = var->data.binding;
ret->bindings[ret->num_bindings].binding = binding;
ret->bindings[ret->num_bindings].type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
ret->bindings[ret->num_bindings].size = 1;
ret->num_bindings++;
ret->bindings[ztype][ret->num_bindings[ztype]].index = var->data.binding;
ret->ssbos_used |= (1 << ret->bindings[ztype][ret->num_bindings[ztype]].index);
ret->bindings[ztype][ret->num_bindings[ztype]].binding = binding;
ret->bindings[ztype][ret->num_bindings[ztype]].type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
ret->bindings[ztype][ret->num_bindings[ztype]].size = 1;
ret->num_bindings[ztype]++;
}
} else {
assert(var->data.mode == nir_var_uniform);
const struct glsl_type *type = glsl_without_array(var->type);
if (glsl_type_is_sampler(type) || glsl_type_is_image(type)) {
VkDescriptorType vktype = glsl_type_is_image(type) ? zink_image_type(type) : zink_sampler_type(type);
ztype = zink_desc_type_from_vktype(vktype);
int binding = zink_binding(nir->info.stage,
vktype,
var->data.binding);
ret->bindings[ret->num_bindings].index = var->data.binding;
ret->bindings[ret->num_bindings].binding = binding;
ret->bindings[ret->num_bindings].type = vktype;
ret->bindings[ztype][ret->num_bindings[ztype]].index = var->data.binding;
ret->bindings[ztype][ret->num_bindings[ztype]].binding = binding;
ret->bindings[ztype][ret->num_bindings[ztype]].type = vktype;
if (glsl_type_is_array(var->type))
ret->bindings[ret->num_bindings].size = glsl_get_aoa_size(var->type);
ret->bindings[ztype][ret->num_bindings[ztype]].size = glsl_get_aoa_size(var->type);
else
ret->bindings[ret->num_bindings].size = 1;
ret->num_bindings++;
ret->bindings[ztype][ret->num_bindings[ztype]].size = 1;
ret->num_bindings[ztype]++;
}
}
}
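The compiler-side change is mechanical but worth spelling out: the flat bindings[] array becomes a table indexed by descriptor type first, and UBO/SSBO slot usage is recorded in bitfields that zink_program_get_descriptor_usage() later reads back. The bookkeeping pattern, with stand-in types (the 32-entry cap per type comes from the zink_compiler.h change below):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct binding_entry {
   int index, binding;
   unsigned type;        /* VkDescriptorType in the real struct */
   unsigned char size;
};

struct shader_bindings {
   struct binding_entry bindings[ZINK_DESCRIPTOR_TYPES][32];
   size_t num_bindings[ZINK_DESCRIPTOR_TYPES];
   uint32_t ubos_used, ssbos_used;   /* bitfields of used slots */
};

static void
add_ubo_binding(struct shader_bindings *s, struct binding_entry e)
{
   enum zink_descriptor_type t = ZINK_DESCRIPTOR_TYPE_UBO;
   assert(s->num_bindings[t] < 32);
   s->ubos_used |= 1u << e.index;   /* assumes index < 32, as the diff does */
   s->bindings[t][s->num_bindings[t]++] = e;
}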

src/gallium/drivers/zink/zink_compiler.h

@@ -63,6 +63,14 @@ zink_get_compiler_options(struct pipe_screen *screen,
struct nir_shader *
zink_tgsi_to_nir(struct pipe_screen *screen, const struct tgsi_token *tokens);
enum zink_descriptor_type {
ZINK_DESCRIPTOR_TYPE_UBO,
ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW,
ZINK_DESCRIPTOR_TYPE_SSBO,
ZINK_DESCRIPTOR_TYPE_IMAGE,
ZINK_DESCRIPTOR_TYPES,
};
struct zink_shader {
unsigned shader_id;
struct nir_shader *nir;
@@ -74,8 +82,10 @@ struct zink_shader {
int binding;
VkDescriptorType type;
unsigned char size;
} bindings[PIPE_MAX_CONSTANT_BUFFERS + PIPE_MAX_SAMPLERS + PIPE_MAX_SHADER_BUFFERS + PIPE_MAX_SHADER_IMAGES];
size_t num_bindings;
} bindings[ZINK_DESCRIPTOR_TYPES][32];
size_t num_bindings[ZINK_DESCRIPTOR_TYPES];
uint32_t ubos_used; // bitfield of which ubo indices are used
uint32_t ssbos_used; // bitfield of which ssbo indices are used
struct set *programs;
union {

src/gallium/drivers/zink/zink_context.c

@@ -224,12 +224,12 @@ get_gfx_program(struct zink_context *ctx)
}
static struct zink_descriptor_set *
get_descriptor_set(struct zink_context *ctx, bool is_compute, uint32_t desc_hash, bool *cache_hit)
get_descriptor_set(struct zink_context *ctx, bool is_compute, uint32_t desc_hash, enum zink_descriptor_type type, bool *cache_hit)
{
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
struct zink_batch *batch = is_compute ? &ctx->compute_batch : zink_curr_batch(ctx);
zink_batch_reference_program(batch, pg);
return zink_program_allocate_desc_set(ctx, batch, pg, desc_hash, cache_hit);
return zink_program_allocate_desc_set(ctx, batch, pg, desc_hash, type, cache_hit);
}
struct zink_transition {
@@ -303,13 +303,20 @@ write_descriptor_resource(struct zink_descriptor_resource *resource, struct zink
static void
update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is_compute)
{
VkWriteDescriptorSet wds[MAX_DESCRIPTORS];
struct zink_descriptor_resource resources[MAX_DESCRIPTORS];
struct zink_surface *surface_refs[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_IMAGES] = {};
VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * (PIPE_MAX_CONSTANT_BUFFERS + PIPE_MAX_SHADER_BUFFERS + PIPE_MAX_SHADER_IMAGES)];
VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * (PIPE_MAX_SAMPLERS + PIPE_MAX_SHADER_IMAGES)];
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
unsigned num_descriptors = zink_program_num_descriptors(pg);
unsigned num_bindings = zink_program_num_bindings(pg, is_compute);
unsigned num_image_bindings = zink_program_num_bindings_typed(pg, ZINK_DESCRIPTOR_TYPE_IMAGE, is_compute);
VkWriteDescriptorSet wds[ZINK_DESCRIPTOR_TYPES][num_descriptors];
struct zink_descriptor_resource resources[ZINK_DESCRIPTOR_TYPES][num_bindings];
struct zink_surface *surface_refs[num_image_bindings];
VkDescriptorBufferInfo buffer_infos[ZINK_DESCRIPTOR_TYPES][num_bindings];
VkDescriptorImageInfo image_infos[ZINK_DESCRIPTOR_TYPES][num_bindings];
VkBufferView buffer_view[] = {VK_NULL_HANDLE};
unsigned num_wds = 0, num_buffer_info = 0, num_image_info = 0, num_surface_refs = 0;
unsigned num_wds[ZINK_DESCRIPTOR_TYPES] = {0};
unsigned num_buffer_info[ZINK_DESCRIPTOR_TYPES] = {0};
unsigned num_image_info[ZINK_DESCRIPTOR_TYPES] = {0};
unsigned num_surface_refs = 0;
struct zink_shader **stages;
unsigned num_stages = is_compute ? 1 : ZINK_SHADER_COUNT;
@@ -318,226 +325,247 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
else
stages = &ctx->gfx_stages[0];
struct zink_transition transitions[MAX_DESCRIPTORS];
struct zink_transition transitions[num_bindings];
int num_transitions = 0;
struct set *ht = _mesa_set_create(NULL, transition_hash, transition_equals);
uint32_t desc_hash = 0;
uint32_t desc_hash[ZINK_DESCRIPTOR_TYPES] = {0};
for (int i = 0; i < num_stages; i++) {
struct zink_shader *shader = stages[i];
if (!shader)
continue;
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
for (int j = 0; j < shader->num_bindings; j++) {
int index = shader->bindings[j].index;
if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
assert(ctx->ubos[stage][index].buffer_size <= screen->info.props.limits.maxUniformBufferRange);
struct zink_resource *res = zink_resource(ctx->ubos[stage][index].buffer);
assert(!res || ctx->ubos[stage][index].buffer_size > 0);
assert(!res || ctx->ubos[stage][index].buffer);
read_descriptor_resource(&resources[num_wds], res);
buffer_infos[num_buffer_info].buffer = res ? res->buffer :
(screen->info.rb2_feats.nullDescriptor ?
VK_NULL_HANDLE :
zink_resource(ctx->dummy_vertex_buffer)->buffer);
buffer_infos[num_buffer_info].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
buffer_infos[num_buffer_info].range = res ? ctx->ubos[stage][index].buffer_size : VK_WHOLE_SIZE;
desc_hash = XXH32(&buffer_infos[num_buffer_info], sizeof(VkDescriptorBufferInfo), desc_hash);
if (res)
add_transition(res, 0, VK_ACCESS_UNIFORM_READ_BIT, stage, &transitions[num_transitions], &num_transitions, ht);
wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
++num_buffer_info;
} else if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
struct zink_resource *res = zink_resource(ctx->ssbos[stage][index].buffer);
if (res) {
assert(ctx->ssbos[stage][index].buffer_size > 0);
assert(ctx->ssbos[stage][index].buffer_size <= screen->info.props.limits.maxStorageBufferRange);
unsigned flag = VK_ACCESS_SHADER_READ_BIT;
if (ctx->writable_ssbos[stage] & (1 << index)) {
write_descriptor_resource(&resources[num_wds], res);
flag |= VK_ACCESS_SHADER_WRITE_BIT;
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
for (int i = 0; i < num_stages; i++) {
struct zink_shader *shader = stages[i];
if (!shader)
continue;
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
for (int j = 0; j < shader->num_bindings[h]; j++) {
int index = shader->bindings[h][j].index;
if (shader->bindings[h][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
assert(ctx->ubos[stage][index].buffer_size <= screen->info.props.limits.maxUniformBufferRange);
struct zink_resource *res = zink_resource(ctx->ubos[stage][index].buffer);
assert(num_wds[h] < num_bindings);
assert(!res || ctx->ubos[stage][index].buffer_size > 0);
assert(!res || ctx->ubos[stage][index].buffer);
read_descriptor_resource(&resources[h][num_wds[h]], res);
assert(num_buffer_info[h] < num_bindings);
buffer_infos[h][num_buffer_info[h]].buffer = res ? res->buffer :
(screen->info.rb2_feats.nullDescriptor ?
VK_NULL_HANDLE :
zink_resource(ctx->dummy_vertex_buffer)->buffer);
buffer_infos[h][num_buffer_info[h]].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
buffer_infos[h][num_buffer_info[h]].range = res ? ctx->ubos[stage][index].buffer_size : VK_WHOLE_SIZE;
desc_hash[h] = XXH32(&buffer_infos[h][num_buffer_info[h]], sizeof(VkDescriptorBufferInfo), desc_hash[h]);
if (res)
add_transition(res, 0, VK_ACCESS_UNIFORM_READ_BIT, stage, &transitions[num_transitions], &num_transitions, ht);
wds[h][num_wds[h]].pBufferInfo = buffer_infos[h] + num_buffer_info[h];
++num_buffer_info[h];
} else if (shader->bindings[h][j].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) {
struct zink_resource *res = zink_resource(ctx->ssbos[stage][index].buffer);
if (res) {
assert(ctx->ssbos[stage][index].buffer_size > 0);
assert(ctx->ssbos[stage][index].buffer_size <= screen->info.props.limits.maxStorageBufferRange);
assert(num_buffer_info[h] < num_bindings);
unsigned flag = VK_ACCESS_SHADER_READ_BIT;
assert(num_wds[h] < num_bindings);
if (ctx->writable_ssbos[stage] & (1 << index)) {
write_descriptor_resource(&resources[h][num_wds[h]], res);
flag |= VK_ACCESS_SHADER_WRITE_BIT;
} else {
read_descriptor_resource(&resources[h][num_wds[h]], res);
}
add_transition(res, 0, flag, stage, &transitions[num_transitions], &num_transitions, ht);
buffer_infos[h][num_buffer_info[h]].buffer = res->buffer;
buffer_infos[h][num_buffer_info[h]].offset = ctx->ssbos[stage][index].buffer_offset;
buffer_infos[h][num_buffer_info[h]].range = ctx->ssbos[stage][index].buffer_size;
} else {
read_descriptor_resource(&resources[num_wds], res);
}
add_transition(res, 0, flag, stage, &transitions[num_transitions], &num_transitions, ht);
buffer_infos[num_buffer_info].buffer = res->buffer;
buffer_infos[num_buffer_info].offset = ctx->ssbos[stage][index].buffer_offset;
buffer_infos[num_buffer_info].range = ctx->ssbos[stage][index].buffer_size;
} else {
assert(screen->info.rb2_feats.nullDescriptor);
buffer_infos[num_buffer_info].buffer = VK_NULL_HANDLE;
buffer_infos[num_buffer_info].offset = 0;
buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
}
desc_hash = XXH32(&buffer_infos[num_buffer_info], sizeof(VkDescriptorBufferInfo), desc_hash);
wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
++num_buffer_info;
} else {
for (unsigned k = 0; k < shader->bindings[j].size; k++) {
VkImageView imageview = VK_NULL_HANDLE;
struct zink_resource *res = NULL;
VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkSampler sampler = VK_NULL_HANDLE;
switch (shader->bindings[j].type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
/* fallthrough */
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
struct pipe_sampler_view *psampler_view = ctx->sampler_views[stage][index + k];
struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);
res = psampler_view ? zink_resource(psampler_view->texture) : NULL;
if (!res)
break;
if (res->base.target == PIPE_BUFFER) {
wds[num_wds].pTexelBufferView = &sampler_view->buffer_view;
desc_hash = XXH32(&sampler_view->base.u.buf, sizeof(sampler_view->base.u.buf), desc_hash);
} else {
imageview = sampler_view->image_view;
layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
sampler = ctx->samplers[stage][index + k];
}
add_transition(res, layout, VK_ACCESS_SHADER_READ_BIT, stage, &transitions[num_transitions], &num_transitions, ht);
read_descriptor_resource(&resources[num_wds], res);
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
/* fallthrough */
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
struct zink_image_view *image_view = &ctx->image_views[stage][index + k];
assert(image_view);
surface_refs[num_surface_refs++] = image_view->surface;
res = zink_resource(image_view->base.resource);
if (!res)
break;
if (image_view->base.resource->target == PIPE_BUFFER) {
wds[num_wds].pTexelBufferView = &image_view->buffer_view;
desc_hash = XXH32(&image_view->base.u.buf, sizeof(image_view->base.u.buf), desc_hash);
} else {
imageview = image_view->surface->image_view;
layout = VK_IMAGE_LAYOUT_GENERAL;
}
VkAccessFlags flags = 0;
if (image_view->base.access & PIPE_IMAGE_ACCESS_READ)
flags |= VK_ACCESS_SHADER_READ_BIT;
if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
flags |= VK_ACCESS_SHADER_WRITE_BIT;
add_transition(res, layout, flags, stage, &transitions[num_transitions], &num_transitions, ht);
if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
write_descriptor_resource(&resources[num_wds], res);
else
read_descriptor_resource(&resources[num_wds], res);
}
break;
default:
unreachable("unknown descriptor type");
}
if (!res) {
/* if we're hitting this assert often, we can probably just throw a junk buffer in since
* the results of this codepath are undefined in ARB_texture_buffer_object spec
*/
assert(screen->info.rb2_feats.nullDescriptor);
read_descriptor_resource(&resources[num_wds], res);
switch (shader->bindings[j].type) {
read_descriptor_resource(&resources[h][num_wds[h]], res);
buffer_infos[h][num_buffer_info[h]].buffer = VK_NULL_HANDLE;
buffer_infos[h][num_buffer_info[h]].offset = 0;
buffer_infos[h][num_buffer_info[h]].range = VK_WHOLE_SIZE;
}
desc_hash[h] = XXH32(&buffer_infos[h][num_buffer_info[h]], sizeof(VkDescriptorBufferInfo), desc_hash[h]);
wds[h][num_wds[h]].pBufferInfo = buffer_infos[h] + num_buffer_info[h];
++num_buffer_info[h];
} else {
for (unsigned k = 0; k < shader->bindings[h][j].size; k++) {
VkImageView imageview = VK_NULL_HANDLE;
struct zink_resource *res = NULL;
VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkSampler sampler = VK_NULL_HANDLE;
switch (shader->bindings[h][j].type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
/* fallthrough */
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
struct pipe_sampler_view *psampler_view = ctx->sampler_views[stage][index + k];
struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);
res = psampler_view ? zink_resource(psampler_view->texture) : NULL;
if (!res)
break;
if (res->base.target == PIPE_BUFFER) {
wds[h][num_wds[h]].pTexelBufferView = &sampler_view->buffer_view;
desc_hash[h] = XXH32(&sampler_view->base.u.buf, sizeof(sampler_view->base.u.buf), desc_hash[h]);
} else {
imageview = sampler_view->image_view;
layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
sampler = ctx->samplers[stage][index + k];
}
add_transition(res, layout, VK_ACCESS_SHADER_READ_BIT, stage, &transitions[num_transitions], &num_transitions, ht);
assert(num_wds[h] < num_bindings);
read_descriptor_resource(&resources[h][num_wds[h]], res);
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
wds[num_wds].pTexelBufferView = &buffer_view[0];
desc_hash = XXH32(&buffer_view[0], sizeof(VkBufferView), desc_hash);
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
image_infos[num_image_info].imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_infos[num_image_info].imageView = VK_NULL_HANDLE;
image_infos[num_image_info].sampler = sampler;
if (!k)
wds[num_wds].pImageInfo = image_infos + num_image_info;
desc_hash = XXH32(&image_infos[num_image_info], sizeof(VkDescriptorImageInfo), desc_hash);
++num_image_info;
break;
/* fallthrough */
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
struct zink_image_view *image_view = &ctx->image_views[stage][index + k];
assert(image_view);
assert(num_surface_refs < num_image_bindings);
surface_refs[num_surface_refs++] = image_view->surface;
res = zink_resource(image_view->base.resource);
if (!res)
break;
if (image_view->base.resource->target == PIPE_BUFFER) {
wds[h][num_wds[h]].pTexelBufferView = &image_view->buffer_view;
desc_hash[h] = XXH32(&image_view->base.u.buf, sizeof(image_view->base.u.buf), desc_hash[h]);
} else {
imageview = image_view->surface->image_view;
layout = VK_IMAGE_LAYOUT_GENERAL;
}
VkAccessFlags flags = 0;
if (image_view->base.access & PIPE_IMAGE_ACCESS_READ)
flags |= VK_ACCESS_SHADER_READ_BIT;
if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
flags |= VK_ACCESS_SHADER_WRITE_BIT;
add_transition(res, layout, flags, stage, &transitions[num_transitions], &num_transitions, ht);
assert(num_wds[h] < num_bindings);
if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
write_descriptor_resource(&resources[h][num_wds[h]], res);
else
read_descriptor_resource(&resources[h][num_wds[h]], res);
}
break;
default:
unreachable("unknown descriptor type");
}
} else if (res->base.target != PIPE_BUFFER) {
assert(layout != VK_IMAGE_LAYOUT_UNDEFINED);
image_infos[num_image_info].imageLayout = layout;
image_infos[num_image_info].imageView = imageview;
image_infos[num_image_info].sampler = ctx->samplers[stage][index + k];
if (!k)
wds[num_wds].pImageInfo = image_infos + num_image_info;
desc_hash = XXH32(&image_infos[num_image_info], sizeof(VkDescriptorImageInfo), desc_hash);
++num_image_info;
} else
desc_hash = XXH32(&res->buffer, sizeof(VkBuffer), desc_hash);
}
}
wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
wds[num_wds].pNext = NULL;
wds[num_wds].dstBinding = shader->bindings[j].binding;
wds[num_wds].dstArrayElement = 0;
wds[num_wds].descriptorCount = shader->bindings[j].size;
wds[num_wds].descriptorType = shader->bindings[j].type;
++num_wds;
if (!res) {
/* if we're hitting this assert often, we can probably just throw a junk buffer in since
* the results of this codepath are undefined in ARB_texture_buffer_object spec
*/
assert(screen->info.rb2_feats.nullDescriptor);
assert(num_wds[h] < num_bindings);
read_descriptor_resource(&resources[h][num_wds[h]], res);
switch (shader->bindings[h][j].type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
wds[h][num_wds[h]].pTexelBufferView = &buffer_view[0];
desc_hash[h] = XXH32(&buffer_view[0], sizeof(VkBufferView), desc_hash[h]);
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
assert(num_image_info[h] < num_bindings);
image_infos[h][num_image_info[h]].imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_infos[h][num_image_info[h]].imageView = VK_NULL_HANDLE;
image_infos[h][num_image_info[h]].sampler = sampler;
if (!k)
wds[h][num_wds[h]].pImageInfo = image_infos[h] + num_image_info[h];
desc_hash[h] = XXH32(&image_infos[h][num_image_info[h]], sizeof(VkDescriptorImageInfo), desc_hash[h]);
++num_image_info[h];
break;
default:
unreachable("unknown descriptor type");
}
} else if (res->base.target != PIPE_BUFFER) {
assert(layout != VK_IMAGE_LAYOUT_UNDEFINED);
assert(num_image_info[h] < num_bindings);
image_infos[h][num_image_info[h]].imageLayout = layout;
image_infos[h][num_image_info[h]].imageView = imageview;
image_infos[h][num_image_info[h]].sampler = sampler;
if (!k)
wds[h][num_wds[h]].pImageInfo = image_infos[h] + num_image_info[h];
desc_hash[h] = XXH32(&image_infos[h][num_image_info[h]], sizeof(VkDescriptorImageInfo), desc_hash[h]);
++num_image_info[h];
} else
desc_hash[h] = XXH32(&res->buffer, sizeof(VkBuffer), desc_hash[h]);
}
}
wds[h][num_wds[h]].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
wds[h][num_wds[h]].pNext = NULL;
wds[h][num_wds[h]].dstBinding = shader->bindings[h][j].binding;
wds[h][num_wds[h]].dstArrayElement = 0;
wds[h][num_wds[h]].descriptorCount = shader->bindings[h][j].size;
wds[h][num_wds[h]].descriptorType = shader->bindings[h][j].type;
++num_wds[h];
}
}
}
_mesa_set_destroy(ht, NULL);
struct zink_batch *batch = NULL;
bool cache_hit = false;
struct zink_program *pg = is_compute ? &ctx->curr_compute->base : &ctx->curr_program->base;
struct zink_descriptor_set *zds = get_descriptor_set(ctx, is_compute, desc_hash, &cache_hit);
assert(zds != VK_NULL_HANDLE);
bool cache_hit[ZINK_DESCRIPTOR_TYPES];
struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES];
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
if (pg->dsl[h])
zds[h] = get_descriptor_set(ctx, is_compute, desc_hash[h], h, &cache_hit[h]);
else
zds[h] = NULL;
}
batch = is_compute ? &ctx->compute_batch : zink_curr_batch(ctx);
unsigned check_flush_id = is_compute ? 0 : ZINK_COMPUTE_BATCH_ID;
bool need_flush = false;
if (num_wds > 0) {
for (int i = 0; i < num_wds; ++i) {
wds[i].dstSet = zds->desc_set;
struct zink_resource *res = resources[i].res;
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
if (!zds[h])
continue;
assert(zds[h]->desc_set);
for (int i = 0; i < num_wds[h]; ++i) {
wds[h][i].dstSet = zds[h]->desc_set;
struct zink_resource *res = resources[h][i].res;
if (res) {
need_flush |= zink_batch_reference_resource_rw(batch, res, resources[i].write) == check_flush_id;
need_flush |= zink_batch_reference_resource_rw(batch, res, resources[h][i].write) == check_flush_id;
}
/* if we got a cache hit, we have to verify that the cached set is still valid;
* we store the vk resource to the set here to avoid a more complex and costly mechanism of maintaining a
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
* whenever a resource is destroyed
*/
cache_hit = cache_hit && zds->resources[i] == res;
if (zds->resources[i] != res)
zink_resource_desc_set_add(res, zds, i);
cache_hit[h] = cache_hit[h] && zds[h]->resources[i] == res;
if (zds[h]->resources[i] != res)
zink_resource_desc_set_add(res, zds[h], i);
}
if (!cache_hit)
vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
}
if (!cache_hit[h] && num_wds[h])
vkUpdateDescriptorSets(screen->dev, num_wds[h], wds[h], 0, NULL);
if (is_compute)
vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_COMPUTE,
ctx->curr_compute->layout, 0, 1, &zds->desc_set, 0, NULL);
else {
batch = zink_batch_rp(ctx);
vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
ctx->curr_program->layout, 0, 1, &zds->desc_set, 0, NULL);
}
if (is_compute)
vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_COMPUTE,
ctx->curr_compute->layout, h, 1, &zds[h]->desc_set, 0, NULL);
else
vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
ctx->curr_program->layout, h, 1, &zds[h]->desc_set, 0, NULL);
for (int i = 0; i < num_stages; i++) {
struct zink_shader *shader = stages[i];
if (!shader)
continue;
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
for (int i = 0; i < num_stages; i++) {
struct zink_shader *shader = stages[i];
if (!shader)
continue;
enum pipe_shader_type stage = pipe_shader_type_from_mesa(shader->nir->info.stage);
for (int j = 0; j < shader->num_bindings; j++) {
int index = shader->bindings[j].index;
if (shader->bindings[j].type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[stage][index]);
if (sampler_view)
zink_batch_reference_sampler_view(batch, sampler_view);
for (int j = 0; j < shader->num_bindings[h]; j++) {
int index = shader->bindings[h][j].index;
if (shader->bindings[h][j].type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[stage][index]);
if (sampler_view)
zink_batch_reference_sampler_view(batch, sampler_view);
}
}
}
}
for (int i = 0; i < num_transitions; ++i) {
zink_resource_barrier(ctx, NULL, transitions[i].res,
transitions[i].layout, transitions[i].access, transitions[i].stage);
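Note how desc_hash became an array: update_descriptors() now folds each descriptor's state into a per-type running XXH32 hash, seeding every call with the previous value, so a UBO rebind perturbs only the UBO set's cache key. The chaining pattern, isolated (XXH32(input, len, seed) is the real xxhash entry point, vendored in mesa as src/util/xxhash.h):

#include <stddef.h>
#include <stdint.h>
#include "util/xxhash.h"

/* sketch: one running hash per descriptor type */
static void
fold_descriptor_state(uint32_t desc_hash[ZINK_DESCRIPTOR_TYPES],
                      enum zink_descriptor_type type,
                      const void *state, size_t size)
{
   desc_hash[type] = XXH32(state, size, desc_hash[type]);
}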

src/gallium/drivers/zink/zink_program.c

@@ -110,14 +110,13 @@ keybox_equals(const void *void_a, const void *void_b)
return memcmp(a->data, b->data, a->size) == 0;
}
static VkDescriptorSetLayout
static bool
create_desc_set_layout(VkDevice dev,
struct zink_shader *stages[ZINK_SHADER_COUNT],
unsigned *num_descriptors,
VkDescriptorPool *descpool)
struct zink_program *pg)
{
VkDescriptorSetLayoutBinding bindings[(PIPE_SHADER_TYPES * (PIPE_MAX_CONSTANT_BUFFERS + PIPE_MAX_SAMPLERS + PIPE_MAX_SHADER_BUFFERS + PIPE_MAX_SHADER_IMAGES))];
int num_bindings = 0;
VkDescriptorSetLayoutBinding bindings[ZINK_DESCRIPTOR_TYPES][PIPE_SHADER_TYPES * 32];
int num_bindings[ZINK_DESCRIPTOR_TYPES] = {};
VkDescriptorPoolSize sizes[6] = {};
int type_map[12];
@@ -130,63 +129,151 @@ create_desc_set_layout(VkDevice dev,
continue;
VkShaderStageFlagBits stage_flags = zink_shader_stage(pipe_shader_type_from_mesa(shader->nir->info.stage));
for (int j = 0; j < shader->num_bindings; j++) {
assert(num_bindings < ARRAY_SIZE(bindings));
bindings[num_bindings].binding = shader->bindings[j].binding;
bindings[num_bindings].descriptorType = shader->bindings[j].type;
bindings[num_bindings].descriptorCount = shader->bindings[j].size;
bindings[num_bindings].stageFlags = stage_flags;
bindings[num_bindings].pImmutableSamplers = NULL;
if (type_map[shader->bindings[j].type] == -1) {
type_map[shader->bindings[j].type] = num_types++;
sizes[type_map[shader->bindings[j].type]].type = shader->bindings[j].type;
for (int j = 0; j < ZINK_DESCRIPTOR_TYPES; j++) {
for (int k = 0; k < shader->num_bindings[j]; k++) {
assert(num_bindings[j] < ARRAY_SIZE(bindings[j]));
bindings[j][num_bindings[j]].binding = shader->bindings[j][k].binding;
bindings[j][num_bindings[j]].descriptorType = shader->bindings[j][k].type;
bindings[j][num_bindings[j]].descriptorCount = shader->bindings[j][k].size;
bindings[j][num_bindings[j]].stageFlags = stage_flags;
bindings[j][num_bindings[j]].pImmutableSamplers = NULL;
if (type_map[shader->bindings[j][k].type] == -1) {
type_map[shader->bindings[j][k].type] = num_types++;
sizes[type_map[shader->bindings[j][k].type]].type = shader->bindings[j][k].type;
}
sizes[type_map[shader->bindings[j][k].type]].descriptorCount += shader->bindings[j][k].size;
++num_bindings[j];
}
sizes[type_map[shader->bindings[j].type]].descriptorCount += shader->bindings[j].size;
++num_bindings;
}
}
*num_descriptors = num_bindings;
if (!num_bindings)
return VK_NULL_HANDLE;
unsigned total_descs = 0;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
pg->num_descriptors[i] = num_bindings[i];
total_descs += num_bindings[i];
}
if (!total_descs)
return true;
for (int i = 0; i < num_types; i++)
sizes[i].descriptorCount *= ZINK_DEFAULT_MAX_DESCS;
VkDescriptorSetLayoutCreateInfo dcslci = {};
dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
dcslci.pNext = NULL;
dcslci.flags = 0;
dcslci.bindingCount = num_bindings;
dcslci.pBindings = bindings;
VkDescriptorSetLayout null_set = VK_NULL_HANDLE;
VkDescriptorPool null_pool = VK_NULL_HANDLE;
bool found_descriptors = false;
for (unsigned i = ZINK_DESCRIPTOR_TYPES - 1; i < ZINK_DESCRIPTOR_TYPES; i--) {
VkDescriptorSetLayoutCreateInfo dcslci = {};
dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
dcslci.pNext = NULL;
dcslci.flags = 0;
VkDescriptorSetLayout dsl;
if (vkCreateDescriptorSetLayout(dev, &dcslci, 0, &dsl) != VK_SUCCESS) {
debug_printf("vkCreateDescriptorSetLayout failed\n");
return VK_NULL_HANDLE;
}
VkDescriptorPoolCreateInfo dpci = {};
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
dpci.pPoolSizes = sizes;
dpci.poolSizeCount = num_types;
dpci.flags = 0;
dpci.maxSets = ZINK_DEFAULT_MAX_DESCS;
if (vkCreateDescriptorPool(dev, &dpci, 0, descpool) != VK_SUCCESS) {
vkDestroyDescriptorSetLayout(dev, dsl, NULL);
return VK_NULL_HANDLE;
}
if (!num_bindings[i]) {
if (!found_descriptors)
continue;
if (!null_set) {
dcslci.bindingCount = 1;
VkDescriptorSetLayoutBinding null_binding;
null_binding.binding = 1;
null_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
null_binding.descriptorCount = 1;
null_binding.pImmutableSamplers = NULL;
null_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
dcslci.pBindings = &null_binding;
if (vkCreateDescriptorSetLayout(dev, &dcslci, 0, &null_set) != VK_SUCCESS) {
debug_printf("vkCreateDescriptorSetLayout failed\n");
return false;
}
VkDescriptorPoolCreateInfo dpci = {};
VkDescriptorPoolSize null_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_DESCRIPTOR_TYPES};
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
dpci.pPoolSizes = &null_size;
dpci.poolSizeCount = 1;
dpci.flags = 0;
dpci.maxSets = 1;
if (vkCreateDescriptorPool(dev, &dpci, 0, &null_pool) != VK_SUCCESS)
return false;
}
pg->dsl[i] = null_set;
pg->descpool[i] = null_pool;
continue;
}
dcslci.bindingCount = num_bindings[i];
dcslci.pBindings = bindings[i];
found_descriptors = true;
return dsl;
if (vkCreateDescriptorSetLayout(dev, &dcslci, 0, &pg->dsl[i]) != VK_SUCCESS) {
debug_printf("vkCreateDescriptorSetLayout failed\n");
return false;
}
VkDescriptorPoolSize type_sizes[2] = {};
int num_type_sizes = 0;
switch (i) {
case ZINK_DESCRIPTOR_TYPE_UBO:
if (type_map[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] != -1) {
num_type_sizes = 1;
type_sizes[0] = sizes[type_map[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER]];
}
break;
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
if (type_map[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER] != -1) {
type_sizes[num_type_sizes] = sizes[type_map[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]];
num_type_sizes++;
}
if (type_map[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] != -1) {
type_sizes[num_type_sizes] = sizes[type_map[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]];
num_type_sizes++;
}
break;
case ZINK_DESCRIPTOR_TYPE_SSBO:
if (type_map[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] != -1) {
num_type_sizes = 1;
type_sizes[0] = sizes[type_map[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER]];
}
break;
case ZINK_DESCRIPTOR_TYPE_IMAGE:
if (type_map[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER] != -1) {
type_sizes[num_type_sizes] = sizes[type_map[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]];
num_type_sizes++;
}
if (type_map[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] != -1) {
type_sizes[num_type_sizes] = sizes[type_map[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE]];
num_type_sizes++;
}
break;
}
VkDescriptorPoolCreateInfo dpci = {};
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
dpci.pPoolSizes = type_sizes;
dpci.poolSizeCount = num_type_sizes;
dpci.flags = 0;
dpci.maxSets = ZINK_DEFAULT_MAX_DESCS;
if (vkCreateDescriptorPool(dev, &dpci, 0, &pg->descpool[i]) != VK_SUCCESS) {
return false;
}
}
return true;
}
static VkPipelineLayout
create_gfx_pipeline_layout(VkDevice dev, VkDescriptorSetLayout dsl)
create_gfx_pipeline_layout(VkDevice dev, struct zink_gfx_program *prog)
{
VkPipelineLayoutCreateInfo plci = {};
plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
plci.pSetLayouts = &dsl;
plci.setLayoutCount = !!dsl;
VkDescriptorSetLayout layouts[ZINK_DESCRIPTOR_TYPES];
unsigned num_layouts = 0;
unsigned num_descriptors = zink_program_num_descriptors(&prog->base);
if (num_descriptors) {
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
layouts[num_layouts] = prog->base.dsl[i];
num_layouts += !!layouts[num_layouts];
}
}
plci.pSetLayouts = layouts;
plci.setLayoutCount = num_layouts;
VkPushConstantRange pcr[2] = {};
@@ -209,13 +296,23 @@ create_gfx_pipeline_layout(VkDevice dev, VkDescriptorSetLayout dsl)
}
static VkPipelineLayout
create_compute_pipeline_layout(VkDevice dev, VkDescriptorSetLayout dsl)
create_compute_pipeline_layout(VkDevice dev, struct zink_compute_program *comp)
{
VkPipelineLayoutCreateInfo plci = {};
plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
plci.pSetLayouts = &dsl;
plci.setLayoutCount = !!dsl;
VkDescriptorSetLayout layouts[ZINK_DESCRIPTOR_TYPES];
unsigned num_layouts = 0;
unsigned num_descriptors = zink_program_num_descriptors(&comp->base);
if (num_descriptors) {
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
layouts[num_layouts] = comp->base.dsl[i];
num_layouts += !!layouts[num_layouts];
}
}
plci.pSetLayouts = layouts;
plci.setLayoutCount = num_layouts;
VkPipelineLayout layout;
if (vkCreatePipelineLayout(dev, &plci, NULL, &layout) != VK_SUCCESS) {
@@ -554,24 +651,27 @@ zink_create_gfx_program(struct zink_context *ctx,
}
}
prog->base.dsl = create_desc_set_layout(screen->dev, stages,
&prog->base.num_descriptors, &prog->base.descpool);
if (prog->base.num_descriptors && (!prog->base.dsl || !prog->base.descpool))
if (!create_desc_set_layout(screen->dev, stages, &prog->base))
goto fail;
prog->layout = create_gfx_pipeline_layout(screen->dev, prog->base.dsl);
prog->layout = create_gfx_pipeline_layout(screen->dev, prog);
if (!prog->layout)
goto fail;
prog->base.desc_sets = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!prog->base.desc_sets)
goto fail;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
if (!prog->base.num_descriptors[i])
continue;
prog->base.desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!prog->base.desc_sets[i])
goto fail;
prog->base.free_desc_sets = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!prog->base.free_desc_sets)
goto fail;
prog->base.free_desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!prog->base.free_desc_sets[i])
goto fail;
util_dynarray_init(&prog->base.alloc_desc_sets[i], NULL);
}
util_dynarray_init(&prog->base.alloc_desc_sets, NULL);
return prog;
fail:
@@ -663,24 +763,26 @@ zink_create_compute_program(struct zink_context *ctx, struct zink_shader *shader
struct zink_shader *stages[ZINK_SHADER_COUNT] = {};
stages[0] = shader;
comp->base.dsl = create_desc_set_layout(screen->dev, stages,
&comp->base.num_descriptors, &comp->base.descpool);
if (comp->base.num_descriptors && (!comp->base.dsl || !comp->base.descpool))
if (!create_desc_set_layout(screen->dev, stages, (struct zink_program*)comp))
goto fail;
comp->layout = create_compute_pipeline_layout(screen->dev, comp->base.dsl);
comp->layout = create_compute_pipeline_layout(screen->dev, comp);
if (!comp->layout)
goto fail;
comp->base.desc_sets = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!comp->base.desc_sets)
goto fail;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
if (!comp->base.num_descriptors[i])
continue;
comp->base.desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!comp->base.desc_sets[i])
goto fail;
comp->base.free_desc_sets = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!comp->base.free_desc_sets)
goto fail;
comp->base.free_desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
if (!comp->base.free_desc_sets[i])
goto fail;
util_dynarray_init(&comp->base.alloc_desc_sets, NULL);
util_dynarray_init(&comp->base.alloc_desc_sets[i], NULL);
}
return comp;
@@ -693,36 +795,38 @@ fail:
static inline void
desc_set_invalidate_resources(struct zink_program *pg, struct zink_descriptor_set *zds)
{
for (unsigned i = 0; i < pg->num_descriptors; i++)
for (unsigned i = 0; i < pg->num_descriptors[zds->type]; i++)
zds->resources[i] = NULL;
zds->valid = false;
zds->invalid = true;
}
static bool
get_invalidated_desc_set(struct zink_descriptor_set *zds)
{
if (zds->valid)
if (!zds->invalid)
return false;
return p_atomic_read(&zds->reference.count) == 1;
}
static struct zink_descriptor_set *
allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, unsigned descs_used)
allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used)
{
VkDescriptorSetAllocateInfo dsai;
#define DESC_BUCKET_FACTOR 10
unsigned bucket_size = DESC_BUCKET_FACTOR;
for (unsigned desc_factor = DESC_BUCKET_FACTOR; desc_factor < descs_used; desc_factor *= DESC_BUCKET_FACTOR)
bucket_size = desc_factor;
unsigned bucket_size = pg->num_descriptors[type] ? DESC_BUCKET_FACTOR : 1;
if (pg->num_descriptors[type]) {
for (unsigned desc_factor = DESC_BUCKET_FACTOR; desc_factor < descs_used; desc_factor *= DESC_BUCKET_FACTOR)
bucket_size = desc_factor;
}
VkDescriptorSetLayout layouts[bucket_size];
memset((void *)&dsai, 0, sizeof(dsai));
dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
dsai.pNext = NULL;
dsai.descriptorPool = pg->descpool;
dsai.descriptorPool = pg->descpool[type];
dsai.descriptorSetCount = bucket_size;
for (unsigned i = 0; i < bucket_size; i ++)
layouts[i] = pg->dsl;
layouts[i] = pg->dsl[type];
dsai.pSetLayouts = layouts;
VkDescriptorSet desc_set[bucket_size];
@@ -733,16 +837,18 @@ allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, unsigned
struct zink_descriptor_set *alloc = ralloc_array(pg, struct zink_descriptor_set, bucket_size);
assert(alloc);
struct zink_resource **resources = rzalloc_array(pg, struct zink_resource*, pg->num_descriptors * bucket_size);
struct zink_resource **resources = rzalloc_array(pg, struct zink_resource*, pg->num_descriptors[type] * bucket_size);
assert(resources);
for (unsigned i = 0; i < bucket_size; i ++) {
struct zink_descriptor_set *zds = &alloc[i];
pipe_reference_init(&zds->reference, 1);
zds->valid = false;
zds->resources = &resources[i * pg->num_descriptors];
zds->hash = 0;
zds->invalid = true;
zds->type = type;
zds->resources = &resources[i * pg->num_descriptors[type]];
zds->desc_set = desc_set[i];
if (i > 0)
util_dynarray_append(&pg->alloc_desc_sets, struct zink_descriptor_set *, zds);
util_dynarray_append(&pg->alloc_desc_sets[type], struct zink_descriptor_set *, zds);
}
return alloc;
}
@@ -752,92 +858,107 @@ zink_program_allocate_desc_set(struct zink_context *ctx,
struct zink_batch *batch,
struct zink_program *pg,
uint32_t hash,
enum zink_descriptor_type type,
bool *cache_hit)
{
*cache_hit = false;
struct zink_descriptor_set *zds;
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets, hash, (void*)(uintptr_t)hash);
bool recycled = false;
if (he) {
zds = (void*)he->data;
/* this shouldn't happen, but if we somehow get a cache hit on an invalidated, active desc set then
* we probably should just crash here rather than later
*/
assert(zds->valid);
}
if (!he) {
he = _mesa_hash_table_search_pre_hashed(pg->free_desc_sets, hash, (void*)(uintptr_t)hash);
recycled = true;
}
if (he) {
zds = (void*)he->data;
*cache_hit = zds->valid;
if (recycled) {
/* need to migrate this entry back to the in-use hash */
_mesa_hash_table_remove(pg->free_desc_sets, he);
unsigned descs_used = 1;
assert(type < ZINK_DESCRIPTOR_TYPES);
if (pg->num_descriptors[type]) {
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets[type], hash, (void*)(uintptr_t)hash);
bool recycled = false;
if (he) {
zds = (void*)he->data;
/* this shouldn't happen, but if we somehow get a cache hit on an invalidated, active desc set then
* we probably should just crash here rather than later
*/
assert(!zds->invalid);
}
if (!he) {
he = _mesa_hash_table_search_pre_hashed(pg->free_desc_sets[type], hash, (void*)(uintptr_t)hash);
recycled = true;
}
if (he) {
zds = (void*)he->data;
*cache_hit = !zds->invalid;
if (recycled) {
/* need to migrate this entry back to the in-use hash */
_mesa_hash_table_remove(pg->free_desc_sets[type], he);
goto out;
}
goto quick_out;
}
if (util_dynarray_num_elements(&pg->alloc_desc_sets[type], struct zink_descriptor_set *)) {
/* grab one off the allocated array */
zds = util_dynarray_pop(&pg->alloc_desc_sets[type], struct zink_descriptor_set *);
goto out;
}
if (zink_batch_add_desc_set(batch, pg, hash, zds))
batch->descs_used += pg->num_descriptors;
return zds;
}
if (util_dynarray_num_elements(&pg->alloc_desc_sets, struct zink_descriptor_set *)) {
/* grab one off the allocated array */
zds = util_dynarray_pop(&pg->alloc_desc_sets, struct zink_descriptor_set *);
goto out;
}
if (_mesa_hash_table_num_entries(pg->free_desc_sets[type])) {
/* try for an invalidated set first */
unsigned count = 0;
hash_table_foreach(pg->free_desc_sets[type], he) {
struct zink_descriptor_set *tmp = he->data;
if ((count++ >= 100 && tmp->reference.count == 1) || get_invalidated_desc_set(he->data)) {
zds = tmp;
assert(p_atomic_read(&zds->reference.count) == 1);
desc_set_invalidate_resources(pg, zds);
_mesa_hash_table_remove(pg->free_desc_sets[type], he);
goto out;
}
}
}
if (_mesa_hash_table_num_entries(pg->free_desc_sets)) {
/* try for an invalidated set first */
unsigned count = 0;
hash_table_foreach(pg->free_desc_sets, he) {
struct zink_descriptor_set *tmp = he->data;
if ((count >= 100 && tmp->reference.count == 1) || get_invalidated_desc_set(he->data)) {
zds = tmp;
assert(p_atomic_read(&zds->reference.count) == 1);
desc_set_invalidate_resources(pg, zds);
_mesa_hash_table_remove(pg->free_desc_sets, he);
goto out;
} else
count++;
descs_used = _mesa_hash_table_num_entries(pg->desc_sets[type]) + _mesa_hash_table_num_entries(pg->free_desc_sets[type]);
if (descs_used + pg->num_descriptors[type] > ZINK_DEFAULT_MAX_DESCS) {
batch = zink_flush_batch(ctx, batch);
zink_batch_reference_program(batch, pg);
return zink_program_allocate_desc_set(ctx, batch, pg, hash, type, cache_hit);
}
} else {
zds = pg->null_set;
if (zds) {
*cache_hit = true;
goto quick_out;
}
}
unsigned descs_used = _mesa_hash_table_num_entries(pg->desc_sets) + _mesa_hash_table_num_entries(pg->free_desc_sets);
if (descs_used + pg->num_descriptors > ZINK_DEFAULT_MAX_DESCS) {
batch = zink_flush_batch(ctx, batch);
zink_batch_reference_program(batch, pg);
return zink_program_allocate_desc_set(ctx, batch, pg, hash, cache_hit);
}
zds = allocate_desc_set(screen, pg, descs_used);
zds = allocate_desc_set(screen, pg, type, descs_used);
out:
zds->valid = true;
_mesa_hash_table_insert_pre_hashed(pg->desc_sets, hash, (void*)(uintptr_t)hash, zds);
if (zink_batch_add_desc_set(batch, pg, hash, zds))
batch->descs_used += pg->num_descriptors;
zds->hash = hash;
if (pg->num_descriptors[type])
_mesa_hash_table_insert_pre_hashed(pg->desc_sets[type], hash, (void*)(uintptr_t)hash, zds);
else
pg->null_set = zds;
quick_out:
zds->invalid = false;
if (zink_batch_add_desc_set(batch, pg, zds))
batch->descs_used += pg->num_descriptors[type];
return zds;
}
void
zink_program_recycle_desc_set(struct zink_program *pg, uint32_t hash, struct zink_descriptor_set *zds)
zink_program_recycle_desc_set(struct zink_program *pg, struct zink_descriptor_set *zds)
{
/* if desc set is still in use by a batch, don't recache */
uint32_t refcount = p_atomic_read(&zds->reference.count);
if (refcount != 1)
return;
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets, hash, (void*)(uintptr_t)hash);
if (!he)
/* desc sets can be used multiple times in the same batch */
return;
_mesa_hash_table_remove(pg->desc_sets, he);
if (zds->valid)
_mesa_hash_table_insert_pre_hashed(pg->free_desc_sets, hash, (void*)(uintptr_t)hash, zds);
else
util_dynarray_append(&pg->alloc_desc_sets, struct zink_descriptor_set *, zds);
if (zds->hash) {
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets[zds->type], zds->hash, (void*)(uintptr_t)zds->hash);
if (!he)
/* desc sets can be used multiple times in the same batch */
return;
_mesa_hash_table_remove(pg->desc_sets[zds->type], he);
_mesa_hash_table_insert_pre_hashed(pg->free_desc_sets[zds->type], zds->hash, (void*)(uintptr_t)zds->hash, zds);
} else
util_dynarray_append(&pg->alloc_desc_sets[zds->type], struct zink_descriptor_set *, zds);
}
static void
@@ -850,6 +971,83 @@ zink_program_clear_desc_sets(struct zink_program *pg, struct hash_table *ht)
_mesa_hash_table_clear(ht, NULL);
}
uint32_t
zink_program_get_descriptor_usage(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type)
{
struct zink_shader *zs = NULL;
switch (stage) {
case PIPE_SHADER_VERTEX:
case PIPE_SHADER_TESS_CTRL:
case PIPE_SHADER_TESS_EVAL:
case PIPE_SHADER_GEOMETRY:
case PIPE_SHADER_FRAGMENT:
zs = ctx->gfx_stages[stage];
break;
case PIPE_SHADER_COMPUTE: {
zs = ctx->compute_stage;
break;
}
default:
unreachable("unknown shader type");
}
if (!zs)
return 0;
switch (type) {
case ZINK_DESCRIPTOR_TYPE_UBO:
return zs->ubos_used;
case ZINK_DESCRIPTOR_TYPE_SSBO:
return zs->ssbos_used;
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
return BITSET_TEST_RANGE(zs->nir->info.textures_used, 0, PIPE_MAX_SAMPLERS - 1);
case ZINK_DESCRIPTOR_TYPE_IMAGE:
return zs->nir->info.images_used;
default:
unreachable("unknown descriptor type!");
}
return 0;
}
static unsigned
get_num_bindings(struct zink_shader *zs, enum zink_descriptor_type type)
{
switch (type) {
case ZINK_DESCRIPTOR_TYPE_UBO:
case ZINK_DESCRIPTOR_TYPE_SSBO:
return zs->num_bindings[type];
default:
break;
}
unsigned num_bindings = 0;
for (int i = 0; i < zs->num_bindings[type]; i++)
num_bindings += zs->bindings[type][i].size;
return num_bindings;
}
unsigned
zink_program_num_bindings_typed(const struct zink_program *pg, enum zink_descriptor_type type, bool is_compute)
{
unsigned num_bindings = 0;
if (is_compute) {
struct zink_compute_program *comp = (void*)pg;
return get_num_bindings(comp->shader, type);
}
struct zink_gfx_program *prog = (void*)pg;
for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
if (prog->shaders[i])
num_bindings += get_num_bindings(prog->shaders[i], type);
}
return num_bindings;
}
unsigned
zink_program_num_bindings(const struct zink_program *pg, bool is_compute)
{
unsigned num_bindings = 0;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
num_bindings += zink_program_num_bindings_typed(pg, i, is_compute);
return num_bindings;
}
static void
gfx_program_remove_shader(struct zink_gfx_program *prog, struct zink_shader *shader)
{
@@ -867,9 +1065,6 @@ zink_destroy_gfx_program(struct zink_screen *screen,
if (prog->layout)
vkDestroyPipelineLayout(screen->dev, prog->layout, NULL);
if (prog->base.dsl)
vkDestroyDescriptorSetLayout(screen->dev, prog->base.dsl, NULL);
for (int i = 0; i < ZINK_SHADER_COUNT; ++i) {
if (prog->shaders[i])
gfx_program_remove_shader(prog, prog->shaders[i]);
@@ -888,14 +1083,22 @@ zink_destroy_gfx_program(struct zink_screen *screen,
}
zink_shader_cache_reference(screen, &prog->shader_cache, NULL);
zink_program_clear_desc_sets((struct zink_program*)prog, prog->base.desc_sets);
_mesa_hash_table_destroy(prog->base.desc_sets, NULL);
zink_program_clear_desc_sets((struct zink_program*)prog, prog->base.free_desc_sets);
_mesa_hash_table_destroy(prog->base.free_desc_sets, NULL);
if (prog->base.descpool)
vkDestroyDescriptorPool(screen->dev, prog->base.descpool, NULL);
bool null_destroy = false;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
util_dynarray_fini(&prog->base.alloc_desc_sets);
if (prog->base.num_descriptors[i] || !null_destroy) {
vkDestroyDescriptorSetLayout(screen->dev, prog->base.dsl[i], NULL);
vkDestroyDescriptorPool(screen->dev, prog->base.descpool[i], NULL);
}
null_destroy |= !prog->base.num_descriptors[i];
zink_program_clear_desc_sets(&prog->base, prog->base.desc_sets[i]);
_mesa_hash_table_destroy(prog->base.desc_sets[i], NULL);
zink_program_clear_desc_sets(&prog->base, prog->base.free_desc_sets[i]);
_mesa_hash_table_destroy(prog->base.free_desc_sets[i], NULL);
util_dynarray_fini(&prog->base.alloc_desc_sets[i]);
}
ralloc_free(prog);
}
@@ -907,9 +1110,6 @@ zink_destroy_compute_program(struct zink_screen *screen,
if (comp->layout)
vkDestroyPipelineLayout(screen->dev, comp->layout, NULL);
if (comp->base.dsl)
vkDestroyDescriptorSetLayout(screen->dev, comp->base.dsl, NULL);
if (comp->shader)
_mesa_set_remove_key(comp->shader->programs, comp);
if (comp->module)
@@ -924,14 +1124,22 @@ zink_destroy_compute_program(struct zink_screen *screen,
_mesa_hash_table_destroy(comp->pipelines, NULL);
zink_shader_cache_reference(screen, &comp->shader_cache, NULL);
zink_program_clear_desc_sets((struct zink_program*)comp, comp->base.desc_sets);
_mesa_hash_table_destroy(comp->base.desc_sets, NULL);
zink_program_clear_desc_sets((struct zink_program*)comp, comp->base.free_desc_sets);
_mesa_hash_table_destroy(comp->base.free_desc_sets, NULL);
if (comp->base.descpool)
vkDestroyDescriptorPool(screen->dev, comp->base.descpool, NULL);
bool null_destroy = false;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
util_dynarray_fini(&comp->base.alloc_desc_sets);
if (comp->base.num_descriptors[i] || !null_destroy) {
vkDestroyDescriptorSetLayout(screen->dev, comp->base.dsl[i], NULL);
vkDestroyDescriptorPool(screen->dev, comp->base.descpool[i], NULL);
}
null_destroy |= !comp->base.num_descriptors[i];
zink_program_clear_desc_sets((struct zink_program*)comp, comp->base.desc_sets[i]);
_mesa_hash_table_destroy(comp->base.desc_sets[i], NULL);
zink_program_clear_desc_sets((struct zink_program*)comp, comp->base.free_desc_sets[i]);
_mesa_hash_table_destroy(comp->base.free_desc_sets[i], NULL);
util_dynarray_fini(&comp->base.alloc_desc_sets[i]);
}
ralloc_free(comp);
}
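Two allocator details above are worth a note: allocate_desc_set() sizes each vkAllocateDescriptorSets call in powers of DESC_BUCKET_FACTOR so heavily-used programs amortize allocations, and a layout slot with zero descriptors of a type gets a single shared null set instead. The sizing rule, isolated:

/* sketch of the bucket growth in allocate_desc_set(): the largest power
 * of DESC_BUCKET_FACTOR strictly below descs_used, floored at
 * DESC_BUCKET_FACTOR (or 1 when the type has no descriptors) */
#define DESC_BUCKET_FACTOR 10

static unsigned
desc_bucket_size(unsigned num_descriptors, unsigned descs_used)
{
   if (!num_descriptors)
      return 1;
   unsigned bucket_size = DESC_BUCKET_FACTOR;
   for (unsigned f = DESC_BUCKET_FACTOR; f < descs_used; f *= DESC_BUCKET_FACTOR)
      bucket_size = f;
   return bucket_size;
}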

src/gallium/drivers/zink/zink_program.h

@@ -31,6 +31,7 @@
#include "util/u_inlines.h"
#include "zink_context.h"
#include "zink_compiler.h"
#include "zink_shader_keys.h"
struct zink_screen;
@@ -64,21 +65,24 @@ struct zink_shader_cache {
};
struct zink_descriptor_set {
enum zink_descriptor_type type;
struct pipe_reference reference; //incremented for batch usage
VkDescriptorSet desc_set;
bool valid;
uint32_t hash;
bool invalid;
struct zink_resource **resources;
};
struct zink_program {
struct pipe_reference reference;
struct hash_table *desc_sets;
struct hash_table *free_desc_sets;
struct util_dynarray alloc_desc_sets;
VkDescriptorPool descpool;
VkDescriptorSetLayout dsl;
unsigned num_descriptors;
struct hash_table *desc_sets[ZINK_DESCRIPTOR_TYPES];
struct hash_table *free_desc_sets[ZINK_DESCRIPTOR_TYPES];
struct util_dynarray alloc_desc_sets[ZINK_DESCRIPTOR_TYPES];
VkDescriptorPool descpool[ZINK_DESCRIPTOR_TYPES];
VkDescriptorSetLayout dsl[ZINK_DESCRIPTOR_TYPES];
unsigned num_descriptors[ZINK_DESCRIPTOR_TYPES];
struct zink_descriptor_set *null_set;
};
struct zink_gfx_program {
@@ -103,6 +107,42 @@ struct zink_compute_program {
struct hash_table *pipelines;
};
static inline enum zink_descriptor_type
zink_desc_type_from_vktype(VkDescriptorType type)
{
switch (type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
return ZINK_DESCRIPTOR_TYPE_UBO;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
return ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
return ZINK_DESCRIPTOR_TYPE_SSBO;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
return ZINK_DESCRIPTOR_TYPE_IMAGE;
default:
unreachable("unhandled descriptor type");
}
return 0;
}
static inline unsigned
zink_program_num_descriptors(const struct zink_program *pg)
{
unsigned num_descriptors = 0;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++)
num_descriptors += pg->num_descriptors[i];
return num_descriptors;
}
unsigned
zink_program_num_bindings_typed(const struct zink_program *pg, enum zink_descriptor_type type, bool is_compute);
unsigned
zink_program_num_bindings(const struct zink_program *pg, bool is_compute);
void
zink_update_gfx_program(struct zink_context *ctx, struct zink_gfx_program *prog);
@@ -123,6 +163,9 @@ zink_get_gfx_pipeline(struct zink_screen *screen,
void
zink_program_init(struct zink_context *ctx);
uint32_t
zink_program_get_descriptor_usage(struct zink_context *ctx, enum pipe_shader_type stage, enum zink_descriptor_type type);
void
debug_describe_zink_gfx_program(char* buf, const struct zink_gfx_program *ptr);
@@ -174,7 +217,8 @@ zink_program_allocate_desc_set(struct zink_context *ctx,
struct zink_batch *batch,
struct zink_program *pg,
uint32_t desc_hash,
enum zink_descriptor_type type,
bool *cache_hit);
void
zink_program_recycle_desc_set(struct zink_program *pg, uint32_t hash, struct zink_descriptor_set *zds);
zink_program_recycle_desc_set(struct zink_program *pg, struct zink_descriptor_set *zds);
#endif
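To summarize the lifecycle these fields implement: a descriptor set lives in desc_sets[type] while its state hash maps to it, moves to free_desc_sets[type] once no batch references it (where it can still satisfy a cache hit), and only returns to the raw alloc_desc_sets[type] pool in the unhashed null-set case. A condensed sketch of the routing in zink_program_recycle_desc_set(), omitting the refcount and in-use-table checks shown in the diff:

/* sketch: route a no-longer-referenced set back to a reuse pool */
static void
recycle_desc_set(struct zink_program *pg, struct zink_descriptor_set *zds)
{
   if (zds->hash)
      /* keep it warm: a future state with the same hash is a cache hit */
      _mesa_hash_table_insert_pre_hashed(pg->free_desc_sets[zds->type],
                                         zds->hash,
                                         (void*)(uintptr_t)zds->hash, zds);
   else
      /* never hashed (null-set path): back to the allocation pool */
      util_dynarray_append(&pg->alloc_desc_sets[zds->type],
                           struct zink_descriptor_set *, zds);
}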