zink: introduce descriptor states

This tracks the state of each descriptor type per shader stage so that
descriptor updating can be done incrementally.

It also makes the descriptor cache more robust by using the incremental
state data as the cache key.

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9348>
This commit is contained in:
Mike Blumenkrantz 2020-10-01 15:22:29 -04:00 committed by Marge Bot
parent 6d233e74ad
commit 67f9038b85
5 changed files with 308 additions and 40 deletions

View file

@ -51,6 +51,176 @@
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#define XXH_INLINE_ALL
#include "util/xxhash.h"
/* Fold the UBO bound at slot idx of the given stage into the running
 * descriptor-state hash: buffer pointer and size always contribute, and the
 * offset contributes only for plain (non-dynamic) VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
 * bindings — presumably because dynamic UBOs get their offset at bind time
 * rather than at descriptor-write time.
 */
static uint32_t
calc_descriptor_state_hash_ubo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   hash = XXH32(&ctx->ubos[shader][idx].buffer, sizeof(void*), hash);
   hash = XXH32(&ctx->ubos[shader][idx].buffer_size, sizeof(unsigned), hash);
   if (zs->bindings[ZINK_DESCRIPTOR_TYPE_UBO][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
      hash = XXH32(&ctx->ubos[shader][idx].buffer_offset, sizeof(unsigned), hash);
   return hash;
}
/* Fold the SSBO bound at slot idx of the given stage into the running
 * descriptor-state hash.  The entire pipe_shader_buffer (buffer pointer,
 * offset, size) is the state, so hash it in one shot.
 */
static uint32_t
calc_descriptor_state_hash_ssbo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   return XXH32(&ctx->ssbos[shader][idx], sizeof(struct pipe_shader_buffer), hash);
}
/* Fold the sampler-view binding i (covering 'size' consecutive slots starting
 * at idx) of the given stage into the running descriptor-state hash.
 * The hash is order-sensitive: each slot contributes in sequence, so two
 * different binding layouts cannot alias.  Null slots hash a fixed null
 * VkDescriptorImageInfo so that "empty" is itself a distinct state.
 */
static uint32_t
calc_descriptor_state_hash_sampler(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   void *hash_data;
   size_t data_size;
   for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW][i].size; k++) {
      VkDescriptorImageInfo info;
      if (!ctx->sampler_views[shader][idx + k]) {
         /* no view bound: hash a canonical null descriptor and move on */
         VkDescriptorImageInfo null_info = {VK_NULL_HANDLE, VK_NULL_HANDLE, VK_IMAGE_LAYOUT_UNDEFINED};
         hash_data = &null_info;
         data_size = sizeof(VkDescriptorImageInfo);
         hash = XXH32(hash_data, data_size, hash);
         continue;
      }
      /* resource identity always contributes */
      hash = XXH32(&ctx->sampler_views[shader][idx + k]->texture, sizeof(void*), hash);
      if (ctx->sampler_views[shader][idx + k]->target == PIPE_BUFFER) {
         /* buffer views: the sampled range is the remaining state */
         hash_data = &ctx->sampler_views[shader][idx + k]->u.buf;
         data_size = sizeof(ctx->sampler_views[shader][idx + k]->u.buf);
         hash = XXH32(hash_data, data_size, hash);
      } else {
         /* image views: build the same VkDescriptorImageInfo the descriptor
          * update would use and hash that */
         struct zink_sampler_state *sampler_state = ctx->sampler_states[shader][idx + k];
         info.sampler = sampler_state ? sampler_state->sampler : VK_NULL_HANDLE;
         info.imageView = zink_sampler_view(ctx->sampler_views[shader][idx + k])->image_view;
         /* combined depth/stencil formats must be sampled in the read-only
          * depth/stencil layout; everything else uses shader-read-only */
         if (util_format_is_depth_and_stencil(ctx->sampler_views[shader][idx + k]->format))
            info.imageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
         else
            info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
         hash_data = &info;
         data_size = sizeof(VkDescriptorImageInfo);
         hash = XXH32(hash_data, data_size, hash);
      }
   }
   return hash;
}
/* Fold the shader-image binding i (covering 'size' consecutive slots starting
 * at idx) of the given stage into the running descriptor-state hash.
 * Buffer images hash the resource pointer plus the view range; texture images
 * hash a VkDescriptorImageInfo mirroring what the descriptor update writes.
 */
static uint32_t
calc_descriptor_state_hash_image(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
   void *hash_data;
   size_t data_size;
   for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_IMAGE][i].size; k++) {
      /* fully initialize: the image path below never writes info.sampler, so
       * hashing an uninitialized field would make the hash nondeterministic
       * (reading indeterminate memory is also UB) */
      VkDescriptorImageInfo info = {VK_NULL_HANDLE, VK_NULL_HANDLE, VK_IMAGE_LAYOUT_UNDEFINED};
      if (!ctx->image_views[shader][idx + k].base.resource) {
         /* hash the null descriptor and keep scanning the remaining slots;
          * bailing out early (the old 'break') made states that differ only
          * after an empty slot hash identically, which can alias entries in
          * the descriptor-set cache.  This also matches
          * calc_descriptor_state_hash_sampler. */
         hash = XXH32(&info, sizeof(VkDescriptorImageInfo), hash);
         continue;
      }
      struct zink_resource *res = zink_resource(ctx->image_views[shader][idx + k].base.resource);
      if (res->base.target == PIPE_BUFFER) {
         /* buffer images: resource identity + view range */
         hash = XXH32(&ctx->image_views[shader][idx + k].base.resource, sizeof(void*), hash);
         hash_data = &ctx->image_views[shader][idx + k].base.u.buf;
         data_size = sizeof(ctx->image_views[shader][idx + k].base.u.buf);
         hash = XXH32(hash_data, data_size, hash);
      } else {
         /* storage images are always accessed in VK_IMAGE_LAYOUT_GENERAL */
         info.imageView = ctx->image_views[shader][idx + k].surface->image_view;
         info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
         hash_data = &info;
         data_size = sizeof(VkDescriptorImageInfo);
         hash = XXH32(hash_data, data_size, hash);
      }
   }
   return hash;
}
/* Recompute the descriptor-state hash of one descriptor type for one shader
 * stage by walking every binding of that type and dispatching to the
 * per-type hash helper.  Returns 0 when the stage uses no descriptors of
 * this type.
 */
static uint32_t
update_descriptor_stage_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
   struct zink_shader *zs = shader == PIPE_SHADER_COMPUTE ? ctx->compute_stage : ctx->gfx_stages[shader];

   /* nothing of this type is accessed by the stage -> empty state */
   if (!zink_program_get_descriptor_usage(ctx, shader, type))
      return 0;

   uint32_t state = 0;
   for (int b = 0; b < zs->num_bindings[type]; b++) {
      int slot = zs->bindings[type][b].index;
      switch (type) {
      case ZINK_DESCRIPTOR_TYPE_UBO:
         state = calc_descriptor_state_hash_ubo(ctx, zs, shader, b, slot, state);
         break;
      case ZINK_DESCRIPTOR_TYPE_SSBO:
         state = calc_descriptor_state_hash_ssbo(ctx, zs, shader, b, slot, state);
         break;
      case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
         state = calc_descriptor_state_hash_sampler(ctx, zs, shader, b, slot, state);
         break;
      case ZINK_DESCRIPTOR_TYPE_IMAGE:
         state = calc_descriptor_state_hash_image(ctx, zs, shader, b, slot, state);
         break;
      default:
         unreachable("unknown descriptor type");
      }
   }
   return state;
}
/* Bring the combined (pipeline-level) descriptor state of 'type' up to date.
 * For compute this is just the compute stage's hash; for gfx each stage's
 * incremental hash is refreshed if stale and then all valid stage hashes are
 * folded together, in stage order, into the combined hash.
 */
static void
update_descriptor_state(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute)
{
   /* we shouldn't be calling this if we don't have to */
   assert(!ctx->descriptor_states[is_compute].valid[type]);

   if (is_compute)
      /* just update compute state */
      ctx->descriptor_states[is_compute].state[type] = update_descriptor_stage_state(ctx, PIPE_SHADER_COMPUTE, type);
   else {
      /* update all gfx states */
      for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
         /* this is the incremental update for the shader stage */
         if (!ctx->gfx_descriptor_states[i].valid[type] && ctx->gfx_stages[i]) {
            ctx->gfx_descriptor_states[i].state[type] = update_descriptor_stage_state(ctx, i, type);
            ctx->gfx_descriptor_states[i].valid[type] = true;
         }
         if (ctx->gfx_descriptor_states[i].valid[type]) {
            /* this is the overall state update for the descriptor set hash;
             * the accumulator relies on invalidate_descriptor_state() having
             * zeroed state[type] when valid[type] was cleared — NOTE(review):
             * verify no other path clears valid without zeroing state */
            ctx->descriptor_states[is_compute].state[type] = XXH32(&ctx->gfx_descriptor_states[i].state[type],
                                                                   sizeof(uint32_t),
                                                                   ctx->descriptor_states[is_compute].state[type]);
         }
      }
   }
   ctx->descriptor_states[is_compute].valid[type] = true;
}
/* Ensure every descriptor-type state hash for the given pipeline kind
 * (gfx or compute) is current before descriptor sets are looked up.
 */
void
zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute)
{
   for (unsigned type = 0; type < ZINK_DESCRIPTOR_TYPES; type++) {
      if (ctx->descriptor_states[is_compute].valid[type])
         continue;
      update_descriptor_state(ctx, type, is_compute);
   }
}
static void
invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
if (shader != PIPE_SHADER_COMPUTE) {
ctx->gfx_descriptor_states[shader].valid[type] = false;
ctx->gfx_descriptor_states[shader].state[type] = 0;
}
ctx->descriptor_states[shader == PIPE_SHADER_COMPUTE].valid[type] = false;
ctx->descriptor_states[shader == PIPE_SHADER_COMPUTE].state[type] = 0;
}
static void
destroy_batch(struct zink_context* ctx, struct zink_batch* batch)
{
@ -186,11 +356,6 @@ wrap_needs_border_color(unsigned wrap)
wrap == PIPE_TEX_WRAP_MIRROR_CLAMP || wrap == PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER;
}
struct zink_sampler_state {
VkSampler sampler;
bool custom_border_color;
};
static void *
zink_create_sampler_state(struct pipe_context *pctx,
const struct pipe_sampler_state *state)
@ -269,12 +434,16 @@ zink_bind_sampler_states(struct pipe_context *pctx,
void **samplers)
{
struct zink_context *ctx = zink_context(pctx);
bool update = false;
for (unsigned i = 0; i < num_samplers; ++i) {
VkSampler *sampler = samplers[i];
update |= ctx->sampler_states[shader][start_slot + i] != samplers[i];
ctx->sampler_states[shader][start_slot + i] = sampler;
ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
}
ctx->num_samplers[shader] = start_slot + num_samplers;
if (update)
invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW);
}
static void
@ -548,6 +717,7 @@ zink_set_constant_buffer(struct pipe_context *pctx,
const struct pipe_constant_buffer *cb)
{
struct zink_context *ctx = zink_context(pctx);
bool update = false;
if (cb) {
struct pipe_resource *buffer = cb->buffer;
@ -558,6 +728,10 @@ zink_set_constant_buffer(struct pipe_context *pctx,
screen->info.props.limits.minUniformBufferOffsetAlignment,
cb->user_buffer, &offset, &buffer);
}
struct zink_resource *res = zink_resource(ctx->ubos[shader][index].buffer);
update |= (index && ctx->ubos[shader][index].buffer_offset != offset) ||
!!res != !!buffer || (res && res->buffer != zink_resource(buffer)->buffer) ||
ctx->ubos[shader][index].buffer_size != cb->buffer_size;
if (take_ownership) {
pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
@ -572,11 +746,15 @@ zink_set_constant_buffer(struct pipe_context *pctx,
if (cb->user_buffer)
pipe_resource_reference(&buffer, NULL);
} else {
update = !!ctx->ubos[shader][index].buffer;
pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
ctx->ubos[shader][index].buffer_offset = 0;
ctx->ubos[shader][index].buffer_size = 0;
ctx->ubos[shader][index].user_buffer = NULL;
}
if (update)
invalidate_descriptor_state(ctx, shader, ZINK_DESCRIPTOR_TYPE_UBO);
}
static void
@ -587,6 +765,7 @@ zink_set_shader_buffers(struct pipe_context *pctx,
unsigned writable_bitmask)
{
struct zink_context *ctx = zink_context(pctx);
bool update = false;
unsigned modified_bits = u_bit_consecutive(start_slot, count);
ctx->writable_ssbos[p_stage] &= ~modified_bits;
@ -601,12 +780,17 @@ zink_set_shader_buffers(struct pipe_context *pctx,
ssbo->buffer_size = MIN2(buffers[i].buffer_size, res->size - ssbo->buffer_offset);
util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
ssbo->buffer_offset + ssbo->buffer_size);
update = true;
} else {
update |= !!ssbo->buffer;
pipe_resource_reference(&ssbo->buffer, NULL);
ssbo->buffer_offset = 0;
ssbo->buffer_size = 0;
}
}
if (update)
invalidate_descriptor_state(ctx, p_stage, ZINK_DESCRIPTOR_TYPE_SSBO);
}
static void
@ -633,7 +817,7 @@ zink_set_shader_images(struct pipe_context *pctx,
const struct pipe_image_view *images)
{
struct zink_context *ctx = zink_context(pctx);
bool update = false;
for (unsigned i = 0; i < count; i++) {
struct zink_image_view *image_view = &ctx->image_views[p_stage][start_slot + i];
if (images && images[i].resource) {
@ -654,12 +838,19 @@ zink_set_shader_images(struct pipe_context *pctx,
image_view->surface = zink_surface(pctx->create_surface(pctx, &res->base, &tmpl));
assert(image_view->surface);
}
} else if (image_view->base.resource)
unbind_shader_image(ctx, p_stage, start_slot + i);
}
update = true;
} else if (image_view->base.resource) {
update |= !!image_view->base.resource;
for (unsigned i = 0; i < unbind_num_trailing_slots; i++)
unbind_shader_image(ctx, p_stage, start_slot + i);
}
}
for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
update |= !!ctx->image_views[p_stage][start_slot + count + i].base.resource;
unbind_shader_image(ctx, p_stage, start_slot + count + i);
}
if (update)
invalidate_descriptor_state(ctx, p_stage, ZINK_DESCRIPTOR_TYPE_IMAGE);
}
static void
@ -673,18 +864,21 @@ zink_set_sampler_views(struct pipe_context *pctx,
struct zink_context *ctx = zink_context(pctx);
unsigned i;
bool update = false;
for (i = 0; i < num_views; ++i) {
struct pipe_sampler_view *pview = views ? views[i] : NULL;
pipe_sampler_view_reference(
&ctx->sampler_views[shader_type][start_slot + i],
pview);
update |= ctx->sampler_views[shader_type][start_slot + i] != pview;
pipe_sampler_view_reference(&ctx->sampler_views[shader_type][start_slot + i], pview);
}
for (; i < num_views + unbind_num_trailing_slots; ++i) {
update |= !!ctx->sampler_views[shader_type][start_slot + i];
pipe_sampler_view_reference(
&ctx->sampler_views[shader_type][start_slot + i],
NULL);
}
ctx->num_sampler_views[shader_type] = start_slot + num_views;
if (update)
invalidate_descriptor_state(ctx, shader_type, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW);
}
static void

View file

@ -27,6 +27,7 @@
#include "zink_clear.h"
#include "zink_pipeline.h"
#include "zink_batch.h"
#include "zink_compiler.h"
#include "pipe/p_context.h"
#include "pipe/p_state.h"
@ -56,6 +57,12 @@ enum zink_blit_flags {
ZINK_BLIT_SAVE_TEXTURES = 1 << 3,
};
/* CSO for pipe sampler states; moved here from zink_context.c so the
 * descriptor-state hashing code can read the VkSampler handle. */
struct zink_sampler_state {
   VkSampler sampler;
   /* NOTE(review): presumably set when the sampler was created with a
    * custom border color — confirm at the creation site */
   bool custom_border_color;
};
struct zink_sampler_view {
struct pipe_sampler_view base;
union {
@ -107,6 +114,12 @@ struct zink_viewport_state {
#define ZINK_DEFAULT_MAX_DESCS 5000
/* hashes of all the named types in a given state */
struct zink_descriptor_state {
   /* whether state[type] is current; cleared by invalidate, set by update */
   bool valid[ZINK_DESCRIPTOR_TYPES];
   /* xxhash of everything bound for that descriptor type */
   uint32_t state[ZINK_DESCRIPTOR_TYPES];
};
struct zink_context {
struct pipe_context base;
struct slab_child_pool transfer_pool;
@ -126,6 +139,7 @@ struct zink_context {
struct pipe_shader_buffer ssbos[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
uint32_t writable_ssbos[PIPE_SHADER_TYPES];
struct zink_image_view image_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
struct pipe_framebuffer_state fb_state;
struct zink_vertex_elements_state *element_state;
@ -138,6 +152,9 @@ struct zink_context {
struct hash_table *program_cache;
struct zink_gfx_program *curr_program;
struct zink_descriptor_state gfx_descriptor_states[ZINK_SHADER_COUNT]; // keep incremental hashes here
struct zink_descriptor_state descriptor_states[2]; // gfx, compute
struct zink_shader *compute_stage;
struct zink_compute_pipeline_state compute_pipeline_state;
struct hash_table *compute_program_cache;
@ -304,4 +321,8 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info);
void
zink_copy_buffer(struct zink_context *ctx, struct zink_batch *batch, struct zink_resource *dst, struct zink_resource *src,
unsigned dst_offset, unsigned src_offset, unsigned size);
void
zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute);
#endif

View file

@ -224,12 +224,12 @@ get_gfx_program(struct zink_context *ctx)
}
static struct zink_descriptor_set *
get_descriptor_set(struct zink_context *ctx, bool is_compute, uint32_t desc_hash, enum zink_descriptor_type type, bool *cache_hit)
get_descriptor_set(struct zink_context *ctx, bool is_compute, enum zink_descriptor_type type, bool *cache_hit)
{
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
struct zink_batch *batch = is_compute ? &ctx->compute_batch : zink_curr_batch(ctx);
zink_batch_reference_program(batch, pg);
return zink_program_allocate_desc_set(ctx, batch, pg, desc_hash, type, cache_hit);
return zink_program_allocate_desc_set(ctx, batch, pg, type, is_compute, cache_hit);
}
struct zink_transition {
@ -338,11 +338,11 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
else
stages = &ctx->gfx_stages[0];
zink_context_update_descriptor_states(ctx, is_compute);
struct zink_transition transitions[num_bindings];
int num_transitions = 0;
struct set *ht = _mesa_set_create(NULL, transition_hash, transition_equals);
uint32_t desc_hash[ZINK_DESCRIPTOR_TYPES] = {0};
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
@ -375,7 +375,6 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
} else
buffer_infos[h][num_buffer_info[h]].offset = res ? ctx->ubos[stage][index].buffer_offset : 0;
buffer_infos[h][num_buffer_info[h]].range = res ? ctx->ubos[stage][index].buffer_size : VK_WHOLE_SIZE;
desc_hash[h] = XXH32(&buffer_infos[h][num_buffer_info[h]], sizeof(VkDescriptorBufferInfo), desc_hash[h]);
if (res)
add_transition(res, 0, VK_ACCESS_UNIFORM_READ_BIT, stage, &transitions[num_transitions], &num_transitions, ht);
wds[h][num_wds[h]].pBufferInfo = buffer_infos[h] + num_buffer_info[h];
@ -405,7 +404,6 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
buffer_infos[h][num_buffer_info[h]].offset = 0;
buffer_infos[h][num_buffer_info[h]].range = VK_WHOLE_SIZE;
}
desc_hash[h] = XXH32(&buffer_infos[h][num_buffer_info[h]], sizeof(VkDescriptorBufferInfo), desc_hash[h]);
wds[h][num_wds[h]].pBufferInfo = buffer_infos[h] + num_buffer_info[h];
++num_buffer_info[h];
} else {
@ -426,7 +424,6 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
break;
if (res->base.target == PIPE_BUFFER) {
wds[h][num_wds[h]].pTexelBufferView = &sampler_view->buffer_view;
desc_hash[h] = XXH32(&sampler_view->base.u.buf, sizeof(sampler_view->base.u.buf), desc_hash[h]);
} else {
imageview = sampler_view->image_view;
layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@ -450,7 +447,6 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
break;
if (image_view->base.resource->target == PIPE_BUFFER) {
wds[h][num_wds[h]].pTexelBufferView = &image_view->buffer_view;
desc_hash[h] = XXH32(&image_view->base.u.buf, sizeof(image_view->base.u.buf), desc_hash[h]);
} else {
imageview = image_view->surface->image_view;
layout = VK_IMAGE_LAYOUT_GENERAL;
@ -483,7 +479,6 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
wds[h][num_wds[h]].pTexelBufferView = &buffer_view[0];
desc_hash[h] = XXH32(&buffer_view[0], sizeof(VkBufferView), desc_hash[h]);
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
@ -493,7 +488,6 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
image_infos[h][num_image_info[h]].sampler = sampler;
if (!k)
wds[h][num_wds[h]].pImageInfo = image_infos[h] + num_image_info[h];
desc_hash[h] = XXH32(&image_infos[h][num_image_info[h]], sizeof(VkDescriptorImageInfo), desc_hash[h]);
++num_image_info[h];
break;
default:
@ -507,10 +501,8 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
image_infos[h][num_image_info[h]].sampler = sampler;
if (!k)
wds[h][num_wds[h]].pImageInfo = image_infos[h] + num_image_info[h];
desc_hash[h] = XXH32(&image_infos[h][num_image_info[h]], sizeof(VkDescriptorImageInfo), desc_hash[h]);
++num_image_info[h];
} else
desc_hash[h] = XXH32(&res->buffer, sizeof(VkBuffer), desc_hash[h]);
}
}
}
@ -543,7 +535,7 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES];
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
if (pg->dsl[h])
zds[h] = get_descriptor_set(ctx, is_compute, desc_hash[h], h, &cache_hit[h]);
zds[h] = get_descriptor_set(ctx, is_compute, h, &cache_hit[h]);
else
zds[h] = NULL;
}

View file

@ -36,6 +36,9 @@
#include "util/u_memory.h"
#include "tgsi/tgsi_from_mesa.h"
#define XXH_INLINE_ALL
#include "util/xxhash.h"
struct gfx_pipeline_cache_entry {
struct zink_gfx_pipeline_state state;
VkPipeline pipeline;
@ -625,6 +628,38 @@ zink_update_gfx_program(struct zink_context *ctx, struct zink_gfx_program *prog)
update_shader_modules(ctx, ctx->gfx_stages, prog, true);
}
/* Hash-table comparison callback for struct zink_descriptor_state_key:
 * two keys match when the same set of stages exists and every existing
 * stage carries the same state hash.
 */
static bool
desc_state_equal(const void *a, const void *b)
{
   const struct zink_descriptor_state_key *a_k = (void*)a;
   const struct zink_descriptor_state_key *b_k = (void*)b;

   for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
      if (a_k->exists[i] != b_k->exists[i])
         return false;
      /* exists[] already matched above, so checking one side suffices */
      if (a_k->exists[i] && a_k->state[i] != b_k->state[i])
         return false;
   }
   return true;
}
/* Hash-table hash callback for struct zink_descriptor_state_key: folds the
 * state hash of every existing stage together in stage order.
 */
static uint32_t
desc_state_hash(const void *key)
{
   const struct zink_descriptor_state_key *d_key = (void*)key;
   uint32_t hash = 0;
   /* this is a compute shader: populate_zds_key() puts compute state in slot 0
    * and clears all other exists[] entries.  NOTE(review): this relies on gfx
    * keys always having a fragment stage — confirm that assumption holds for
    * all gfx pipelines */
   if (!d_key->exists[PIPE_SHADER_FRAGMENT])
      return d_key->state[0];
   for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
      if (d_key->exists[i])
         hash = XXH32(&d_key->state[i], sizeof(uint32_t), hash);
   }
   return hash;
}
struct zink_gfx_program *
zink_create_gfx_program(struct zink_context *ctx,
struct zink_shader *stages[ZINK_SHADER_COUNT])
@ -665,11 +700,11 @@ zink_create_gfx_program(struct zink_context *ctx,
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
if (!prog->base.num_descriptors[i])
continue;
prog->base.desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
prog->base.desc_sets[i] = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
if (!prog->base.desc_sets[i])
goto fail;
prog->base.free_desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
prog->base.free_desc_sets[i] = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
if (!prog->base.free_desc_sets[i])
goto fail;
@ -777,11 +812,11 @@ zink_create_compute_program(struct zink_context *ctx, struct zink_shader *shader
for (unsigned i = 0; i < ZINK_DESCRIPTOR_TYPES; i++) {
if (!comp->base.num_descriptors[i])
continue;
comp->base.desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
comp->base.desc_sets[i] = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
if (!comp->base.desc_sets[i])
goto fail;
comp->base.free_desc_sets[i] = _mesa_hash_table_create(NULL, NULL, _mesa_key_pointer_equal);
comp->base.free_desc_sets[i] = _mesa_hash_table_create(NULL, desc_state_hash, desc_state_equal);
if (!comp->base.free_desc_sets[i])
goto fail;
@ -857,12 +892,28 @@ allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, enum zink
return alloc;
}
/* Build the descriptor-cache lookup key for 'type' from the context's current
 * state hashes: compute pipelines occupy slot 0 only, gfx pipelines copy the
 * per-stage incremental hashes (with exists[] mirroring validity).
 */
static void
populate_zds_key(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute,
                 struct zink_descriptor_state_key *key) {
   if (is_compute) {
      /* compute state lives entirely in slot 0 */
      key->exists[0] = true;
      key->state[0] = ctx->descriptor_states[is_compute].state[type];
      for (unsigned stage = 1; stage < ZINK_SHADER_COUNT; stage++)
         key->exists[stage] = false;
   } else {
      for (unsigned stage = 0; stage < ZINK_SHADER_COUNT; stage++) {
         key->exists[stage] = ctx->gfx_descriptor_states[stage].valid[type];
         key->state[stage] = ctx->gfx_descriptor_states[stage].state[type];
      }
   }
}
struct zink_descriptor_set *
zink_program_allocate_desc_set(struct zink_context *ctx,
struct zink_batch *batch,
struct zink_program *pg,
uint32_t hash,
enum zink_descriptor_type type,
bool is_compute,
bool *cache_hit)
{
*cache_hit = false;
@ -870,9 +921,12 @@ zink_program_allocate_desc_set(struct zink_context *ctx,
struct zink_screen *screen = zink_screen(ctx->base.screen);
unsigned descs_used = 1;
assert(type < ZINK_DESCRIPTOR_TYPES);
uint32_t hash = pg->num_descriptors[type] ? ctx->descriptor_states[is_compute].state[type] : 0;
struct zink_descriptor_state_key key;
populate_zds_key(ctx, type, is_compute, &key);
if (pg->num_descriptors[type]) {
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets[type], hash, (void*)(uintptr_t)hash);
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets[type], hash, &key);
bool recycled = false;
if (he) {
zds = (void*)he->data;
@ -882,7 +936,7 @@ zink_program_allocate_desc_set(struct zink_context *ctx,
assert(!zds->invalid);
}
if (!he) {
he = _mesa_hash_table_search_pre_hashed(pg->free_desc_sets[type], hash, (void*)(uintptr_t)hash);
he = _mesa_hash_table_search_pre_hashed(pg->free_desc_sets[type], hash, &key);
recycled = true;
}
if (he) {
@ -921,7 +975,7 @@ zink_program_allocate_desc_set(struct zink_context *ctx,
if (descs_used + pg->num_descriptors[type] > ZINK_DEFAULT_MAX_DESCS) {
batch = zink_flush_batch(ctx, batch);
zink_batch_reference_program(batch, pg);
return zink_program_allocate_desc_set(ctx, batch, pg, hash, type, cache_hit);
return zink_program_allocate_desc_set(ctx, batch, pg, type, is_compute, cache_hit);
}
} else {
zds = pg->null_set;
@ -934,8 +988,9 @@ zink_program_allocate_desc_set(struct zink_context *ctx,
zds = allocate_desc_set(screen, pg, type, descs_used);
out:
zds->hash = hash;
populate_zds_key(ctx, type, is_compute, &zds->key);
if (pg->num_descriptors[type])
_mesa_hash_table_insert_pre_hashed(pg->desc_sets[type], hash, (void*)(uintptr_t)hash, zds);
_mesa_hash_table_insert_pre_hashed(pg->desc_sets[type], hash, &zds->key, zds);
else
pg->null_set = zds;
quick_out:
@ -953,14 +1008,14 @@ zink_program_recycle_desc_set(struct zink_program *pg, struct zink_descriptor_se
uint32_t refcount = p_atomic_read(&zds->reference.count);
if (refcount != 1)
return;
if (zds->hash) {
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets[zds->type], zds->hash, (void*)(uintptr_t)zds->hash);
if (!zds->invalid) {
struct hash_entry *he = _mesa_hash_table_search_pre_hashed(pg->desc_sets[zds->type], zds->hash, &zds->key);
if (!he)
/* desc sets can be used multiple times in the same batch */
return;
_mesa_hash_table_remove(pg->desc_sets[zds->type], he);
_mesa_hash_table_insert_pre_hashed(pg->free_desc_sets[zds->type], zds->hash, (void*)(uintptr_t)zds->hash, zds);
_mesa_hash_table_insert_pre_hashed(pg->free_desc_sets[zds->type], zds->hash, &zds->key, zds);
} else
util_dynarray_append(&pg->alloc_desc_sets[zds->type], struct zink_descriptor_set *, zds);
}

View file

@ -64,12 +64,18 @@ struct zink_shader_cache {
struct hash_table *shader_cache;
};
/* cache key for descriptor sets: one slot per gfx stage (compute uses slot 0
 * only); compared with desc_state_equal / hashed with desc_state_hash */
struct zink_descriptor_state_key {
   /* whether the stage participates; state[i] is only meaningful if true */
   bool exists[ZINK_SHADER_COUNT];
   /* per-stage descriptor-state hash from zink_context */
   uint32_t state[ZINK_SHADER_COUNT];
};
struct zink_descriptor_set {
   enum zink_descriptor_type type;
   struct pipe_reference reference; //incremented for batch usage
   VkDescriptorSet desc_set;
   /* pre-computed hash of 'key'; used for pre-hashed table lookups so the
    * key never needs rehashing on recycle */
   uint32_t hash;
   /* NOTE(review): recycle skips re-caching when set — presumably flagged
    * when a referenced resource is destroyed; confirm where it is written */
   bool invalid;
   /* the descriptor-state key this set was allocated for */
   struct zink_descriptor_state_key key;
   struct zink_resource **resources;
};
@ -217,8 +223,8 @@ struct zink_descriptor_set *
zink_program_allocate_desc_set(struct zink_context *ctx,
struct zink_batch *batch,
struct zink_program *pg,
uint32_t desc_hash,
enum zink_descriptor_type type,
bool is_compute,
bool *cache_hit);
void
zink_program_recycle_desc_set(struct zink_program *pg, struct zink_descriptor_set *zds);