zink: move descriptor state management to descriptors.c

Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10177>
This commit is contained in:
Mike Blumenkrantz 2020-12-31 09:15:37 -05:00 committed by Marge Bot
parent 8c6a64c9b0
commit 344c4ab580
4 changed files with 221 additions and 218 deletions

View file

@ -64,53 +64,6 @@ incr_curr_batch(struct zink_context *ctx)
incr_curr_batch(ctx);
}
/* return the zink_resource currently bound to the given descriptor slot
 * (descriptor type, shader stage, binding index), or NULL when the slot is
 * unbound; an unknown descriptor type is a programming error
 */
static struct zink_resource *
get_resource_for_descriptor(struct zink_context *ctx, enum zink_descriptor_type type, enum pipe_shader_type shader, int idx)
{
switch (type) {
case ZINK_DESCRIPTOR_TYPE_UBO:
return zink_resource(ctx->ubos[shader][idx].buffer);
case ZINK_DESCRIPTOR_TYPE_SSBO:
return zink_resource(ctx->ssbos[shader][idx].buffer);
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
/* sampler view slots may legitimately be empty */
return ctx->sampler_views[shader][idx] ? zink_resource(ctx->sampler_views[shader][idx]->texture) : NULL;
case ZINK_DESCRIPTOR_TYPE_IMAGE:
return zink_resource(ctx->image_views[shader][idx].base.resource);
default:
break;
}
unreachable("unknown descriptor type!");
/* not reached; keeps compilers without unreachable() knowledge happy */
return NULL;
}
/* fold one UBO binding into the running descriptor-state hash: the resource
 * object pointer and the bound size always contribute; the buffer offset is
 * only hashed for non-dynamic uniform buffers (presumably because dynamic
 * offsets are applied at bind time instead — confirm against the descriptor
 * update path)
 */
static uint32_t
calc_descriptor_state_hash_ubo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
struct zink_resource *res = get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_UBO, shader, idx);
/* hash the object pointer value itself; NULL stands in for an unbound slot */
struct zink_resource_object *obj = res ? res->obj : NULL;
hash = XXH32(&obj, sizeof(void*), hash);
void *hash_data = &ctx->ubos[shader][idx].buffer_size;
size_t data_size = sizeof(unsigned);
hash = XXH32(hash_data, data_size, hash);
if (zs->bindings[ZINK_DESCRIPTOR_TYPE_UBO][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
hash = XXH32(&ctx->ubos[shader][idx].buffer_offset, sizeof(unsigned), hash);
return hash;
}
/* fold one SSBO binding into the running descriptor-state hash: the resource
 * object pointer always contributes; offset and size only contribute when a
 * buffer is actually bound (unbound slots all hash identically)
 */
static uint32_t
calc_descriptor_state_hash_ssbo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
struct zink_resource *res = get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_SSBO, shader, idx);
struct zink_resource_object *obj = res ? res->obj : NULL;
hash = XXH32(&obj, sizeof(void*), hash);
if (obj) {
struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][idx];
hash = XXH32(&ssbo->buffer_offset, sizeof(ssbo->buffer_offset), hash);
hash = XXH32(&ssbo->buffer_size, sizeof(ssbo->buffer_size), hash);
}
return hash;
}
static void
calc_descriptor_hash_sampler_state(struct zink_sampler_state *sampler_state)
{
@ -119,166 +72,6 @@ calc_descriptor_hash_sampler_state(struct zink_sampler_state *sampler_state)
sampler_state->hash = XXH32(hash_data, data_size, 0);
}
/* return the precomputed hash for a sampler view, or 0 when no view is bound;
 * buffer targets carry their hash on the buffer view, all others on the
 * image view
 */
static inline uint32_t
get_sampler_view_hash(const struct zink_sampler_view *sampler_view)
{
   if (!sampler_view)
      return 0;
   if (sampler_view->base.target == PIPE_BUFFER)
      return sampler_view->buffer_view->hash;
   return sampler_view->image_view->hash;
}
/* return the precomputed hash for a shader image view, or 0 when the slot is
 * unbound; buffer resources carry their hash on the buffer view, all others
 * on the surface
 */
static inline uint32_t
get_image_view_hash(const struct zink_image_view *image_view)
{
   if (!image_view || !image_view->base.resource)
      return 0;
   if (image_view->base.resource->target == PIPE_BUFFER)
      return image_view->buffer_view->hash;
   return image_view->surface->hash;
}
/* return the hash for a sampler view, substituting the screen's precomputed
 * null-descriptor hash when the view has none, so unbound slots still
 * contribute a stable value to the descriptor state hash
 *
 * fix: the original evaluated get_sampler_view_hash() twice per call;
 * compute it once and reuse the result
 */
uint32_t
zink_get_sampler_view_hash(struct zink_context *ctx, struct zink_sampler_view *sampler_view, bool is_buffer)
{
   uint32_t hash = get_sampler_view_hash(sampler_view);
   if (hash)
      return hash;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   return is_buffer ? screen->null_descriptor_hashes.buffer_view :
                      screen->null_descriptor_hashes.image_view;
}
/* return the hash for a shader image view, substituting the screen's
 * precomputed null-descriptor hash when the view has none, so unbound slots
 * still contribute a stable value to the descriptor state hash
 *
 * fix: the original evaluated get_image_view_hash() twice per call;
 * compute it once and reuse the result
 */
uint32_t
zink_get_image_view_hash(struct zink_context *ctx, struct zink_image_view *image_view, bool is_buffer)
{
   uint32_t hash = get_image_view_hash(image_view);
   if (hash)
      return hash;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   return is_buffer ? screen->null_descriptor_hashes.buffer_view :
                      screen->null_descriptor_hashes.image_view;
}
/* fold a sampler-view binding (possibly an array of size elements) into the
 * running descriptor-state hash: each element contributes its view hash (or
 * the null-descriptor fallback), and non-buffer elements additionally
 * contribute the bound sampler state's hash
 */
static uint32_t
calc_descriptor_state_hash_sampler(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW][i].size; k++) {
struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][idx + k]);
bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, i);
uint32_t val = zink_get_sampler_view_hash(ctx, sampler_view, is_buffer);
hash = XXH32(&val, sizeof(uint32_t), hash);
if (is_buffer)
/* buffer views have no sampler state */
continue;
struct zink_sampler_state *sampler_state = ctx->sampler_states[shader][idx + k];
if (sampler_state)
hash = XXH32(&sampler_state->hash, sizeof(uint32_t), hash);
}
return hash;
}
/* fold a shader-image binding (possibly an array of size elements) into the
 * running descriptor-state hash: each element contributes its image-view hash
 * (or the null-descriptor fallback)
 */
static uint32_t
calc_descriptor_state_hash_image(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_IMAGE][i].size; k++) {
uint32_t val = zink_get_image_view_hash(ctx, &ctx->image_views[shader][idx + k],
zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_IMAGE, i));
hash = XXH32(&val, sizeof(uint32_t), hash);
}
return hash;
}
/* compute the descriptor-state hash for one shader stage and one descriptor
 * type by folding every binding of that type into a single XXH32 value;
 * caller must ensure the stage's shader is non-NULL
 */
static uint32_t
update_descriptor_stage_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
struct zink_shader *zs = shader == PIPE_SHADER_COMPUTE ? ctx->compute_stage : ctx->gfx_stages[shader];
uint32_t hash = 0;
for (int i = 0; i < zs->num_bindings[type]; i++) {
int idx = zs->bindings[type][i].index;
/* dispatch per descriptor type; each helper chains onto the running hash */
switch (type) {
case ZINK_DESCRIPTOR_TYPE_UBO:
hash = calc_descriptor_state_hash_ubo(ctx, zs, shader, i, idx, hash);
break;
case ZINK_DESCRIPTOR_TYPE_SSBO:
hash = calc_descriptor_state_hash_ssbo(ctx, zs, shader, i, idx, hash);
break;
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
hash = calc_descriptor_state_hash_sampler(ctx, zs, shader, i, idx, hash);
break;
case ZINK_DESCRIPTOR_TYPE_IMAGE:
hash = calc_descriptor_state_hash_image(ctx, zs, shader, i, idx, hash);
break;
default:
unreachable("unknown descriptor type");
}
}
return hash;
}
/* recalculate the merged descriptor-state hash for one descriptor type:
 * compute path hashes just the compute stage; gfx path incrementally updates
 * each stage's cached per-stage hash, then chains the valid ones into a
 * single combined hash; the merged state is marked valid only if at least
 * one stage actually uses descriptors of this type
 */
static void
update_descriptor_state(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute)
{
/* we shouldn't be calling this if we don't have to */
assert(!ctx->descriptor_states[is_compute].valid[type]);
bool has_any_usage = false;
if (is_compute) {
/* just update compute state */
bool has_usage = zink_program_get_descriptor_usage(ctx, PIPE_SHADER_COMPUTE, type);
if (has_usage)
ctx->descriptor_states[is_compute].state[type] = update_descriptor_stage_state(ctx, PIPE_SHADER_COMPUTE, type);
else
ctx->descriptor_states[is_compute].state[type] = 0;
has_any_usage = has_usage;
} else {
/* update all gfx states */
bool first = true;
for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
bool has_usage = false;
/* this is the incremental update for the shader stage */
if (!ctx->gfx_descriptor_states[i].valid[type]) {
ctx->gfx_descriptor_states[i].state[type] = 0;
if (ctx->gfx_stages[i]) {
has_usage = zink_program_get_descriptor_usage(ctx, i, type);
if (has_usage)
ctx->gfx_descriptor_states[i].state[type] = update_descriptor_stage_state(ctx, i, type);
ctx->gfx_descriptor_states[i].valid[type] = has_usage;
}
}
if (ctx->gfx_descriptor_states[i].valid[type]) {
/* this is the overall state update for the descriptor set hash */
if (first) {
/* no need to double hash the first state */
ctx->descriptor_states[is_compute].state[type] = ctx->gfx_descriptor_states[i].state[type];
first = false;
} else {
/* chain subsequent stage hashes onto the merged hash */
ctx->descriptor_states[is_compute].state[type] = XXH32(&ctx->gfx_descriptor_states[i].state[type],
sizeof(uint32_t),
ctx->descriptor_states[is_compute].state[type]);
}
}
has_any_usage |= has_usage;
}
}
ctx->descriptor_states[is_compute].valid[type] = has_any_usage;
}
/* refresh every descriptor-state hash of the gfx or compute pipe that has
 * been invalidated since the last update
 */
void
zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute)
{
   for (unsigned type = 0; type < ZINK_DESCRIPTOR_TYPES; type++) {
      if (ctx->descriptor_states[is_compute].valid[type])
         continue;
      update_descriptor_state(ctx, type, is_compute);
   }
}
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
if (shader != PIPE_SHADER_COMPUTE) {
ctx->gfx_descriptor_states[shader].valid[type] = false;
ctx->gfx_descriptor_states[shader].state[type] = 0;
}
ctx->descriptor_states[shader == PIPE_SHADER_COMPUTE].valid[type] = false;
ctx->descriptor_states[shader == PIPE_SHADER_COMPUTE].state[type] = 0;
}
void
debug_describe_zink_buffer_view(char *buf, const struct zink_buffer_view *ptr)
{
@ -2591,7 +2384,7 @@ zink_resource_rebind(struct zink_context *ctx, struct zink_resource *res)
uint32_t usage = zink_program_get_descriptor_usage(ctx, shader, type);
while (usage) {
const int i = u_bit_scan(&usage);
struct zink_resource *cres = get_resource_for_descriptor(ctx, type, shader, i);
struct zink_resource *cres = zink_get_resource_for_descriptor(ctx, type, shader, i);
if (res != cres)
continue;

View file

@ -369,14 +369,4 @@ zink_buffer_view_reference(struct zink_screen *screen,
if (dst) *dst = src;
}
/* recalculate any invalidated descriptor-state hashes for the gfx or compute pipe */
void
zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute);
/* mark a (stage, type) descriptor state dirty so it is rehashed on next update */
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type);
/* sampler-view hash with null-descriptor fallback for unbound slots */
uint32_t
zink_get_sampler_view_hash(struct zink_context *ctx, struct zink_sampler_view *sampler_view, bool is_buffer);
/* image-view hash with null-descriptor fallback for unbound slots */
uint32_t
zink_get_image_view_hash(struct zink_context *ctx, struct zink_image_view *image_view, bool is_buffer);
#endif

View file

@ -1245,3 +1245,210 @@ zink_descriptors_update(struct zink_context *ctx, struct zink_screen *screen, bo
}
}
}
/* return the zink_resource currently bound to the given descriptor slot
 * (descriptor type, shader stage, binding index), or NULL when the slot is
 * unbound; an unknown descriptor type is a programming error
 */
struct zink_resource *
zink_get_resource_for_descriptor(struct zink_context *ctx, enum zink_descriptor_type type, enum pipe_shader_type shader, int idx)
{
switch (type) {
case ZINK_DESCRIPTOR_TYPE_UBO:
return zink_resource(ctx->ubos[shader][idx].buffer);
case ZINK_DESCRIPTOR_TYPE_SSBO:
return zink_resource(ctx->ssbos[shader][idx].buffer);
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
/* sampler view slots may legitimately be empty */
return ctx->sampler_views[shader][idx] ? zink_resource(ctx->sampler_views[shader][idx]->texture) : NULL;
case ZINK_DESCRIPTOR_TYPE_IMAGE:
return zink_resource(ctx->image_views[shader][idx].base.resource);
default:
break;
}
unreachable("unknown descriptor type!");
/* not reached; keeps compilers without unreachable() knowledge happy */
return NULL;
}
/* fold one UBO binding into the running descriptor-state hash: the resource
 * object pointer and the bound size always contribute; the buffer offset is
 * only hashed for non-dynamic uniform buffers (presumably because dynamic
 * offsets are applied at bind time instead — confirm against the descriptor
 * update path)
 */
static uint32_t
calc_descriptor_state_hash_ubo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
struct zink_resource *res = zink_get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_UBO, shader, idx);
/* hash the object pointer value itself; NULL stands in for an unbound slot */
struct zink_resource_object *obj = res ? res->obj : NULL;
hash = XXH32(&obj, sizeof(void*), hash);
void *hash_data = &ctx->ubos[shader][idx].buffer_size;
size_t data_size = sizeof(unsigned);
hash = XXH32(hash_data, data_size, hash);
if (zs->bindings[ZINK_DESCRIPTOR_TYPE_UBO][i].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
hash = XXH32(&ctx->ubos[shader][idx].buffer_offset, sizeof(unsigned), hash);
return hash;
}
/* fold one SSBO binding into the running descriptor-state hash: the resource
 * object pointer always contributes; offset and size only contribute when a
 * buffer is actually bound (unbound slots all hash identically)
 */
static uint32_t
calc_descriptor_state_hash_ssbo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
struct zink_resource *res = zink_get_resource_for_descriptor(ctx, ZINK_DESCRIPTOR_TYPE_SSBO, shader, idx);
struct zink_resource_object *obj = res ? res->obj : NULL;
hash = XXH32(&obj, sizeof(void*), hash);
if (obj) {
struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][idx];
hash = XXH32(&ssbo->buffer_offset, sizeof(ssbo->buffer_offset), hash);
hash = XXH32(&ssbo->buffer_size, sizeof(ssbo->buffer_size), hash);
}
return hash;
}
/* return the precomputed hash for a sampler view, or 0 when no view is bound;
 * buffer targets carry their hash on the buffer view, all others on the
 * image view
 */
static inline uint32_t
get_sampler_view_hash(const struct zink_sampler_view *sampler_view)
{
   if (!sampler_view)
      return 0;
   if (sampler_view->base.target == PIPE_BUFFER)
      return sampler_view->buffer_view->hash;
   return sampler_view->image_view->hash;
}
/* return the precomputed hash for a shader image view, or 0 when the slot is
 * unbound; buffer resources carry their hash on the buffer view, all others
 * on the surface
 */
static inline uint32_t
get_image_view_hash(const struct zink_image_view *image_view)
{
   if (!image_view || !image_view->base.resource)
      return 0;
   if (image_view->base.resource->target == PIPE_BUFFER)
      return image_view->buffer_view->hash;
   return image_view->surface->hash;
}
/* return the hash for a sampler view, substituting the screen's precomputed
 * null-descriptor hash when the view has none, so unbound slots still
 * contribute a stable value to the descriptor state hash
 *
 * fix: the original evaluated get_sampler_view_hash() twice per call;
 * compute it once and reuse the result
 */
uint32_t
zink_get_sampler_view_hash(struct zink_context *ctx, struct zink_sampler_view *sampler_view, bool is_buffer)
{
   uint32_t hash = get_sampler_view_hash(sampler_view);
   if (hash)
      return hash;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   return is_buffer ? screen->null_descriptor_hashes.buffer_view :
                      screen->null_descriptor_hashes.image_view;
}
/* return the hash for a shader image view, substituting the screen's
 * precomputed null-descriptor hash when the view has none, so unbound slots
 * still contribute a stable value to the descriptor state hash
 *
 * fix: the original evaluated get_image_view_hash() twice per call;
 * compute it once and reuse the result
 */
uint32_t
zink_get_image_view_hash(struct zink_context *ctx, struct zink_image_view *image_view, bool is_buffer)
{
   uint32_t hash = get_image_view_hash(image_view);
   if (hash)
      return hash;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   return is_buffer ? screen->null_descriptor_hashes.buffer_view :
                      screen->null_descriptor_hashes.image_view;
}
/* fold a sampler-view binding (possibly an array of size elements) into the
 * running descriptor-state hash: each element contributes its view hash (or
 * the null-descriptor fallback), and non-buffer elements additionally
 * contribute the bound sampler state's hash
 */
static uint32_t
calc_descriptor_state_hash_sampler(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW][i].size; k++) {
struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->sampler_views[shader][idx + k]);
bool is_buffer = zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, i);
uint32_t val = zink_get_sampler_view_hash(ctx, sampler_view, is_buffer);
hash = XXH32(&val, sizeof(uint32_t), hash);
if (is_buffer)
/* buffer views have no sampler state */
continue;
struct zink_sampler_state *sampler_state = ctx->sampler_states[shader][idx + k];
if (sampler_state)
hash = XXH32(&sampler_state->hash, sizeof(uint32_t), hash);
}
return hash;
}
/* fold a shader-image binding (possibly an array of size elements) into the
 * running descriptor-state hash: each element contributes its image-view hash
 * (or the null-descriptor fallback)
 */
static uint32_t
calc_descriptor_state_hash_image(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_IMAGE][i].size; k++) {
uint32_t val = zink_get_image_view_hash(ctx, &ctx->image_views[shader][idx + k],
zink_shader_descriptor_is_buffer(zs, ZINK_DESCRIPTOR_TYPE_IMAGE, i));
hash = XXH32(&val, sizeof(uint32_t), hash);
}
return hash;
}
/* compute the descriptor-state hash for one shader stage and one descriptor
 * type by folding every binding of that type into a single XXH32 value;
 * caller must ensure the stage's shader is non-NULL
 */
static uint32_t
update_descriptor_stage_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
struct zink_shader *zs = shader == PIPE_SHADER_COMPUTE ? ctx->compute_stage : ctx->gfx_stages[shader];
uint32_t hash = 0;
for (int i = 0; i < zs->num_bindings[type]; i++) {
int idx = zs->bindings[type][i].index;
/* dispatch per descriptor type; each helper chains onto the running hash */
switch (type) {
case ZINK_DESCRIPTOR_TYPE_UBO:
hash = calc_descriptor_state_hash_ubo(ctx, zs, shader, i, idx, hash);
break;
case ZINK_DESCRIPTOR_TYPE_SSBO:
hash = calc_descriptor_state_hash_ssbo(ctx, zs, shader, i, idx, hash);
break;
case ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW:
hash = calc_descriptor_state_hash_sampler(ctx, zs, shader, i, idx, hash);
break;
case ZINK_DESCRIPTOR_TYPE_IMAGE:
hash = calc_descriptor_state_hash_image(ctx, zs, shader, i, idx, hash);
break;
default:
unreachable("unknown descriptor type");
}
}
return hash;
}
/* recalculate the merged descriptor-state hash for one descriptor type:
 * compute path hashes just the compute stage; gfx path incrementally updates
 * each stage's cached per-stage hash, then chains the valid ones into a
 * single combined hash; the merged state is marked valid only if at least
 * one stage actually uses descriptors of this type
 */
static void
update_descriptor_state(struct zink_context *ctx, enum zink_descriptor_type type, bool is_compute)
{
/* we shouldn't be calling this if we don't have to */
assert(!ctx->descriptor_states[is_compute].valid[type]);
bool has_any_usage = false;
if (is_compute) {
/* just update compute state */
bool has_usage = zink_program_get_descriptor_usage(ctx, PIPE_SHADER_COMPUTE, type);
if (has_usage)
ctx->descriptor_states[is_compute].state[type] = update_descriptor_stage_state(ctx, PIPE_SHADER_COMPUTE, type);
else
ctx->descriptor_states[is_compute].state[type] = 0;
has_any_usage = has_usage;
} else {
/* update all gfx states */
bool first = true;
for (unsigned i = 0; i < ZINK_SHADER_COUNT; i++) {
bool has_usage = false;
/* this is the incremental update for the shader stage */
if (!ctx->gfx_descriptor_states[i].valid[type]) {
ctx->gfx_descriptor_states[i].state[type] = 0;
if (ctx->gfx_stages[i]) {
has_usage = zink_program_get_descriptor_usage(ctx, i, type);
if (has_usage)
ctx->gfx_descriptor_states[i].state[type] = update_descriptor_stage_state(ctx, i, type);
ctx->gfx_descriptor_states[i].valid[type] = has_usage;
}
}
if (ctx->gfx_descriptor_states[i].valid[type]) {
/* this is the overall state update for the descriptor set hash */
if (first) {
/* no need to double hash the first state */
ctx->descriptor_states[is_compute].state[type] = ctx->gfx_descriptor_states[i].state[type];
first = false;
} else {
/* chain subsequent stage hashes onto the merged hash */
ctx->descriptor_states[is_compute].state[type] = XXH32(&ctx->gfx_descriptor_states[i].state[type],
sizeof(uint32_t),
ctx->descriptor_states[is_compute].state[type]);
}
}
has_any_usage |= has_usage;
}
}
ctx->descriptor_states[is_compute].valid[type] = has_any_usage;
}
/* refresh every descriptor-state hash of the gfx or compute pipe that has
 * been invalidated since the last update
 */
void
zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute)
{
   for (unsigned type = 0; type < ZINK_DESCRIPTOR_TYPES; type++) {
      if (ctx->descriptor_states[is_compute].valid[type])
         continue;
      update_descriptor_state(ctx, type, is_compute);
   }
}
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type)
{
if (shader != PIPE_SHADER_COMPUTE) {
ctx->gfx_descriptor_states[shader].valid[type] = false;
ctx->gfx_descriptor_states[shader].state[type] = 0;
}
ctx->descriptor_states[shader == PIPE_SHADER_COMPUTE].valid[type] = false;
ctx->descriptor_states[shader == PIPE_SHADER_COMPUTE].state[type] = 0;
}

View file

@ -189,4 +189,17 @@ zink_descriptor_pool_reference(struct zink_screen *screen,
/* write/update all descriptor sets needed for the next gfx or compute operation */
void
zink_descriptors_update(struct zink_context *ctx, struct zink_screen *screen, bool is_compute);
/* recalculate any invalidated descriptor-state hashes for the gfx or compute pipe */
void
zink_context_update_descriptor_states(struct zink_context *ctx, bool is_compute);
/* mark a (stage, type) descriptor state dirty so it is rehashed on next update */
void
zink_context_invalidate_descriptor_state(struct zink_context *ctx, enum pipe_shader_type shader, enum zink_descriptor_type type);
/* sampler-view hash with null-descriptor fallback for unbound slots */
uint32_t
zink_get_sampler_view_hash(struct zink_context *ctx, struct zink_sampler_view *sampler_view, bool is_buffer);
/* image-view hash with null-descriptor fallback for unbound slots */
uint32_t
zink_get_image_view_hash(struct zink_context *ctx, struct zink_image_view *image_view, bool is_buffer);
/* resource currently bound to a descriptor slot, or NULL when unbound */
struct zink_resource *
zink_get_resource_for_descriptor(struct zink_context *ctx, enum zink_descriptor_type type, enum pipe_shader_type shader, int idx);
#endif