zink: implement bindless textures

this works by tracking 1024-member arrays of images and textures using idalloc
for indexing. each idalloc id is an index into the array that is returned as a handle,
and this handle is used to index into the array in shaders.

in the driver, VK_EXT_descriptor_indexing features are used to enable updates on the live
bindless descriptor set and leave unused members of the arrays unbound, which works as
long as no member is updated while it is in use. To avoid this, idalloc ids must cycle through
a batch after the image/texture handle is destroyed before being returned to the available pool.

in shaders, bindless ops come in one of two types:
- i/o variables
- bindless instructions

for i/o, the image/texture variables have to be rewritten back to the integer
handles which represent them so that the successive shader stage utilizing them
can perform the indexing

for instructions, the src representing the image/texture has to be rewritten as a deref
into the bindless image/texture array

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/12855>
This commit is contained in:
Mike Blumenkrantz 2021-09-14 09:42:31 -04:00 committed by Marge Bot
parent 04e9470bcf
commit bc202553e9
12 changed files with 762 additions and 7 deletions

View file

@ -44,6 +44,15 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
util_dynarray_append(&bs->unref_resources, struct zink_resource_object*, obj);
}
for (unsigned i = 0; i < 2; i++) {
while (util_dynarray_contains(&bs->bindless_releases[i], uint32_t)) {
uint32_t handle = util_dynarray_pop(&bs->bindless_releases[i], uint32_t);
bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
struct util_idalloc *ids = i ? &ctx->di.bindless[is_buffer].img_slots : &ctx->di.bindless[is_buffer].tex_slots;
util_idalloc_free(ids, is_buffer ? handle - ZINK_MAX_BINDLESS_HANDLES : handle);
}
}
set_foreach_remove(bs->active_queries, entry) {
struct zink_query *query = (void*)entry->key;
zink_prune_query(screen, bs, query);
@ -156,6 +165,8 @@ zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs
util_dynarray_fini(&bs->zombie_samplers);
util_dynarray_fini(&bs->dead_framebuffers);
util_dynarray_fini(&bs->unref_resources);
util_dynarray_fini(&bs->bindless_releases[0]);
util_dynarray_fini(&bs->bindless_releases[1]);
_mesa_set_destroy(bs->surfaces, NULL);
_mesa_set_destroy(bs->bufferviews, NULL);
_mesa_set_destroy(bs->programs, NULL);
@ -205,6 +216,8 @@ create_batch_state(struct zink_context *ctx)
util_dynarray_init(&bs->dead_framebuffers, NULL);
util_dynarray_init(&bs->persistent_resources, NULL);
util_dynarray_init(&bs->unref_resources, NULL);
util_dynarray_init(&bs->bindless_releases[0], NULL);
util_dynarray_init(&bs->bindless_releases[1], NULL);
cnd_init(&bs->usage.flush);
mtx_init(&bs->usage.mtx, mtx_plain);

View file

@ -82,6 +82,7 @@ struct zink_batch_state {
struct set *bufferviews;
struct util_dynarray unref_resources;
struct util_dynarray bindless_releases[2];
struct util_dynarray persistent_resources;
struct util_dynarray zombie_samplers;

View file

@ -1042,6 +1042,178 @@ unbreak_bos(nir_shader *shader)
return true;
}
/* this is a "default" bindless texture used if the shader has no texture variables */
static nir_variable *
create_bindless_texture(nir_shader *nir, nir_tex_instr *tex)
{
   /* binding 0 holds sampled images, binding 1 holds uniform texel buffers */
   const unsigned slot = tex->sampler_dim == GLSL_SAMPLER_DIM_BUF ? 1 : 0;
   const struct glsl_type *sampler_type =
      glsl_sampler_type(tex->sampler_dim, tex->is_shadow, tex->is_array, GLSL_TYPE_FLOAT);
   const struct glsl_type *array_type =
      glsl_array_type(sampler_type, ZINK_MAX_BINDLESS_HANDLES, 0);
   nir_variable *var = nir_variable_create(nir, nir_var_uniform, array_type, "bindless_texture");
   var->data.descriptor_set = ZINK_DESCRIPTOR_BINDLESS;
   var->data.driver_location = slot;
   var->data.binding = slot;
   return var;
}
/* this is a "default" bindless image used if the shader has no image variables */
static nir_variable *
create_bindless_image(nir_shader *nir, enum glsl_sampler_dim dim)
{
   /* binding 2 holds storage images, binding 3 holds storage texel buffers */
   const unsigned slot = dim == GLSL_SAMPLER_DIM_BUF ? 3 : 2;
   const struct glsl_type *array_type =
      glsl_array_type(glsl_image_type(dim, false, GLSL_TYPE_FLOAT),
                      ZINK_MAX_BINDLESS_HANDLES, 0);
   nir_variable *var = nir_variable_create(nir, nir_var_uniform, array_type, "bindless_image");
   var->data.descriptor_set = ZINK_DESCRIPTOR_BINDLESS;
   var->data.driver_location = slot;
   var->data.binding = slot;
   /* a format is required for storage images; actual format comes from the descriptor */
   var->data.image.format = PIPE_FORMAT_R8G8B8A8_UNORM;
   return var;
}
/* rewrite bindless instructions as array deref instructions */
static bool
lower_bindless_instr(nir_builder *b, nir_instr *in, void *data)
{
/* 'data' is the 4-slot bindless variable array:
 * [0]=sampled image, [1]=sampler texel buffer, [2]=storage image, [3]=storage texel buffer;
 * slots may be NULL, in which case a "default" variable is created on demand
 */
nir_variable **bindless = data;
if (in->type == nir_instr_type_tex) {
nir_tex_instr *tex = nir_instr_as_tex(in);
/* only tex ops sourcing a bindless texture handle need rewriting */
int idx = nir_tex_instr_src_index(tex, nir_tex_src_texture_handle);
if (idx == -1)
return false;
nir_variable *var = tex->sampler_dim == GLSL_SAMPLER_DIM_BUF ? bindless[1] : bindless[0];
if (!var)
var = create_bindless_texture(b->shader, tex);
b->cursor = nir_before_instr(in);
/* replace the integer handle src with a deref into the bindless array,
 * using the handle (truncated/extended to 32 bits) as the array index
 */
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (glsl_type_is_array(var->type))
deref = nir_build_deref_array(b, deref, nir_u2uN(b, tex->src[idx].src.ssa, 32));
nir_instr_rewrite_src_ssa(in, &tex->src[idx].src, &deref->dest.ssa);
/* bindless sampling uses the variable type directly, which means the tex instr has to exactly
 * match up with it in contrast to normal sampler ops where things are a bit more flexible;
 * this results in cases where a shader is passed with sampler2DArray but the tex instr only has
 * 2 components, which explodes spirv compilation even though it doesn't trigger validation errors
 *
 * to fix this, pad the coord src here and fix the tex instr so that ntv will do the "right" thing
 * - Warhammer 40k: Dawn of War III
 */
unsigned needed_components = glsl_get_sampler_coordinate_components(glsl_without_array(var->type));
/* NOTE(review): nir_tex_instr_src_index() returns -1 when no coord src exists
 * (e.g. txs); that would index tex->src out of bounds below — confirm every
 * handle-sourcing tex op reaching this pass carries a coord src
 */
unsigned c = nir_tex_instr_src_index(tex, nir_tex_src_coord);
unsigned coord_components = nir_src_num_components(tex->src[c].src);
if (coord_components < needed_components) {
nir_ssa_def *def = nir_pad_vector(b, tex->src[c].src.ssa, needed_components);
nir_instr_rewrite_src_ssa(in, &tex->src[c].src, def);
tex->coord_components = needed_components;
}
return true;
}
if (in->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in);
nir_intrinsic_op op;
#define OP_SWAP(OP) \
case nir_intrinsic_bindless_image_##OP: \
op = nir_intrinsic_image_deref_##OP; \
break;
/* convert bindless intrinsics to deref intrinsics */
switch (instr->intrinsic) {
OP_SWAP(atomic_add)
OP_SWAP(atomic_and)
OP_SWAP(atomic_comp_swap)
OP_SWAP(atomic_dec_wrap)
OP_SWAP(atomic_exchange)
OP_SWAP(atomic_fadd)
OP_SWAP(atomic_fmax)
OP_SWAP(atomic_fmin)
OP_SWAP(atomic_imax)
OP_SWAP(atomic_imin)
OP_SWAP(atomic_inc_wrap)
OP_SWAP(atomic_or)
OP_SWAP(atomic_umax)
OP_SWAP(atomic_umin)
OP_SWAP(atomic_xor)
OP_SWAP(format)
OP_SWAP(load)
OP_SWAP(order)
OP_SWAP(samples)
OP_SWAP(size)
OP_SWAP(store)
default:
return false;
}
enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
nir_variable *var = dim == GLSL_SAMPLER_DIM_BUF ? bindless[3] : bindless[2];
if (!var)
var = create_bindless_image(b->shader, dim);
instr->intrinsic = op;
b->cursor = nir_before_instr(in);
/* src[0] of a bindless image intrinsic is the handle; replace it with a deref
 * into the bindless image array indexed by the 32-bit handle
 */
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (glsl_type_is_array(var->type))
deref = nir_build_deref_array(b, deref, nir_u2uN(b, instr->src[0].ssa, 32));
nir_instr_rewrite_src_ssa(in, &instr->src[0], &deref->dest.ssa);
return true;
}
static bool
lower_bindless(nir_shader *shader, nir_variable **bindless)
{
   /* rewrite bindless handle srcs into derefs of the bindless descriptor arrays */
   bool progress = nir_shader_instructions_pass(shader, lower_bindless_instr,
                                                nir_metadata_dominance, bindless);
   if (!progress)
      return false;
   /* instructions now use uniform-array derefs: fix up modes, then clean up
    * any variables the rewrite orphaned
    */
   nir_fixup_deref_modes(shader);
   NIR_PASS_V(shader, nir_remove_dead_variables, nir_var_shader_temp, NULL);
   optimize_nir(shader);
   return true;
}
/* convert shader image/texture io variables to int64 handles for bindless indexing */
static bool
lower_bindless_io_instr(nir_builder *b, nir_instr *in, void *data)
{
if (in->type != nir_instr_type_intrinsic)
return false;
nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in);
if (instr->intrinsic != nir_intrinsic_load_deref &&
instr->intrinsic != nir_intrinsic_store_deref)
return false;
/* src[0] is the deref for both load_deref and store_deref */
nir_deref_instr *src_deref = nir_src_as_deref(instr->src[0]);
nir_variable *var = nir_deref_instr_get_variable(src_deref);
/* data.bindless doubles as the "already rewritten" marker here */
if (var->data.bindless)
return false;
if (var->data.mode != nir_var_shader_in && var->data.mode != nir_var_shader_out)
return false;
if (!glsl_type_is_image(var->type) && !glsl_type_is_sampler(var->type))
return false;
/* retype the i/o variable to the 64-bit integer handle so the next stage
 * can index the bindless arrays with it
 */
var->type = glsl_int64_t_type();
var->data.bindless = 1;
b->cursor = nir_before_instr(in);
/* rebuild the access against the retyped variable, then remove the old
 * instruction and its (now stale) deref
 */
nir_deref_instr *deref = nir_build_deref_var(b, var);
if (instr->intrinsic == nir_intrinsic_load_deref) {
nir_ssa_def *def = nir_load_deref(b, deref);
nir_instr_rewrite_src_ssa(in, &instr->src[0], def);
nir_ssa_def_rewrite_uses(&instr->dest.ssa, def);
} else {
nir_store_deref(b, deref, instr->src[1].ssa, nir_intrinsic_write_mask(instr));
}
nir_instr_remove(in);
nir_instr_remove(&src_deref->instr);
return true;
}
static bool
lower_bindless_io(nir_shader *shader)
{
   /* convert image/sampler i/o variables into int64 bindless handles */
   bool progress = nir_shader_instructions_pass(shader, lower_bindless_io_instr,
                                                nir_metadata_dominance, NULL);
   return progress;
}
static uint32_t
zink_binding(gl_shader_stage stage, VkDescriptorType type, int index)
{
@ -1074,6 +1246,52 @@ zink_binding(gl_shader_stage stage, VkDescriptorType type, int index)
}
}
/* rewrite a bindless uniform variable into one of the 4 shared bindless array
 * variables (cloning it on first use), recursing through struct members;
 * the original variable is demoted to shader_temp so it gets eliminated
 */
static void
handle_bindless_var(nir_shader *nir, nir_variable *var, const struct glsl_type *type, nir_variable **bindless)
{
if (glsl_type_is_struct(type)) {
for (unsigned i = 0; i < glsl_get_length(type); i++)
handle_bindless_var(nir, var, glsl_get_struct_field(type, i), bindless);
return;
}
/* just a random scalar in a struct */
if (!glsl_type_is_image(type) && !glsl_type_is_sampler(type))
return;
VkDescriptorType vktype = glsl_type_is_image(type) ? zink_image_type(type) : zink_sampler_type(type);
/* binding indices match type_from_bindless_index() / the bindless set layout */
unsigned binding;
switch (vktype) {
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
binding = 0;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
binding = 1;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
binding = 2;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
binding = 3;
break;
default:
unreachable("unknown");
}
if (!bindless[binding]) {
/* first variable of this descriptor type: clone it into the
 * ZINK_MAX_BINDLESS_HANDLES-sized array on the bindless descriptor set
 */
bindless[binding] = nir_variable_clone(var, nir);
bindless[binding]->data.bindless = 0;
bindless[binding]->data.descriptor_set = ZINK_DESCRIPTOR_BINDLESS;
bindless[binding]->type = glsl_array_type(type, ZINK_MAX_BINDLESS_HANDLES, 0);
bindless[binding]->data.driver_location = bindless[binding]->data.binding = binding;
if (!bindless[binding]->data.image.format)
bindless[binding]->data.image.format = PIPE_FORMAT_R8G8B8A8_UNORM;
nir_shader_add_variable(nir, bindless[binding]);
} else {
/* all variables mapping to the same binding must agree on sampler dim */
assert(glsl_get_sampler_dim(glsl_without_array(bindless[binding]->type)) == glsl_get_sampler_dim(glsl_without_array(var->type)));
}
var->data.mode = nir_var_shader_temp;
}
struct zink_shader *
zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
const struct pipe_stream_output_info *so_info)
@ -1133,7 +1351,18 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
fprintf(stderr, "---8<---\n");
}
foreach_list_typed_reverse(nir_variable, var, node, &nir->variables) {
nir_variable *bindless[4] = {0};
bool has_bindless_io = false;
nir_foreach_variable_with_modes(var, nir, nir_var_shader_in | nir_var_shader_out) {
if (glsl_type_is_image(var->type) || glsl_type_is_sampler(var->type)) {
has_bindless_io = true;
break;
}
}
if (has_bindless_io)
NIR_PASS_V(nir, lower_bindless_io);
foreach_list_typed_reverse_safe(nir_variable, var, node, &nir->variables) {
if (_nir_shader_variable_has_mode(var, nir_var_uniform |
nir_var_mem_ubo |
nir_var_mem_ssbo)) {
@ -1171,11 +1400,14 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
ret->num_bindings[ztype]++;
} else {
assert(var->data.mode == nir_var_uniform);
if (glsl_type_is_sampler(type) || glsl_type_is_image(type)) {
if (var->data.bindless) {
ret->bindless = true;
handle_bindless_var(nir, var, type, bindless);
} else if (glsl_type_is_sampler(type) || glsl_type_is_image(type)) {
VkDescriptorType vktype = glsl_type_is_image(type) ? zink_image_type(type) : zink_sampler_type(type);
ztype = zink_desc_type_from_vktype(vktype);
if (vktype == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
ret->num_texel_buffers++;
ztype = zink_desc_type_from_vktype(vktype);
var->data.driver_location = var->data.binding;
var->data.descriptor_set = ztype + 1;
var->data.binding = zink_binding(nir->info.stage, vktype, var->data.driver_location);
@ -1191,6 +1423,9 @@ zink_shader_create(struct zink_screen *screen, struct nir_shader *nir,
}
}
}
bool bindless_lowered = false;
NIR_PASS(bindless_lowered, nir, lower_bindless, bindless);
ret->bindless |= bindless_lowered;
ret->nir = nir;
if (so_info && nir->info.outputs_written && nir->info.has_transform_feedback_varyings)

View file

@ -83,6 +83,7 @@ struct zink_shader {
unsigned num_texel_buffers;
uint32_t ubos_used; // bitfield of which ubo indices are used
uint32_t ssbos_used; // bitfield of which ssbo indices are used
bool bindless;
simple_mtx_t lock;
struct set *programs;

View file

@ -98,6 +98,8 @@ zink_context_destroy(struct pipe_context *pctx)
pipe_surface_release(&ctx->base, &ctx->dummy_surface[i]);
zink_buffer_view_reference(screen, &ctx->dummy_bufferview, NULL);
zink_descriptors_deinit_bindless(ctx);
simple_mtx_destroy(&ctx->batch_mtx);
zink_clear_batch_state(ctx, ctx->batch.state);
zink_batch_state_destroy(screen, ctx->batch.state);
@ -111,6 +113,15 @@ zink_context_destroy(struct pipe_context *pctx)
zink_batch_state_destroy(screen, *bs);
}
for (unsigned i = 0; i < 2; i++) {
util_idalloc_fini(&ctx->di.bindless[i].tex_slots);
util_idalloc_fini(&ctx->di.bindless[i].img_slots);
free(ctx->di.bindless[i].buffer_infos);
free(ctx->di.bindless[i].img_infos);
util_dynarray_fini(&ctx->di.bindless[i].updates);
util_dynarray_fini(&ctx->di.bindless[i].resident);
}
if (screen->info.have_KHR_imageless_framebuffer) {
hash_table_foreach(&ctx->framebuffer_cache, he)
zink_destroy_framebuffer(screen, he->data);
@ -1502,6 +1513,273 @@ zink_set_sampler_views(struct pipe_context *pctx,
zink_screen(pctx->screen)->context_invalidate_descriptor_state(ctx, shader_type, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, start_slot, num_views);
}
/* pipe_context::create_texture_handle hook: allocate a bindless descriptor
 * (with its own sampler) for the given view and return its handle;
 * buffer handles are offset by ZINK_MAX_BINDLESS_HANDLES so
 * ZINK_BINDLESS_IS_BUFFER() can classify them. Returns 0 on failure.
 */
static uint64_t
zink_create_texture_handle(struct pipe_context *pctx, struct pipe_sampler_view *view, const struct pipe_sampler_state *state)
{
struct zink_context *ctx = zink_context(pctx);
struct zink_resource *res = zink_resource(view->texture);
struct zink_sampler_view *sv = zink_sampler_view(view);
struct zink_bindless_descriptor *bd;
bd = calloc(1, sizeof(struct zink_bindless_descriptor));
if (!bd)
return 0;
bd->sampler = pctx->create_sampler_state(pctx, state);
if (!bd->sampler) {
free(bd);
return 0;
}
/* take a ref on the view's backing bufferview/surface so it outlives the view */
bd->ds.is_buffer = res->base.b.target == PIPE_BUFFER;
if (res->base.b.target == PIPE_BUFFER)
zink_buffer_view_reference(zink_screen(pctx->screen), &bd->ds.bufferview, sv->buffer_view);
else
zink_surface_reference(zink_screen(pctx->screen), &bd->ds.surface, sv->image_view);
/* the idalloc id is the index into the bindless descriptor array */
uint64_t handle = util_idalloc_alloc(&ctx->di.bindless[bd->ds.is_buffer].tex_slots);
if (bd->ds.is_buffer)
handle += ZINK_MAX_BINDLESS_HANDLES;
bd->handle = handle;
_mesa_hash_table_insert(&ctx->di.bindless[bd->ds.is_buffer].tex_handles, (void*)(uintptr_t)handle, bd);
return handle;
}
/* pipe_context::delete_texture_handle hook: drop the descriptor for 'handle',
 * deferring release of its array slot until the current batch has cycled
 * (the live descriptor set may still reference it on the GPU)
 */
static void
zink_delete_texture_handle(struct pipe_context *pctx, uint64_t handle)
{
   struct zink_context *ctx = zink_context(pctx);
   bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
   struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].tex_handles, (void*)(uintptr_t)handle);
   assert(he);
   struct zink_bindless_descriptor *bd = he->data;
   struct zink_descriptor_surface *ds = &bd->ds;
   _mesa_hash_table_remove(&ctx->di.bindless[is_buffer].tex_handles, he);
   /* the slot index is returned to the idalloc pool in zink_reset_batch_state() */
   uint32_t h = handle;
   util_dynarray_append(&ctx->batch.state->bindless_releases[0], uint32_t, h);

   struct zink_resource *res = zink_descriptor_surface_resource(ds);
   if (ds->is_buffer) {
      if (zink_resource_has_usage(res))
         zink_batch_reference_bufferview(&ctx->batch, ds->bufferview);
      zink_buffer_view_reference(zink_screen(pctx->screen), &ds->bufferview, NULL);
   } else {
      if (zink_resource_has_usage(res))
         zink_batch_reference_surface(&ctx->batch, ds->surface);
      zink_surface_reference(zink_screen(pctx->screen), &ds->surface, NULL);
   }
   /* zink_create_texture_handle() creates a sampler unconditionally, so it
    * must be destroyed on both paths; previously only the non-buffer path
    * deleted it, leaking the sampler for buffer handles
    */
   pctx->delete_sampler_state(pctx, bd->sampler);
   /* free the whole descriptor, not a pointer to its first member */
   free(bd);
}
static void
rebind_bindless_bufferview(struct zink_context *ctx, struct zink_resource *res, struct zink_descriptor_surface *ds)
{
/* if this resource has been rebound while it wasn't set here,
 * its backing resource will have changed and thus we need to update
 * the bufferview
 */
VkBufferViewCreateInfo bvci = ds->bufferview->bvci;
bvci.buffer = res->obj->buffer;
struct zink_buffer_view *buffer_view = get_buffer_view(ctx, res, &bvci);
assert(buffer_view != ds->bufferview);
/* keep the old bufferview alive for the in-flight batch before dropping our ref */
if (zink_resource_has_usage(res))
zink_batch_reference_bufferview(&ctx->batch, ds->bufferview);
zink_buffer_view_reference(zink_screen(ctx->base.screen), &ds->bufferview, NULL);
/* ownership of get_buffer_view()'s reference transfers to ds */
ds->bufferview = buffer_view;
}
/* reset the descriptor slot for 'handle' to an unbound/null state; with
 * nullDescriptor support the slot is simply zeroed, otherwise a dummy
 * bufferview/surface is substituted
 */
static void
zero_bindless_descriptor(struct zink_context *ctx, uint32_t handle, bool is_buffer, bool is_image)
{
   bool has_null = likely(zink_screen(ctx->base.screen)->info.rb2_feats.nullDescriptor);
   if (is_buffer) {
      VkBufferView *bv = &ctx->di.bindless[is_image].buffer_infos[handle];
      if (has_null) {
         *bv = VK_NULL_HANDLE;
      } else {
         struct zink_buffer_view *null_bufferview = ctx->dummy_bufferview;
         *bv = null_bufferview->buffer_view;
      }
   } else {
      VkDescriptorImageInfo *ii = &ctx->di.bindless[is_image].img_infos[handle];
      if (has_null) {
         memset(ii, 0, sizeof(*ii));
      } else {
         struct zink_surface *null_surface = zink_csurface(ctx->dummy_surface[is_image]);
         ii->sampler = VK_NULL_HANDLE;
         ii->imageView = null_surface->image_view;
         ii->imageLayout = VK_IMAGE_LAYOUT_GENERAL;
      }
   }
}
/* pipe_context::make_texture_handle_resident hook: on residency, fill the
 * bindless descriptor slot and queue a descriptor-set write; on eviction,
 * null the slot and drop bind counts. bindless[0]/dirty[0] are the texture arrays.
 */
static void
zink_make_texture_handle_resident(struct pipe_context *pctx, uint64_t handle, bool resident)
{
struct zink_context *ctx = zink_context(pctx);
bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].tex_handles, (void*)(uintptr_t)handle);
assert(he);
struct zink_bindless_descriptor *bd = he->data;
struct zink_descriptor_surface *ds = &bd->ds;
struct zink_resource *res = zink_descriptor_surface_resource(ds);
/* strip the buffer-handle offset to get the raw array index */
if (is_buffer)
handle -= ZINK_MAX_BINDLESS_HANDLES;
if (resident) {
/* bindless descriptors are visible to both gfx and compute */
update_res_bind_count(ctx, res, false, false);
update_res_bind_count(ctx, res, true, false);
res->bindless[0]++;
if (is_buffer) {
/* the backing buffer may have been replaced while non-resident */
if (ds->bufferview->bvci.buffer != res->obj->buffer)
rebind_bindless_bufferview(ctx, res, ds);
VkBufferView *bv = &ctx->di.bindless[0].buffer_infos[handle];
*bv = ds->bufferview->buffer_view;
zink_fake_buffer_barrier(res, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
} else {
VkDescriptorImageInfo *ii = &ctx->di.bindless[0].img_infos[handle];
ii->sampler = bd->sampler->sampler;
ii->imageView = ds->surface->image_view;
ii->imageLayout = zink_descriptor_util_image_layout_eval(res, false);
flush_pending_clears(ctx, res);
check_for_layout_update(ctx, res, false);
check_for_layout_update(ctx, res, true);
}
zink_batch_resource_usage_set(&ctx->batch, res, false);
util_dynarray_append(&ctx->di.bindless[0].resident, struct zink_bindless_descriptor *, bd);
/* re-add the buffer offset so the update loop can classify the handle */
uint32_t h = is_buffer ? handle + ZINK_MAX_BINDLESS_HANDLES : handle;
util_dynarray_append(&ctx->di.bindless[0].updates, uint32_t, h);
} else {
zero_bindless_descriptor(ctx, handle, is_buffer, false);
util_dynarray_delete_unordered(&ctx->di.bindless[0].resident, struct zink_bindless_descriptor *, bd);
update_res_bind_count(ctx, res, false, true);
update_res_bind_count(ctx, res, true, true);
res->bindless[0]--;
/* the last image bind going away may allow a tighter image layout */
for (unsigned i = 0; i < 2; i++) {
if (!res->image_bind_count[i])
check_for_layout_update(ctx, res, i);
}
}
ctx->di.bindless_dirty[0] = true;
}
/* pipe_context::create_image_handle hook: allocate a bindless image descriptor
 * for the given view and return its handle; buffer handles are offset by
 * ZINK_MAX_BINDLESS_HANDLES. Returns 0 on failure.
 */
static uint64_t
zink_create_image_handle(struct pipe_context *pctx, const struct pipe_image_view *view)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_resource *res = zink_resource(view->resource);
   struct zink_bindless_descriptor *bd;
   if (!zink_resource_object_init_storage(ctx, res)) {
      debug_printf("couldn't create storage image!");
      return 0;
   }
   /* calloc (matching zink_create_texture_handle) so 'sampler' and 'access'
    * start zeroed instead of holding indeterminate values
    */
   bd = calloc(1, sizeof(struct zink_bindless_descriptor));
   if (!bd)
      return 0;
   bd->ds.is_buffer = res->base.b.target == PIPE_BUFFER;
   if (bd->ds.is_buffer)
      bd->ds.bufferview = create_image_bufferview(ctx, view);
   else
      bd->ds.surface = create_image_surface(ctx, view, false);
   /* the idalloc id is the index into the bindless image descriptor array */
   uint64_t handle = util_idalloc_alloc(&ctx->di.bindless[bd->ds.is_buffer].img_slots);
   if (bd->ds.is_buffer)
      handle += ZINK_MAX_BINDLESS_HANDLES;
   bd->handle = handle;
   _mesa_hash_table_insert(&ctx->di.bindless[bd->ds.is_buffer].img_handles, (void*)(uintptr_t)handle, bd);
   return handle;
}
static void
zink_delete_image_handle(struct pipe_context *pctx, uint64_t handle)
{
struct zink_context *ctx = zink_context(pctx);
bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].img_handles, (void*)(uintptr_t)handle);
assert(he);
struct zink_descriptor_surface *ds = he->data;
_mesa_hash_table_remove(&ctx->di.bindless[is_buffer].img_handles, he);
uint32_t h = handle;
util_dynarray_append(&ctx->batch.state->bindless_releases[1], uint32_t, h);
struct zink_resource *res = zink_descriptor_surface_resource(ds);
if (ds->is_buffer) {
if (zink_resource_has_usage(res))
zink_batch_reference_bufferview(&ctx->batch, ds->bufferview);
zink_buffer_view_reference(zink_screen(pctx->screen), &ds->bufferview, NULL);
} else {
if (zink_resource_has_usage(res))
zink_batch_reference_surface(&ctx->batch, ds->surface);
zink_surface_reference(zink_screen(pctx->screen), &ds->surface, NULL);
}
free(ds);
}
/* pipe_context::make_image_handle_resident hook: on residency, fill the
 * bindless image descriptor slot and queue a descriptor-set write; on
 * eviction, null the slot and drop bind counts. bindless[1]/dirty[1] are the image arrays.
 */
static void
zink_make_image_handle_resident(struct pipe_context *pctx, uint64_t handle, unsigned paccess, bool resident)
{
struct zink_context *ctx = zink_context(pctx);
bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
struct hash_entry *he = _mesa_hash_table_search(&ctx->di.bindless[is_buffer].img_handles, (void*)(uintptr_t)handle);
assert(he);
struct zink_bindless_descriptor *bd = he->data;
struct zink_descriptor_surface *ds = &bd->ds;
/* stash the PIPE_IMAGE_ACCESS_* flags for later ref updates */
bd->access = paccess;
struct zink_resource *res = zink_descriptor_surface_resource(ds);
/* write counts are tracked for both gfx [0] and compute [1] */
VkAccessFlags access = 0;
if (paccess & PIPE_IMAGE_ACCESS_WRITE) {
if (resident) {
res->write_bind_count[0]++;
res->write_bind_count[1]++;
} else {
res->write_bind_count[0]--;
res->write_bind_count[1]--;
}
access |= VK_ACCESS_SHADER_WRITE_BIT;
}
if (paccess & PIPE_IMAGE_ACCESS_READ) {
access |= VK_ACCESS_SHADER_READ_BIT;
}
/* strip the buffer-handle offset to get the raw array index */
if (is_buffer)
handle -= ZINK_MAX_BINDLESS_HANDLES;
if (resident) {
update_res_bind_count(ctx, res, false, false);
update_res_bind_count(ctx, res, true, false);
res->image_bind_count[0]++;
res->image_bind_count[1]++;
res->bindless[1]++;
if (is_buffer) {
/* the backing buffer may have been replaced while non-resident */
if (ds->bufferview->bvci.buffer != res->obj->buffer)
rebind_bindless_bufferview(ctx, res, ds);
VkBufferView *bv = &ctx->di.bindless[1].buffer_infos[handle];
*bv = ds->bufferview->buffer_view;
zink_fake_buffer_barrier(res, access, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
} else {
VkDescriptorImageInfo *ii = &ctx->di.bindless[1].img_infos[handle];
ii->sampler = VK_NULL_HANDLE;
ii->imageView = ds->surface->image_view;
ii->imageLayout = VK_IMAGE_LAYOUT_GENERAL;
finalize_image_bind(ctx, res, false);
finalize_image_bind(ctx, res, true);
}
zink_batch_resource_usage_set(&ctx->batch, res, zink_resource_access_is_write(access));
util_dynarray_append(&ctx->di.bindless[1].resident, struct zink_bindless_descriptor *, bd);
/* re-add the buffer offset so the update loop can classify the handle */
uint32_t h = is_buffer ? handle + ZINK_MAX_BINDLESS_HANDLES : handle;
util_dynarray_append(&ctx->di.bindless[1].updates, uint32_t, h);
} else {
zero_bindless_descriptor(ctx, handle, is_buffer, true);
util_dynarray_delete_unordered(&ctx->di.bindless[1].resident, struct zink_bindless_descriptor *, bd);
unbind_shader_image_counts(ctx, res, false, false);
unbind_shader_image_counts(ctx, res, true, false);
res->bindless[1]--;
/* the last image bind going away may allow a tighter image layout */
for (unsigned i = 0; i < 2; i++) {
if (!res->image_bind_count[i])
check_for_layout_update(ctx, res, i);
}
}
ctx->di.bindless_dirty[1] = true;
}
static void
zink_set_stencil_ref(struct pipe_context *pctx,
const struct pipe_stencil_ref ref)
@ -2085,6 +2363,15 @@ zink_update_descriptor_refs(struct zink_context *ctx, bool compute)
if (ctx->curr_program)
zink_batch_reference_program(batch, &ctx->curr_program->base);
}
if (ctx->di.bindless_refs_dirty) {
ctx->di.bindless_refs_dirty = false;
for (unsigned i = 0; i < 2; i++) {
util_dynarray_foreach(&ctx->di.bindless[i].resident, struct zink_bindless_descriptor*, bd) {
struct zink_resource *res = zink_descriptor_surface_resource(&(*bd)->ds);
zink_batch_resource_usage_set(&ctx->batch, res, (*bd)->access & PIPE_IMAGE_ACCESS_WRITE);
}
}
}
}
static void
@ -2123,6 +2410,8 @@ flush_batch(struct zink_context *ctx, bool sync)
stall(ctx);
ctx->oom_flush = false;
ctx->oom_stall = false;
ctx->dd->bindless_bound = false;
ctx->di.bindless_refs_dirty = true;
}
}
@ -3451,6 +3740,7 @@ rebind_buffer(struct zink_context *ctx, struct zink_resource *res, uint32_t rebi
unsigned num_rebinds = 0;
bool has_write = false;
assert(!res->bindless[1]); //TODO
if ((rebind_mask & BITFIELD_BIT(TC_BINDING_STREAMOUT_BUFFER)) || (!rebind_mask && res->so_bind_count && ctx->num_so_targets)) {
for (unsigned i = 0; i < ctx->num_so_targets; i++) {
if (ctx->so_targets[i]) {
@ -3872,6 +4162,27 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
goto fail;
}
ctx->base.create_texture_handle = zink_create_texture_handle;
ctx->base.delete_texture_handle = zink_delete_texture_handle;
ctx->base.make_texture_handle_resident = zink_make_texture_handle_resident;
ctx->base.create_image_handle = zink_create_image_handle;
ctx->base.delete_image_handle = zink_delete_image_handle;
ctx->base.make_image_handle_resident = zink_make_image_handle_resident;
for (unsigned i = 0; i < 2; i++) {
_mesa_hash_table_init(&ctx->di.bindless[i].img_handles, ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
_mesa_hash_table_init(&ctx->di.bindless[i].tex_handles, ctx, _mesa_hash_pointer, _mesa_key_pointer_equal);
/* allocate 1024 slots and reserve slot 0 */
util_idalloc_init(&ctx->di.bindless[i].tex_slots, ZINK_MAX_BINDLESS_HANDLES);
util_idalloc_alloc(&ctx->di.bindless[i].tex_slots);
util_idalloc_init(&ctx->di.bindless[i].img_slots, ZINK_MAX_BINDLESS_HANDLES);
util_idalloc_alloc(&ctx->di.bindless[i].img_slots);
ctx->di.bindless[i].buffer_infos = malloc(sizeof(VkImageView) * ZINK_MAX_BINDLESS_HANDLES);
ctx->di.bindless[i].img_infos = malloc(sizeof(VkDescriptorImageInfo) * ZINK_MAX_BINDLESS_HANDLES);
util_dynarray_init(&ctx->di.bindless[i].updates, NULL);
util_dynarray_init(&ctx->di.bindless[i].resident, NULL);
}
ctx->have_timelines = screen->info.have_KHR_timeline_semaphore;
simple_mtx_init(&ctx->batch_mtx, mtx_plain);
zink_start_batch(ctx, &ctx->batch);

View file

@ -30,6 +30,8 @@
#define ZINK_DEFAULT_MAX_DESCS 5000
#define ZINK_DEFAULT_DESC_CLAMP (ZINK_DEFAULT_MAX_DESCS * 0.9)
#define ZINK_MAX_BINDLESS_HANDLES 1024
#include "zink_clear.h"
#include "zink_pipeline.h"
#include "zink_batch.h"
@ -41,7 +43,7 @@
#include "pipe/p_state.h"
#include "util/u_rect.h"
#include "util/u_threaded_context.h"
#include "util/u_idalloc.h"
#include "util/slab.h"
#include "util/list.h"
#include "util/u_dynarray.h"
@ -139,6 +141,19 @@ struct zink_descriptor_surface {
bool is_buffer;
};
struct zink_bindless_descriptor {
struct zink_descriptor_surface ds;
struct zink_sampler_state *sampler;
uint32_t handle;
uint32_t access; //PIPE_ACCESS_...
};
static inline struct zink_resource *
zink_descriptor_surface_resource(struct zink_descriptor_surface *ds)
{
return ds->is_buffer ? (struct zink_resource*)ds->bufferview->pres : (struct zink_resource*)ds->surface->base.texture;
}
typedef void (*pipe_draw_vbo_func)(struct pipe_context *pipe,
const struct pipe_draw_info *info,
unsigned drawid_offset,
@ -313,6 +328,22 @@ struct zink_context {
struct zink_resource *descriptor_res[ZINK_DESCRIPTOR_TYPES][PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
struct zink_descriptor_surface sampler_surfaces[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
struct zink_descriptor_surface image_surfaces[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
struct {
struct util_idalloc tex_slots;
struct util_idalloc img_slots;
struct hash_table tex_handles;
struct hash_table img_handles;
VkBufferView *buffer_infos; //tex, img
VkDescriptorImageInfo *img_infos; //tex, img
struct util_dynarray updates;
struct util_dynarray resident;
} bindless[2]; //img, buffer
union {
bool bindless_dirty[2]; //tex, img
uint16_t any_bindless_dirty;
};
bool bindless_refs_dirty;
} di;
struct set *need_barriers[2]; //gfx, compute
struct set update_barriers[2][2]; //[gfx, compute][current, next]

View file

@ -535,6 +535,12 @@ zink_descriptor_util_init_null_set(struct zink_context *ctx, VkDescriptorSet des
VkImageLayout
zink_descriptor_util_image_layout_eval(const struct zink_resource *res, bool is_compute)
{
if (res->bindless[0] || res->bindless[1]) {
/* bindless needs most permissive layout */
if (res->image_bind_count[0] || res->image_bind_count[1])
return VK_IMAGE_LAYOUT_GENERAL;
return VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
return res->image_bind_count[is_compute] ? VK_IMAGE_LAYOUT_GENERAL :
res->aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) ?
//Vulkan-Docs#1490
@ -1462,6 +1468,13 @@ zink_descriptors_update(struct zink_context *ctx, bool is_compute)
}
}
ctx->dd->pg[is_compute] = pg;
if (pg->dd->bindless && unlikely(!ctx->dd->bindless_bound)) {
VKCTX(CmdBindDescriptorSets)(batch->state->cmdbuf, bp,
pg->layout, ZINK_DESCRIPTOR_BINDLESS, 1, &ctx->dd->bindless_set,
0, NULL);
ctx->dd->bindless_bound = true;
}
}
void
@ -1752,3 +1765,110 @@ zink_descriptor_util_init_fbfetch(struct zink_context *ctx)
if (screen->descriptor_mode != ZINK_DESCRIPTOR_MODE_LAZY)
zink_descriptor_pool_init(ctx);
}
/* map a bindless set binding index to its Vulkan descriptor type;
 * order matches the bindings created in zink_descriptors_init_bindless()
 */
ALWAYS_INLINE static VkDescriptorType
type_from_bindless_index(unsigned idx)
{
   static const VkDescriptorType types[4] = {
      VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
      VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
      VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
      VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
   };
   if (idx >= 4)
      unreachable("unknown index");
   return types[idx];
}
/* create the (single, context-lifetime) bindless descriptor set: a layout with
 * 4 ZINK_MAX_BINDLESS_HANDLES-sized arrays (one per descriptor type), a pool,
 * and one set allocated from it; no-op if already created.
 * NOTE(review): on layout/pool creation failure this returns early, leaving
 * bindless_set unallocated — callers presumably fall back, verify
 */
void
zink_descriptors_init_bindless(struct zink_context *ctx)
{
if (ctx->dd->bindless_set)
return;
struct zink_screen *screen = zink_screen(ctx->base.screen);
VkDescriptorSetLayoutBinding bindings[4];
const unsigned num_bindings = 4;
VkDescriptorSetLayoutCreateInfo dcslci = {0};
dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
dcslci.pNext = NULL;
VkDescriptorSetLayoutBindingFlagsCreateInfo fci = {0};
VkDescriptorBindingFlags flags[4];
dcslci.pNext = &fci;
dcslci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT;
fci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO;
fci.bindingCount = num_bindings;
fci.pBindingFlags = flags;
/* VK_EXT_descriptor_indexing flags: allow updating the live set and leaving
 * unused array members unbound
 */
for (unsigned i = 0; i < num_bindings; i++) {
flags[i] = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT | VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT;
}
for (unsigned i = 0; i < num_bindings; i++) {
bindings[i].binding = i;
bindings[i].descriptorType = type_from_bindless_index(i);
bindings[i].descriptorCount = ZINK_MAX_BINDLESS_HANDLES;
bindings[i].stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_COMPUTE_BIT;
bindings[i].pImmutableSamplers = NULL;
}
dcslci.bindingCount = num_bindings;
dcslci.pBindings = bindings;
if (VKSCR(CreateDescriptorSetLayout)(screen->dev, &dcslci, 0, &ctx->dd->bindless_layout) != VK_SUCCESS) {
debug_printf("vkCreateDescriptorSetLayout failed\n");
return;
}
VkDescriptorPoolCreateInfo dpci = {0};
VkDescriptorPoolSize sizes[4];
for (unsigned i = 0; i < 4; i++) {
sizes[i].type = type_from_bindless_index(i);
sizes[i].descriptorCount = ZINK_MAX_BINDLESS_HANDLES;
}
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
dpci.pPoolSizes = sizes;
dpci.poolSizeCount = 4;
dpci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT;
dpci.maxSets = 1;
if (VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &ctx->dd->bindless_pool) != VK_SUCCESS) {
debug_printf("vkCreateDescriptorPool failed\n");
return;
}
zink_descriptor_util_alloc_sets(screen, ctx->dd->bindless_layout, ctx->dd->bindless_pool, &ctx->dd->bindless_set, 1);
}
void
zink_descriptors_deinit_bindless(struct zink_context *ctx)
{
   /* tear down the bindless layout/pool created lazily on first bindless-using program */
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   /* the set itself is freed implicitly with its pool */
   if (ctx->dd->bindless_pool)
      VKSCR(DestroyDescriptorPool)(screen->dev, ctx->dd->bindless_pool, NULL);
   if (ctx->dd->bindless_layout)
      VKSCR(DestroyDescriptorSetLayout)(screen->dev, ctx->dd->bindless_layout, NULL);
}
void
zink_descriptors_update_bindless(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   /* i==0 -> textures, i==1 -> images; each has its own dirty flag and update queue */
   for (unsigned i = 0; i < 2; i++) {
      if (!ctx->di.bindless_dirty[i])
         continue;
      /* drain every queued handle update for this array */
      while (util_dynarray_contains(&ctx->di.bindless[i].updates, uint32_t)) {
         uint32_t handle = util_dynarray_pop(&ctx->di.bindless[i].updates, uint32_t);
         bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
         /* zero-init: the descriptor-info members not used by this write
          * (e.g. pBufferInfo, and whichever of pImageInfo/pTexelBufferView
          * is unused) must not be left indeterminate on the stack, or
          * validation layers / defensive drivers may read garbage
          */
         VkWriteDescriptorSet wd = {0};
         wd.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wd.dstSet = ctx->dd->bindless_set;
         /* binding layout interleaves: even bindings are image-type, odd are buffer-type */
         wd.dstBinding = is_buffer ? i * 2 + 1 : i * 2;
         /* buffer handles are offset by ZINK_MAX_BINDLESS_HANDLES (see ZINK_BINDLESS_IS_BUFFER) */
         wd.dstArrayElement = is_buffer ? handle - ZINK_MAX_BINDLESS_HANDLES : handle;
         wd.descriptorCount = 1;
         wd.descriptorType = type_from_bindless_index(wd.dstBinding);
         if (is_buffer)
            wd.pTexelBufferView = &ctx->di.bindless[i].buffer_infos[wd.dstArrayElement];
         else
            wd.pImageInfo = &ctx->di.bindless[i].img_infos[handle];
         /* the set was created with UPDATE_AFTER_BIND, so this is legal while bound */
         VKSCR(UpdateDescriptorSets)(screen->dev, 1, &wd, 0, NULL);
      }
   }
   ctx->di.any_bindless_dirty = 0;
}

View file

@ -46,10 +46,13 @@ enum zink_descriptor_type {
ZINK_DESCRIPTOR_TYPE_SSBO,
ZINK_DESCRIPTOR_TYPE_IMAGE,
ZINK_DESCRIPTOR_TYPES,
ZINK_DESCRIPTOR_BINDLESS,
};
#define ZINK_MAX_DESCRIPTORS_PER_TYPE (32 * ZINK_SHADER_COUNT)
#define ZINK_BINDLESS_IS_BUFFER(HANDLE) (HANDLE >= ZINK_MAX_BINDLESS_HANDLES)
struct zink_descriptor_refs {
struct util_dynarray refs;
};
@ -109,7 +112,6 @@ struct zink_descriptor_reference {
bool *invalid;
};
struct zink_descriptor_data {
struct zink_descriptor_state gfx_descriptor_states[ZINK_SHADER_COUNT]; // keep incremental hashes here
struct zink_descriptor_state descriptor_states[2]; // gfx, compute
@ -129,6 +131,11 @@ struct zink_descriptor_data {
struct zink_descriptor_layout *dummy_dsl;
VkDescriptorSet dummy_set;
VkDescriptorSetLayout bindless_layout;
VkDescriptorPool bindless_pool;
VkDescriptorSet bindless_set;
bool bindless_bound;
bool changed[2][ZINK_DESCRIPTOR_TYPES + 1];
bool has_fbfetch;
struct zink_program *pg[2]; //gfx, compute
@ -136,6 +143,7 @@ struct zink_descriptor_data {
struct zink_program_descriptor_data {
uint8_t push_usage;
bool bindless;
VkDescriptorPoolSize sizes[6]; //zink_descriptor_size_index
struct zink_descriptor_layout_key *layout_key[ZINK_DESCRIPTOR_TYPES]; //push set doesn't need one
uint8_t binding_usage;
@ -211,7 +219,12 @@ void
zink_descriptor_util_init_null_set(struct zink_context *ctx, VkDescriptorSet desc_set);
VkImageLayout
zink_descriptor_util_image_layout_eval(const struct zink_resource *res, bool is_compute);
void
zink_descriptors_init_bindless(struct zink_context *ctx);
void
zink_descriptors_deinit_bindless(struct zink_context *ctx);
void
zink_descriptors_update_bindless(struct zink_context *ctx);
/* these two can't be called in lazy mode */
void
zink_descriptor_set_refs_clear(struct zink_descriptor_refs *refs, void *ptr);

View file

@ -204,7 +204,10 @@ zink_descriptor_program_init_lazy(struct zink_context *ctx, struct zink_program
has_bindings |= BITFIELD_BIT(j);
}
}
pg->dd->bindless |= shader->bindless;
}
if (pg->dd->bindless)
zink_descriptors_init_bindless(ctx);
pg->dd->binding_usage = has_bindings;
if (!has_bindings && !push_count) {
ralloc_free(pg->dd);
@ -233,6 +236,19 @@ zink_descriptor_program_init_lazy(struct zink_context *ctx, struct zink_program
for (unsigned i = 0; i < ARRAY_SIZE(pg->dd->sizes); i++)
pg->dd->sizes[i].descriptorCount *= screen->descriptor_mode == ZINK_DESCRIPTOR_MODE_LAZY ? MAX_LAZY_DESCRIPTORS : ZINK_DEFAULT_MAX_DESCS;
}
/* TODO: make this dynamic? */
if (pg->dd->bindless) {
pg->num_dsl = ZINK_DESCRIPTOR_BINDLESS + 1;
pg->dsl[ZINK_DESCRIPTOR_BINDLESS] = ctx->dd->bindless_layout;
for (unsigned i = 0; i < ZINK_DESCRIPTOR_BINDLESS; i++) {
if (!pg->dsl[i]) {
/* inject a null dsl */
pg->dsl[i] = ctx->dd->dummy_dsl->layout;
if (i != ZINK_DESCRIPTOR_TYPES)
pg->dd->binding_usage |= BITFIELD_BIT(i);
}
}
}
pg->layout = zink_pipeline_layout_create(screen, pg, &pg->compat_id);
if (!pg->layout)
@ -261,6 +277,7 @@ zink_descriptor_program_init_lazy(struct zink_context *ctx, struct zink_program
bool is_push = i == 0;
/* no need for empty templates */
if (pg->dsl[i] == ctx->dd->dummy_dsl->layout ||
pg->dsl[i] == ctx->dd->bindless_layout ||
(!is_push && pg->dd->layouts[i]->desc_template))
continue;
template[i].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO;
@ -558,6 +575,12 @@ zink_descriptors_update_lazy(struct zink_context *ctx, bool is_compute)
bool need_push = pg->dd->push_usage &&
(dd_lazy(ctx)->push_state_changed[is_compute] || batch_changed);
zink_descriptors_update_lazy_masked(ctx, is_compute, changed_sets, need_push, true);
if (pg->dd->bindless && unlikely(!ctx->dd->bindless_bound)) {
VKCTX(CmdBindDescriptorSets)(ctx->batch.state->cmdbuf, is_compute ? VK_PIPELINE_BIND_POINT_COMPUTE : VK_PIPELINE_BIND_POINT_GRAPHICS,
pg->layout, ZINK_DESCRIPTOR_BINDLESS, 1, &ctx->dd->bindless_set,
0, NULL);
ctx->dd->bindless_bound = true;
}
}
void
@ -694,6 +717,7 @@ zink_descriptors_init_lazy(struct zink_context *ctx)
zink_descriptor_util_alloc_sets(screen, ctx->dd->dummy_dsl->layout,
ctx->dd->dummy_pool, &ctx->dd->dummy_set, 1);
zink_descriptor_util_init_null_set(ctx, ctx->dd->dummy_set);
return true;
}

View file

@ -753,6 +753,9 @@ zink_draw_vbo(struct pipe_context *pctx,
if (zink_program_has_descriptors(&ctx->curr_program->base))
screen->descriptors_update(ctx, false);
if (ctx->di.any_bindless_dirty && ctx->curr_program->base.dd->bindless)
zink_descriptors_update_bindless(ctx);
if (reads_basevertex) {
unsigned draw_mode_is_indexed = index_size > 0;
VKCTX(CmdPushConstants)(batch->state->cmdbuf, ctx->curr_program->base.layout, VK_SHADER_STAGE_VERTEX_BIT,
@ -865,6 +868,8 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
if (zink_program_has_descriptors(&ctx->curr_compute->base))
screen->descriptors_update(ctx, true);
if (ctx->di.any_bindless_dirty && ctx->curr_compute->base.dd->bindless)
zink_descriptors_update_bindless(ctx);
zink_program_update_compute_pipeline_state(ctx, ctx->curr_compute, info->block);
VkPipeline prev_pipeline = ctx->compute_pipeline_state.pipeline;

View file

@ -86,7 +86,7 @@ struct zink_program {
uint32_t compat_id;
VkPipelineLayout layout;
VkDescriptorSetLayout dsl[ZINK_DESCRIPTOR_TYPES + 1]; // one for each type + push
VkDescriptorSetLayout dsl[ZINK_DESCRIPTOR_TYPES + 2]; // one for each type + push + bindless
unsigned num_dsl;
bool removed;

View file

@ -119,6 +119,7 @@ struct zink_resource {
uint32_t sampler_binds[PIPE_SHADER_TYPES];
uint16_t image_bind_count[2]; //gfx, compute
uint16_t write_bind_count[2]; //gfx, compute
uint16_t bindless[2]; //tex, img
union {
uint16_t bind_count[2]; //gfx, compute
uint32_t all_binds;