zink: remove batch tracking/usage from view types

instead of incurring all the overhead of tracking lifetimes for these,
it makes more sense to just let them be deleted whenever and then store
the vk object onto its parent image/buffer to be destroyed when that gets
freed, as the parent object's lifetime will always be >= the view's lifetime

Reviewed-by: Adam Jackson <ajax@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/18664>
Mike Blumenkrantz 2022-09-01 09:34:41 -04:00 committed by Marge Bot
parent 9f9dd4f2ac
commit f6bbd7875a
6 changed files with 21 additions and 145 deletions
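
Conceptually the change works like this: when a view wrapper (surface, buffer view, sampler/image view) is freed, its Vulkan handle is simply appended, under a lock, to a list on the parent resource object, and all parked handles are destroyed in bulk when the parent object itself is destroyed; since the parent always outlives its views, no per-view batch tracking is needed. Below is a minimal, standalone C sketch of that idea. The names parent_object, view_handle, and defer_view_destruction are invented for illustration only; the real patch uses struct zink_resource_object's new view_lock/views fields with simple_mtx_t and util_dynarray, as the hunks that follow show.

/* Standalone sketch of the deferred-destruction scheme; names are
 * illustrative, not the driver's actual API. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t view_handle; /* stand-in for VkImageView / VkBufferView */

struct parent_object {
   pthread_mutex_t view_lock;  /* views can be freed from any thread */
   view_handle *views;         /* handles whose wrapper objects are already gone */
   size_t num_views, cap_views;
};

/* view wrapper destruction: no lifetime tracking, just park the handle */
static void defer_view_destruction(struct parent_object *obj, view_handle view)
{
   pthread_mutex_lock(&obj->view_lock);
   if (obj->num_views == obj->cap_views) {
      obj->cap_views = obj->cap_views ? obj->cap_views * 2 : 8;
      obj->views = realloc(obj->views, obj->cap_views * sizeof(view_handle));
   }
   obj->views[obj->num_views++] = view;
   pthread_mutex_unlock(&obj->view_lock);
}

/* parent destruction: the parent outlives every view created from it, so all
 * parked handles can safely be destroyed here */
static void destroy_parent(struct parent_object *obj, void (*destroy_handle)(view_handle))
{
   for (size_t i = 0; i < obj->num_views; i++)
      destroy_handle(obj->views[i]);
   free(obj->views);
   pthread_mutex_destroy(&obj->view_lock);
}

static void fake_destroy(view_handle view)
{
   printf("destroying view handle %llu\n", (unsigned long long)view);
}

int main(void)
{
   struct parent_object obj = {0};
   pthread_mutex_init(&obj.view_lock, NULL);
   defer_view_destruction(&obj, 1);  /* e.g. a buffer view wrapper was freed */
   defer_view_destruction(&obj, 2);  /* e.g. an image view wrapper was freed */
   destroy_parent(&obj, fake_destroy);
   return 0;
}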

View file

@@ -72,17 +72,6 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
zink_prune_query(screen, bs, query);
}
set_foreach_remove(&bs->surfaces, entry) {
struct zink_surface *surf = (struct zink_surface *)entry->key;
zink_batch_usage_unset(&surf->batch_uses, bs);
zink_surface_reference(screen, &surf, NULL);
}
set_foreach_remove(&bs->bufferviews, entry) {
struct zink_buffer_view *buffer_view = (struct zink_buffer_view *)entry->key;
zink_batch_usage_unset(&buffer_view->batch_uses, bs);
zink_buffer_view_reference(screen, &buffer_view, NULL);
}
util_dynarray_foreach(&bs->dead_framebuffers, struct zink_framebuffer*, fb) {
zink_framebuffer_reference(screen, fb, NULL);
}
@@ -245,8 +234,6 @@ create_batch_state(struct zink_context *ctx)
bs->ctx = ctx;
SET_CREATE_OR_FAIL(&bs->surfaces);
SET_CREATE_OR_FAIL(&bs->bufferviews);
SET_CREATE_OR_FAIL(&bs->programs);
SET_CREATE_OR_FAIL(&bs->active_queries);
util_dynarray_init(&bs->wait_semaphores, NULL);
@@ -678,38 +665,6 @@ zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resourc
return false;
}
void
zink_batch_reference_bufferview(struct zink_batch *batch, struct zink_buffer_view *buffer_view)
{
if (!batch_ptr_add_usage(batch, &batch->state->bufferviews, buffer_view))
return;
pipe_reference(NULL, &buffer_view->reference);
batch->has_work = true;
}
void
zink_batch_reference_surface(struct zink_batch *batch, struct zink_surface *surface)
{
if (!batch_ptr_add_usage(batch, &batch->state->surfaces, surface))
return;
struct pipe_surface *surf = NULL;
pipe_surface_reference(&surf, &surface->base);
batch->has_work = true;
}
void
zink_batch_reference_sampler_view(struct zink_batch *batch,
struct zink_sampler_view *sv)
{
if (sv->base.target == PIPE_BUFFER)
zink_batch_reference_bufferview(batch, sv->buffer_view);
else {
zink_batch_reference_surface(batch, sv->image_view);
if (sv->cube_array)
zink_batch_reference_surface(batch, sv->cube_array);
}
}
void
zink_batch_reference_program(struct zink_batch *batch,
struct zink_program *pg)
@@ -722,16 +677,6 @@ zink_batch_reference_program(struct zink_batch *batch,
batch->has_work = true;
}
void
zink_batch_reference_image_view(struct zink_batch *batch,
struct zink_image_view *image_view)
{
if (image_view->base.resource->target == PIPE_BUFFER)
zink_batch_reference_bufferview(batch, image_view->buffer_view);
else
zink_batch_reference_surface(batch, image_view->surface);
}
bool
zink_screen_usage_check_completion(struct zink_screen *screen, const struct zink_batch_usage *u)
{

View file

@@ -74,22 +74,10 @@ zink_batch_reference_resource(struct zink_batch *batch, struct zink_resource *re
bool
zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resource *res);
void
zink_batch_reference_sampler_view(struct zink_batch *batch,
struct zink_sampler_view *sv);
void
zink_batch_reference_program(struct zink_batch *batch,
struct zink_program *pg);
void
zink_batch_reference_image_view(struct zink_batch *batch,
struct zink_image_view *image_view);
void
zink_batch_reference_bufferview(struct zink_batch *batch, struct zink_buffer_view *buffer_view);
void
zink_batch_reference_surface(struct zink_batch *batch, struct zink_surface *surface);
void
debug_describe_zink_batch_state(char *buf, const struct zink_batch_state *ptr);

View file

@@ -1049,8 +1049,10 @@ zink_destroy_buffer_view(struct zink_screen *screen, struct zink_buffer_view *bu
assert(he);
_mesa_hash_table_remove(&res->bufferview_cache, he);
simple_mtx_unlock(&res->bufferview_mtx);
simple_mtx_lock(&res->obj->view_lock);
util_dynarray_append(&res->obj->views, VkBufferView, buffer_view->buffer_view);
simple_mtx_unlock(&res->obj->view_lock);
pipe_resource_reference(&buffer_view->pres, NULL);
VKSCR(DestroyBufferView)(screen->dev, buffer_view->buffer_view, NULL);
FREE(buffer_view);
}
@@ -1580,15 +1582,11 @@ unbind_shader_image(struct zink_context *ctx, gl_shader_stage stage, unsigned sl
if (image_view->base.resource->target == PIPE_BUFFER) {
unbind_buffer_descriptor_stage(res, stage);
unbind_buffer_descriptor_reads(res, stage);
if (zink_batch_usage_exists(image_view->buffer_view->batch_uses))
zink_batch_reference_bufferview(&ctx->batch, image_view->buffer_view);
zink_buffer_view_reference(zink_screen(ctx->base.screen), &image_view->buffer_view, NULL);
} else {
unbind_descriptor_stage(res, stage);
if (!res->image_bind_count[is_compute])
check_for_layout_update(ctx, res, is_compute);
if (zink_batch_usage_exists(image_view->surface->batch_uses))
zink_batch_reference_surface(&ctx->batch, image_view->surface);
zink_surface_reference(zink_screen(ctx->base.screen), &image_view->surface, NULL);
}
image_view->base.resource = NULL;
@@ -1704,7 +1702,6 @@ zink_set_shader_images(struct pipe_context *pctx,
unbind_shader_image(ctx, p_stage, start_slot + i);
}
image_view->buffer_view = bv;
zink_batch_usage_set(&image_view->buffer_view->batch_uses, ctx->batch.state);
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, access,
res->gfx_barrier);
zink_batch_resource_usage_set(&ctx->batch, res,
@@ -1719,7 +1716,6 @@ zink_set_shader_images(struct pipe_context *pctx,
}
image_view->surface = surface;
finalize_image_bind(ctx, res, p_stage == MESA_SHADER_COMPUTE);
zink_batch_usage_set(&image_view->surface->batch_uses, ctx->batch.state);
zink_batch_resource_usage_set(&ctx->batch, res,
zink_resource_access_is_write(access), false);
}
@@ -1748,15 +1744,6 @@ zink_set_shader_images(struct pipe_context *pctx,
zink_context_invalidate_descriptor_state(ctx, p_stage, ZINK_DESCRIPTOR_TYPE_IMAGE, start_slot, count);
}
ALWAYS_INLINE static void
check_samplerview_for_batch_ref(struct zink_context *ctx, struct zink_sampler_view *sv)
{
const struct zink_resource *res = zink_resource(sv->base.texture);
if ((res->obj->is_buffer && zink_batch_usage_exists(sv->buffer_view->batch_uses)) ||
(!res->obj->is_buffer && zink_batch_usage_exists(sv->image_view->batch_uses)))
zink_batch_reference_sampler_view(&ctx->batch, sv);
}
ALWAYS_INLINE static void
unbind_samplerview(struct zink_context *ctx, gl_shader_stage stage, unsigned slot)
{
@@ -1765,7 +1752,6 @@ unbind_samplerview(struct zink_context *ctx, gl_shader_stage stage, unsigned slo
return;
struct zink_resource *res = zink_resource(sv->base.texture);
res->sampler_bind_count[stage == MESA_SHADER_COMPUTE]--;
check_samplerview_for_batch_ref(ctx, sv);
update_res_bind_count(ctx, res, stage == MESA_SHADER_COMPUTE, true);
res->sampler_binds[stage] &= ~BITFIELD_BIT(slot);
if (res->obj->is_buffer) {
@@ -1806,8 +1792,6 @@ zink_set_sampler_views(struct pipe_context *pctx,
res->sampler_bind_count[shader_type == MESA_SHADER_COMPUTE]++;
res->gfx_barrier |= zink_pipeline_flags_from_pipe_stage(shader_type);
res->barrier_access[shader_type == MESA_SHADER_COMPUTE] |= VK_ACCESS_SHADER_READ_BIT;
} else if (a != b) {
check_samplerview_for_batch_ref(ctx, a);
}
if (res->base.b.target == PIPE_BUFFER) {
if (b->buffer_view->bvci.buffer != res->obj->buffer) {
@@ -1819,13 +1803,10 @@ zink_set_sampler_views(struct pipe_context *pctx,
bvci.buffer = res->obj->buffer;
struct zink_buffer_view *buffer_view = get_buffer_view(ctx, res, &bvci);
assert(buffer_view != b->buffer_view);
if (zink_batch_usage_exists(b->buffer_view->batch_uses))
zink_batch_reference_bufferview(&ctx->batch, b->buffer_view);
zink_buffer_view_reference(zink_screen(ctx->base.screen), &b->buffer_view, NULL);
b->buffer_view = buffer_view;
update = true;
}
zink_batch_usage_set(&b->buffer_view->batch_uses, ctx->batch.state);
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, VK_ACCESS_SHADER_READ_BIT,
res->gfx_barrier);
if (!a || a->buffer_view->buffer_view != b->buffer_view->buffer_view)
@@ -1847,10 +1828,8 @@ zink_set_sampler_views(struct pipe_context *pctx,
flush_pending_clears(ctx, res);
if (b->cube_array) {
ctx->di.cubes[shader_type] |= BITFIELD_BIT(start_slot + i);
zink_batch_usage_set(&b->cube_array->batch_uses, ctx->batch.state);
}
check_for_layout_update(ctx, res, shader_type == MESA_SHADER_COMPUTE);
zink_batch_usage_set(&b->image_view->batch_uses, ctx->batch.state);
if (!a)
update = true;
zink_batch_resource_usage_set(&ctx->batch, res, false, false);
@@ -1929,14 +1908,9 @@ zink_delete_texture_handle(struct pipe_context *pctx, uint64_t handle)
uint32_t h = handle;
util_dynarray_append(&ctx->batch.state->bindless_releases[0], uint32_t, h);
struct zink_resource *res = zink_descriptor_surface_resource(ds);
if (ds->is_buffer) {
if (zink_resource_has_usage(res))
zink_batch_reference_bufferview(&ctx->batch, ds->bufferview);
zink_buffer_view_reference(zink_screen(pctx->screen), &ds->bufferview, NULL);
} else {
if (zink_resource_has_usage(res))
zink_batch_reference_surface(&ctx->batch, ds->surface);
zink_surface_reference(zink_screen(pctx->screen), &ds->surface, NULL);
pctx->delete_sampler_state(pctx, bd->sampler);
}
@@ -1954,8 +1928,6 @@ rebind_bindless_bufferview(struct zink_context *ctx, struct zink_resource *res,
bvci.buffer = res->obj->buffer;
struct zink_buffer_view *buffer_view = get_buffer_view(ctx, res, &bvci);
assert(buffer_view != ds->bufferview);
if (zink_resource_has_usage(res))
zink_batch_reference_bufferview(&ctx->batch, ds->bufferview);
zink_buffer_view_reference(zink_screen(ctx->base.screen), &ds->bufferview, NULL);
ds->bufferview = buffer_view;
}
@@ -2077,14 +2049,9 @@ zink_delete_image_handle(struct pipe_context *pctx, uint64_t handle)
uint32_t h = handle;
util_dynarray_append(&ctx->batch.state->bindless_releases[1], uint32_t, h);
struct zink_resource *res = zink_descriptor_surface_resource(ds);
if (ds->is_buffer) {
if (zink_resource_has_usage(res))
zink_batch_reference_bufferview(&ctx->batch, ds->bufferview);
zink_buffer_view_reference(zink_screen(pctx->screen), &ds->bufferview, NULL);
} else {
if (zink_resource_has_usage(res))
zink_batch_reference_surface(&ctx->batch, ds->surface);
zink_surface_reference(zink_screen(pctx->screen), &ds->surface, NULL);
}
free(ds);
@@ -2507,7 +2474,6 @@ zink_prep_fb_attachment(struct zink_context *ctx, struct zink_surface *surf, uns
} else {
res = zink_resource(surf->base.texture);
zink_batch_resource_usage_set(&ctx->batch, res, true, false);
zink_batch_usage_set(&surf->batch_uses, ctx->batch.state);
}
VkAccessFlags access;
@@ -2669,27 +2635,9 @@ update_resource_refs_for_stage(struct zink_context *ctx, gl_shader_stage stage)
else
res->obj->unordered_read = false;
struct zink_sampler_view *sv = zink_sampler_view(ctx->sampler_views[stage][j]);
struct zink_sampler_state *sampler_state = ctx->sampler_states[stage][j];
struct zink_image_view *iv = &ctx->image_views[stage][j];
if (sampler_state && i == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW && j <= ctx->di.num_samplers[stage])
zink_batch_usage_set(&sampler_state->batch_uses, ctx->batch.state);
if (sv && i == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW && j <= ctx->di.num_sampler_views[stage]) {
if (is_buffer) {
zink_batch_usage_set(&sv->buffer_view->batch_uses, ctx->batch.state);
} else {
zink_batch_usage_set(&sv->image_view->batch_uses, ctx->batch.state);
if (sv->cube_array)
zink_batch_usage_set(&sv->cube_array->batch_uses, ctx->batch.state);
}
zink_batch_reference_sampler_view(batch, sv);
} else if (i == ZINK_DESCRIPTOR_TYPE_IMAGE && j <= ctx->di.num_images[stage]) {
if (is_buffer)
zink_batch_usage_set(&iv->buffer_view->batch_uses, ctx->batch.state);
else
zink_batch_usage_set(&iv->surface->batch_uses, ctx->batch.state);
zink_batch_reference_image_view(batch, iv);
}
}
}
}
@@ -2833,14 +2781,8 @@ unbind_fb_surface(struct zink_context *ctx, struct pipe_surface *surf, unsigned
ctx->dynamic_fb.attachments[idx].imageView = VK_NULL_HANDLE;
if (!surf)
return;
struct zink_surface *transient = zink_transient_surface(surf);
struct zink_resource *res = zink_resource(surf->texture);
if (changed) {
if (zink_batch_usage_exists(zink_csurface(surf)->batch_uses)) {
zink_batch_reference_surface(&ctx->batch, zink_csurface(surf));
if (transient)
zink_batch_reference_surface(&ctx->batch, transient);
}
ctx->rp_changed = true;
}
res->fb_binds--;
@@ -4034,8 +3976,6 @@ rebind_tbo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot)
if (!sampler_view || sampler_view->base.texture->target != PIPE_BUFFER)
return NULL;
struct zink_resource *res = zink_resource(sampler_view->base.texture);
if (zink_batch_usage_exists(sampler_view->buffer_view->batch_uses))
zink_batch_reference_bufferview(&ctx->batch, sampler_view->buffer_view);
VkBufferViewCreateInfo bvci = sampler_view->buffer_view->bvci;
bvci.buffer = res->obj->buffer;
zink_buffer_view_reference(zink_screen(ctx->base.screen), &sampler_view->buffer_view, NULL);
@@ -4052,8 +3992,6 @@ rebind_ibo(struct zink_context *ctx, gl_shader_stage shader, unsigned slot)
struct zink_resource *res = zink_resource(image_view->base.resource);
if (!res || res->base.b.target != PIPE_BUFFER)
return NULL;
if (zink_batch_usage_exists(image_view->buffer_view->batch_uses))
zink_batch_reference_bufferview(&ctx->batch, image_view->buffer_view);
VkBufferViewCreateInfo bvci = image_view->buffer_view->bvci;
bvci.buffer = res->obj->buffer;
zink_buffer_view_reference(zink_screen(ctx->base.screen), &image_view->buffer_view, NULL);

View file

@@ -98,6 +98,13 @@ debug_describe_zink_resource_object(char *buf, const struct zink_resource_object
void
zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *obj)
{
if (obj->is_buffer) {
while (util_dynarray_contains(&obj->views, VkBufferView))
VKSCR(DestroyBufferView)(screen->dev, util_dynarray_pop(&obj->views, VkBufferView), NULL);
} else {
while (util_dynarray_contains(&obj->views, VkImageView))
VKSCR(DestroyImageView)(screen->dev, util_dynarray_pop(&obj->views, VkImageView), NULL);
}
if (obj->is_buffer) {
VKSCR(DestroyBuffer)(screen->dev, obj->buffer, NULL);
VKSCR(DestroyBuffer)(screen->dev, obj->storage_buffer, NULL);
@@ -111,6 +118,7 @@ zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_ob
#endif
}
simple_mtx_destroy(&obj->view_lock);
if (obj->dt) {
FREE(obj->bo); //this is a dummy struct
} else
@@ -564,6 +572,8 @@ resource_object_create(struct zink_screen *screen, const struct pipe_resource *t
struct zink_resource_object *obj = CALLOC_STRUCT(zink_resource_object);
if (!obj)
return NULL;
simple_mtx_init(&obj->view_lock, mtx_plain);
util_dynarray_init(&obj->views, NULL);
obj->last_dt_idx = obj->dt_idx = UINT32_MAX; //TODO: unionize
VkMemoryRequirements reqs = {0};

View file

@@ -323,16 +323,18 @@ zink_destroy_surface(struct zink_screen *screen, struct pipe_surface *psurface)
_mesa_hash_table_remove(&res->surface_cache, he);
simple_mtx_unlock(&res->surface_mtx);
}
simple_mtx_lock(&res->obj->view_lock);
if (surface->simage_view)
VKSCR(DestroyImageView)(screen->dev, surface->simage_view, NULL);
util_dynarray_append(&res->obj->views, VkImageView, surface->simage_view);
if (surface->is_swapchain) {
for (unsigned i = 0; i < surface->old_swapchain_size; i++)
VKSCR(DestroyImageView)(screen->dev, surface->old_swapchain[i], NULL);
util_dynarray_append(&res->obj->views, VkImageView, surface->old_swapchain[i]);
for (unsigned i = 0; i < surface->swapchain_size; i++)
VKSCR(DestroyImageView)(screen->dev, surface->swapchain[i], NULL);
util_dynarray_append(&res->obj->views, VkImageView, surface->swapchain[i]);
free(surface->swapchain);
} else
VKSCR(DestroyImageView)(screen->dev, surface->image_view, NULL);
util_dynarray_append(&res->obj->views, VkImageView, surface->image_view);
simple_mtx_unlock(&res->obj->view_lock);
pipe_resource_reference(&psurface->texture, NULL);
FREE(surface);
}
@@ -362,13 +364,10 @@ zink_rebind_surface(struct zink_context *ctx, struct pipe_surface **psurface)
simple_mtx_lock(&res->surface_mtx);
struct hash_entry *new_entry = _mesa_hash_table_search_pre_hashed(&res->surface_cache, hash, &ivci);
if (zink_batch_usage_exists(surface->batch_uses))
zink_batch_reference_surface(&ctx->batch, surface);
if (new_entry) {
/* reuse existing surface; old one will be cleaned up naturally */
struct zink_surface *new_surface = new_entry->data;
simple_mtx_unlock(&res->surface_mtx);
zink_batch_usage_set(&new_surface->batch_uses, ctx->batch.state);
zink_surface_reference(screen, (struct zink_surface**)psurface, new_surface);
return true;
}
@@ -393,7 +392,6 @@ zink_rebind_surface(struct zink_context *ctx, struct pipe_surface **psurface)
surface->info.flags = res->obj->vkflags;
surface->info.usage = res->obj->vkusage;
surface->info_hash = _mesa_hash_data(&surface->info, sizeof(surface->info));
zink_batch_usage_set(&surface->batch_uses, ctx->batch.state);
simple_mtx_unlock(&res->surface_mtx);
return true;
}

View file

@@ -458,9 +458,6 @@ struct zink_batch_state {
struct zink_resource_object *last_added_obj;
struct util_dynarray swapchain_obj; //this doesn't have a zink_bo and must be handled differently
struct set surfaces;
struct set bufferviews;
struct util_dynarray unref_resources;
struct util_dynarray bindless_releases[2];
@@ -914,6 +911,8 @@ struct zink_resource_object {
unsigned persistent_maps; //if nonzero, requires vkFlushMappedMemoryRanges during batch use
VkBuffer storage_buffer;
simple_mtx_t view_lock;
struct util_dynarray views;
union {
VkBuffer buffer;
@@ -1191,7 +1190,6 @@ struct zink_surface {
VkImageView simage_view;//old iview after storage replacement/rebind
void *obj; //backing resource object
uint32_t hash;
struct zink_batch_usage *batch_uses;
};
/* wrapper object that preserves the gallium expectation of having
@@ -1265,7 +1263,6 @@ struct zink_buffer_view {
VkBufferViewCreateInfo bvci;
VkBufferView buffer_view;
uint32_t hash;
struct zink_batch_usage *batch_uses;
};
struct zink_sampler_view {