zink: Defer freeing sparse backing buffers.

Sparse backing buffers were destroyed immediately after issuing the
unbind call, which violates the Vulkan spec: the destroy call must not
happen before the unbind semaphore has signaled.

To fix this, keep a reference to the buffers being unbound within the
batch. This keeps the backing buffer alive long enough to avoid a
use-after-free; a standalone sketch of the pattern follows the commit
metadata below. As described in the code comments, we don't need to
reference every backing page used in the batch, since the resource keeps
references to committed pages until they are unbound, and the unbind
path is now handled correctly.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26171>
Tatsuyuki Ishi, 2023-11-13 17:47:31 +09:00, committed by Marge Bot
parent 3ee283e455
commit 729ce08815
5 changed files with 34 additions and 7 deletions
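Below is a minimal, self-contained sketch of the deferred-free pattern described above: take an extra reference on a backing buffer when its pages are unbound, park it in the per-batch state, and drop the reference only when the batch is reset after completion. The names here (`toy_bo`, `toy_batch_state`, `track_freed_backing_bo`) are hypothetical stand-ins for illustration, not the zink API; in the actual change the same roles are played by `zink_bo`, `freed_sparse_backing_bos`, `track_freed_sparse_bo()` and `zink_reset_batch_state()`, as shown in the diff below.

```c
/* Sketch only: illustrative stand-ins, not the actual zink code. */
#include <stdio.h>
#include <stdlib.h>

struct toy_bo {
   int refcount;
   const char *name;
};

static void toy_bo_ref(struct toy_bo *bo)
{
   bo->refcount++;
}

static void toy_bo_unref(struct toy_bo *bo)
{
   if (--bo->refcount == 0) {
      printf("destroying %s (safe: the batch that unbound it has finished)\n", bo->name);
      free(bo);
   }
}

/* Per-batch list of backing buffers whose pages were unbound in this batch. */
struct toy_batch_state {
   struct toy_bo **freed_backing_bos;
   unsigned num_freed;
   unsigned cap;
};

/* Called at de-commit time: take a reference so the buffer outlives the
 * sparse-unbind operation queued in this batch. */
static void track_freed_backing_bo(struct toy_batch_state *bs, struct toy_bo *bo)
{
   if (bs->num_freed == bs->cap) {
      bs->cap = bs->cap ? bs->cap * 2 : 4;
      bs->freed_backing_bos = realloc(bs->freed_backing_bos,
                                      bs->cap * sizeof(*bs->freed_backing_bos));
   }
   toy_bo_ref(bo);
   bs->freed_backing_bos[bs->num_freed++] = bo;
}

/* Called once the batch (and thus the unbind semaphore) has completed:
 * only now is it legal to drop the deferred references. */
static void reset_batch_state(struct toy_batch_state *bs)
{
   for (unsigned i = 0; i < bs->num_freed; i++)
      toy_bo_unref(bs->freed_backing_bos[i]);
   bs->num_freed = 0;
}

int main(void)
{
   struct toy_batch_state bs = {0};
   struct toy_bo *page = calloc(1, sizeof(*page));
   page->name = "sparse backing page";
   page->refcount = 1;               /* reference held by the committed resource */

   /* De-commit: the resource drops its reference, but the batch keeps one. */
   track_freed_backing_bo(&bs, page);
   toy_bo_unref(page);               /* the resource's reference goes away here */

   /* ... batch is submitted; the GPU signals the unbind semaphore ... */

   reset_batch_state(&bs);           /* last reference dropped, buffer freed */
   free(bs.freed_backing_bos);
   return 0;
}
```

The point of the design is that once pages are de-committed, the batch state rather than the resource owns the last reference, so destruction is naturally ordered after completion of the batch that waits on the unbind semaphore.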


@@ -145,6 +145,11 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
zink_batch_descriptor_reset(screen, bs);
util_dynarray_foreach(&bs->freed_sparse_backing_bos, struct zink_bo*, bo) {
zink_bo_unref(screen, *bo);
}
util_dynarray_clear(&bs->freed_sparse_backing_bos);
/* programs are refcounted and batch-tracked */
set_foreach_remove(&bs->programs, entry) {
struct zink_program *pg = (struct zink_program*)entry->key;
@@ -301,6 +306,7 @@ zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs
free(bs->real_objs.objs);
free(bs->slab_objs.objs);
free(bs->sparse_objs.objs);
util_dynarray_fini(&bs->freed_sparse_backing_bos);
util_dynarray_fini(&bs->dead_querypools);
util_dynarray_fini(&bs->dgc.pipelines);
util_dynarray_fini(&bs->dgc.layouts);
@@ -399,6 +405,7 @@ create_batch_state(struct zink_context *ctx)
util_dynarray_init(&bs->fd_wait_semaphore_stages, NULL);
util_dynarray_init(&bs->zombie_samplers, NULL);
util_dynarray_init(&bs->dead_framebuffers, NULL);
util_dynarray_init(&bs->freed_sparse_backing_bos, NULL);
util_dynarray_init(&bs->unref_resources, NULL);
util_dynarray_init(&bs->acquires, NULL);
util_dynarray_init(&bs->acquire_flags, NULL);
@@ -1022,7 +1029,15 @@ zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resourc
if (!(res->base.b.flags & PIPE_RESOURCE_FLAG_SPARSE)) {
bs->resource_size += res->obj->size;
} else {
// TODO: check backing pages
/* Sparse backing pages are not directly referenced by the batch as
* there can be a lot of them.
* Instead, they are kept referenced in one of two ways:
* - While they are committed, they are directly referenced from the
*   resource's state.
* - Upon de-commit, they are added to the freed_sparse_backing_bos
*   list, which defers destroying the backing buffer until the batch
*   that performs the unbind finishes.
*/
}
check_oom_flush(batch->state->ctx, batch);
batch->has_work = true;


@@ -740,6 +740,14 @@ zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo)
}
}
/* see comment in zink_batch_reference_resource_move for how references on sparse backing buffers are organized */
static void
track_freed_sparse_bo(struct zink_context *ctx, struct zink_sparse_backing *backing)
{
pipe_reference(NULL, &backing->bo->base.reference);
util_dynarray_append(&ctx->batch.state->freed_sparse_backing_bos, struct zink_bo*, backing->bo);
}
static VkSemaphore
buffer_commit_single(struct zink_screen *screen, struct zink_resource *res, struct zink_bo *bo, uint32_t bo_offset, uint32_t offset, uint32_t size, bool commit, VkSemaphore wait)
{
@@ -776,9 +784,10 @@ buffer_commit_single(struct zink_screen *screen, struct zink_resource *res, stru
}
static bool
buffer_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit, VkSemaphore *sem)
buffer_bo_commit(struct zink_context *ctx, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit, VkSemaphore *sem)
{
bool ok = true;
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_bo *bo = res->obj->bo;
assert(offset % ZINK_SPARSE_BUFFER_PAGE_SIZE == 0);
assert(offset <= bo->base.size);
@@ -877,6 +886,7 @@ buffer_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t
span_pages++;
}
track_freed_sparse_bo(ctx, backing);
if (!sparse_backing_free(screen, bo, backing, backing_start, span_pages)) {
/* Couldn't allocate tracking data structures, so we have to leak */
fprintf(stderr, "zink: leaking sparse backing memory\n");
@@ -947,9 +957,10 @@ texture_commit_miptail(struct zink_screen *screen, struct zink_resource *res, st
}
bool
zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem)
zink_bo_commit(struct zink_context *ctx, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem)
{
bool ok = true;
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_bo *bo = res->obj->bo;
VkSemaphore cur_sem = VK_NULL_HANDLE;
@@ -959,7 +970,7 @@ zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned l
simple_mtx_lock(&screen->queue_lock);
simple_mtx_lock(&bo->lock);
if (res->base.b.target == PIPE_BUFFER) {
ok = buffer_bo_commit(screen, res, box->x, box->width, commit, &cur_sem);
ok = buffer_bo_commit(ctx, res, box->x, box->width, commit, &cur_sem);
goto out;
}


@@ -140,7 +140,7 @@ void
zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo);
bool
zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem);
zink_bo_commit(struct zink_context *ctx, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit, VkSemaphore *sem);
static ALWAYS_INLINE bool
zink_bo_has_unflushed_usage(const struct zink_bo *bo)


@@ -4774,14 +4774,13 @@ zink_resource_commit(struct pipe_context *pctx, struct pipe_resource *pres, unsi
{
struct zink_context *ctx = zink_context(pctx);
struct zink_resource *res = zink_resource(pres);
struct zink_screen *screen = zink_screen(pctx->screen);
/* if any current usage exists, flush the queue */
if (zink_resource_has_unflushed_usage(res))
zink_flush_queue(ctx);
VkSemaphore sem = VK_NULL_HANDLE;
bool ret = zink_bo_commit(screen, res, level, box, commit, &sem);
bool ret = zink_bo_commit(ctx, res, level, box, commit, &sem);
if (ret) {
if (sem)
zink_batch_add_wait_semaphore(&ctx->batch, sem);


@@ -652,6 +652,8 @@ struct zink_batch_state {
struct set active_queries; /* zink_query objects which were active at some point in this batch */
struct util_dynarray dead_querypools;
struct util_dynarray freed_sparse_backing_bos;
struct zink_batch_descriptor_data dd;
VkDeviceSize resource_size;