zink: add queue locking

sparse binds have to be processed synchronously with cmdbuf recording to
avoid resource object desync in the vk driver, which means they have to be
done in the driver thread instead of the flush thread. this necessitates
adding locking for the queue, since there is now a case where submissions
occur from a different thread

fixes illegal multithreaded usage in KHR-GL46.CommonBugs.CommonBug_SparseBuffersWithCopyOps

cc: mesa-stable

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13597>

Mike Blumenkrantz, 2021-10-29 11:06:34 -04:00, committed by Marge Bot
parent 786167b88c
commit 3137ff4709
4 changed files with 26 additions and 59 deletions
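
The fix follows Vulkan's external-synchronization rule: host access to the queue argument of vkQueueSubmit and vkQueueBindSparse must be externally synchronized, so once sparse binds can be issued from the driver thread while the flush thread submits command buffers, every queue access needs a common lock. Below is a minimal compilable sketch of the pattern, assuming C11 threads in place of Mesa's simple_mtx_t; the locked_queue struct and helpers are hypothetical, not zink code — zink inlines the lock/unlock at each call site, as the hunks below show:

#include <threads.h>
#include <vulkan/vulkan.h>

/* hypothetical stand-ins: zink keeps these on struct zink_screen as
 * screen->queue / screen->thread_queue / screen->queue_lock (simple_mtx_t) */
struct locked_queue {
   VkQueue queue;
   mtx_t lock;
};

/* every submission path funnels through the same lock, since the spec marks
 * the queue argument of vkQueueSubmit/vkQueueBindSparse as externally
 * synchronized */
static VkResult
locked_queue_submit(struct locked_queue *q, uint32_t count,
                    const VkSubmitInfo *si, VkFence fence)
{
   mtx_lock(&q->lock);
   VkResult ret = vkQueueSubmit(q->queue, count, si, fence);
   mtx_unlock(&q->lock);
   return ret;
}

static VkResult
locked_queue_bind_sparse(struct locked_queue *q, uint32_t count,
                         const VkBindSparseInfo *bind, VkFence fence)
{
   mtx_lock(&q->lock);
   VkResult ret = vkQueueBindSparse(q->queue, count, bind, fence);
   mtx_unlock(&q->lock);
   return ret;
}

In the diff, the same three-line lock/call/unlock shape appears in submit_queue, do_commit_single, and noop_submit.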

src/gallium/drivers/zink/zink_batch.c

@@ -418,10 +418,12 @@ submit_queue(void *data, void *gdata, int thread_index)
       VKSCR(FlushMappedMemoryRanges)(screen->dev, 1, &range);
    }
+   simple_mtx_lock(&screen->queue_lock);
    if (VKSCR(QueueSubmit)(bs->queue, 1, &si, bs->fence.fence) != VK_SUCCESS) {
       debug_printf("ZINK: vkQueueSubmit() failed\n");
       bs->is_device_lost = true;
    }
+   simple_mtx_unlock(&screen->queue_lock);
    bs->submit_count++;
 
 end:
    cnd_broadcast(&bs->usage.flush);

src/gallium/drivers/zink/zink_bo.c

@@ -675,72 +675,32 @@ zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo)
    }
 }
 
-static inline struct zink_screen **
-get_screen_ptr_for_commit(uint8_t *mem)
-{
-   return (struct zink_screen**)(mem + sizeof(VkBindSparseInfo) + sizeof(VkSparseBufferMemoryBindInfo) + sizeof(VkSparseMemoryBind));
-}
-
-static bool
-resource_commit(struct zink_screen *screen, VkBindSparseInfo *sparse)
-{
-   VkQueue queue = screen->threaded ? screen->thread_queue : screen->queue;
-
-   VkResult ret = VKSCR(QueueBindSparse)(queue, 1, sparse, VK_NULL_HANDLE);
-   return zink_screen_handle_vkresult(screen, ret);
-}
-
-static void
-submit_resource_commit(void *data, void *gdata, int thread_index)
-{
-   struct zink_screen **screen = get_screen_ptr_for_commit(data);
-   resource_commit(*screen, data);
-   free(data);
-}
-
 static bool
 do_commit_single(struct zink_screen *screen, struct zink_resource *res, struct zink_bo *bo, uint32_t offset, uint32_t size, bool commit)
 {
-
-   uint8_t *mem = malloc(sizeof(VkBindSparseInfo) + sizeof(VkSparseBufferMemoryBindInfo) + sizeof(VkSparseMemoryBind) + sizeof(void*));
-   if (!mem)
-      return false;
-   VkBindSparseInfo *sparse = (void*)mem;
-   sparse->sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
-   sparse->pNext = NULL;
-   sparse->waitSemaphoreCount = 0;
-   sparse->bufferBindCount = 1;
-   sparse->imageOpaqueBindCount = 0;
-   sparse->imageBindCount = 0;
-   sparse->signalSemaphoreCount = 0;
+   VkBindSparseInfo sparse = {0};
+   sparse.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
+   sparse.bufferBindCount = 1;
 
-   VkSparseBufferMemoryBindInfo *sparse_bind = (void*)(mem + sizeof(VkBindSparseInfo));
-   sparse_bind->buffer = res->obj->buffer;
-   sparse_bind->bindCount = 1;
-   sparse->pBufferBinds = sparse_bind;
+   VkSparseBufferMemoryBindInfo sparse_bind;
+   sparse_bind.buffer = res->obj->buffer;
+   sparse_bind.bindCount = 1;
+   sparse.pBufferBinds = &sparse_bind;
 
-   VkSparseMemoryBind *mem_bind = (void*)(mem + sizeof(VkBindSparseInfo) + sizeof(VkSparseBufferMemoryBindInfo));
-   mem_bind->resourceOffset = offset;
-   mem_bind->size = MIN2(res->base.b.width0 - offset, size);
-   mem_bind->memory = commit ? bo->mem : VK_NULL_HANDLE;
-   mem_bind->memoryOffset = 0;
-   mem_bind->flags = 0;
-   sparse_bind->pBinds = mem_bind;
+   VkSparseMemoryBind mem_bind;
+   mem_bind.resourceOffset = offset;
+   mem_bind.size = MIN2(res->base.b.width0 - offset, size);
+   mem_bind.memory = commit ? bo->mem : VK_NULL_HANDLE;
+   mem_bind.memoryOffset = 0;
+   mem_bind.flags = 0;
+   sparse_bind.pBinds = &mem_bind;
 
-   struct zink_screen **ptr = get_screen_ptr_for_commit(mem);
-   *ptr = screen;
+   VkQueue queue = screen->threaded ? screen->thread_queue : screen->queue;
 
-   if (screen->threaded) {
-      /* this doesn't need any kind of fencing because any access to this resource
-       * will be automagically synchronized by queue dispatch */
-      util_queue_add_job(&screen->flush_queue, mem, NULL, submit_resource_commit, NULL, 0);
-   } else {
-      bool ret = resource_commit(screen, sparse);
-      free(sparse);
-      return ret;
-   }
-   return true;
+   simple_mtx_lock(&screen->queue_lock);
+   VkResult ret = VKSCR(QueueBindSparse)(queue, 1, &sparse, VK_NULL_HANDLE);
+   simple_mtx_unlock(&screen->queue_lock);
+   return zink_screen_handle_vkresult(screen, ret);
 }
 
 bool
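
For context on the deleted helpers: a util_queue job receives only a single void *data pointer, so the old threaded path had to pack everything the job touches — the three Vk structs plus the screen pointer that get_screen_ptr_for_commit() recovered — into one heap allocation that outlives the caller's stack. A sketch of that layout, using only the sizes the removed code used (alloc_commit_job is a hypothetical name):

#include <stdint.h>
#include <stdlib.h>
#include <vulkan/vulkan.h>

struct zink_screen; /* opaque for this sketch */

/* one allocation, carved up by byte offsets exactly as the removed code did:
 * [0]                             VkBindSparseInfo
 * [+ BindSparseInfo]              VkSparseBufferMemoryBindInfo
 * [+ SparseBufferMemoryBindInfo]  VkSparseMemoryBind
 * [+ SparseMemoryBind]            struct zink_screen *  (read back by the job) */
static uint8_t *
alloc_commit_job(struct zink_screen *screen)
{
   uint8_t *mem = malloc(sizeof(VkBindSparseInfo) +
                         sizeof(VkSparseBufferMemoryBindInfo) +
                         sizeof(VkSparseMemoryBind) +
                         sizeof(void*));
   if (!mem)
      return NULL;
   *(struct zink_screen **)(mem + sizeof(VkBindSparseInfo) +
                            sizeof(VkSparseBufferMemoryBindInfo) +
                            sizeof(VkSparseMemoryBind)) = screen;
   return mem;
}

Going synchronous removes all of that: the new do_commit_single keeps the three structs on the stack and pays for it only with the queue lock.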

src/gallium/drivers/zink/zink_screen.c

@@ -1122,6 +1122,7 @@ zink_destroy_screen(struct pipe_screen *pscreen)
    if (screen->threaded)
       util_queue_destroy(&screen->flush_queue);
 
+   simple_mtx_destroy(&screen->queue_lock);
    VKSCR(DestroyDevice)(screen->dev, NULL);
    vkDestroyInstance(screen->instance, NULL);
    util_idalloc_mt_fini(&screen->buffer_ids);
@@ -1208,6 +1209,7 @@ update_queue_props(struct zink_screen *screen)
 static void
 init_queue(struct zink_screen *screen)
 {
+   simple_mtx_init(&screen->queue_lock, mtx_plain);
    vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &screen->queue);
    if (screen->threaded && screen->max_queues > 1)
       vkGetDeviceQueue(screen->dev, screen->gfx_queue, 1, &screen->thread_queue);
@@ -1601,11 +1603,13 @@ noop_submit(void *data, void *gdata, int thread_index)
    struct noop_submit_info *n = data;
    VkSubmitInfo si = {0};
    si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+   simple_mtx_lock(&n->screen->queue_lock);
    if (n->VKSCR(QueueSubmit)(n->screen->threaded ? n->screen->thread_queue : n->screen->queue,
                              1, &si, n->fence) != VK_SUCCESS) {
       debug_printf("ZINK: vkQueueSubmit() failed\n");
       n->screen->device_lost = true;
    }
+   simple_mtx_unlock(&n->screen->queue_lock);
 }
 
 bool
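
The noop_submit hunk above is the flush-thread side of the race the CTS case exercises: a copy op on a sparse buffer makes the driver thread call vkQueueBindSparse while the flush thread may be inside vkQueueSubmit on the same queue. A sketch of that interleaving, reusing the hypothetical locked_queue helpers from the first example (error handling elided):

#include <threads.h>
#include <vulkan/vulkan.h>

/* assumes struct locked_queue and the locked_queue_* helpers sketched earlier */

static int
flush_thread_fn(void *arg) /* stands in for zink's util_queue flush thread */
{
   struct locked_queue *q = arg;
   VkSubmitInfo si = { .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO };
   /* batch submission; may run at any point relative to the driver thread */
   locked_queue_submit(q, 1, &si, VK_NULL_HANDLE);
   return 0;
}

static void
commit_while_flushing(struct locked_queue *q, const VkBindSparseInfo *sparse)
{
   thrd_t flush;
   thrd_create(&flush, flush_thread_fn, q);
   /* driver thread: the sparse bind now races the flush thread; the shared
    * lock inside the helpers serializes the two vkQueue* calls */
   locked_queue_bind_sparse(q, 1, sparse, VK_NULL_HANDLE);
   thrd_join(flush, NULL);
}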

src/gallium/drivers/zink/zink_screen.h

@@ -133,6 +133,7 @@ struct zink_screen {
    VkDevice dev;
    VkQueue queue; //gfx+compute
    VkQueue thread_queue; //gfx+compute
+   simple_mtx_t queue_lock;
    VkDebugUtilsMessengerEXT debugUtilsCallbackHandle;
 
    uint32_t cur_custom_border_color_samplers;