lavapipe: enqueue pipeline destruction

This avoids races in llvmpipe related to modification of per-context shader
variant lists, which cause massive amounts of flakiness in CI.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16331>
This commit is contained in:
Mike Blumenkrantz 2022-05-04 09:01:56 -04:00 committed by Marge Bot
parent 269083d94b
commit 7767b2f7b5
3 changed files with 54 additions and 33 deletions

View file

@ -1382,6 +1382,16 @@ VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetPhysicalDeviceProcAddr(
return vk_instance_get_physical_device_proc_addr(&instance->vk, pName);
}
/* Drain the queue's deferred-destruction list: pop every pipeline that
 * lvp_DestroyPipeline() has enqueued and destroy it for real.  Runs under
 * pipeline_lock so concurrent vkDestroyPipeline() calls can keep appending
 * while the queue thread drains.
 */
static void
destroy_pipelines(struct lvp_queue *queue)
{
   simple_mtx_lock(&queue->pipeline_lock);
   while (util_dynarray_contains(&queue->pipeline_destroys, struct lvp_pipeline*)) {
      struct lvp_pipeline *pipeline =
         util_dynarray_pop(&queue->pipeline_destroys, struct lvp_pipeline*);
      lvp_pipeline_destroy(queue->device, pipeline);
   }
   simple_mtx_unlock(&queue->pipeline_lock);
}
static VkResult
lvp_queue_submit(struct vk_queue *vk_queue,
struct vk_queue_submit *submit)
@ -1409,6 +1419,7 @@ lvp_queue_submit(struct vk_queue *vk_queue,
vk_sync_as_lvp_pipe_sync(submit->signals[i].sync);
lvp_pipe_sync_signal_with_fence(queue->device, sync, queue->last_fence);
}
destroy_pipelines(queue);
return VK_SUCCESS;
}
@ -1437,17 +1448,24 @@ lvp_queue_init(struct lvp_device *device, struct lvp_queue *queue,
queue->vk.driver_submit = lvp_queue_submit;
simple_mtx_init(&queue->pipeline_lock, mtx_plain);
util_dynarray_init(&queue->pipeline_destroys, NULL);
return VK_SUCCESS;
}
/* Tear down a lavapipe queue: finish the vk_queue (joins the submit
 * thread), flush any pipelines still awaiting deferred destruction, then
 * release the queue's locks, arrays, and gallium objects.
 *
 * NOTE(review): vk_queue_finish() appears both at the top and at the
 * bottom of this function in this view — this looks like diff-rendering
 * residue (the call was moved up so the submit thread is joined before
 * destroy_pipelines() runs); confirm against the actual file that it is
 * called exactly once.
 */
static void
lvp_queue_finish(struct lvp_queue *queue)
{
vk_queue_finish(&queue->vk);
/* Nothing can enqueue destructions anymore; destroy stragglers now. */
destroy_pipelines(queue);
simple_mtx_destroy(&queue->pipeline_lock);
util_dynarray_fini(&queue->pipeline_destroys);
u_upload_destroy(queue->uploader);
cso_destroy_context(queue->cso);
queue->ctx->destroy(queue->ctx);
vk_queue_finish(&queue->vk);
}
static void

View file

@ -42,17 +42,9 @@
dst = temp; \
} while(0)
VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
VkDevice _device,
VkPipeline _pipeline,
const VkAllocationCallbacks* pAllocator)
void
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline)
{
LVP_FROM_HANDLE(lvp_device, device, _device);
LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);
if (!_pipeline)
return;
if (pipeline->shader_cso[PIPE_SHADER_VERTEX])
device->queue.ctx->delete_vs_state(device->queue.ctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
if (pipeline->shader_cso[PIPE_SHADER_FRAGMENT])
@ -74,7 +66,23 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
ralloc_free(pipeline->mem_ctx);
vk_object_base_finish(&pipeline->base);
vk_free2(&device->vk.alloc, pAllocator, pipeline);
vk_free(&device->vk.alloc, pipeline);
}
/* Deferred vkDestroyPipeline: rather than freeing the pipeline
 * immediately, append it to the device queue's destruction list under
 * pipeline_lock; the queue destroys it later (see destroy_pipelines()).
 *
 * pAllocator is unused here — presumably the pipeline was allocated with
 * the device allocator, so there is nothing allocator-specific to undo at
 * this point; verify against the create path.
 */
VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
   VkDevice                                    _device,
   VkPipeline                                  _pipeline,
   const VkAllocationCallbacks*                pAllocator)
{
   LVP_FROM_HANDLE(lvp_device, device, _device);

   /* Destroying VK_NULL_HANDLE is defined to be a no-op. */
   if (!_pipeline)
      return;

   LVP_FROM_HANDLE(lvp_pipeline, pipeline, _pipeline);

   simple_mtx_lock(&device->queue.pipeline_lock);
   util_dynarray_append(&device->queue.pipeline_destroys, struct lvp_pipeline*, pipeline);
   simple_mtx_unlock(&device->queue.pipeline_lock);
}
static VkResult
@ -1199,8 +1207,7 @@ static VkResult
lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
struct lvp_device *device,
struct lvp_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc)
const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
const VkGraphicsPipelineLibraryCreateInfoEXT *libinfo = vk_find_struct_const(pCreateInfo,
GRAPHICS_PIPELINE_LIBRARY_CREATE_INFO_EXT);
@ -1264,8 +1271,6 @@ lvp_graphics_pipeline_init(struct lvp_pipeline *pipeline,
}
}
if (alloc == NULL)
alloc = &device->vk.alloc;
pipeline->device = device;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
@ -1402,7 +1407,6 @@ lvp_graphics_pipeline_create(
VkDevice _device,
VkPipelineCache _cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
LVP_FROM_HANDLE(lvp_device, device, _device);
@ -1412,7 +1416,7 @@ lvp_graphics_pipeline_create(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
pipeline = vk_zalloc(&device->vk.alloc, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
@ -1420,10 +1424,9 @@ lvp_graphics_pipeline_create(
vk_object_base_init(&device->vk, &pipeline->base,
VK_OBJECT_TYPE_PIPELINE);
uint64_t t0 = os_time_get_nano();
result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo,
pAllocator);
result = lvp_graphics_pipeline_init(pipeline, device, cache, pCreateInfo);
if (result != VK_SUCCESS) {
vk_free2(&device->vk.alloc, pAllocator, pipeline);
vk_free(&device->vk.alloc, pipeline);
return result;
}
@ -1456,7 +1459,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateGraphicsPipelines(
r = lvp_graphics_pipeline_create(_device,
pipelineCache,
&pCreateInfos[i],
pAllocator, &pPipelines[i]);
&pPipelines[i]);
if (r != VK_SUCCESS) {
result = r;
pPipelines[i] = VK_NULL_HANDLE;
@ -1476,13 +1479,10 @@ static VkResult
lvp_compute_pipeline_init(struct lvp_pipeline *pipeline,
struct lvp_device *device,
struct lvp_pipeline_cache *cache,
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *alloc)
const VkComputePipelineCreateInfo *pCreateInfo)
{
VK_FROM_HANDLE(vk_shader_module, module,
pCreateInfo->stage.module);
if (alloc == NULL)
alloc = &device->vk.alloc;
pipeline->device = device;
pipeline->layout = lvp_pipeline_layout_from_handle(pCreateInfo->layout);
lvp_pipeline_layout_ref(pipeline->layout);
@ -1508,7 +1508,6 @@ lvp_compute_pipeline_create(
VkDevice _device,
VkPipelineCache _cache,
const VkComputePipelineCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
LVP_FROM_HANDLE(lvp_device, device, _device);
@ -1518,7 +1517,7 @@ lvp_compute_pipeline_create(
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
pipeline = vk_zalloc(&device->vk.alloc, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
@ -1526,10 +1525,9 @@ lvp_compute_pipeline_create(
vk_object_base_init(&device->vk, &pipeline->base,
VK_OBJECT_TYPE_PIPELINE);
uint64_t t0 = os_time_get_nano();
result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo,
pAllocator);
result = lvp_compute_pipeline_init(pipeline, device, cache, pCreateInfo);
if (result != VK_SUCCESS) {
vk_free2(&device->vk.alloc, pAllocator, pipeline);
vk_free(&device->vk.alloc, pipeline);
return result;
}
@ -1562,7 +1560,7 @@ VKAPI_ATTR VkResult VKAPI_CALL lvp_CreateComputePipelines(
r = lvp_compute_pipeline_create(_device,
pipelineCache,
&pCreateInfos[i],
pAllocator, &pPipelines[i]);
&pPipelines[i]);
if (r != VK_SUCCESS) {
result = r;
pPipelines[i] = VK_NULL_HANDLE;

View file

@ -176,6 +176,8 @@ struct lvp_queue {
struct u_upload_mgr *uploader;
struct pipe_fence_handle *last_fence;
void *state;
struct util_dynarray pipeline_destroys;
simple_mtx_t pipeline_lock;
};
struct lvp_pipeline_cache {
@ -632,6 +634,9 @@ lvp_vk_format_to_pipe_format(VkFormat format)
return vk_format_to_pipe_format(format);
}
void
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline);
void
queue_thread_noop(void *data, void *gdata, int thread_index);
#ifdef __cplusplus