lavapipe: Lock around CSO destroys

They can race in llvmpipe_register_shader.

Closes: https://gitlab.freedesktop.org/mesa/mesa/-/issues/9680
Reviewed-by: Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24827>
This commit is contained in:
Konstantin Seurer 2023-08-22 16:14:39 +02:00 committed by Marge Bot
parent 04cb346973
commit 1f3ffb7fd9
3 changed files with 17 additions and 10 deletions

View file

@@ -1381,7 +1381,7 @@ destroy_pipelines(struct lvp_queue *queue)
{
simple_mtx_lock(&queue->lock);
while (util_dynarray_contains(&queue->pipeline_destroys, struct lvp_pipeline*)) {
lvp_pipeline_destroy(queue->device, util_dynarray_pop(&queue->pipeline_destroys, struct lvp_pipeline*));
lvp_pipeline_destroy(queue->device, util_dynarray_pop(&queue->pipeline_destroys, struct lvp_pipeline*), true);
}
simple_mtx_unlock(&queue->lock);
}

View file

@@ -43,7 +43,7 @@
typedef void (*cso_destroy_func)(struct pipe_context*, void*);
static void
shader_destroy(struct lvp_device *device, struct lvp_shader *shader)
shader_destroy(struct lvp_device *device, struct lvp_shader *shader, bool locked)
{
if (!shader->pipeline_nir)
return;
@@ -64,27 +64,34 @@ shader_destroy(struct lvp_device *device, struct lvp_shader *shader)
free(variant);
}
ralloc_free(shader->inlines.variants.table);
if (!locked)
simple_mtx_lock(&device->queue.lock);
if (shader->shader_cso)
destroy[stage](device->queue.ctx, shader->shader_cso);
if (shader->tess_ccw_cso)
destroy[stage](device->queue.ctx, shader->tess_ccw_cso);
if (!locked)
simple_mtx_unlock(&device->queue.lock);
lvp_pipeline_nir_ref(&shader->pipeline_nir, NULL);
lvp_pipeline_nir_ref(&shader->tess_ccw, NULL);
}
void
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline)
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline, bool locked)
{
lvp_forall_stage(i)
shader_destroy(device, &pipeline->shaders[i]);
shader_destroy(device, &pipeline->shaders[i], locked);
if (pipeline->layout)
vk_pipeline_layout_unref(&device->vk, &pipeline->layout->vk);
for (unsigned i = 0; i < pipeline->num_groups; i++) {
LVP_FROM_HANDLE(lvp_pipeline, p, pipeline->groups[i]);
lvp_pipeline_destroy(device, p);
lvp_pipeline_destroy(device, p, locked);
}
vk_free(&device->vk.alloc, pipeline->state_data);
@@ -108,7 +115,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyPipeline(
util_dynarray_append(&device->queue.pipeline_destroys, struct lvp_pipeline*, pipeline);
simple_mtx_unlock(&device->queue.lock);
} else {
lvp_pipeline_destroy(device, pipeline);
lvp_pipeline_destroy(device, pipeline, false);
}
}
@@ -995,7 +1002,7 @@ lvp_graphics_pipeline_create(
pci.stageCount = g->stageCount;
result = lvp_graphics_pipeline_create(_device, _cache, &pci, flags, &pipeline->groups[i], true);
if (result != VK_SUCCESS) {
lvp_pipeline_destroy(device, pipeline);
lvp_pipeline_destroy(device, pipeline, false);
return result;
}
pipeline->num_groups++;
@@ -1202,7 +1209,7 @@ VKAPI_ATTR void VKAPI_CALL lvp_DestroyShaderEXT(
if (!shader)
return;
shader_destroy(device, shader);
shader_destroy(device, shader, false);
vk_pipeline_layout_unref(&device->vk, &shader->layout->vk);
blob_finish(&shader->blob);
@@ -1293,7 +1300,7 @@ create_shader_object(struct lvp_device *device, const VkShaderCreateInfoEXT *pCr
} else if (stage == MESA_SHADER_FRAGMENT && nir->info.fs.uses_fbfetch_output) {
/* this is (currently) illegal */
assert(!nir->info.fs.uses_fbfetch_output);
shader_destroy(device, shader);
shader_destroy(device, shader, false);
vk_object_base_finish(&shader->base);
vk_free2(&device->vk.alloc, pAllocator, shader);

View file

@@ -684,7 +684,7 @@ lvp_vk_format_to_pipe_format(VkFormat format)
}
void
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline);
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline, bool locked);
void
queue_thread_noop(void *data, void *gdata, int thread_index);