mirror of
https://gitlab.freedesktop.org/mesa/mesa.git
synced 2026-05-05 09:38:07 +02:00
radv: Handle mmap failures.
Which can happen if we have too many mmaps active in the process. CC: <mesa-stable@lists.freedesktop.org> Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5578>
This commit is contained in:
parent
04765e6a9a
commit
a5cb88eea4
3 changed files with 61 additions and 15 deletions
|
|
@ -629,6 +629,23 @@ radv_descriptor_set_destroy(struct radv_device *device,
|
|||
vk_free2(&device->vk.alloc, NULL, set);
|
||||
}
|
||||
|
||||
static void radv_destroy_descriptor_pool(struct radv_device *device,
|
||||
const VkAllocationCallbacks *pAllocator,
|
||||
struct radv_descriptor_pool *pool)
|
||||
{
|
||||
if (!pool->host_memory_base) {
|
||||
for(int i = 0; i < pool->entry_count; ++i) {
|
||||
radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
|
||||
}
|
||||
}
|
||||
|
||||
if (pool->bo)
|
||||
device->ws->buffer_destroy(pool->bo);
|
||||
|
||||
vk_object_base_finish(&pool->base);
|
||||
vk_free2(&device->vk.alloc, pAllocator, pool);
|
||||
}
|
||||
|
||||
VkResult radv_CreateDescriptorPool(
|
||||
VkDevice _device,
|
||||
const VkDescriptorPoolCreateInfo* pCreateInfo,
|
||||
|
|
@ -721,7 +738,15 @@ VkResult radv_CreateDescriptorPool(
|
|||
RADEON_FLAG_READ_ONLY |
|
||||
RADEON_FLAG_32BIT,
|
||||
RADV_BO_PRIORITY_DESCRIPTOR);
|
||||
if (!pool->bo) {
|
||||
radv_destroy_descriptor_pool(device, pAllocator, pool);
|
||||
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
|
||||
if (!pool->mapped_ptr) {
|
||||
radv_destroy_descriptor_pool(device, pAllocator, pool);
|
||||
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
}
|
||||
pool->size = bo_size;
|
||||
pool->max_entry_count = pCreateInfo->maxSets;
|
||||
|
|
@ -741,17 +766,7 @@ void radv_DestroyDescriptorPool(
|
|||
if (!pool)
|
||||
return;
|
||||
|
||||
if (!pool->host_memory_base) {
|
||||
for(int i = 0; i < pool->entry_count; ++i) {
|
||||
radv_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
|
||||
}
|
||||
}
|
||||
|
||||
if (pool->bo)
|
||||
device->ws->buffer_destroy(pool->bo);
|
||||
|
||||
vk_object_base_finish(&pool->base);
|
||||
vk_free2(&device->vk.alloc, pAllocator, pool);
|
||||
radv_destroy_descriptor_pool(device, pAllocator, pool);
|
||||
}
|
||||
|
||||
VkResult radv_ResetDescriptorPool(
|
||||
|
|
|
|||
|
|
@ -3012,6 +3012,8 @@ static VkResult radv_device_init_border_color(struct radv_device *device)
|
|||
|
||||
device->border_color_data.colors_gpu_ptr =
|
||||
device->ws->buffer_map(device->border_color_data.bo);
|
||||
if (!device->border_color_data.colors_gpu_ptr)
|
||||
return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
pthread_mutex_init(&device->border_color_data.mutex, NULL);
|
||||
|
||||
return VK_SUCCESS;
|
||||
|
|
@ -4097,6 +4099,8 @@ radv_get_preamble_cs(struct radv_queue *queue,
|
|||
|
||||
if (descriptor_bo != queue->descriptor_bo) {
|
||||
uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
|
||||
if (!map)
|
||||
goto fail;
|
||||
|
||||
if (scratch_bo) {
|
||||
uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
|
||||
|
|
@ -6362,7 +6366,14 @@ radv_SignalSemaphore(VkDevice _device,
|
|||
return VK_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
static void radv_destroy_event(struct radv_device *device,
|
||||
const VkAllocationCallbacks* pAllocator,
|
||||
struct radv_event *event)
|
||||
{
|
||||
device->ws->buffer_destroy(event->bo);
|
||||
vk_object_base_finish(&event->base);
|
||||
vk_free2(&device->vk.alloc, pAllocator, event);
|
||||
}
|
||||
|
||||
VkResult radv_CreateEvent(
|
||||
VkDevice _device,
|
||||
|
|
@ -6390,6 +6401,10 @@ VkResult radv_CreateEvent(
|
|||
}
|
||||
|
||||
event->map = (uint64_t*)device->ws->buffer_map(event->bo);
|
||||
if (!event->map) {
|
||||
radv_destroy_event(device, pAllocator, event);
|
||||
return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
|
||||
}
|
||||
|
||||
*pEvent = radv_event_to_handle(event);
|
||||
|
||||
|
|
@ -6406,9 +6421,8 @@ void radv_DestroyEvent(
|
|||
|
||||
if (!event)
|
||||
return;
|
||||
device->ws->buffer_destroy(event->bo);
|
||||
vk_object_base_finish(&event->base);
|
||||
vk_free2(&device->vk.alloc, pAllocator, event);
|
||||
|
||||
radv_destroy_event(device, pAllocator, event);
|
||||
}
|
||||
|
||||
VkResult radv_GetEventStatus(
|
||||
|
|
|
|||
|
|
@ -667,7 +667,18 @@ radv_alloc_shader_memory(struct radv_device *device,
|
|||
(device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
|
||||
0 : RADEON_FLAG_READ_ONLY),
|
||||
RADV_BO_PRIORITY_SHADER);
|
||||
if (!slab->bo) {
|
||||
free(slab);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
slab->ptr = (char*)device->ws->buffer_map(slab->bo);
|
||||
if (!slab->ptr) {
|
||||
device->ws->buffer_destroy(slab->bo);
|
||||
free(slab);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
list_inithead(&slab->shaders);
|
||||
|
||||
mtx_lock(&device->shader_slab_mutex);
|
||||
|
|
@ -1012,6 +1023,12 @@ radv_shader_variant_create(struct radv_device *device,
|
|||
}
|
||||
|
||||
void *dest_ptr = radv_alloc_shader_memory(device, variant);
|
||||
if (!dest_ptr) {
|
||||
if (binary->type == RADV_BINARY_TYPE_RTLD)
|
||||
ac_rtld_close(&rtld_binary);
|
||||
free(variant);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (binary->type == RADV_BINARY_TYPE_RTLD) {
|
||||
struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue