radv: use vk_error() everywhere an error is returned

For consistency, and because it might help for debugging purposes.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Author: Samuel Pitoiset, 2017-11-10 09:17:58 +01:00
Commit: cd64a4f705 (parent 4e16c6a41e)
8 changed files with 27 additions and 27 deletions
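For context, vk_error() is a thin wrapper that reports the error (together with the file and line where it was raised) and then returns it unchanged, which is what makes this change useful for debugging. The following is only a minimal sketch of that pattern, not radv's exact definition; the helper name __vk_errorf and its signature are assumed here:

/* Sketch of the vk_error() pattern (assumed helper name and signature;
 * the real definitions live in the radv headers and may differ). */
#include <stdarg.h>
#include <stdio.h>

typedef int VkResult; /* stand-in for the real Vulkan enum, illustrative only */

static VkResult
__vk_errorf(VkResult error, const char *file, int line, const char *format, ...)
{
	/* Log where the error was raised, then pass the code through unchanged. */
	fprintf(stderr, "%s:%d: vk_error %d\n", file, line, error);
	if (format) {
		va_list ap;
		va_start(ap, format);
		vfprintf(stderr, format, ap);
		va_end(ap);
		fprintf(stderr, "\n");
	}
	return error;
}

/* Returning vk_error(VK_ERROR_...) instead of the bare enum records the
 * failure site of every error path, at no cost to the caller. */
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL)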

src/amd/vulkan/radv_cmd_buffer.c

@@ -2526,7 +2526,7 @@ VkResult radv_EndCommandBuffer(
 	vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
 	if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
-		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	return cmd_buffer->record_result;
 }

src/amd/vulkan/radv_device.c

@@ -184,7 +184,7 @@ radv_physical_device_init(struct radv_physical_device *device,
 	fd = open(path, O_RDWR | O_CLOEXEC);
 	if (fd < 0)
-		return VK_ERROR_INCOMPATIBLE_DRIVER;
+		return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
 	version = drmGetVersion(fd);
 	if (!version) {
@@ -196,7 +196,7 @@ radv_physical_device_init(struct radv_physical_device *device,
 	if (strcmp(version->name, "amdgpu")) {
 		drmFreeVersion(version);
 		close(fd);
-		return VK_ERROR_INCOMPATIBLE_DRIVER;
+		return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
 	}
 	drmFreeVersion(version);
@@ -436,7 +436,7 @@ radv_enumerate_devices(struct radv_instance *instance)
 	max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
 	if (max_devices < 1)
-		return VK_ERROR_INCOMPATIBLE_DRIVER;
+		return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
 	for (unsigned i = 0; i < (unsigned)max_devices; i++) {
 		if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
@@ -895,7 +895,7 @@ radv_queue_init(struct radv_device *device, struct radv_queue *queue,
 	queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
 	if (!queue->hw_ctx)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	return VK_SUCCESS;
 }
@@ -1797,7 +1797,7 @@ fail:
 		queue->device->ws->buffer_destroy(tess_factor_ring_bo);
 	if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
 		queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
-	return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+	return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 }
 static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
@@ -1821,14 +1821,14 @@ static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
 	if (counts->syncobj_count) {
 		counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
 		if (!counts->syncobj)
-			return VK_ERROR_OUT_OF_HOST_MEMORY;
+			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	}
 	if (counts->sem_count) {
 		counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
 		if (!counts->sem) {
 			free(counts->syncobj);
-			return VK_ERROR_OUT_OF_HOST_MEMORY;
+			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
 	}
@@ -2227,7 +2227,7 @@ VkResult radv_MapMemory(
 		return VK_SUCCESS;
 	}
-	return VK_ERROR_MEMORY_MAP_FAILED;
+	return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
 }
 void radv_UnmapMemory(
@@ -2542,7 +2542,7 @@ VkResult radv_CreateFence(
 			  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (!fence)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	memset(fence, 0, sizeof(*fence));
 	fence->submitted = false;
@@ -2550,7 +2550,7 @@ VkResult radv_CreateFence(
 	fence->fence = device->ws->create_fence();
 	if (!fence->fence) {
 		vk_free2(&device->alloc, pAllocator, fence);
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	}
 	*pFence = radv_fence_to_handle(fence);
@@ -2666,7 +2666,7 @@ VkResult radv_CreateSemaphore(
 			 sizeof(*sem), 8,
 			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (!sem)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	sem->temp_syncobj = 0;
 	/* create a syncobject if we are going to export this semaphore */
@@ -2676,14 +2676,14 @@ VkResult radv_CreateSemaphore(
 		int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
 		if (ret) {
 			vk_free2(&device->alloc, pAllocator, sem);
-			return VK_ERROR_OUT_OF_HOST_MEMORY;
+			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
 		sem->sem = NULL;
 	} else {
 		sem->sem = device->ws->create_sem(device->ws);
 		if (!sem->sem) {
 			vk_free2(&device->alloc, pAllocator, sem);
-			return VK_ERROR_OUT_OF_HOST_MEMORY;
+			return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 		}
 		sem->syncobj = 0;
 	}
@@ -2721,14 +2721,14 @@ VkResult radv_CreateEvent(
 			  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (!event)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	event->bo = device->ws->buffer_create(device->ws, 8, 8,
 					      RADEON_DOMAIN_GTT,
 					      RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
 	if (!event->bo) {
 		vk_free2(&device->alloc, pAllocator, event);
-		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	}
 	event->map = (uint64_t*)device->ws->buffer_map(event->bo);
@@ -3506,7 +3506,7 @@ VkResult radv_GetMemoryFdKHR(VkDevice _device,
 	bool ret = radv_get_memory_fd(device, memory, pFD);
 	if (ret == false)
-		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	return VK_SUCCESS;
 }
@@ -3521,7 +3521,7 @@ VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
 	 *
 	 * Since we only handle opaque handles for now, there are no FD properties.
 	 */
-	return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
+	return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
 }
 VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
@@ -3534,7 +3534,7 @@ VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
 	int ret = device->ws->import_syncobj(device->ws, pImportSemaphoreFdInfo->fd, &syncobj_handle);
 	if (ret != 0)
-		return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
+		return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
 	if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
 		sem->temp_syncobj = syncobj_handle;

src/amd/vulkan/radv_formats.c

@@ -1144,7 +1144,7 @@ unsupported:
 		.maxResourceSize = 0,
 	};
-	return VK_ERROR_FORMAT_NOT_SUPPORTED;
+	return vk_error(VK_ERROR_FORMAT_NOT_SUPPORTED);
 }
 VkResult radv_GetPhysicalDeviceImageFormatProperties(

src/amd/vulkan/radv_pipeline.c

@@ -134,7 +134,7 @@ radv_pipeline_scratch_init(struct radv_device *device,
 	if (scratch_bytes_per_wave && max_waves < min_waves) {
 		/* Not really true at this moment, but will be true on first
 		 * execution. Avoid having hanging shaders. */
-		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	}
 	pipeline->scratch_bytes_per_wave = scratch_bytes_per_wave;
 	pipeline->max_waves = max_waves;

src/amd/vulkan/radv_pipeline_cache.c

@@ -206,7 +206,7 @@ radv_pipeline_cache_grow(struct radv_pipeline_cache *cache)
 	table = malloc(byte_size);
 	if (table == NULL)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	cache->hash_table = table;
 	cache->table_size = table_size;

src/amd/vulkan/radv_query.c

@@ -754,7 +754,7 @@ VkResult radv_CreateQueryPool(
 			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 	if (!pool)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	switch(pCreateInfo->queryType) {
@@ -784,7 +784,7 @@ VkResult radv_CreateQueryPool(
 	if (!pool->bo) {
 		vk_free2(&device->alloc, pAllocator, pool);
-		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	}
 	pool->ptr = device->ws->buffer_map(pool->bo);
@@ -792,7 +792,7 @@ VkResult radv_CreateQueryPool(
 	if (!pool->ptr) {
 		device->ws->buffer_destroy(pool->bo);
 		vk_free2(&device->alloc, pAllocator, pool);
-		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 	}
 	memset(pool->ptr, 0, size);

src/amd/vulkan/radv_shader.c

@@ -664,7 +664,7 @@ radv_GetShaderInfoAMD(VkDevice _device,
 	/* Spec doesn't indicate what to do if the stage is invalid, so just
 	 * return no info for this. */
 	if (!variant)
-		return VK_ERROR_FEATURE_NOT_PRESENT;
+		return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
 	switch (infoType) {
 	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:

src/amd/vulkan/radv_wsi.c

@@ -305,7 +305,7 @@ radv_wsi_create_prime_command_buffers(struct radv_device *device,
 	swapchain->cmd_buffers = vk_alloc(alloc, (sizeof(VkCommandBuffer) * num_cmd_buffers), 8,
 					  VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
 	if (!swapchain->cmd_buffers)
-		return VK_ERROR_OUT_OF_HOST_MEMORY;
+		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 	memset(swapchain->cmd_buffers, 0, sizeof(VkCommandBuffer) * num_cmd_buffers);
 	memset(swapchain->cmd_pools, 0, sizeof(VkCommandPool) * num_pools);