anv: change the vm_bind-related kmd_backend vfuncs to return VkResult

All these vfuncs funnel down to either stubs or the xe_vm_bind_op()
function. By returning int we're shifting VkResult generation to the
callers, which are simply not doing the correct job. If they get
VkResult they can simply throw the errors up the stack without having
to erroneously try to figure out what really happened.

Today the callers are returning either VK_ERROR_UNKNOWN or
VK_ERROR_OUT_OF_DEVICE_MEMORY, but after the patch we're returning
either VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_DEVICE_LOST.

Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27926>
This commit is contained in:
Paulo Zanoni 2024-03-06 17:10:06 -08:00 committed by Marge Bot
parent 4863e12679
commit a8f7d26c2b
6 changed files with 37 additions and 35 deletions

View file

@@ -1375,7 +1375,7 @@ static void
anv_bo_finish(struct anv_device *device, struct anv_bo *bo)
{
/* Not releasing vma in case unbind fails */
if (device->kmd_backend->vm_unbind_bo(device, bo) == 0)
if (device->kmd_backend->vm_unbind_bo(device, bo) == VK_SUCCESS)
anv_bo_vma_free(device, bo);
anv_bo_unmap_close(device, bo);
@@ -1560,10 +1560,11 @@ anv_device_alloc_bo(struct anv_device *device,
if (result != VK_SUCCESS)
return result;
if (device->kmd_backend->vm_bind_bo(device, &new_bo)) {
result = device->kmd_backend->vm_bind_bo(device, &new_bo);
if (result != VK_SUCCESS) {
anv_bo_vma_free(device, &new_bo);
anv_bo_unmap_close(device, &new_bo);
return vk_errorf(device, VK_ERROR_UNKNOWN, "vm bind failed");
return result;
}
assert(new_bo.gem_handle);
@@ -1717,11 +1718,11 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
return result;
}
if (device->kmd_backend->vm_bind_bo(device, &new_bo)) {
VkResult res = vk_errorf(device, VK_ERROR_UNKNOWN, "vm bind failed: %m");
result = device->kmd_backend->vm_bind_bo(device, &new_bo);
if (result != VK_SUCCESS) {
anv_bo_vma_free(device, &new_bo);
pthread_mutex_unlock(&cache->mutex);
return res;
return result;
}
*bo = new_bo;
@@ -1813,10 +1814,11 @@ anv_device_import_bo(struct anv_device *device,
return result;
}
if (device->kmd_backend->vm_bind_bo(device, &new_bo)) {
result = device->kmd_backend->vm_bind_bo(device, &new_bo);
if (result != VK_SUCCESS) {
anv_bo_vma_free(device, &new_bo);
pthread_mutex_unlock(&cache->mutex);
return vk_errorf(device, VK_ERROR_UNKNOWN, "vm bind failed");
return result;
}
*bo = new_bo;

View file

@@ -152,16 +152,16 @@ anv_gem_import_bo_alloc_flags_to_bo_flags(struct anv_device *device,
return VK_SUCCESS;
}
static int
static VkResult
stub_vm_bind(struct anv_device *device, struct anv_sparse_submission *submit)
{
return 0;
return VK_SUCCESS;
}
static int
static VkResult
stub_vm_bind_bo(struct anv_device *device, struct anv_bo *bo)
{
return 0;
return VK_SUCCESS;
}
const struct anv_kmd_backend *anv_stub_kmd_backend_get(void)

View file

@@ -79,8 +79,8 @@ struct anv_kmd_backend {
* This is intended for sparse resources, so it does not use the
* bind_timeline interface: synchronization is up to the callers.
*/
int (*vm_bind)(struct anv_device *device,
struct anv_sparse_submission *submit);
VkResult (*vm_bind)(struct anv_device *device,
struct anv_sparse_submission *submit);
/*
* Fully bind or unbind a BO.
@@ -88,8 +88,8 @@ struct anv_kmd_backend {
* a new point in the bind_timeline, which will be waited for the next time
* a batch is submitted.
*/
int (*vm_bind_bo)(struct anv_device *device, struct anv_bo *bo);
int (*vm_unbind_bo)(struct anv_device *device, struct anv_bo *bo);
VkResult (*vm_bind_bo)(struct anv_device *device, struct anv_bo *bo);
VkResult (*vm_unbind_bo)(struct anv_device *device, struct anv_bo *bo);
VkResult (*execute_simple_batch)(struct anv_queue *queue,
struct anv_bo *batch_bo,

View file

@@ -613,11 +613,7 @@ anv_sparse_bind_vm_bind(struct anv_device *device,
if (!queue)
assert(submit->wait_count == 0 && submit->signal_count == 0);
int rc = device->kmd_backend->vm_bind(device, submit);
if (rc)
return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
return VK_SUCCESS;
return device->kmd_backend->vm_bind(device, submit);
}
VkResult

View file

@@ -226,16 +226,16 @@ i915_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
return i915_gem_mmap_legacy(device, bo, offset, size, flags);
}
static int
static VkResult
i915_vm_bind(struct anv_device *device, struct anv_sparse_submission *submit)
{
return 0;
return VK_SUCCESS;
}
static int
static VkResult
i915_vm_bind_bo(struct anv_device *device, struct anv_bo *bo)
{
return 0;
return VK_SUCCESS;
}
static uint32_t

View file

@@ -122,16 +122,17 @@ capture_vm_in_error_dump(struct anv_device *device, struct anv_bo *bo)
return capture ? DRM_XE_VM_BIND_FLAG_DUMPABLE : 0;
}
static inline int
static inline VkResult
xe_vm_bind_op(struct anv_device *device,
struct anv_sparse_submission *submit,
bool signal_bind_timeline)
{
VkResult result = VK_SUCCESS;
int num_syncs = submit->wait_count + submit->signal_count +
signal_bind_timeline;
STACK_ARRAY(struct drm_xe_sync, xe_syncs, num_syncs);
if (!xe_syncs)
return -ENOMEM;
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
int sync_idx = 0;
for (int s = 0; s < submit->wait_count; s++) {
@@ -179,14 +180,13 @@ xe_vm_bind_op(struct anv_device *device,
.num_syncs = num_syncs,
.syncs = (uintptr_t)xe_syncs,
};
int ret;
STACK_ARRAY(struct drm_xe_vm_bind_op, xe_binds_stackarray,
submit->binds_len);
struct drm_xe_vm_bind_op *xe_binds;
if (submit->binds_len > 1) {
if (!xe_binds_stackarray) {
ret = -ENOMEM;
result = vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
goto out_syncs;
}
@@ -245,12 +245,14 @@ xe_vm_bind_op(struct anv_device *device,
xe_syncs[num_syncs - 1].timeline_value =
intel_bind_timeline_bind_begin(&device->bind_timeline);
}
ret = intel_ioctl(device->fd, DRM_IOCTL_XE_VM_BIND, &args);
int ret = intel_ioctl(device->fd, DRM_IOCTL_XE_VM_BIND, &args);
if (signal_bind_timeline)
intel_bind_timeline_bind_end(&device->bind_timeline);
if (ret)
if (ret) {
result = vk_device_set_lost(&device->vk, "vm_bind failed: %m");
goto out_stackarray;
}
ANV_RMV(vm_binds, device, submit->binds, submit->binds_len);
@@ -259,16 +261,17 @@ out_stackarray:
out_syncs:
STACK_ARRAY_FINISH(xe_syncs);
return ret;
return result;
}
static int
static VkResult
xe_vm_bind(struct anv_device *device, struct anv_sparse_submission *submit)
{
return xe_vm_bind_op(device, submit, false);
}
static int xe_vm_bind_bo(struct anv_device *device, struct anv_bo *bo)
static VkResult
xe_vm_bind_bo(struct anv_device *device, struct anv_bo *bo)
{
struct anv_vm_bind bind = {
.bo = bo,
@@ -288,7 +291,8 @@ static int xe_vm_bind_bo(struct anv_device *device, struct anv_bo *bo)
return xe_vm_bind_op(device, &submit, true);
}
static int xe_vm_unbind_bo(struct anv_device *device, struct anv_bo *bo)
static VkResult
xe_vm_unbind_bo(struct anv_device *device, struct anv_bo *bo)
{
struct anv_vm_bind bind = {
.bo = bo,