diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c index 06419965e6e..9013446d4a7 100644 --- a/src/intel/vulkan/anv_allocator.c +++ b/src/intel/vulkan/anv_allocator.c @@ -1640,7 +1640,18 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device, pthread_mutex_lock(&cache->mutex); - struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle); + struct anv_bo *bo = NULL; + if (device->info->kmd_type == INTEL_KMD_TYPE_XE) { + bo = vk_zalloc(&device->vk.alloc, sizeof(*bo), 8, + VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); + if (!bo) { + pthread_mutex_unlock(&cache->mutex); + return VK_ERROR_OUT_OF_HOST_MEMORY; + } + } else { + bo = anv_device_lookup_bo(device, gem_handle); + } + if (bo->refcount > 0) { /* VK_EXT_external_memory_host doesn't require handling importing the * same pointer twice at the same time, but we don't get in the way. If @@ -1693,6 +1704,13 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device, return result; } + if (device->kmd_backend->gem_vm_bind(device, &new_bo)) { + VkResult res = vk_errorf(device, VK_ERROR_UNKNOWN, "vm bind failed: %m"); + anv_bo_vma_free(device, &new_bo); + pthread_mutex_unlock(&cache->mutex); + return res; + } + *bo = new_bo; } @@ -1909,7 +1927,10 @@ anv_device_release_bo(struct anv_device *device, struct anv_bo *bo) { struct anv_bo_cache *cache = &device->bo_cache; - assert(anv_device_lookup_bo(device, bo->gem_handle) == bo); + const bool bo_is_xe_userptr = device->info->kmd_type == INTEL_KMD_TYPE_XE && + bo->from_host_ptr; + assert(bo_is_xe_userptr || + anv_device_lookup_bo(device, bo->gem_handle) == bo); /* Try to decrement the counter but don't go below one. 
If this succeeds * then the refcount has been decremented and we are not the last @@ -1948,7 +1969,10 @@ anv_device_release_bo(struct anv_device *device, */ struct anv_bo old_bo = *bo; - memset(bo, 0, sizeof(*bo)); + if (bo_is_xe_userptr) + vk_free(&device->vk.alloc, bo); + else + memset(bo, 0, sizeof(*bo)); anv_bo_finish(device, &old_bo); diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h index 388107db82c..8954dc9a89e 100644 --- a/src/intel/vulkan/anv_private.h +++ b/src/intel/vulkan/anv_private.h @@ -404,6 +404,7 @@ struct anv_bo { */ struct util_vma_heap *vma_heap; + /* All userptr bos in Xe KMD have gem_handle set to workaround_bo->gem_handle */ uint32_t gem_handle; uint32_t refcount; diff --git a/src/intel/vulkan/xe/anv_kmd_backend.c b/src/intel/vulkan/xe/anv_kmd_backend.c index 3e36edb4136..123f0a74d20 100644 --- a/src/intel/vulkan/xe/anv_kmd_backend.c +++ b/src/intel/vulkan/xe/anv_kmd_backend.c @@ -94,6 +94,15 @@ xe_gem_vm_bind_op(struct anv_device *device, struct anv_bo *bo, uint32_t op) if (ret) return ret; + uint32_t obj = op == XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle; + uint64_t obj_offset = 0; + if (bo->from_host_ptr) { + obj = 0; + obj_offset = (uintptr_t)bo->map; + if (op == XE_VM_BIND_OP_MAP) + op = XE_VM_BIND_OP_MAP_USERPTR; + } + struct drm_xe_sync sync = { .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL, .handle = syncobj_handle, @@ -101,8 +110,8 @@ xe_gem_vm_bind_op(struct anv_device *device, struct anv_bo *bo, uint32_t op) struct drm_xe_vm_bind args = { .vm_id = device->vm_id, .num_binds = 1, - .bind.obj = op == XE_VM_BIND_OP_UNMAP ?
0 : bo->gem_handle, - .bind.obj_offset = 0, + .bind.obj = obj, + .bind.obj_offset = obj_offset, .bind.range = bo->actual_size, .bind.addr = intel_48b_address(bo->offset), .bind.op = op, @@ -141,7 +150,11 @@ static int xe_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo) static uint32_t xe_gem_create_userptr(struct anv_device *device, void *mem, uint64_t size) { - return 0; + /* We return the workaround BO gem_handle here, because Xe doesn't + * create handles for userptrs. But we still need to make it look + * to the rest of Anv that the operation succeeded. + */ + return device->workaround_bo->gem_handle; } const struct anv_kmd_backend *