diff --git a/src/intel/common/xe/intel_gem.c b/src/intel/common/xe/intel_gem.c
index 5832d4acb48..f2ca10b9d08 100644
--- a/src/intel/common/xe/intel_gem.c
+++ b/src/intel/common/xe/intel_gem.c
@@ -94,3 +94,12 @@ xe_gem_read_correlate_cpu_gpu_timestamp(int fd,
 
    return true;
 }
+
+void
+intel_xe_gem_add_ext(uint64_t *ptr, uint32_t ext_name, void *data)
+{
+   struct drm_xe_user_extension *ext = data;
+   ext->next_extension = *ptr;
+   ext->name = ext_name;
+   *ptr = (uintptr_t)ext;
+}
diff --git a/src/intel/common/xe/intel_gem.h b/src/intel/common/xe/intel_gem.h
index 6be526afb3a..8029f5ed042 100644
--- a/src/intel/common/xe/intel_gem.h
+++ b/src/intel/common/xe/intel_gem.h
@@ -39,3 +39,5 @@ xe_gem_read_correlate_cpu_gpu_timestamp(int fd,
                                         uint64_t *gpu_timestamp,
                                         uint64_t *cpu_delta);
 bool xe_gem_can_render_on_fd(int fd);
+
+void intel_xe_gem_add_ext(uint64_t *ptr, uint32_t ext_name, void *data);
diff --git a/src/intel/vulkan/xe/anv_device.c b/src/intel/vulkan/xe/anv_device.c
index 6ce971355ee..fca775e228c 100644
--- a/src/intel/vulkan/xe/anv_device.c
+++ b/src/intel/vulkan/xe/anv_device.c
@@ -57,6 +57,8 @@ VkResult anv_xe_device_setup_vm(struct anv_device *device)
                        "intel_bind_timeline_init failed");
    }
 
+   device->protected_session_id = DRM_XE_PXP_HWDRM_DEFAULT_SESSION;
+
    return VK_SUCCESS;
 }
 
diff --git a/src/intel/vulkan/xe/anv_kmd_backend.c b/src/intel/vulkan/xe/anv_kmd_backend.c
index 5da310acba8..04b996a78ba 100644
--- a/src/intel/vulkan/xe/anv_kmd_backend.c
+++ b/src/intel/vulkan/xe/anv_kmd_backend.c
@@ -39,8 +39,12 @@ xe_gem_create(struct anv_device *device,
               enum anv_bo_alloc_flags alloc_flags,
               uint64_t *actual_size)
 {
-   /* TODO: protected content */
-   assert((alloc_flags & ANV_BO_ALLOC_PROTECTED) == 0);
+   struct drm_xe_ext_set_property pxp_ext = {
+      .base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
+      .property = DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE,
+      .value = DRM_XE_PXP_TYPE_HWDRM,
+   };
+
    /* WB+0 way coherent not supported by Xe KMD */
    assert((alloc_flags & ANV_BO_ALLOC_HOST_CACHED) == 0 ||
          (alloc_flags & ANV_BO_ALLOC_HOST_CACHED_COHERENT) == ANV_BO_ALLOC_HOST_CACHED_COHERENT);
@@ -79,6 +83,9 @@ xe_gem_create(struct anv_device *device,
       gem_create.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC;
    }
 
+   if (alloc_flags & ANV_BO_ALLOC_PROTECTED)
+      gem_create.extensions = (uintptr_t)&pxp_ext;
+
    if (intel_ioctl(device->fd, DRM_IOCTL_XE_GEM_CREATE, &gem_create))
       return 0;
 
@@ -153,6 +160,9 @@ anv_vm_bind_to_drm_xe_vm_bind(struct anv_device *device,
          xe_bind.op = DRM_XE_VM_BIND_OP_MAP;
          xe_bind.obj = bo->gem_handle;
       }
+
+      if (bo && (bo->alloc_flags & ANV_BO_ALLOC_PROTECTED))
+         xe_bind.flags |= DRM_XE_VM_BIND_FLAG_CHECK_PXP;
    } else if (anv_bind->op == ANV_VM_UNBIND_ALL) {
       xe_bind.op = DRM_XE_VM_BIND_OP_UNMAP_ALL;
       xe_bind.obj = bo->gem_handle;
diff --git a/src/intel/vulkan/xe/anv_queue.c b/src/intel/vulkan/xe/anv_queue.c
index b1b06d1e2df..4be1347609d 100644
--- a/src/intel/vulkan/xe/anv_queue.c
+++ b/src/intel/vulkan/xe/anv_queue.c
@@ -25,6 +25,7 @@
 #include "anv_private.h"
 
 #include "common/xe/intel_engine.h"
+#include "common/xe/intel_gem.h"
 #include "common/xe/intel_queue.h"
 #include "common/intel_gem.h"
 
@@ -110,20 +111,37 @@ create_engine(struct anv_device *device,
    }
 
    assert(device->vm_id != 0);
-   struct drm_xe_ext_set_property ext = {
-      .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+   const bool pxp_needed = pCreateInfo->flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT;
+   struct drm_xe_ext_set_property priority_ext = {
       .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
       .value = anv_vk_priority_to_drm_sched_priority(priority),
    };
+   struct drm_xe_ext_set_property pxp_ext = {
+      .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE,
+      .value = DRM_XE_PXP_TYPE_HWDRM,
+   };
    struct drm_xe_exec_queue_create create = {
       /* Allows KMD to pick one of those engines for the submission queue */
      .instances = (uintptr_t)instances,
       .vm_id = device->vm_id,
       .width = 1,
       .num_placements = count,
-      .extensions = (uintptr_t)&ext,
    };
-   int ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
+   intel_xe_gem_add_ext((uint64_t *)&create.extensions,
+                        DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+                        &priority_ext.base);
+   if (pxp_needed)
+      intel_xe_gem_add_ext((uint64_t *)&create.extensions,
+                           DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
+                           &pxp_ext.base);
+   int ret;
+   bool retry;
+   do {
+      ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
+      retry = pxp_needed && ret == -1 && errno == EBUSY;
+      if (retry)
+         usleep(1000);
+   } while (retry);
 
    vk_free(&device->vk.alloc, instances);
    if (ret)
       return vk_errorf(device, VK_ERROR_UNKNOWN, "Unable to create exec queue");