diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index 2c97af83a32..c554db839ea 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1330,40 +1330,6 @@ anv_bo_cache_finish(struct anv_bo_cache *cache)
    pthread_mutex_destroy(&cache->mutex);
 }
 
-#define ANV_BO_CACHE_SUPPORTED_FLAGS \
-   (EXEC_OBJECT_WRITE | \
-    EXEC_OBJECT_ASYNC | \
-    EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
-    EXEC_OBJECT_PINNED | \
-    EXEC_OBJECT_CAPTURE)
-
-static uint32_t
-anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
-                               enum anv_bo_alloc_flags alloc_flags)
-{
-   struct anv_physical_device *pdevice = device->physical;
-
-   uint64_t bo_flags = EXEC_OBJECT_PINNED;
-
-   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
-      bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
-
-   if (((alloc_flags & ANV_BO_ALLOC_CAPTURE) ||
-        INTEL_DEBUG(DEBUG_CAPTURE_ALL)) &&
-       pdevice->has_exec_capture)
-      bo_flags |= EXEC_OBJECT_CAPTURE;
-
-   if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
-      assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
-      bo_flags |= EXEC_OBJECT_WRITE;
-   }
-
-   if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
-      bo_flags |= EXEC_OBJECT_ASYNC;
-
-   return bo_flags;
-}
-
 static void
 anv_bo_unmap_close(struct anv_device *device, struct anv_bo *bo)
 {
@@ -1453,8 +1419,7 @@ anv_device_alloc_bo(struct anv_device *device,
    assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
 
    const uint32_t bo_flags =
-      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
-   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+      device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
 
    /* The kernel is going to give us whole pages anyway. And we
     * also need 4KB alignment for 1MB AUX buffer that follows
@@ -1615,8 +1580,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
    struct anv_bo_cache *cache = &device->bo_cache;
 
    const uint32_t bo_flags =
-      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
-   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+      device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
 
    uint32_t gem_handle = device->kmd_backend->gem_create_userptr(device, host_ptr, size);
    if (!gem_handle)
@@ -1720,8 +1684,7 @@ anv_device_import_bo(struct anv_device *device,
    struct anv_bo_cache *cache = &device->bo_cache;
 
    const uint32_t bo_flags =
-      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
-   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+      device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
 
    pthread_mutex_lock(&cache->mutex);
 
diff --git a/src/intel/vulkan/anv_gem_stubs.c b/src/intel/vulkan/anv_gem_stubs.c
index 99905c0e9a4..524fcee812a 100644
--- a/src/intel/vulkan/anv_gem_stubs.c
+++ b/src/intel/vulkan/anv_gem_stubs.c
@@ -79,6 +79,19 @@ stub_queue_exec_locked(struct anv_queue *queue,
    return VK_ERROR_UNKNOWN;
 }
 
+static VkResult
+stub_queue_exec_trace(struct anv_queue *queue, struct anv_utrace_submit *submit)
+{
+   return VK_ERROR_UNKNOWN;
+}
+
+static uint32_t
+stub_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+                                enum anv_bo_alloc_flags alloc_flags)
+{
+   return 0;
+}
+
 void *
 anv_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
              uint64_t size, VkMemoryPropertyFlags property_flags)
@@ -169,6 +182,8 @@ const struct anv_kmd_backend *anv_stub_kmd_backend_get(void)
       .vm_unbind_bo = stub_vm_bind_bo,
       .execute_simple_batch = stub_execute_simple_batch,
       .queue_exec_locked = stub_queue_exec_locked,
+      .queue_exec_trace = stub_queue_exec_trace,
+      .bo_alloc_flags_to_bo_flags = stub_bo_alloc_flags_to_bo_flags,
    };
    return &stub_backend;
 }
diff --git a/src/intel/vulkan/anv_kmd_backend.h b/src/intel/vulkan/anv_kmd_backend.h
index a299b72cf1d..bf3e093162f 100644
--- a/src/intel/vulkan/anv_kmd_backend.h
+++ b/src/intel/vulkan/anv_kmd_backend.h
@@ -89,6 +89,8 @@ struct anv_kmd_backend {
                                     uint32_t perf_query_pass);
    VkResult (*queue_exec_trace)(struct anv_queue *queue,
                                 struct anv_utrace_submit *submit);
+   uint32_t (*bo_alloc_flags_to_bo_flags)(struct anv_device *device,
+                                          enum anv_bo_alloc_flags alloc_flags);
 };
 
 const struct anv_kmd_backend *anv_kmd_backend_get(enum intel_kmd_type type);
diff --git a/src/intel/vulkan/i915/anv_kmd_backend.c b/src/intel/vulkan/i915/anv_kmd_backend.c
index f1b565d6430..d0f99b542bd 100644
--- a/src/intel/vulkan/i915/anv_kmd_backend.c
+++ b/src/intel/vulkan/i915/anv_kmd_backend.c
@@ -231,6 +231,33 @@ i915_gem_create_userptr(struct anv_device *device, void *mem, uint64_t size)
    return userptr.handle;
 }
 
+static uint32_t
+i915_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+                                enum anv_bo_alloc_flags alloc_flags)
+{
+   struct anv_physical_device *pdevice = device->physical;
+
+   uint64_t bo_flags = EXEC_OBJECT_PINNED;
+
+   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
+      bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+   if (((alloc_flags & ANV_BO_ALLOC_CAPTURE) ||
+        INTEL_DEBUG(DEBUG_CAPTURE_ALL)) &&
+       pdevice->has_exec_capture)
+      bo_flags |= EXEC_OBJECT_CAPTURE;
+
+   if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
+      assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
+      bo_flags |= EXEC_OBJECT_WRITE;
+   }
+
+   if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
+      bo_flags |= EXEC_OBJECT_ASYNC;
+
+   return bo_flags;
+}
+
 const struct anv_kmd_backend *
 anv_i915_kmd_backend_get(void)
 {
@@ -245,6 +272,7 @@ anv_i915_kmd_backend_get(void)
       .execute_simple_batch = i915_execute_simple_batch,
       .queue_exec_locked = i915_queue_exec_locked,
       .queue_exec_trace = i915_queue_exec_trace,
+      .bo_alloc_flags_to_bo_flags = i915_bo_alloc_flags_to_bo_flags,
    };
    return &i915_backend;
 }
diff --git a/src/intel/vulkan/xe/anv_kmd_backend.c b/src/intel/vulkan/xe/anv_kmd_backend.c
index f1dd1d54d1a..3b3e6a53048 100644
--- a/src/intel/vulkan/xe/anv_kmd_backend.c
+++ b/src/intel/vulkan/xe/anv_kmd_backend.c
@@ -226,6 +226,13 @@ xe_gem_create_userptr(struct anv_device *device, void *mem, uint64_t size)
    return device->workaround_bo->gem_handle;
 }
 
+static uint32_t
+xe_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+                              enum anv_bo_alloc_flags alloc_flags)
+{
+   return 0;
+}
+
 const struct anv_kmd_backend *
 anv_xe_kmd_backend_get(void)
 {
@@ -240,6 +247,7 @@ anv_xe_kmd_backend_get(void)
      .execute_simple_batch = xe_execute_simple_batch,
      .queue_exec_locked = xe_queue_exec_locked,
      .queue_exec_trace = xe_queue_exec_utrace_locked,
+      .bo_alloc_flags_to_bo_flags = xe_bo_alloc_flags_to_bo_flags,
    };
    return &xe_backend;
 }
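
A rough sketch of how the new hook is consumed, for illustration only (it assumes Mesa's anv headers are in scope; example_bo_flags_for_capture is a hypothetical helper, not part of this series):

/* Hypothetical helper: callers now dispatch through the per-KMD vfunc
 * instead of the removed anv_bo_alloc_flags_to_bo_flags(). On i915 this
 * yields EXEC_OBJECT_* bits; the Xe backend currently returns 0. */
static uint32_t
example_bo_flags_for_capture(struct anv_device *device)
{
   return device->kmd_backend->bo_alloc_flags_to_bo_flags(device,
                                                          ANV_BO_ALLOC_CAPTURE);
}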