anv: Move bo_alloc_flags_to_bo_flags() to backend

The bo_flags are i915-specific and should not be handled in common
code, so move this handling to the KMD backend, since it sits in the
hot path.

There is still some i915 bo_flags handling left in
anv_device_import_bo(); it will be addressed in the next patch.

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Sagar Ghuge <sagar.ghuge@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/25044>
José Roberto de Souza 2023-09-04 11:15:29 -07:00
parent 8b672c7b2c
commit e1f846f790
5 changed files with 56 additions and 40 deletions
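
Below is a minimal, self-contained sketch of the dispatch pattern this patch
introduces: the common allocation path asks the per-KMD backend function table
for the BO flags instead of calling an i915-specific helper directly. All
fake_* names, flag values, and bit assignments are simplified stand-ins for
illustration only, not the real ANV definitions.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for enum anv_bo_alloc_flags. */
enum fake_bo_alloc_flags {
   FAKE_BO_ALLOC_32BIT_ADDRESS = 1 << 0,
   FAKE_BO_ALLOC_CAPTURE       = 1 << 1,
};

struct fake_device;

/* Stand-in for struct anv_kmd_backend: one function table per KMD. */
struct fake_kmd_backend {
   uint32_t (*bo_alloc_flags_to_bo_flags)(struct fake_device *device,
                                          enum fake_bo_alloc_flags alloc_flags);
};

struct fake_device {
   const struct fake_kmd_backend *kmd_backend;
};

/* i915-style backend: translates allocation flags into execbuf BO flags.
 * The bit values here are made up for the sketch. */
static uint32_t
fake_i915_bo_alloc_flags_to_bo_flags(struct fake_device *device,
                                     enum fake_bo_alloc_flags alloc_flags)
{
   (void)device;
   uint32_t bo_flags = 0;
   if (!(alloc_flags & FAKE_BO_ALLOC_32BIT_ADDRESS))
      bo_flags |= 1u << 3;   /* pretend "48-bit address" bit */
   if (alloc_flags & FAKE_BO_ALLOC_CAPTURE)
      bo_flags |= 1u << 7;   /* pretend "capture on error" bit */
   return bo_flags;
}

/* Xe/stub-style backend: the i915 flags have no meaning here, so return 0. */
static uint32_t
fake_xe_bo_alloc_flags_to_bo_flags(struct fake_device *device,
                                   enum fake_bo_alloc_flags alloc_flags)
{
   (void)device;
   (void)alloc_flags;
   return 0;
}

static const struct fake_kmd_backend fake_i915_backend = {
   .bo_alloc_flags_to_bo_flags = fake_i915_bo_alloc_flags_to_bo_flags,
};

static const struct fake_kmd_backend fake_xe_backend = {
   .bo_alloc_flags_to_bo_flags = fake_xe_bo_alloc_flags_to_bo_flags,
};

/* Common allocation path: no KMD-specific branching, just a dispatch. */
static uint32_t
fake_device_alloc_bo_flags(struct fake_device *device,
                           enum fake_bo_alloc_flags alloc_flags)
{
   return device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
}

int
main(void)
{
   struct fake_device i915_dev = { .kmd_backend = &fake_i915_backend };
   struct fake_device xe_dev   = { .kmd_backend = &fake_xe_backend };

   printf("i915 bo_flags: 0x%x\n",
          (unsigned)fake_device_alloc_bo_flags(&i915_dev, FAKE_BO_ALLOC_CAPTURE));
   printf("xe   bo_flags: 0x%x\n",
          (unsigned)fake_device_alloc_bo_flags(&xe_dev, FAKE_BO_ALLOC_CAPTURE));
   return 0;
}

Keeping the translation behind a function pointer keeps the per-allocation
path branch-free in common code, mirroring how the other hooks in
anv_kmd_backend are dispatched.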


@@ -1330,40 +1330,6 @@ anv_bo_cache_finish(struct anv_bo_cache *cache)
    pthread_mutex_destroy(&cache->mutex);
 }
 
-#define ANV_BO_CACHE_SUPPORTED_FLAGS \
-   (EXEC_OBJECT_WRITE | \
-    EXEC_OBJECT_ASYNC | \
-    EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
-    EXEC_OBJECT_PINNED | \
-    EXEC_OBJECT_CAPTURE)
-
-static uint32_t
-anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
-                               enum anv_bo_alloc_flags alloc_flags)
-{
-   struct anv_physical_device *pdevice = device->physical;
-
-   uint64_t bo_flags = EXEC_OBJECT_PINNED;
-
-   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
-      bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
-
-   if (((alloc_flags & ANV_BO_ALLOC_CAPTURE) ||
-        INTEL_DEBUG(DEBUG_CAPTURE_ALL)) &&
-       pdevice->has_exec_capture)
-      bo_flags |= EXEC_OBJECT_CAPTURE;
-
-   if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
-      assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
-      bo_flags |= EXEC_OBJECT_WRITE;
-   }
-
-   if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
-      bo_flags |= EXEC_OBJECT_ASYNC;
-
-   return bo_flags;
-}
-
 static void
 anv_bo_unmap_close(struct anv_device *device, struct anv_bo *bo)
 {
@@ -1453,8 +1419,7 @@ anv_device_alloc_bo(struct anv_device *device,
    assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
 
    const uint32_t bo_flags =
-      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
-   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+      device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
 
    /* The kernel is going to give us whole pages anyway. And we
     * also need 4KB alignment for 1MB AUX buffer that follows
@@ -1615,8 +1580,7 @@ anv_device_import_bo_from_host_ptr(struct anv_device *device,
    struct anv_bo_cache *cache = &device->bo_cache;
 
    const uint32_t bo_flags =
-      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
-   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+      device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
 
    uint32_t gem_handle = device->kmd_backend->gem_create_userptr(device, host_ptr, size);
    if (!gem_handle)
@@ -1720,8 +1684,7 @@ anv_device_import_bo(struct anv_device *device,
    struct anv_bo_cache *cache = &device->bo_cache;
 
    const uint32_t bo_flags =
-      anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
-   assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+      device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
 
    pthread_mutex_lock(&cache->mutex);


@@ -79,6 +79,19 @@ stub_queue_exec_locked(struct anv_queue *queue,
    return VK_ERROR_UNKNOWN;
 }
 
+static VkResult
+stub_queue_exec_trace(struct anv_queue *queue, struct anv_utrace_submit *submit)
+{
+   return VK_ERROR_UNKNOWN;
+}
+
+static uint32_t
+stub_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+                                enum anv_bo_alloc_flags alloc_flags)
+{
+   return 0;
+}
+
 void *
 anv_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
              uint64_t size, VkMemoryPropertyFlags property_flags)
@@ -169,6 +182,8 @@ const struct anv_kmd_backend *anv_stub_kmd_backend_get(void)
       .vm_unbind_bo = stub_vm_bind_bo,
       .execute_simple_batch = stub_execute_simple_batch,
       .queue_exec_locked = stub_queue_exec_locked,
+      .queue_exec_trace = stub_queue_exec_trace,
+      .bo_alloc_flags_to_bo_flags = stub_bo_alloc_flags_to_bo_flags,
    };
    return &stub_backend;
 }


@@ -89,6 +89,8 @@ struct anv_kmd_backend {
                                  uint32_t perf_query_pass);
    VkResult (*queue_exec_trace)(struct anv_queue *queue,
                                 struct anv_utrace_submit *submit);
+   uint32_t (*bo_alloc_flags_to_bo_flags)(struct anv_device *device,
+                                          enum anv_bo_alloc_flags alloc_flags);
 };
 
 const struct anv_kmd_backend *anv_kmd_backend_get(enum intel_kmd_type type);


@@ -231,6 +231,33 @@ i915_gem_create_userptr(struct anv_device *device, void *mem, uint64_t size)
    return userptr.handle;
 }
 
+static uint32_t
+i915_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+                                enum anv_bo_alloc_flags alloc_flags)
+{
+   struct anv_physical_device *pdevice = device->physical;
+
+   uint64_t bo_flags = EXEC_OBJECT_PINNED;
+
+   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
+      bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+   if (((alloc_flags & ANV_BO_ALLOC_CAPTURE) ||
+        INTEL_DEBUG(DEBUG_CAPTURE_ALL)) &&
+       pdevice->has_exec_capture)
+      bo_flags |= EXEC_OBJECT_CAPTURE;
+
+   if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
+      assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
+      bo_flags |= EXEC_OBJECT_WRITE;
+   }
+
+   if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
+      bo_flags |= EXEC_OBJECT_ASYNC;
+
+   return bo_flags;
+}
+
 const struct anv_kmd_backend *
 anv_i915_kmd_backend_get(void)
 {
@@ -245,6 +272,7 @@ anv_i915_kmd_backend_get(void)
       .execute_simple_batch = i915_execute_simple_batch,
       .queue_exec_locked = i915_queue_exec_locked,
       .queue_exec_trace = i915_queue_exec_trace,
+      .bo_alloc_flags_to_bo_flags = i915_bo_alloc_flags_to_bo_flags,
    };
    return &i915_backend;
 }


@@ -226,6 +226,13 @@ xe_gem_create_userptr(struct anv_device *device, void *mem, uint64_t size)
    return device->workaround_bo->gem_handle;
 }
 
+static uint32_t
+xe_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+                              enum anv_bo_alloc_flags alloc_flags)
+{
+   return 0;
+}
+
 const struct anv_kmd_backend *
 anv_xe_kmd_backend_get(void)
 {
@@ -240,6 +247,7 @@ anv_xe_kmd_backend_get(void)
      .execute_simple_batch = xe_execute_simple_batch,
      .queue_exec_locked = xe_queue_exec_locked,
      .queue_exec_trace = xe_queue_exec_utrace_locked,
+     .bo_alloc_flags_to_bo_flags = xe_bo_alloc_flags_to_bo_flags,
    };
    return &xe_backend;
 }