anv: Add gem VM bind and unbind to backend

The new entry points are not used yet; they will be wired up in the next patch.
Xe only supports submission using VM.
For i915 the backend functions are just a noop.

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21698>
This commit is contained in:
José Roberto de Souza 2023-02-09 08:57:11 -08:00 committed by Marge Bot
parent 324d22d684
commit 37fa2fa30e
6 changed files with 88 additions and 11 deletions

View file

@ -3169,7 +3169,7 @@ anv_device_setup_context_or_vm(struct anv_device *device,
const VkDeviceCreateInfo *pCreateInfo,
const uint32_t num_queues)
{
switch (anv_kmd_type_get(device)) {
switch (device->info->kmd_type) {
case INTEL_KMD_TYPE_I915:
return anv_i915_device_setup_context(device, pCreateInfo, num_queues);
case INTEL_KMD_TYPE_XE:
@ -3183,7 +3183,7 @@ anv_device_setup_context_or_vm(struct anv_device *device,
static bool
anv_device_destroy_context_or_vm(struct anv_device *device)
{
switch (anv_kmd_type_get(device)) {
switch (device->info->kmd_type) {
case INTEL_KMD_TYPE_I915:
return intel_gem_destroy_context(device->fd, device->context_id);
case INTEL_KMD_TYPE_XE:

View file

@ -150,12 +150,26 @@ anv_gem_fd_to_handle(struct anv_device *device, int fd)
unreachable("Unused");
}
/* Stub backend: VM binding is not supported without a real KMD, so this
 * is a no-op that always reports success. */
static int
stub_gem_vm_bind(struct anv_device *device, struct anv_bo *bo)
{
   return 0;
}
/* Stub backend: no-op counterpart to stub_gem_vm_bind(); always succeeds. */
static int
stub_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo)
{
   return 0;
}
const struct anv_kmd_backend *anv_stub_kmd_backend_get(void)
{
static const struct anv_kmd_backend stub_backend = {
.gem_create = stub_gem_create,
.gem_close = stub_gem_close,
.gem_mmap = stub_gem_mmap,
.gem_vm_bind = stub_gem_vm_bind,
.gem_vm_unbind = stub_gem_vm_unbind,
.execute_simple_batch = stub_execute_simple_batch,
.queue_exec_locked = stub_queue_exec_locked,
};

View file

@ -40,9 +40,3 @@ anv_kmd_backend_get(enum intel_kmd_type type)
return NULL;
}
}
/* Returns the kernel-mode-driver type (i915 vs Xe) backing this device.
 *
 * NOTE(review): plain `inline` at file scope has C99 inline semantics and
 * needs an extern definition emitted somewhere to link reliably — confirm
 * one exists, or prefer `static inline`. (This commit replaces callers of
 * this helper with direct device->info->kmd_type accesses.)
 */
inline enum intel_kmd_type
anv_kmd_type_get(struct anv_device *device)
{
   return device->info->kmd_type;
}

View file

@ -52,6 +52,8 @@ struct anv_kmd_backend {
void *(*gem_mmap)(struct anv_device *device, struct anv_bo *bo,
uint64_t offset, uint64_t size,
VkMemoryPropertyFlags property_flags);
int (*gem_vm_bind)(struct anv_device *device, struct anv_bo *bo);
int (*gem_vm_unbind)(struct anv_device *device, struct anv_bo *bo);
VkResult (*execute_simple_batch)(struct anv_queue *queue,
struct anv_bo *batch_bo,
uint32_t batch_bo_size);
@ -68,9 +70,6 @@ struct anv_kmd_backend {
const struct anv_kmd_backend *anv_kmd_backend_get(enum intel_kmd_type type);
enum intel_kmd_type
anv_kmd_type_get(struct anv_device *device);
/* Internal functions, should only be called by anv_kmd_backend_get() */
const struct anv_kmd_backend *anv_i915_kmd_backend_get(void);
const struct anv_kmd_backend *anv_xe_kmd_backend_get(void);

View file

@ -149,6 +149,18 @@ i915_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
return i915_gem_mmap_legacy(device, bo, offset, size, flags);
}
/* For i915 the VM bind backend functions are just a no-op (submission does
 * not go through an explicit VM bind on this KMD); always succeeds. */
static int
i915_gem_vm_bind(struct anv_device *device, struct anv_bo *bo)
{
   return 0;
}
/* No-op counterpart to i915_gem_vm_bind(); always succeeds. */
static int
i915_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo)
{
   return 0;
}
const struct anv_kmd_backend *
anv_i915_kmd_backend_get(void)
{
@ -156,6 +168,8 @@ anv_i915_kmd_backend_get(void)
.gem_create = i915_gem_create,
.gem_close = i915_gem_close,
.gem_mmap = i915_gem_mmap,
.gem_vm_bind = i915_gem_vm_bind,
.gem_vm_unbind = i915_gem_vm_unbind,
.execute_simple_batch = i915_execute_simple_batch,
.queue_exec_locked = i915_queue_exec_locked
};

View file

@ -22,6 +22,7 @@
*/
#include <sys/mman.h>
#include <xf86drm.h>
#include "anv_private.h"
@ -69,6 +70,59 @@ xe_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
device->fd, args.offset);
}
/* Performs a synchronous VM bind or unbind of a BO on the Xe KMD.
 *
 * The Xe VM_BIND ioctl is asynchronous, so a temporary syncobj is attached
 * as an out-fence and waited on before returning, making the operation
 * synchronous from the caller's point of view.
 *
 * op is XE_VM_BIND_OP_MAP or XE_VM_BIND_OP_UNMAP. Returns 0 on success or
 * the failing ioctl's error.
 */
static inline int
xe_gem_vm_bind_op(struct anv_device *device, struct anv_bo *bo, uint32_t op)
{
   uint32_t syncobj_handle;
   int ret = drmSyncobjCreate(device->fd, 0, &syncobj_handle);
   if (ret)
      return ret;

   struct drm_xe_sync sync = {
      .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
      .handle = syncobj_handle,
   };
   struct drm_xe_vm_bind args = {
      .vm_id = device->vm_id,
      .num_binds = 1,
      /* An unmap operates on the address range only, not on a BO. */
      .bind.obj = op == XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle,
      .bind.obj_offset = 0,
      /* The bound range covers the BO plus its CCS data. */
      .bind.range = bo->size + bo->_ccs_size,
      .bind.addr = intel_48b_address(bo->offset),
      .bind.op = op,
      .num_syncs = 1,
      .syncs = (uintptr_t)&sync,
   };
   ret = intel_ioctl(device->fd, DRM_IOCTL_XE_VM_BIND, &args);
   if (ret)
      goto bind_error;

   struct drm_syncobj_wait wait = {
      .handles = (uintptr_t)&syncobj_handle,
      .timeout_nsec = INT64_MAX,
      .count_handles = 1,
      .flags = 0,
      .first_signaled = 0,
      .pad = 0,
   };
   /* Fix: propagate a failed wait instead of silently returning success —
    * otherwise callers could proceed before the (un)bind has completed. */
   ret = intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);

bind_error:
   drmSyncobjDestroy(device->fd, syncobj_handle);
   return ret;
}
/* Synchronously binds a BO into the device's VM. */
static int
xe_gem_vm_bind(struct anv_device *device, struct anv_bo *bo)
{
   return xe_gem_vm_bind_op(device, bo, XE_VM_BIND_OP_MAP);
}
/* Synchronously unbinds a BO from the device's VM. */
static int
xe_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo)
{
   return xe_gem_vm_bind_op(device, bo, XE_VM_BIND_OP_UNMAP);
}
const struct anv_kmd_backend *
anv_xe_kmd_backend_get(void)
{
@ -76,6 +130,8 @@ anv_xe_kmd_backend_get(void)
.gem_create = xe_gem_create,
.gem_close = xe_gem_close,
.gem_mmap = xe_gem_mmap,
.gem_vm_bind = xe_gem_vm_bind,
.gem_vm_unbind = xe_gem_vm_unbind,
};
return &xe_backend;
}