iris: Start to use intel_bind_timeline

With this patch, VM binds remain synchronous with respect to vm_bind()
KMD backend calls. However, the number of syscalls required for a VM
bind is reduced by 2 (in the optimal case): the syncobj create and
destroy syscalls are replaced by the use of a timeline syncobj.

The next step will be to make this completely asynchronous.

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26805>
This commit is contained in:
José Roberto de Souza 2023-12-22 11:03:15 -08:00 committed by Marge Bot
parent f23395e2b9
commit 63abbb24ad
3 changed files with 31 additions and 18 deletions

View file

@ -231,6 +231,7 @@ struct iris_bufmgr {
struct intel_device_info devinfo;
const struct iris_kmd_backend *kmd_backend;
struct intel_bind_timeline bind_timeline; /* Xe only */
bool bo_reuse:1;
bool use_global_vm:1;
@ -1769,6 +1770,7 @@ iris_bufmgr_destroy_global_vm(struct iris_bufmgr *bufmgr)
/* Nothing to do in i915 */
break;
case INTEL_KMD_TYPE_XE:
intel_bind_timeline_finish(&bufmgr->bind_timeline, bufmgr->fd);
iris_xe_destroy_global_vm(bufmgr);
break;
default:
@ -2269,6 +2271,9 @@ iris_bufmgr_init_global_vm(struct iris_bufmgr *bufmgr)
/* i915 don't require VM, so returning true even if use_global_vm is false */
return true;
case INTEL_KMD_TYPE_XE:
if (!intel_bind_timeline_init(&bufmgr->bind_timeline, bufmgr->fd))
return false;
bufmgr->use_global_vm = iris_xe_init_global_vm(bufmgr, &bufmgr->global_vm_id);
/* Xe requires VM */
return bufmgr->use_global_vm;
@ -2573,3 +2578,9 @@ iris_heap_to_pat_entry(const struct intel_device_info *devinfo,
unreachable("invalid heap for platforms using PAT entries");
}
}
/* Return the bind timeline owned by @bufmgr.
 *
 * The timeline is embedded in struct iris_bufmgr, so the returned
 * pointer stays valid for the lifetime of the buffer manager.
 * Only meaningful on the Xe KMD, where the timeline is initialized
 * in iris_bufmgr_init_global_vm().
 */
struct intel_bind_timeline *
iris_bufmgr_get_bind_timeline(struct iris_bufmgr *bufmgr)
{
   struct intel_bind_timeline *timeline = &bufmgr->bind_timeline;

   return timeline;
}

View file

@ -28,7 +28,9 @@
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include "c11/threads.h"
#include "common/intel_bind_timeline.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "util/u_dynarray.h"
@ -620,6 +622,7 @@ const struct iris_kmd_backend *
iris_bufmgr_get_kernel_driver_backend(struct iris_bufmgr *bufmgr);
uint32_t iris_bufmgr_get_global_vm_id(struct iris_bufmgr *bufmgr);
bool iris_bufmgr_use_global_vm_id(struct iris_bufmgr *bufmgr);
struct intel_bind_timeline *iris_bufmgr_get_bind_timeline(struct iris_bufmgr *bufmgr);
enum iris_madvice {
IRIS_MADVICE_WILL_NEED = 0,

View file

@ -104,27 +104,24 @@ xe_gem_mmap(struct iris_bufmgr *bufmgr, struct iris_bo *bo)
static inline int
xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
{
const struct intel_device_info *devinfo = iris_bufmgr_get_device_info(bo->bufmgr);
struct iris_bufmgr *bufmgr = bo->bufmgr;
struct intel_bind_timeline *bind_timeline = iris_bufmgr_get_bind_timeline(bufmgr);
const struct intel_device_info *devinfo = iris_bufmgr_get_device_info(bufmgr);
uint32_t handle = op == DRM_XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle;
struct drm_xe_sync xe_sync = {
.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
.handle = intel_bind_timeline_get_syncobj(bind_timeline),
.type = DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ,
.flags = DRM_XE_SYNC_FLAG_SIGNAL,
};
struct drm_syncobj_create syncobj_create = {};
struct drm_syncobj_destroy syncobj_destroy = {};
struct drm_syncobj_wait syncobj_wait = {
struct drm_syncobj_timeline_wait syncobj_wait = {
.timeout_nsec = INT64_MAX,
.handles = (uintptr_t)&xe_sync.handle,
.count_handles = 1,
};
uint64_t range, obj_offset = 0;
int ret, fd;
fd = iris_bufmgr_get_fd(bo->bufmgr);
ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
if (ret)
return ret;
xe_sync.handle = syncobj_create.handle;
fd = iris_bufmgr_get_fd(bufmgr);
if (iris_bo_is_imported(bo))
range = bo->size;
@ -143,7 +140,7 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
pat_index = iris_heap_to_pat_entry(devinfo, bo->real.heap)->index;
struct drm_xe_vm_bind args = {
.vm_id = iris_bufmgr_get_global_vm_id(bo->bufmgr),
.vm_id = iris_bufmgr_get_global_vm_id(bufmgr),
.num_syncs = 1,
.syncs = (uintptr_t)&xe_sync,
.num_binds = 1,
@ -154,16 +151,18 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
.bind.op = op,
.bind.pat_index = pat_index,
};
xe_sync.timeline_value = intel_bind_timeline_bind_begin(bind_timeline);
ret = intel_ioctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
intel_bind_timeline_bind_end(bind_timeline);
if (ret == 0) {
syncobj_wait.handles = (uintptr_t)&xe_sync.handle;
ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &syncobj_wait);
} else {
DBG("vm_bind_op: DRM_IOCTL_XE_VM_BIND failed(%i)", ret);
syncobj_wait.points = (uintptr_t)&xe_sync.timeline_value;
ret = intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &syncobj_wait);
}
syncobj_destroy.handle = xe_sync.handle;
intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &syncobj_destroy);
if (ret)
DBG("vm_bind_op: DRM_IOCTL_XE_VM_BIND failed(%i)", ret);
return ret;
}