tu/drm: Split out iova allocation and BO allocation

Reserve an iova range separately before allocating a BO. This reduces
the size of the critical section under the VMA lock and paves the way
for lazy BOs, where iova initialization is separated out.

While we're here, shrink the region over which the VMA mutex is held when
importing a dma-buf. AFAICT there's no need to hold it across the entire
function, only around the VMA lookup and zombie BO handling.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/37151>
Connor Abbott, 2025-08-28 16:55:51 -04:00 (committed by Marge Bot)
parent b663d8f762
commit 93a80f4bb9
2 changed files with 99 additions and 61 deletions
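
The reordered flow in msm_bo_init is: reserve the iova range under the VMA mutex, create the GEM BO with no lock held, then bind the BO to the reserved range, handing the range back if a later step fails. Below is a minimal, self-contained C sketch of that ordering; the names (reserve_iova, create_bo, release_iova) and the toy bump allocator are illustrative stand-ins, not the Turnip/MSM API.

/* Sketch only: a toy bump allocator stands in for util_vma_heap; all names
 * are illustrative, not the driver's API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t vma_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint64_t vma_cursor = 0x100000;

/* Step 1: reserve an iova range; this is the only step that takes the lock. */
static uint64_t reserve_iova(uint64_t size)
{
   pthread_mutex_lock(&vma_mutex);
   uint64_t iova = vma_cursor;
   vma_cursor += size;
   pthread_mutex_unlock(&vma_mutex);
   return iova;
}

/* Failure path: hand the range back (stands in for util_vma_heap_free). */
static void release_iova(uint64_t iova, uint64_t size)
{
   pthread_mutex_lock(&vma_mutex);
   (void)iova;
   (void)size; /* a real heap would return [iova, iova + size) here */
   pthread_mutex_unlock(&vma_mutex);
}

/* Step 2: create the BO with no lock held (stands in for DRM_MSM_GEM_NEW). */
static bool create_bo(uint64_t size, uint32_t *handle)
{
   *handle = 42;
   return size != 0;
}

int main(void)
{
   const uint64_t size = 4096;
   uint64_t iova = reserve_iova(size);
   uint32_t handle;

   if (!create_bo(size, &handle)) {
      release_iova(iova, size); /* undo the reservation on failure */
      return 1;
   }

   /* Step 3: bind handle to iova (SET_IOVA or VM_BIND in the real driver). */
   printf("BO %u reserved at iova 0x%llx\n", handle, (unsigned long long)iova);
   return 0;
}

The design point is that only the range reservation and the failure-path free need the mutex; the potentially slow BO-creation ioctl runs unlocked.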

Changed file 1 of 2:

@@ -538,19 +538,23 @@ msm_allocate_userspace_iova_locked(struct tu_device *dev,
       result = tu_allocate_userspace_iova(dev, size, client_iova, flags, iova);
    }
 
-   if (result != VK_SUCCESS)
-      return result;
-
+   return result;
+}
+
+static VkResult
+msm_set_iova(struct tu_device *dev,
+             uint32_t gem_handle,
+             uint64_t iova)
+{
    struct drm_msm_gem_info req = {
       .handle = gem_handle,
       .info = MSM_INFO_SET_IOVA,
-      .value = *iova,
+      .value = iova,
    };
 
    int ret =
      drmCommandWriteRead(dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
    if (ret < 0) {
-      util_vma_heap_free(&dev->vma, *iova, size);
       mesa_loge("MSM_INFO_SET_IOVA failed! %d (%s)", ret, strerror(errno));
       return VK_ERROR_OUT_OF_HOST_MEMORY;
    }
@@ -559,9 +563,9 @@ msm_allocate_userspace_iova_locked(struct tu_device *dev,
 }
 
 static VkResult
-tu_allocate_kernel_iova(struct tu_device *dev,
-                        uint32_t gem_handle,
-                        uint64_t *iova)
+msm_get_iova(struct tu_device *dev,
+             uint32_t gem_handle,
+             uint64_t *iova)
 {
    *iova = tu_gem_info(dev, gem_handle, MSM_INFO_GET_IOVA);
    if (!*iova)
@@ -625,23 +629,13 @@ static VkResult
 msm_allocate_vm_bind(struct tu_device *dev,
                      uint32_t gem_handle,
                      uint64_t size,
-                     uint64_t client_iova,
-                     enum tu_bo_alloc_flags flags,
-                     uint64_t *iova)
+                     uint64_t iova,
+                     enum tu_bo_alloc_flags flags)
 {
-   VkResult result;
-
-   *iova = 0;
-
-   result = tu_allocate_userspace_iova(dev, size, client_iova, flags, iova);
-   if (result != VK_SUCCESS)
-      return result;
-
    uint32_t map_op_flags = 0;
    if (flags & TU_BO_ALLOC_ALLOW_DUMP)
       map_op_flags |= MSM_VM_BIND_OP_DUMP;
 
-   return tu_map_vm_bind(dev, MSM_VM_BIND_OP_MAP, map_op_flags, *iova,
+   return tu_map_vm_bind(dev, MSM_VM_BIND_OP_MAP, map_op_flags, iova,
                          gem_handle, 0, size);
 }
@@ -684,29 +678,65 @@ tu_bo_add_to_bo_list(struct tu_device *dev,
    return VK_SUCCESS;
 }
 
+static inline void
+msm_vma_lock(struct tu_device *dev)
+{
+   if (dev->physical_device->has_set_iova)
+      mtx_lock(&dev->vma_mutex);
+}
+
+static inline void
+msm_vma_unlock(struct tu_device *dev)
+{
+   if (dev->physical_device->has_set_iova)
+      mtx_unlock(&dev->vma_mutex);
+}
+
+/* If we have control over iova, allocate the iova range. */
+static VkResult
+tu_allocate_iova(struct tu_device *dev,
+                 uint32_t gem_handle, /* only for BOs imported via dma-buf */
+                 uint64_t size,
+                 uint64_t client_iova,
+                 enum tu_bo_alloc_flags flags,
+                 uint64_t *iova)
+{
+   VkResult result = VK_SUCCESS;
+
+   msm_vma_lock(dev);
+
+   if (dev->physical_device->has_vm_bind) {
+      result = tu_allocate_userspace_iova(dev, size, client_iova, flags, iova);
+   } else if (dev->physical_device->has_set_iova) {
+      assert(dev->physical_device->has_set_iova);
+      result = msm_allocate_userspace_iova_locked(dev, gem_handle, size,
+                                                  client_iova, flags, iova);
+   }
+
+   msm_vma_unlock(dev);
+
+   return result;
+}
+
 static VkResult
 tu_bo_init(struct tu_device *dev,
            struct vk_object_base *base,
            struct tu_bo *bo,
            uint32_t gem_handle,
            uint64_t size,
-           uint64_t client_iova,
+           uint64_t iova,
            enum tu_bo_alloc_flags flags,
            const char *name)
 {
    VkResult result = VK_SUCCESS;
-   uint64_t iova = 0;
-
-   assert(!client_iova || dev->physical_device->has_set_iova);
 
    if (dev->physical_device->has_vm_bind) {
-      result = msm_allocate_vm_bind(dev, gem_handle, size, client_iova, flags,
-                                    &iova);
+      result = msm_allocate_vm_bind(dev, gem_handle, size, iova, flags);
    } else if (dev->physical_device->has_set_iova) {
-      result = msm_allocate_userspace_iova_locked(dev, gem_handle, size,
-                                                  client_iova, flags, &iova);
+      result = msm_set_iova(dev, gem_handle, iova);
    } else {
-      result = tu_allocate_kernel_iova(dev, gem_handle, &iova);
+      result = msm_get_iova(dev, gem_handle, &iova);
    }
 
    if (result != VK_SUCCESS) {
@@ -784,20 +814,6 @@ tu_bo_set_kernel_name(struct tu_device *dev, struct tu_bo *bo, const char *name)
    }
 }
 
-static inline void
-msm_vma_lock(struct tu_device *dev)
-{
-   if (dev->physical_device->has_set_iova)
-      mtx_lock(&dev->vma_mutex);
-}
-
-static inline void
-msm_vma_unlock(struct tu_device *dev)
-{
-   if (dev->physical_device->has_set_iova)
-      mtx_unlock(&dev->vma_mutex);
-}
-
 static VkResult
 msm_bo_init(struct tu_device *dev,
             struct vk_object_base *base,
@@ -809,6 +825,14 @@ msm_bo_init(struct tu_device *dev,
             const char *name)
 {
    MESA_TRACE_FUNC();
+
+   VkResult result;
+   uint64_t iova;
+
+   result = tu_allocate_iova(dev, 0, size, client_iova, flags, &iova);
+   if (result != VK_SUCCESS)
+      return result;
+
    struct drm_msm_gem_new req = {
       .size = size,
       .flags = 0
@@ -832,20 +856,20 @@ msm_bo_init(struct tu_device *dev,
    int ret = drmCommandWriteRead(dev->fd,
                                  DRM_MSM_GEM_NEW, &req, sizeof(req));
-   if (ret)
+   if (ret) {
+      msm_vma_lock(dev);
+      util_vma_heap_free(&dev->vma, iova, size);
+      msm_vma_unlock(dev);
       return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+   }
 
    struct tu_bo* bo = tu_device_lookup_bo(dev, req.handle);
    assert(bo && bo->gem_handle == 0);
    assert(!(flags & TU_BO_ALLOC_DMABUF));
 
-   msm_vma_lock(dev);
-
-   VkResult result =
-      tu_bo_init(dev, base, bo, req.handle, size, client_iova, flags, name);
-
-   msm_vma_unlock(dev);
+   result =
+      tu_bo_init(dev, base, bo, req.handle, size, iova, flags, name);
 
    if (result == VK_SUCCESS) {
       *out_bo = bo;
@@ -853,8 +877,12 @@ msm_bo_init(struct tu_device *dev,
          TU_RMV(internal_resource_create, dev, bo);
          TU_RMV(resource_name, dev, bo, name);
       }
-   } else
+   } else {
+      msm_vma_lock(dev);
+      util_vma_heap_free(&dev->vma, iova, size);
+      msm_vma_unlock(dev);
       memset(bo, 0, sizeof(*bo));
+   }
 
    /* We don't use bo->name here because for the !TU_DEBUG=bo case bo->name is NULL. */
    tu_bo_set_kernel_name(dev, bo, name);
@@ -898,22 +926,21 @@ msm_bo_init_dmabuf(struct tu_device *dev,
     * to happen in parallel.
     */
    u_rwlock_wrlock(&dev->dma_bo_lock);
-   msm_vma_lock(dev);
 
    uint32_t gem_handle;
    int ret = drmPrimeFDToHandle(dev->fd, prime_fd,
                                 &gem_handle);
    if (ret) {
-      msm_vma_unlock(dev);
       u_rwlock_wrunlock(&dev->dma_bo_lock);
       return vk_error(dev, VK_ERROR_INVALID_EXTERNAL_HANDLE);
    }
 
+   uint64_t iova;
    struct tu_bo* bo = tu_device_lookup_bo(dev, gem_handle);
 
    if (bo->refcnt != 0) {
       p_atomic_inc(&bo->refcnt);
-      msm_vma_unlock(dev);
       u_rwlock_wrunlock(&dev->dma_bo_lock);
 
       *out_bo = bo;
@@ -921,16 +948,27 @@ msm_bo_init_dmabuf(struct tu_device *dev,
    }
 
    VkResult result =
-      tu_bo_init(dev, NULL, bo, gem_handle, size, 0, TU_BO_ALLOC_DMABUF, "dmabuf");
+      tu_allocate_iova(dev, gem_handle, size, 0, TU_BO_ALLOC_DMABUF, &iova);
 
-   if (result != VK_SUCCESS)
+   if (result != VK_SUCCESS) {
+      tu_gem_close(dev, gem_handle);
+      goto out_unlock;
+   }
+
+   result =
+      tu_bo_init(dev, NULL, bo, gem_handle, size, iova, TU_BO_ALLOC_DMABUF, "dmabuf");
+
+   if (result != VK_SUCCESS) {
+      msm_vma_lock(dev);
+      util_vma_heap_free(&dev->vma, iova, size);
+      msm_vma_unlock(dev);
       memset(bo, 0, sizeof(*bo));
-   else
+   } else {
       *out_bo = bo;
+   }
 
-   msm_vma_unlock(dev);
+out_unlock:
    u_rwlock_wrunlock(&dev->dma_bo_lock);
 
    return result;
 }

Changed file 2 of 2:

@@ -804,7 +804,6 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
     * to happen in parallel.
     */
    u_rwlock_wrlock(&dev->dma_bo_lock);
-   mtx_lock(&dev->vma_mutex);
 
    uint32_t handle, res_id;
    uint64_t iova;
@@ -834,8 +833,10 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
    bo->res_id = res_id;
 
+   mtx_lock(&dev->vma_mutex);
    result = virtio_allocate_userspace_iova_locked(dev, handle, size, 0,
                                                   TU_BO_ALLOC_DMABUF, &iova);
+   mtx_unlock(&dev->vma_mutex);
 
    if (result != VK_SUCCESS) {
       vdrm_bo_close(dev->vdev->vdrm, handle);
       goto out_unlock;
@@ -853,7 +854,6 @@ virtio_bo_init_dmabuf(struct tu_device *dev,
    set_iova(dev, bo->res_id, iova);
 
 out_unlock:
-   mtx_unlock(&dev->vma_mutex);
    u_rwlock_wrunlock(&dev->dma_bo_lock);
 
    return result;
 }