tu: Refactor BO deletion

For VM_BIND, BO deletion will have to be implemented differently in
native drm and virtio. We already have a somewhat awkward situation with
native-specific code in the common BO deletion helper, which we only get
away with because it's for kernels without SET_IOVA in which case virtio
isn't supported. Add a few common helpers for some of the guts, and move
the guts into backend-specific functions.

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32533>
This commit is contained in:
Connor Abbott 2024-12-02 18:08:55 -05:00 committed by Marge Bot
parent e689b342fe
commit 51a7aebc86
4 changed files with 94 additions and 57 deletions

View file

@@ -59,27 +59,8 @@ tu_drm_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
}
void
tu_drm_bo_finish(struct tu_device *dev, struct tu_bo *bo)
tu_bo_list_del(struct tu_device *dev, struct tu_bo *bo)
{
assert(bo->gem_handle);
u_rwlock_rdlock(&dev->dma_bo_lock);
if (!p_atomic_dec_zero(&bo->refcnt)) {
u_rwlock_rdunlock(&dev->dma_bo_lock);
return;
}
tu_debug_bos_del(dev, bo);
tu_dump_bo_del(dev, bo);
if (bo->map) {
TU_RMV(bo_unmap, dev, bo);
munmap(bo->map, bo->size);
}
TU_RMV(bo_destroy, dev, bo);
mtx_lock(&dev->bo_mutex);
dev->submit_bo_count--;
dev->submit_bo_list[bo->submit_bo_list_idx] = dev->submit_bo_list[dev->submit_bo_count];
@@ -91,44 +72,28 @@ tu_drm_bo_finish(struct tu_device *dev, struct tu_bo *bo)
dev->implicit_sync_bo_count--;
mtx_unlock(&dev->bo_mutex);
}
if (dev->physical_device->has_set_iova) {
mtx_lock(&dev->vma_mutex);
struct tu_zombie_vma *vma = (struct tu_zombie_vma *)
u_vector_add(&dev->zombie_vmas);
vma->gem_handle = bo->gem_handle;
/* Park a dying BO on the device's zombie-VMA list instead of closing its
 * GEM handle right away.  The handle/iova pair is kept alive (as a
 * "zombie") until the recorded queue fence shows the kernel is done with
 * it, at which point a reaper elsewhere can safely close the handle and
 * recycle the iova.
 *
 * Clears *bo before returning: BO structs live in a sparse array on the
 * physical device, so zeroing the struct is what marks the slot free.
 *
 * NOTE(review): the return of u_vector_add() is not checked — presumably
 * the vector grows via an abort-on-OOM allocator; confirm upstream.
 */
void
tu_bo_make_zombie(struct tu_device *dev, struct tu_bo *bo)
{
   mtx_lock(&dev->vma_mutex);
   struct tu_zombie_vma *vma = (struct tu_zombie_vma *)
      u_vector_add(&dev->zombie_vmas);
   vma->gem_handle = bo->gem_handle;
#ifdef TU_HAS_VIRTIO
   vma->res_id = bo->res_id;
#endif
   vma->iova = bo->iova;
   vma->size = bo->size;
   vma->fence = p_atomic_read(&dev->queues[0]->fence);

   /* Must be cleared under the VMA mutex, or another thread could race to
    * reap the VMA, closing the BO and letting a new GEM allocation produce
    * this handle again.
    */
   memset(bo, 0, sizeof(*bo));
   mtx_unlock(&dev->vma_mutex);
}
void *

View file

@@ -20,7 +20,9 @@ VkResult tu_allocate_userspace_iova(struct tu_device *dev,
enum tu_bo_alloc_flags flags,
uint64_t *iova);
int tu_drm_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void tu_drm_bo_finish(struct tu_device *dev, struct tu_bo *bo);
void tu_bo_list_del(struct tu_device *dev, struct tu_bo *bo);
void tu_bo_make_zombie(struct tu_device *dev, struct tu_bo *bo);
struct tu_msm_queue_submit
{

View file

@@ -853,6 +853,50 @@ msm_bo_get_metadata(struct tu_device *dev, struct tu_bo *bo,
return ret;
}
/* bo_finish implementation for the native MSM DRM backend: drop one
 * reference to a BO and tear it down when the last reference goes away.
 */
static void
msm_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
assert(bo->gem_handle);
/* NOTE(review): the dma_bo_lock read lock presumably serializes BO
 * teardown against the dmabuf import/export paths — confirm against
 * those callers.
 */
u_rwlock_rdlock(&dev->dma_bo_lock);
/* Drop our reference; if other references remain, nothing more to do. */
if (!p_atomic_dec_zero(&bo->refcnt)) {
u_rwlock_rdunlock(&dev->dma_bo_lock);
return;
}
/* Last reference: remove the BO from the debug/dump bookkeeping. */
tu_debug_bos_del(dev, bo);
tu_dump_bo_del(dev, bo);
/* Release the CPU mapping, if one was created. */
if (bo->map) {
TU_RMV(bo_unmap, dev, bo);
munmap(bo->map, bo->size);
}
TU_RMV(bo_destroy, dev, bo);
/* Take the BO out of the device's submit BO list. */
tu_bo_list_del(dev, bo);
if (dev->physical_device->has_set_iova) {
/* Defer the actual GEM close until the kernel is done with the iova:
* park the BO on the zombie-VMA list (this also zeroes *bo).
*/
tu_bo_make_zombie(dev, bo);
} else {
/* Our BO structs are stored in a sparse array in the physical device,
* so we don't want to free the BO pointer, instead we want to reset it
* to 0, to signal that array entry as being free.
*/
uint32_t gem_handle = bo->gem_handle;
memset(bo, 0, sizeof(*bo));
struct drm_gem_close req = {
.handle = gem_handle,
};
drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
u_rwlock_rdunlock(&dev->dma_bo_lock);
}
static VkResult
msm_queue_submit(struct tu_queue *queue, void *_submit,
struct vk_sync_wait *waits, uint32_t wait_count,
@@ -1021,7 +1065,7 @@ static const struct tu_knl msm_knl_funcs = {
.bo_export_dmabuf = tu_drm_export_dmabuf,
.bo_map = msm_bo_map,
.bo_allow_dump = msm_bo_allow_dump,
.bo_finish = tu_drm_bo_finish,
.bo_finish = msm_bo_finish,
.bo_set_metadata = msm_bo_set_metadata,
.bo_get_metadata = msm_bo_get_metadata,
.submit_create = msm_submit_create,

View file

@@ -882,6 +882,32 @@ virtio_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo)
mtx_unlock(&dev->bo_mutex);
}
/* bo_finish implementation for the virtio backend: drop one reference to
 * a BO and tear it down when the last reference goes away.  Unlike the
 * native MSM path there is no direct GEM_CLOSE fallback here — virtio is
 * only supported on kernels with SET_IOVA (see the assert below), so the
 * BO always goes through the zombie-VMA path.
 */
static void
virtio_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
assert(bo->gem_handle);
/* NOTE(review): the dma_bo_lock read lock presumably serializes BO
 * teardown against the dmabuf import/export paths — confirm against
 * those callers.
 */
u_rwlock_rdlock(&dev->dma_bo_lock);
/* Drop our reference; if other references remain, nothing more to do. */
if (!p_atomic_dec_zero(&bo->refcnt)) {
u_rwlock_rdunlock(&dev->dma_bo_lock);
return;
}
/* Last reference: remove the BO from the debug/dump bookkeeping. */
tu_debug_bos_del(dev, bo);
tu_dump_bo_del(dev, bo);
/* Release the CPU mapping, if one was created. */
if (bo->map)
munmap(bo->map, bo->size);
/* Take the BO out of the device's submit BO list. */
tu_bo_list_del(dev, bo);
assert(dev->physical_device->has_set_iova);
/* Defer the actual close until the kernel is done with the iova: park
* the BO on the zombie-VMA list (this also zeroes *bo).
*/
tu_bo_make_zombie(dev, bo);
u_rwlock_rdunlock(&dev->dma_bo_lock);
}
static VkResult
setup_fence_cmds(struct tu_device *dev)
{
@@ -1122,7 +1148,7 @@ static const struct tu_knl virtio_knl_funcs = {
.bo_export_dmabuf = virtio_bo_export_dmabuf,
.bo_map = virtio_bo_map,
.bo_allow_dump = virtio_bo_allow_dump,
.bo_finish = tu_drm_bo_finish,
.bo_finish = virtio_bo_finish,
.submit_create = msm_submit_create,
.submit_finish = msm_submit_finish,
.submit_add_entries = msm_submit_add_entries,