freedreno/drm: Support for batched frees

Batch up handles before closing them, to give the drm backend a chance to
batch up any extra handling needed (i.e. the virtio backend batching up
messages to the host to release IOVA).

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19832>
This commit is contained in:
Rob Clark 2022-11-17 14:34:52 -08:00 committed by Marge Bot
parent e5a60e1df2
commit b4a54824e5
3 changed files with 62 additions and 4 deletions

View file

@ -31,6 +31,7 @@
simple_mtx_t table_lock = SIMPLE_MTX_INITIALIZER;
void bo_del(struct fd_bo *bo);
void bo_del_flush(struct fd_device *dev);
/* set buffer name, and add to table, call w/ table_lock held: */
static void
@ -287,7 +288,10 @@ fd_bo_del_locked(struct fd_bo *bo)
if (!p_atomic_dec_zero(&bo->refcnt))
return;
struct fd_device *dev = bo->dev;
bo_del_or_recycle(bo);
bo_del_flush(dev);
}
void
@ -296,17 +300,29 @@ fd_bo_del(struct fd_bo *bo)
if (!p_atomic_dec_zero(&bo->refcnt))
return;
struct fd_device *dev = bo->dev;
simple_mtx_lock(&table_lock);
bo_del_or_recycle(bo);
bo_del_flush(dev);
simple_mtx_unlock(&table_lock);
}
/**
 * Batched variant of fd_bo_del(): drop a reference on each of @count
 * bos under a single table_lock acquisition, so all freed GEM handles
 * are queued and released with one bo_del_flush() at the end (which
 * lets the backend batch up any per-handle work, e.g. virtio host
 * messages).
 *
 * NOTE(review): assumes all bos belong to the same device — only
 * bos[0]->dev is flushed; confirm against callers.
 */
void
fd_bo_del_array(struct fd_bo **bos, unsigned count)
{
   if (!count)
      return;

   struct fd_device *dev = bos[0]->dev;

   simple_mtx_lock(&table_lock);

   for (unsigned i = 0; i < count; i++) {
      /* Only bos whose refcount actually drops to zero get deleted: */
      if (!p_atomic_dec_zero(&bos[i]->refcnt))
         continue;
      bo_del_or_recycle(bos[i]);
   }

   /* Release all deferred GEM handles before dropping table_lock: */
   bo_del_flush(dev);

   simple_mtx_unlock(&table_lock);
}
@ -342,7 +358,11 @@ cleanup_fences(struct fd_bo *bo, bool expired)
}
}
/* Called under table_lock, bo_del_flush() *must* be called before
 * table_lock is released (but bo_del() can be called multiple times
 * before bo_del_flush(), as long as table_lock is held the entire
 * time)
 */
void
bo_del(struct fd_bo *bo)
{
@ -369,11 +389,30 @@ bo_del(struct fd_bo *bo)
bo->funcs->destroy(bo);
if (handle) {
if (dev->num_deferred_handles == ARRAY_SIZE(dev->deferred_handles))
bo_del_flush(dev);
dev->deferred_handles[dev->num_deferred_handles++] = handle;
}
}
/**
 * Close all GEM handles that bo_del() has deferred.
 *
 * Called under table_lock.  Gives the backend's flush hook (if any) a
 * chance to run first — per the commit intent, this lets the virtio
 * backend batch its queued host messages before the handles go away.
 */
void
bo_del_flush(struct fd_device *dev)
{
   if (!dev->num_deferred_handles)
      return;

   if (dev->funcs->flush)
      dev->funcs->flush(dev);

   for (unsigned i = 0; i < dev->num_deferred_handles; i++) {
      struct drm_gem_close req = {
         .handle = dev->deferred_handles[i],
      };
      drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
   }

   dev->num_deferred_handles = 0;
}
static void

View file

@ -28,6 +28,7 @@
#include "freedreno_priv.h"
void bo_del(struct fd_bo *bo);
void bo_del_flush(struct fd_device *dev);
extern simple_mtx_t table_lock;
static void
@ -79,6 +80,7 @@ fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
struct fd_device *dev = NULL;
int i;
simple_mtx_assert_locked(&table_lock);
@ -97,12 +99,17 @@ fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
if (time && ((time - bo->free_time) <= 1))
break;
dev = bo->dev;
VG_BO_OBTAIN(bo);
list_del(&bo->list);
bo_del(bo);
}
}
if (dev)
bo_del_flush(dev);
cache->time = time;
}
@ -170,8 +177,10 @@ retry:
VG_BO_OBTAIN(bo);
if (bo->funcs->madvise(bo, true) <= 0) {
/* we've lost the backing pages, delete and try again: */
struct fd_device *dev = bo->dev;
simple_mtx_lock(&table_lock);
bo_del(bo);
bo_del_flush(dev);
simple_mtx_unlock(&table_lock);
goto retry;
}

View file

@ -107,6 +107,7 @@ struct fd_device_funcs {
struct fd_pipe *(*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
unsigned prio);
int (*flush)(struct fd_device *dev);
void (*destroy)(struct fd_device *dev);
};
@ -179,6 +180,15 @@ struct fd_device {
simple_mtx_t suballoc_lock;
struct util_queue submit_queue;
/**
* GEM handles can be queued/batched for freeing in cases where many
* buffers are freed together under table_lock. This enables the
* virtio backend to batch messages to the host to avoid quickly
* depleting the virtqueue ringbuffer slots.
*/
uint32_t deferred_handles[64];
uint32_t num_deferred_handles;
};
#define foreach_submit(name, list) \