freedreno/drm: Replace shared/nosync fields

These are redundant with the alloc_flags.  A following commit will map
alloc_flags directly to the bo heap that should be used, and adding the
FD_BO_SHARED bit when a BO is exported neatly avoids returning it to
the pool to be potentially erroneously reused.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20263>
This commit is contained in:
Rob Clark 2022-12-05 10:33:28 -08:00 committed by Marge Bot
parent e6b364f924
commit 803df07d9e
6 changed files with 22 additions and 33 deletions

View file

@ -85,7 +85,7 @@ fd_bo_init_common(struct fd_bo *bo, struct fd_device *dev)
/* allocate a new buffer object, call w/ table_lock held */
static struct fd_bo *
bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
import_bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
{
struct fd_bo *bo;
@ -100,6 +100,8 @@ bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
return NULL;
}
bo->alloc_flags |= FD_BO_SHARED;
/* add ourself into the handle table: */
_mesa_hash_table_insert(dev->handle_table, &bo->handle, bo);
@ -177,7 +179,7 @@ fd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size)
if (bo)
goto out_unlock;
bo = bo_from_handle(dev, size, handle);
bo = import_bo_from_handle(dev, size, handle);
VG_BO_ALLOC(bo);
@ -209,7 +211,7 @@ fd_bo_from_dmabuf(struct fd_device *dev, int fd)
size = lseek(fd, 0, SEEK_END);
lseek(fd, 0, SEEK_CUR);
bo = bo_from_handle(dev, size, handle);
bo = import_bo_from_handle(dev, size, handle);
VG_BO_ALLOC(bo);
@ -243,7 +245,7 @@ fd_bo_from_name(struct fd_device *dev, uint32_t name)
if (bo)
goto out_unlock;
bo = bo_from_handle(dev, req.size, req.handle);
bo = import_bo_from_handle(dev, req.size, req.handle);
if (bo) {
set_name(bo, name);
VG_BO_ALLOC(bo);
@ -442,7 +444,7 @@ fd_bo_get_name(struct fd_bo *bo, uint32_t *name)
set_name(bo, req.name);
simple_mtx_unlock(&table_lock);
bo->bo_reuse = NO_CACHE;
bo->shared = true;
bo->alloc_flags |= FD_BO_SHARED;
bo_flush(bo);
}
@ -455,7 +457,7 @@ uint32_t
fd_bo_handle(struct fd_bo *bo)
{
bo->bo_reuse = NO_CACHE;
bo->shared = true;
bo->alloc_flags |= FD_BO_SHARED;
bo_flush(bo);
return bo->handle;
}
@ -473,7 +475,7 @@ fd_bo_dmabuf(struct fd_bo *bo)
}
bo->bo_reuse = NO_CACHE;
bo->shared = true;
bo->alloc_flags |= FD_BO_SHARED;
bo_flush(bo);
return prime_fd;
@ -620,7 +622,7 @@ fd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence)
{
simple_mtx_assert_locked(&fence_lock);
if (bo->nosync)
if (bo->alloc_flags & _FD_BO_NOSYNC)
return;
/* The common case is bo re-used on the same pipe it had previously
@ -662,7 +664,7 @@ fd_bo_state(struct fd_bo *bo)
* The pipe's control buffer is specifically nosync to avoid recursive
* lock problems here.
*/
if (bo->shared || bo->nosync)
if (bo->alloc_flags & (FD_BO_SHARED | _FD_BO_NOSYNC))
return FD_BO_STATE_UNKNOWN;
simple_mtx_lock(&fence_lock);

View file

@ -280,7 +280,7 @@ retry:
int
fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
{
if (bo->nosync || bo->shared)
if (bo->alloc_flags & (FD_BO_SHARED | _FD_BO_NOSYNC))
return -1;
struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);

View file

@ -112,6 +112,7 @@ struct fd_fence {
/* internal bo flags: */
#define _FD_BO_VIRTIO_SHM BITSET_BIT(6)
#define _FD_BO_NOSYNC BITSET_BIT(7) /* Avoid userspace fencing on control buffers */
/*
* bo access flags: (keep aligned to MSM_PREP_x)
@ -201,18 +202,6 @@ struct fd_bo {
RING_CACHE = 2,
} bo_reuse : 2;
/* Buffers that are shared (imported or exported) may be used in
* other processes, so we need to fallback to kernel to determine
* busyness.
*/
bool shared : 1;
/* We need to be able to disable userspace fence synchronization for
* special internal buffers, namely the pipe->control buffer, to avoid
* a circular reference loop.
*/
bool nosync : 1;
/* Most recent index in submit's bo table, used to optimize the common
* case where a bo is used many times in the same submit.
*/

View file

@ -63,8 +63,14 @@ fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
fd_pipe_get_param(pipe, FD_CHIP_ID, &val);
pipe->dev_id.chip_id = val;
/* Use the _NOSYNC flags because we don't want the control_mem bo to hold
 * a reference to ourself. This also means that we won't be able
* to determine if the buffer is idle which is needed by bo-cache. But
* pipe creation/destroy is not a high frequency event.
*/
pipe->control_mem = fd_bo_new(dev, sizeof(*pipe->control),
FD_BO_CACHED_COHERENT,
FD_BO_CACHED_COHERENT | _FD_BO_NOSYNC,
"pipe-control");
pipe->control = fd_bo_map(pipe->control_mem);
@ -72,14 +78,6 @@ fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
* is not garbage:
*/
pipe->control->fence = 0;
/* We don't want the control_mem bo to hold a reference to the ourself,
* so disable userspace fencing. This also means that we won't be able
* to determine if the buffer is idle which is needed by bo-cache. But
* pipe creation/destroy is not a high frequency event so just disable
* the bo-cache as well:
*/
pipe->control_mem->nosync = true;
pipe->control_mem->bo_reuse = NO_CACHE;
return pipe;

View file

@ -182,7 +182,7 @@ fd_submit_sp_flush_prep(struct fd_submit *submit, int in_fence_fd,
simple_mtx_lock(&fence_lock);
for (unsigned i = 0; i < fd_submit->nr_bos; i++) {
fd_bo_add_fence(fd_submit->bos[i], submit->pipe, submit->fence);
has_shared |= fd_submit->bos[i]->shared;
has_shared |= fd_submit->bos[i]->alloc_flags & FD_BO_SHARED;
}
simple_mtx_unlock(&fence_lock);

View file

@ -107,7 +107,7 @@ virtio_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
/* If buffer is not shared, then it is not shared with host,
* so we don't need to worry about implicit sync in host:
*/
if (!bo->shared)
if (!(bo->alloc_flags & FD_BO_SHARED))
goto out;
/* If buffer is shared, but we are using explicit sync, no