panfrost: Back panfrost_bo with pan_kmod_bo object

We keep the existing BO caching and allocation logic unchanged, but route
all interactions with the kernel driver through pan_kmod.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26357>
This commit is contained in:
Boris Brezillon 2023-11-20 17:50:27 +01:00 committed by Marge Bot
parent 7a9c471009
commit 5089a758df
2 changed files with 97 additions and 116 deletions

View file

@@ -28,7 +28,6 @@
#include <pthread.h> #include <pthread.h>
#include <stdio.h> #include <stdio.h>
#include <xf86drm.h> #include <xf86drm.h>
#include "drm-uapi/panfrost_drm.h"
#include "pan_bo.h" #include "pan_bo.h"
#include "pan_device.h" #include "pan_device.h"
@@ -55,35 +54,55 @@
* around the linked list. * around the linked list.
*/ */
static uint32_t
to_kmod_bo_flags(uint32_t flags)
{
uint32_t kmod_bo_flags = 0;
if (flags & PAN_BO_EXECUTE)
kmod_bo_flags |= PAN_KMOD_BO_FLAG_EXECUTABLE;
if (flags & PAN_BO_GROWABLE)
kmod_bo_flags |= PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT;
if (flags & PAN_BO_INVISIBLE)
kmod_bo_flags |= PAN_KMOD_BO_FLAG_NO_MMAP;
return kmod_bo_flags;
}
static struct panfrost_bo * static struct panfrost_bo *
panfrost_bo_alloc(struct panfrost_device *dev, size_t size, uint32_t flags, panfrost_bo_alloc(struct panfrost_device *dev, size_t size, uint32_t flags,
const char *label) const char *label)
{ {
struct drm_panfrost_create_bo create_bo = {.size = size}; struct pan_kmod_bo *kmod_bo;
struct panfrost_bo *bo; struct panfrost_bo *bo;
int ret;
if (panfrost_device_kmod_version_major(dev) > 1 || kmod_bo =
panfrost_device_kmod_version_minor(dev) >= 1) { pan_kmod_bo_alloc(dev->kmod.dev, NULL, size, to_kmod_bo_flags(flags));
if (flags & PAN_BO_GROWABLE) assert(kmod_bo);
create_bo.flags |= PANFROST_BO_HEAP;
if (!(flags & PAN_BO_EXECUTE))
create_bo.flags |= PANFROST_BO_NOEXEC;
}
ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_CREATE_BO, bo = pan_lookup_bo(dev, kmod_bo->handle);
&create_bo);
if (ret) {
fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %m\n");
return NULL;
}
bo = pan_lookup_bo(dev, create_bo.handle);
assert(!memcmp(bo, &((struct panfrost_bo){}), sizeof(*bo))); assert(!memcmp(bo, &((struct panfrost_bo){}), sizeof(*bo)));
bo->kmod_bo = kmod_bo;
bo->size = create_bo.size; struct pan_kmod_vm_op vm_op = {
bo->ptr.gpu = create_bo.offset; .type = PAN_KMOD_VM_OP_TYPE_MAP,
bo->gem_handle = create_bo.handle; .va =
{
.start = PAN_KMOD_VM_MAP_AUTO_VA,
.size = bo->kmod_bo->size,
},
.map =
{
.bo = bo->kmod_bo,
.bo_offset = 0,
},
};
ASSERTED int ret =
pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &vm_op, 1);
assert(!ret);
bo->ptr.gpu = vm_op.va.start;
bo->flags = flags; bo->flags = flags;
bo->dev = dev; bo->dev = dev;
bo->label = label; bo->label = label;
@@ -93,18 +112,27 @@ panfrost_bo_alloc(struct panfrost_device *dev, size_t size, uint32_t flags,
static void static void
panfrost_bo_free(struct panfrost_bo *bo) panfrost_bo_free(struct panfrost_bo *bo)
{ {
struct drm_gem_close gem_close = {.handle = panfrost_bo_handle(bo)}; struct pan_kmod_bo *kmod_bo = bo->kmod_bo;
int fd = panfrost_device_fd(bo->dev); struct pan_kmod_vm *vm = bo->dev->kmod.vm;
int ret; uint64_t gpu_va = bo->ptr.gpu;
/* BO will be freed with the sparse array, but zero to indicate free */ /* BO will be freed with the sparse array, but zero to indicate free */
memset(bo, 0, sizeof(*bo)); memset(bo, 0, sizeof(*bo));
ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close); struct pan_kmod_vm_op vm_op = {
if (ret) { .type = PAN_KMOD_VM_OP_TYPE_UNMAP,
fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %m\n"); .va =
assert(0); {
} .start = gpu_va,
.size = kmod_bo->size,
},
};
ASSERTED int ret = pan_kmod_vm_bind(
vm, PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT, &vm_op, 1);
assert(!ret);
pan_kmod_bo_put(kmod_bo);
} }
/* Returns true if the BO is ready, false otherwise. /* Returns true if the BO is ready, false otherwise.
@@ -115,12 +143,6 @@ panfrost_bo_free(struct panfrost_bo *bo)
bool bool
panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers) panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers)
{ {
struct drm_panfrost_wait_bo req = {
.handle = panfrost_bo_handle(bo),
.timeout_ns = timeout_ns,
};
int ret;
/* If the BO has been exported or imported we can't rely on the cached /* If the BO has been exported or imported we can't rely on the cached
* state, we need to call the WAIT_BO ioctl. * state, we need to call the WAIT_BO ioctl.
*/ */
@@ -136,12 +158,7 @@ panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers)
return true; return true;
} }
/* The ioctl returns >= 0 value when the BO we are waiting for is ready if (pan_kmod_bo_wait(bo->kmod_bo, timeout_ns, !wait_readers)) {
* -1 otherwise.
*/
ret =
drmIoctl(panfrost_device_fd(bo->dev), DRM_IOCTL_PANFROST_WAIT_BO, &req);
if (ret != -1) {
/* Set gpu_access to 0 so that the next call to bo_wait() /* Set gpu_access to 0 so that the next call to bo_wait()
* doesn't have to call the WAIT_BO ioctl. * doesn't have to call the WAIT_BO ioctl.
*/ */
@@ -149,10 +166,6 @@ panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers)
return true; return true;
} }
/* If errno is not ETIMEDOUT or EBUSY that means the handle we passed
* is invalid, which shouldn't happen here.
*/
assert(errno == ETIMEDOUT || errno == EBUSY);
return false; return false;
} }
@@ -200,22 +213,14 @@ panfrost_bo_cache_fetch(struct panfrost_device *dev, size_t size,
/* If the oldest BO in the cache is busy, likely so is /* If the oldest BO in the cache is busy, likely so is
* everything newer, so bail. */ * everything newer, so bail. */
if (!panfrost_bo_wait(entry, dontwait ? 0 : INT64_MAX, PAN_BO_ACCESS_RW)) if (!panfrost_bo_wait(entry, dontwait ? 0 : INT64_MAX, true))
break; break;
struct drm_panfrost_madvise madv = {
.handle = panfrost_bo_handle(entry),
.madv = PANFROST_MADV_WILLNEED,
};
int ret;
/* This one works, splice it out of the cache */ /* This one works, splice it out of the cache */
list_del(&entry->bucket_link); list_del(&entry->bucket_link);
list_del(&entry->lru_link); list_del(&entry->lru_link);
ret = if (!pan_kmod_bo_make_unevictable(entry->kmod_bo)) {
drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_MADVISE, &madv);
if (!ret && !madv.retained) {
panfrost_bo_free(entry); panfrost_bo_free(entry);
continue; continue;
} }
@@ -269,14 +274,9 @@ panfrost_bo_cache_put(struct panfrost_bo *bo)
pthread_mutex_lock(&dev->bo_cache.lock); pthread_mutex_lock(&dev->bo_cache.lock);
struct list_head *bucket = pan_bucket(dev, MAX2(panfrost_bo_size(bo), 4096)); struct list_head *bucket = pan_bucket(dev, MAX2(panfrost_bo_size(bo), 4096));
struct drm_panfrost_madvise madv;
struct timespec time; struct timespec time;
madv.handle = panfrost_bo_handle(bo); pan_kmod_bo_make_evictable(bo->kmod_bo);
madv.madv = PANFROST_MADV_DONTNEED;
madv.retained = 0;
drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_MADVISE, &madv);
/* Add us to the bucket */ /* Add us to the bucket */
list_addtail(&bo->bucket_link, bucket); list_addtail(&bo->bucket_link, bucket);
@@ -324,28 +324,15 @@ panfrost_bo_cache_evict_all(struct panfrost_device *dev)
void void
panfrost_bo_mmap(struct panfrost_bo *bo) panfrost_bo_mmap(struct panfrost_bo *bo)
{ {
struct drm_panfrost_mmap_bo mmap_bo = {.handle = panfrost_bo_handle(bo)};
int ret;
if (bo->ptr.cpu) if (bo->ptr.cpu)
return; return;
ret = drmIoctl(panfrost_device_fd(bo->dev), DRM_IOCTL_PANFROST_MMAP_BO, bo->ptr.cpu = pan_kmod_bo_mmap(bo->kmod_bo, 0, panfrost_bo_size(bo),
&mmap_bo); PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
if (ret) {
fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
assert(0);
}
bo->ptr.cpu =
os_mmap(NULL, panfrost_bo_size(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
panfrost_device_fd(bo->dev), mmap_bo.offset);
if (bo->ptr.cpu == MAP_FAILED) { if (bo->ptr.cpu == MAP_FAILED) {
bo->ptr.cpu = NULL; bo->ptr.cpu = NULL;
fprintf(stderr, fprintf(stderr, "mmap failed: result=%p size=0x%llx\n", bo->ptr.cpu,
"mmap failed: result=%p size=0x%llx fd=%i offset=0x%llx %m\n", (long long)panfrost_bo_size(bo));
bo->ptr.cpu, (long long)panfrost_bo_size(bo),
panfrost_device_fd(bo->dev), (long long)mmap_bo.offset);
} }
} }
@@ -468,38 +455,39 @@ struct panfrost_bo *
panfrost_bo_import(struct panfrost_device *dev, int fd) panfrost_bo_import(struct panfrost_device *dev, int fd)
{ {
struct panfrost_bo *bo; struct panfrost_bo *bo;
struct drm_panfrost_get_bo_offset get_bo_offset = {
0,
};
ASSERTED int ret; ASSERTED int ret;
unsigned gem_handle; unsigned gem_handle;
pthread_mutex_lock(&dev->bo_map_lock); pthread_mutex_lock(&dev->bo_map_lock);
ret = drmPrimeFDToHandle(dev->kmod.dev->fd, fd, &gem_handle);
ret = drmPrimeFDToHandle(panfrost_device_fd(dev), fd, &gem_handle);
assert(!ret); assert(!ret);
bo = pan_lookup_bo(dev, gem_handle); bo = pan_lookup_bo(dev, gem_handle);
if (!bo->dev) { if (!bo->dev) {
get_bo_offset.handle = gem_handle; bo->dev = dev;
ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_GET_BO_OFFSET, bo->kmod_bo = pan_kmod_bo_import(dev->kmod.dev, fd, 0);
&get_bo_offset);
struct pan_kmod_vm_op vm_op = {
.type = PAN_KMOD_VM_OP_TYPE_MAP,
.va =
{
.start = PAN_KMOD_VM_MAP_AUTO_VA,
.size = bo->kmod_bo->size,
},
.map =
{
.bo = bo->kmod_bo,
.bo_offset = 0,
},
};
ASSERTED int ret = pan_kmod_vm_bind(
dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &vm_op, 1);
assert(!ret); assert(!ret);
bo->dev = dev; bo->ptr.gpu = vm_op.va.start;
bo->ptr.gpu = (mali_ptr)get_bo_offset.offset;
bo->size = lseek(fd, 0, SEEK_END);
/* Sometimes this can fail and return -1. size of -1 is not
* a nice thing for mmap to try mmap. Be more robust also
* for zero sized maps and fail nicely too
*/
if ((panfrost_bo_size(bo) == 0) || (panfrost_bo_size(bo) == (size_t)-1)) {
pthread_mutex_unlock(&dev->bo_map_lock);
return NULL;
}
bo->flags = PAN_BO_SHARED; bo->flags = PAN_BO_SHARED;
bo->gem_handle = gem_handle;
p_atomic_set(&bo->refcnt, 1); p_atomic_set(&bo->refcnt, 1);
} else { } else {
/* bo->refcnt == 0 can happen if the BO /* bo->refcnt == 0 can happen if the BO
@@ -525,16 +513,9 @@ panfrost_bo_import(struct panfrost_device *dev, int fd)
int int
panfrost_bo_export(struct panfrost_bo *bo) panfrost_bo_export(struct panfrost_bo *bo)
{ {
struct drm_prime_handle args = { int ret = pan_kmod_bo_export(bo->kmod_bo);
.handle = panfrost_bo_handle(bo), if (ret >= 0)
.flags = DRM_CLOEXEC, bo->flags |= PAN_BO_SHARED;
};
int ret = drmIoctl(panfrost_device_fd(bo->dev), DRM_IOCTL_PRIME_HANDLE_TO_FD, return ret;
&args);
if (ret == -1)
return -1;
bo->flags |= PAN_BO_SHARED;
return args.fd;
} }

View file

@@ -30,6 +30,8 @@
#include "util/list.h" #include "util/list.h"
#include "panfrost-job.h" #include "panfrost-job.h"
#include "kmod/pan_kmod.h"
/* Flags for allocated memory */ /* Flags for allocated memory */
/* This memory region is executable */ /* This memory region is executable */
@@ -95,16 +97,14 @@ struct panfrost_bo {
/* Atomic reference count */ /* Atomic reference count */
int32_t refcnt; int32_t refcnt;
/* Kernel representation of a buffer object. */
struct pan_kmod_bo *kmod_bo;
struct panfrost_device *dev; struct panfrost_device *dev;
/* Mapping for the entire object (all levels) */ /* Mapping for the entire object (all levels) */
struct panfrost_ptr ptr; struct panfrost_ptr ptr;
/* Size of all entire trees */
size_t size;
int gem_handle;
uint32_t flags; uint32_t flags;
/* Combination of PAN_BO_ACCESS_{READ,WRITE} flags encoding pending /* Combination of PAN_BO_ACCESS_{READ,WRITE} flags encoding pending
@@ -120,13 +120,13 @@ struct panfrost_bo {
static inline size_t static inline size_t
panfrost_bo_size(struct panfrost_bo *bo) panfrost_bo_size(struct panfrost_bo *bo)
{ {
return bo->size; return bo->kmod_bo->size;
} }
static inline size_t static inline size_t
panfrost_bo_handle(struct panfrost_bo *bo) panfrost_bo_handle(struct panfrost_bo *bo)
{ {
return bo->gem_handle; return bo->kmod_bo->handle;
} }
bool panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns,