panfrost: Back panfrost_bo with pan_kmod_bo object
We keep the existing implementation unchanged but use pan_kmod for all
interactions with the kernel driver.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Erik Faye-Lund <erik.faye-lund@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26357>
This commit is contained in:
  parent 7a9c471009
  commit 5089a758df

2 changed files with 97 additions and 116 deletions
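In outline, each panfrost_bo now wraps a pan_kmod_bo and every direct panfrost ioctl is replaced by the matching pan_kmod entry point: CREATE_BO becomes pan_kmod_bo_alloc() plus an explicit GPU VA bind, MMAP_BO becomes pan_kmod_bo_mmap(), WAIT_BO becomes pan_kmod_bo_wait(), MADVISE becomes pan_kmod_bo_make_evictable()/pan_kmod_bo_make_unevictable(), and PRIME export/import go through pan_kmod_bo_export()/pan_kmod_bo_import(). The sketch below pulls the new allocation path out of the diff to show how the pieces fit together; it is illustrative only (example_bo_create is a made-up name and error handling is elided), while all other identifiers are taken from the diff itself.

/* Sketch only: how a BO is created once pan_kmod backs panfrost_bo.
 * Error handling and reference counting are elided. */
static struct panfrost_bo *
example_bo_create(struct panfrost_device *dev, size_t size, uint32_t flags)
{
   /* PAN_BO_* flags are translated to PAN_KMOD_BO_FLAG_* and the BO is
    * allocated through the kmod layer instead of DRM_IOCTL_PANFROST_CREATE_BO. */
   struct pan_kmod_bo *kmod_bo =
      pan_kmod_bo_alloc(dev->kmod.dev, NULL, size, to_kmod_bo_flags(flags));

   /* The GPU VA that the old ioctl returned implicitly is now requested with
    * an explicit MAP operation on the device VM. */
   struct pan_kmod_vm_op op = {
      .type = PAN_KMOD_VM_OP_TYPE_MAP,
      .va = {.start = PAN_KMOD_VM_MAP_AUTO_VA, .size = kmod_bo->size},
      .map = {.bo = kmod_bo, .bo_offset = 0},
   };
   pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &op, 1);

   /* The panfrost_bo wrapper only records the kmod object and the VA;
    * size and GEM handle are read back from kmod_bo when needed. */
   struct panfrost_bo *bo = pan_lookup_bo(dev, kmod_bo->handle);
   bo->kmod_bo = kmod_bo;
   bo->ptr.gpu = op.va.start;
   return bo;
}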
pan_bo.c

@@ -28,7 +28,6 @@
 #include <pthread.h>
 #include <stdio.h>
 #include <xf86drm.h>
-#include "drm-uapi/panfrost_drm.h"
 
 #include "pan_bo.h"
 #include "pan_device.h"
@@ -55,35 +54,55 @@
  * around the linked list.
  */
 
+static uint32_t
+to_kmod_bo_flags(uint32_t flags)
+{
+   uint32_t kmod_bo_flags = 0;
+
+   if (flags & PAN_BO_EXECUTE)
+      kmod_bo_flags |= PAN_KMOD_BO_FLAG_EXECUTABLE;
+   if (flags & PAN_BO_GROWABLE)
+      kmod_bo_flags |= PAN_KMOD_BO_FLAG_ALLOC_ON_FAULT;
+   if (flags & PAN_BO_INVISIBLE)
+      kmod_bo_flags |= PAN_KMOD_BO_FLAG_NO_MMAP;
+
+   return kmod_bo_flags;
+}
+
 static struct panfrost_bo *
 panfrost_bo_alloc(struct panfrost_device *dev, size_t size, uint32_t flags,
                   const char *label)
 {
-   struct drm_panfrost_create_bo create_bo = {.size = size};
+   struct pan_kmod_bo *kmod_bo;
    struct panfrost_bo *bo;
-   int ret;
-
-   if (panfrost_device_kmod_version_major(dev) > 1 ||
-       panfrost_device_kmod_version_minor(dev) >= 1) {
-      if (flags & PAN_BO_GROWABLE)
-         create_bo.flags |= PANFROST_BO_HEAP;
-      if (!(flags & PAN_BO_EXECUTE))
-         create_bo.flags |= PANFROST_BO_NOEXEC;
-   }
+
+   kmod_bo =
+      pan_kmod_bo_alloc(dev->kmod.dev, NULL, size, to_kmod_bo_flags(flags));
+   assert(kmod_bo);
 
-   ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_CREATE_BO,
-                  &create_bo);
-   if (ret) {
-      fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %m\n");
-      return NULL;
-   }
-
-   bo = pan_lookup_bo(dev, create_bo.handle);
+   bo = pan_lookup_bo(dev, kmod_bo->handle);
    assert(!memcmp(bo, &((struct panfrost_bo){}), sizeof(*bo)));
+   bo->kmod_bo = kmod_bo;
 
-   bo->size = create_bo.size;
-   bo->ptr.gpu = create_bo.offset;
-   bo->gem_handle = create_bo.handle;
+   struct pan_kmod_vm_op vm_op = {
+      .type = PAN_KMOD_VM_OP_TYPE_MAP,
+      .va =
+         {
+            .start = PAN_KMOD_VM_MAP_AUTO_VA,
+            .size = bo->kmod_bo->size,
+         },
+      .map =
+         {
+            .bo = bo->kmod_bo,
+            .bo_offset = 0,
+         },
+   };
+
+   ASSERTED int ret =
+      pan_kmod_vm_bind(dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &vm_op, 1);
+   assert(!ret);
+
+   bo->ptr.gpu = vm_op.va.start;
    bo->flags = flags;
    bo->dev = dev;
    bo->label = label;
@@ -93,18 +112,27 @@ panfrost_bo_alloc(struct panfrost_device *dev, size_t size, uint32_t flags,
 static void
 panfrost_bo_free(struct panfrost_bo *bo)
 {
-   struct drm_gem_close gem_close = {.handle = panfrost_bo_handle(bo)};
-   int fd = panfrost_device_fd(bo->dev);
-   int ret;
+   struct pan_kmod_bo *kmod_bo = bo->kmod_bo;
+   struct pan_kmod_vm *vm = bo->dev->kmod.vm;
+   uint64_t gpu_va = bo->ptr.gpu;
 
    /* BO will be freed with the sparse array, but zero to indicate free */
    memset(bo, 0, sizeof(*bo));
 
-   ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
-   if (ret) {
-      fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %m\n");
-      assert(0);
-   }
+   struct pan_kmod_vm_op vm_op = {
+      .type = PAN_KMOD_VM_OP_TYPE_UNMAP,
+      .va =
+         {
+            .start = gpu_va,
+            .size = kmod_bo->size,
+         },
+   };
+
+   ASSERTED int ret = pan_kmod_vm_bind(
+      vm, PAN_KMOD_VM_OP_MODE_DEFER_TO_NEXT_IDLE_POINT, &vm_op, 1);
+   assert(!ret);
+
+   pan_kmod_bo_put(kmod_bo);
 }
 
 /* Returns true if the BO is ready, false otherwise.
@@ -115,12 +143,6 @@ panfrost_bo_free(struct panfrost_bo *bo)
 bool
 panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers)
 {
-   struct drm_panfrost_wait_bo req = {
-      .handle = panfrost_bo_handle(bo),
-      .timeout_ns = timeout_ns,
-   };
-   int ret;
-
    /* If the BO has been exported or imported we can't rely on the cached
     * state, we need to call the WAIT_BO ioctl.
     */
@@ -136,12 +158,7 @@ panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers)
       return true;
    }
 
-   /* The ioctl returns >= 0 value when the BO we are waiting for is ready
-    * -1 otherwise.
-    */
-   ret =
-      drmIoctl(panfrost_device_fd(bo->dev), DRM_IOCTL_PANFROST_WAIT_BO, &req);
-   if (ret != -1) {
+   if (pan_kmod_bo_wait(bo->kmod_bo, timeout_ns, !wait_readers)) {
       /* Set gpu_access to 0 so that the next call to bo_wait()
        * doesn't have to call the WAIT_BO ioctl.
        */
@@ -149,10 +166,6 @@ panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns, bool wait_readers)
       return true;
    }
 
-   /* If errno is not ETIMEDOUT or EBUSY that means the handle we passed
-    * is invalid, which shouldn't happen here.
-    */
-   assert(errno == ETIMEDOUT || errno == EBUSY);
    return false;
 }
 
@@ -200,22 +213,14 @@ panfrost_bo_cache_fetch(struct panfrost_device *dev, size_t size,
 
       /* If the oldest BO in the cache is busy, likely so is
       * everything newer, so bail. */
-      if (!panfrost_bo_wait(entry, dontwait ? 0 : INT64_MAX, PAN_BO_ACCESS_RW))
+      if (!panfrost_bo_wait(entry, dontwait ? 0 : INT64_MAX, true))
          break;
 
-      struct drm_panfrost_madvise madv = {
-         .handle = panfrost_bo_handle(entry),
-         .madv = PANFROST_MADV_WILLNEED,
-      };
-      int ret;
-
      /* This one works, splice it out of the cache */
      list_del(&entry->bucket_link);
      list_del(&entry->lru_link);
 
-      ret =
-         drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_MADVISE, &madv);
-      if (!ret && !madv.retained) {
+      if (!pan_kmod_bo_make_unevictable(entry->kmod_bo)) {
         panfrost_bo_free(entry);
         continue;
      }
@@ -269,14 +274,9 @@ panfrost_bo_cache_put(struct panfrost_bo *bo)
    pthread_mutex_lock(&dev->bo_cache.lock);
 
    struct list_head *bucket = pan_bucket(dev, MAX2(panfrost_bo_size(bo), 4096));
-   struct drm_panfrost_madvise madv;
    struct timespec time;
 
-   madv.handle = panfrost_bo_handle(bo);
-   madv.madv = PANFROST_MADV_DONTNEED;
-   madv.retained = 0;
-
-   drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_MADVISE, &madv);
+   pan_kmod_bo_make_evictable(bo->kmod_bo);
 
    /* Add us to the bucket */
    list_addtail(&bo->bucket_link, bucket);
@@ -324,28 +324,15 @@ panfrost_bo_cache_evict_all(struct panfrost_device *dev)
 void
 panfrost_bo_mmap(struct panfrost_bo *bo)
 {
-   struct drm_panfrost_mmap_bo mmap_bo = {.handle = panfrost_bo_handle(bo)};
-   int ret;
-
    if (bo->ptr.cpu)
       return;
 
-   ret = drmIoctl(panfrost_device_fd(bo->dev), DRM_IOCTL_PANFROST_MMAP_BO,
-                  &mmap_bo);
-   if (ret) {
-      fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
-      assert(0);
-   }
-
-   bo->ptr.cpu =
-      os_mmap(NULL, panfrost_bo_size(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
-              panfrost_device_fd(bo->dev), mmap_bo.offset);
+   bo->ptr.cpu = pan_kmod_bo_mmap(bo->kmod_bo, 0, panfrost_bo_size(bo),
+                                  PROT_READ | PROT_WRITE, MAP_SHARED, NULL);
    if (bo->ptr.cpu == MAP_FAILED) {
       bo->ptr.cpu = NULL;
-      fprintf(stderr,
-              "mmap failed: result=%p size=0x%llx fd=%i offset=0x%llx %m\n",
-              bo->ptr.cpu, (long long)panfrost_bo_size(bo),
-              panfrost_device_fd(bo->dev), (long long)mmap_bo.offset);
+      fprintf(stderr, "mmap failed: result=%p size=0x%llx\n", bo->ptr.cpu,
+              (long long)panfrost_bo_size(bo));
    }
 }
 
@@ -468,38 +455,39 @@ struct panfrost_bo *
 panfrost_bo_import(struct panfrost_device *dev, int fd)
 {
    struct panfrost_bo *bo;
-   struct drm_panfrost_get_bo_offset get_bo_offset = {
-      0,
-   };
    ASSERTED int ret;
    unsigned gem_handle;
 
    pthread_mutex_lock(&dev->bo_map_lock);
 
-   ret = drmPrimeFDToHandle(panfrost_device_fd(dev), fd, &gem_handle);
+   ret = drmPrimeFDToHandle(dev->kmod.dev->fd, fd, &gem_handle);
    assert(!ret);
 
    bo = pan_lookup_bo(dev, gem_handle);
 
    if (!bo->dev) {
-      get_bo_offset.handle = gem_handle;
-      ret = drmIoctl(panfrost_device_fd(dev), DRM_IOCTL_PANFROST_GET_BO_OFFSET,
-                     &get_bo_offset);
+      bo->dev = dev;
+      bo->kmod_bo = pan_kmod_bo_import(dev->kmod.dev, fd, 0);
+
+      struct pan_kmod_vm_op vm_op = {
+         .type = PAN_KMOD_VM_OP_TYPE_MAP,
+         .va =
+            {
+               .start = PAN_KMOD_VM_MAP_AUTO_VA,
+               .size = bo->kmod_bo->size,
+            },
+         .map =
+            {
+               .bo = bo->kmod_bo,
+               .bo_offset = 0,
+            },
+      };
+
+      ASSERTED int ret = pan_kmod_vm_bind(
+         dev->kmod.vm, PAN_KMOD_VM_OP_MODE_IMMEDIATE, &vm_op, 1);
+      assert(!ret);
 
-      bo->dev = dev;
-      bo->ptr.gpu = (mali_ptr)get_bo_offset.offset;
-      bo->size = lseek(fd, 0, SEEK_END);
-      /* Sometimes this can fail and return -1. size of -1 is not
-       * a nice thing for mmap to try mmap. Be more robust also
-       * for zero sized maps and fail nicely too
-       */
-      if ((panfrost_bo_size(bo) == 0) || (panfrost_bo_size(bo) == (size_t)-1)) {
-         pthread_mutex_unlock(&dev->bo_map_lock);
-         return NULL;
-      }
+      bo->ptr.gpu = vm_op.va.start;
       bo->flags = PAN_BO_SHARED;
-      bo->gem_handle = gem_handle;
       p_atomic_set(&bo->refcnt, 1);
    } else {
       /* bo->refcnt == 0 can happen if the BO
@@ -525,16 +513,9 @@ panfrost_bo_import(struct panfrost_device *dev, int fd)
 int
 panfrost_bo_export(struct panfrost_bo *bo)
 {
-   struct drm_prime_handle args = {
-      .handle = panfrost_bo_handle(bo),
-      .flags = DRM_CLOEXEC,
-   };
+   int ret = pan_kmod_bo_export(bo->kmod_bo);
+   if (ret >= 0)
+      bo->flags |= PAN_BO_SHARED;
 
-   int ret = drmIoctl(panfrost_device_fd(bo->dev), DRM_IOCTL_PRIME_HANDLE_TO_FD,
-                      &args);
-   if (ret == -1)
-      return -1;
-
-   bo->flags |= PAN_BO_SHARED;
-   return args.fd;
+   return ret;
 }
pan_bo.h

@@ -30,6 +30,8 @@
 #include "util/list.h"
 #include "panfrost-job.h"
 
+#include "kmod/pan_kmod.h"
+
 /* Flags for allocated memory */
 
 /* This memory region is executable */
@@ -95,16 +97,14 @@ struct panfrost_bo {
    /* Atomic reference count */
    int32_t refcnt;
 
+   /* Kernel representation of a buffer object. */
+   struct pan_kmod_bo *kmod_bo;
+
    struct panfrost_device *dev;
 
    /* Mapping for the entire object (all levels) */
    struct panfrost_ptr ptr;
 
-   /* Size of all entire trees */
-   size_t size;
-
-   int gem_handle;
-
    uint32_t flags;
 
    /* Combination of PAN_BO_ACCESS_{READ,WRITE} flags encoding pending
@@ -120,13 +120,13 @@ struct panfrost_bo {
 static inline size_t
 panfrost_bo_size(struct panfrost_bo *bo)
 {
-   return bo->size;
+   return bo->kmod_bo->size;
 }
 
 static inline size_t
 panfrost_bo_handle(struct panfrost_bo *bo)
 {
-   return bo->gem_handle;
+   return bo->kmod_bo->handle;
 }
 
 bool panfrost_bo_wait(struct panfrost_bo *bo, int64_t timeout_ns,