turnip/kgsl: Support external memory via ION/DMABUF buffers

The android12-5.10 kernel has ION disabled, so buffers have to be
allocated via dma_heap instead.

Before that there was an ION ABI break, which is also handled here; see:
https://source.android.com/devices/architecture/kernel/ion_abi_changes

ion_4.19.h and ion.h are copied from libion:
https://android.googlesource.com/platform/system/memory/libion

Signed-off-by: Danylo Piliaiev <dpiliaiev@igalia.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14928>
Authored by Danylo Piliaiev on 2024-05-30 19:03:25 +02:00, committed by Marge Bot
commit 17c12a9924, parent 048f761fae
7 changed files with 307 additions and 6 deletions
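
On the application side, this is what the new TU_BO_ALLOC_SHAREABLE path ultimately serves: allocating exportable memory and pulling out a dma-buf fd through VK_KHR_external_memory_fd. A minimal sketch for illustration only; export_memory_as_dmabuf is not part of the patch and error handling is omitted.

#include <vulkan/vulkan.h>

/* Illustrative sketch only (not part of the patch): export a fresh
 * allocation as a dma-buf fd. mem_type_index would come from the usual
 * vkGetPhysicalDeviceMemoryProperties selection. */
static int
export_memory_as_dmabuf(VkDevice device, VkDeviceSize size, uint32_t mem_type_index)
{
   VkExportMemoryAllocateInfo export_info = {
      .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
      .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   };
   VkMemoryAllocateInfo alloc_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &export_info,
      .allocationSize = size,
      .memoryTypeIndex = mem_type_index,
   };
   VkDeviceMemory memory;
   vkAllocateMemory(device, &alloc_info, NULL, &memory);

   VkMemoryGetFdInfoKHR get_fd = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
      .memory = memory,
      .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
   };
   int dmabuf_fd = -1;
   PFN_vkGetMemoryFdKHR get_memory_fd =
      (PFN_vkGetMemoryFdKHR)vkGetDeviceProcAddr(device, "vkGetMemoryFdKHR");
   get_memory_fd(device, &get_fd, &dmabuf_fd);
   return dmabuf_fd;
}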

@ -0,0 +1,70 @@
/* Copied from libion:
* https://android.googlesource.com/platform/system/memory/libion/
*
* This header was automatically generated from a Linux kernel header
* of the same name, to make information necessary for userspace to
* call into the kernel available to libc. It contains only constants,
* structures, and macros generated from the original header, and thus,
* contains no copyrightable information.
*/
#ifndef _UAPI_LINUX_ION_H
#define _UAPI_LINUX_ION_H
#include <linux/ioctl.h>
#include <linux/types.h>
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
typedef int ion_user_handle_t;
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_DMA,
ION_HEAP_TYPE_CUSTOM,
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
ION_NUM_HEAPS = 16,
};
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
#define ION_FLAG_CACHED 1
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_FLAG_CACHED_NEEDS_SYNC 2
struct ion_allocation_data {
size_t len;
size_t align;
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
unsigned int heap_id_mask;
unsigned int flags;
ion_user_handle_t handle;
};
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
struct ion_fd_data {
ion_user_handle_t handle;
int fd;
};
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
struct ion_handle_data {
ion_user_handle_t handle;
};
struct ion_custom_data {
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
unsigned int cmd;
unsigned long arg;
};
#define ION_IOC_MAGIC 'I'
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_allocation_data)
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
#endif
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */

@ -0,0 +1,55 @@
/* Copied from libion:
* https://android.googlesource.com/platform/system/memory/libion/
*
* This header was automatically generated from a Linux kernel header
* of the same name, to make information necessary for userspace to
* call into the kernel available to libc. It contains only constants,
* structures, and macros generated from the original header, and thus,
* contains no copyrightable information.
*/
#ifndef _UAPI_LINUX_ION_NEW_H
#define _UAPI_LINUX_ION_NEW_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
enum ion_heap_type_ext {
ION_HEAP_TYPE_CUSTOM_EXT = 16,
ION_HEAP_TYPE_MAX = 31,
};
enum ion_heap_id {
ION_HEAP_SYSTEM = (1 << ION_HEAP_TYPE_SYSTEM),
ION_HEAP_DMA_START = (ION_HEAP_SYSTEM << 1),
ION_HEAP_DMA_END = (ION_HEAP_DMA_START << 7),
ION_HEAP_CUSTOM_START = (ION_HEAP_DMA_END << 1),
ION_HEAP_CUSTOM_END = (ION_HEAP_CUSTOM_START << 22),
};
#define ION_NUM_MAX_HEAPS (32)
struct ion_new_allocation_data {
__u64 len;
__u32 heap_id_mask;
__u32 flags;
__u32 fd;
__u32 unused;
};
#define MAX_HEAP_NAME 32
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
__u32 reserved0;
__u32 reserved1;
__u32 reserved2;
};
struct ion_heap_query {
__u32 cnt;
__u32 reserved0;
__u64 heaps;
__u32 reserved1;
__u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
#define ION_IOC_NEW_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_new_allocation_data)
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, struct ion_heap_query)
#define ION_IOC_ABI_VERSION _IOR(ION_IOC_MAGIC, 9, __u32)
#endif
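
For reference, the new ION ABI also lets userspace enumerate heaps instead of hardcoding a heap mask (the driver below simply uses a fixed KGSL_ION_SYSTEM_HEAP_MASK). A rough sketch, not used by the patch, assuming the new-ABI header above is included; list_ion_heaps is a hypothetical helper and error handling is omitted.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Illustrative sketch only: enumerate ION heaps via ION_IOC_HEAP_QUERY
 * on a new-ABI kernel. */
static void
list_ion_heaps(void)
{
   int fd = open("/dev/ion", O_RDONLY | O_CLOEXEC);

   struct ion_heap_query query = { .cnt = 0, .heaps = 0 };
   /* With heaps == 0 the kernel only reports the heap count. */
   ioctl(fd, ION_IOC_HEAP_QUERY, &query);

   struct ion_heap_data heaps[ION_NUM_MAX_HEAPS] = {0};
   if (query.cnt > ION_NUM_MAX_HEAPS)
      query.cnt = ION_NUM_MAX_HEAPS;
   query.heaps = (uintptr_t)heaps;
   /* Second call fills in name, type and heap_id for each heap. */
   ioctl(fd, ION_IOC_HEAP_QUERY, &query);

   for (uint32_t i = 0; i < query.cnt; i++)
      printf("heap %u: %s (type %u)\n", heaps[i].heap_id, heaps[i].name, heaps[i].type);

   close(fd);
}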

@ -1310,6 +1310,9 @@ tu_physical_device_finish(struct tu_physical_device *device)
if (device->master_fd != -1)
close(device->master_fd);
if (device->kgsl_dma_fd != -1)
close(device->kgsl_dma_fd);
disk_cache_destroy(device->vk.disk_cache);
vk_free(&device->instance->vk.alloc, (void *)device->name);
@ -2836,6 +2839,14 @@ tu_AllocateMemory(VkDevice _device,
alloc_flags |= TU_BO_ALLOC_REPLAYABLE;
}
const VkExportMemoryAllocateInfo *export_info =
vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO);
if (export_info && (export_info->handleTypes &
(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT)))
alloc_flags |= TU_BO_ALLOC_SHAREABLE;
char name[64] = "vkAllocateMemory()";
if (device->bo_sizes)
snprintf(name, ARRAY_SIZE(name), "vkAllocateMemory(%ldkb)",

@ -63,6 +63,13 @@ struct tu_memory_heap {
alignas(8) VkDeviceSize used;
};
enum tu_kgsl_dma_type
{
TU_KGSL_DMA_TYPE_ION_LEGACY,
TU_KGSL_DMA_TYPE_ION,
TU_KGSL_DMA_TYPE_DMAHEAP,
};
struct tu_physical_device
{
struct vk_physical_device vk;
@ -86,6 +93,9 @@ struct tu_physical_device
int64_t master_major;
int64_t master_minor;
int kgsl_dma_fd;
enum tu_kgsl_dma_type kgsl_dma_type;
uint32_t gmem_size;
uint64_t gmem_base;

@ -298,6 +298,7 @@ tu_physical_device_try_create(struct vk_instance *vk_instance,
}
device->master_fd = master_fd;
device->kgsl_dma_fd = -1;
assert(strlen(path) < ARRAY_SIZE(device->fd_path));
snprintf(device->fd_path, ARRAY_SIZE(device->fd_path), "%s", path);

@ -22,6 +22,7 @@ enum tu_bo_alloc_flags {
TU_BO_ALLOC_REPLAYABLE = 1 << 2,
TU_BO_ALLOC_INTERNAL_RESOURCE = 1 << 3,
TU_BO_ALLOC_DMABUF = 1 << 4,
TU_BO_ALLOC_SHAREABLE = 1 << 5,
};
/* Define tu_timeline_sync type based on drm syncobj for a point type
@ -53,6 +54,14 @@ struct tu_bo {
uint32_t bo_list_idx;
#ifdef TU_HAS_KGSL
/* We have to store the fd returned by ion_fd_data
* in order to be able to mmap this buffer and to
* export the file descriptor.
*/
int shared_fd;
#endif
bool implicit_sync : 1;
};

@ -11,10 +11,15 @@
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-heap.h>
#include "msm_kgsl.h"
#include "ion/ion.h"
#include "ion/ion_4.19.h"
#include "vk_util.h"
#include "util/os_file.h"
#include "util/u_debug.h"
#include "util/u_vector.h"
#include "util/libsync.h"
@ -26,6 +31,10 @@
#include "tu_dynamic_rendering.h"
#include "tu_rmv.h"
/* ION_HEAP(ION_SYSTEM_HEAP_ID) */
#define KGSL_ION_SYSTEM_HEAP_MASK (1u << 25)
static int
safe_ioctl(int fd, unsigned long request, void *arg)
{
@ -68,6 +77,90 @@ kgsl_submitqueue_close(struct tu_device *dev, uint32_t queue_id)
safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
}
static VkResult
bo_init_new_dmaheap(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
enum tu_bo_alloc_flags flags)
{
struct dma_heap_allocation_data alloc = {
.len = size,
.fd_flags = O_RDWR | O_CLOEXEC,
};
int ret;
ret = safe_ioctl(dev->physical_device->kgsl_dma_fd, DMA_HEAP_IOCTL_ALLOC,
&alloc);
if (ret) {
return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"DMA_HEAP_IOCTL_ALLOC failed (%s)", strerror(errno));
}
return tu_bo_init_dmabuf(dev, out_bo, -1, alloc.fd);
}
static VkResult
bo_init_new_ion(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
enum tu_bo_alloc_flags flags)
{
struct ion_new_allocation_data alloc = {
.len = size,
.heap_id_mask = KGSL_ION_SYSTEM_HEAP_MASK,
.flags = 0,
.fd = -1,
};
int ret;
ret = safe_ioctl(dev->physical_device->kgsl_dma_fd, ION_IOC_NEW_ALLOC, &alloc);
if (ret) {
return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"ION_IOC_NEW_ALLOC failed (%s)", strerror(errno));
}
return tu_bo_init_dmabuf(dev, out_bo, -1, alloc.fd);
}
static VkResult
bo_init_new_ion_legacy(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
enum tu_bo_alloc_flags flags)
{
struct ion_allocation_data alloc = {
.len = size,
.align = 4096,
.heap_id_mask = KGSL_ION_SYSTEM_HEAP_MASK,
.flags = 0,
.handle = -1,
};
int ret;
ret = safe_ioctl(dev->physical_device->kgsl_dma_fd, ION_IOC_ALLOC, &alloc);
if (ret) {
return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"ION_IOC_ALLOC failed (%s)", strerror(errno));
}
struct ion_fd_data share = {
.handle = alloc.handle,
.fd = -1,
};
ret = safe_ioctl(dev->physical_device->kgsl_dma_fd, ION_IOC_SHARE, &share);
if (ret) {
return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"ION_IOC_SHARE failed (%s)", strerror(errno));
}
struct ion_handle_data free = {
.handle = alloc.handle,
};
ret = safe_ioctl(dev->physical_device->kgsl_dma_fd, ION_IOC_FREE, &free);
if (ret) {
return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"ION_IOC_FREE failed (%s)", strerror(errno));
}
return tu_bo_init_dmabuf(dev, out_bo, -1, share.fd);
}
static VkResult
kgsl_bo_init(struct tu_device *dev,
struct tu_bo **out_bo,
@ -79,6 +172,17 @@ kgsl_bo_init(struct tu_device *dev,
{
assert(client_iova == 0);
if (flags & TU_BO_ALLOC_SHAREABLE) {
switch(dev->physical_device->kgsl_dma_type) {
case TU_KGSL_DMA_TYPE_DMAHEAP:
return bo_init_new_dmaheap(dev, out_bo, size, flags);
case TU_KGSL_DMA_TYPE_ION:
return bo_init_new_ion(dev, out_bo, size, flags);
case TU_KGSL_DMA_TYPE_ION_LEGACY:
return bo_init_new_ion_legacy(dev, out_bo, size, flags);
}
}
struct kgsl_gpumem_alloc_id req = {
.size = size,
};
@ -114,6 +218,7 @@ kgsl_bo_init(struct tu_device *dev,
.iova = req.gpuaddr,
.name = tu_debug_bos_add(dev, req.mmapsize, name),
.refcnt = 1,
.shared_fd = -1,
};
*out_bo = bo;
@ -169,6 +274,7 @@ kgsl_bo_init_dmabuf(struct tu_device *dev,
.iova = info_req.gpuaddr,
.name = tu_debug_bos_add(dev, info_req.size, "dmabuf"),
.refcnt = 1,
.shared_fd = os_dupfd_cloexec(fd),
};
*out_bo = bo;
@ -179,18 +285,27 @@ kgsl_bo_init_dmabuf(struct tu_device *dev,
static int
kgsl_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
tu_stub();
return -1;
assert(bo->shared_fd != -1);
return os_dupfd_cloexec(bo->shared_fd);
}
static VkResult
kgsl_bo_map(struct tu_device *dev, struct tu_bo *bo, void *placed_addr)
{
uint64_t offset = bo->gem_handle << 12;
void *map = mmap(placed_addr, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED | (placed_addr != NULL ? MAP_FIXED : 0),
dev->physical_device->local_fd, offset);
void *map = MAP_FAILED;
if (bo->shared_fd == -1) {
uint64_t offset = bo->gem_handle << 12;
map = mmap(placed_addr, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED | (placed_addr != NULL ? MAP_FIXED : 0),
dev->physical_device->local_fd, offset);
} else {
map = mmap(placed_addr, bo->size, PROT_READ | PROT_WRITE,
MAP_SHARED | (placed_addr != NULL ? MAP_FIXED : 0),
bo->shared_fd, 0);
}
if (map == MAP_FAILED)
return vk_error(dev, VK_ERROR_MEMORY_MAP_FAILED);
@ -218,6 +333,9 @@ kgsl_bo_finish(struct tu_device *dev, struct tu_bo *bo)
munmap(bo->map, bo->size);
}
if (bo->shared_fd != -1)
close(bo->shared_fd);
TU_RMV(bo_destroy, dev, bo);
struct kgsl_gpumem_free_id req = {
@ -1421,6 +1539,30 @@ tu_knl_kgsl_load(struct tu_instance *instance, int fd)
return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
static const char dma_heap_path[] = "/dev/dma_heap/system";
static const char ion_path[] = "/dev/ion";
int dma_fd;
dma_fd = open(dma_heap_path, O_RDONLY);
if (dma_fd >= 0) {
device->kgsl_dma_type = TU_KGSL_DMA_TYPE_DMAHEAP;
} else {
dma_fd = open(ion_path, O_RDONLY);
if (dma_fd >= 0) {
/* ION_IOC_FREE available only for legacy ION */
struct ion_handle_data free = { .handle = 0 };
if (safe_ioctl(dma_fd, ION_IOC_FREE, &free) >= 0 || errno != ENOTTY)
device->kgsl_dma_type = TU_KGSL_DMA_TYPE_ION_LEGACY;
else
device->kgsl_dma_type = TU_KGSL_DMA_TYPE_ION;
} else {
mesa_logw(
"Unable to open neither %s nor %s, VK_KHR_external_memory_fd would be "
"unavailable: %s",
dma_heap_path, ion_path, strerror(errno));
}
}
VkResult result = VK_ERROR_INITIALIZATION_FAILED;
struct kgsl_devinfo info;
@ -1436,6 +1578,7 @@ tu_knl_kgsl_load(struct tu_instance *instance, int fd)
device->instance = instance;
device->master_fd = -1;
device->local_fd = fd;
device->kgsl_dma_fd = dma_fd;
device->dev_id.gpu_id =
((info.chip_id >> 24) & 0xff) * 100 +
@ -1476,5 +1619,7 @@ tu_knl_kgsl_load(struct tu_instance *instance, int fd)
fail:
vk_free(&instance->vk.alloc, device);
close(fd);
if (dma_fd >= 0)
close(dma_fd);
return result;
}
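
Finally, for illustration, the dma_heap path selected above reduces to an allocation from /dev/dma_heap/system plus an mmap of the returned dma-buf fd, which is what bo_init_new_dmaheap() and the shared_fd branch of kgsl_bo_map() do inside the driver. A standalone sketch, assuming a kernel with dma_heap enabled; alloc_and_map_dma_heap is a hypothetical helper, not part of the patch.

#include <fcntl.h>
#include <linux/dma-heap.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Standalone sketch: allocate from the system dma_heap and map the
 * resulting dma-buf. */
static void *
alloc_and_map_dma_heap(size_t size, int *out_fd)
{
   int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
   if (heap_fd < 0)
      return NULL;

   struct dma_heap_allocation_data alloc = {
      .len = size,
      .fd_flags = O_RDWR | O_CLOEXEC,
   };
   int ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);
   close(heap_fd);
   if (ret)
      return NULL;

   /* alloc.fd is a dma-buf: it can be mmap'ed directly and handed to
    * tu_bo_init_dmabuf() / VkImportMemoryFdInfoKHR for GPU use. */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, alloc.fd, 0);
   if (map == MAP_FAILED) {
      close(alloc.fd);
      return NULL;
   }

   *out_fd = alloc.fd;
   return map;
}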