turnip: Split out vfuncs for kernel interface

To allow support for multiple KMDs to coexist, split out vfuncs. This
doesn't quite *yet* let us have kgsl and msm coexist, because of the
different vk entrypoints needed by the kgsl backend.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21394>
Rob Clark, 2023-02-16 13:56:14 -08:00, committed by Marge Bot
parent f84fce767d
commit fd2b014a12
6 files changed, 240 insertions(+), 83 deletions(-)
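
Before the per-file hunks, the shape of the change in miniature: the
driver-facing helpers stop being backend-specific code and instead dispatch
through a function-pointer table hung off the instance. The standalone sketch
below is illustrative only, a two-entry stand-in for the real struct tu_knl
that follows, but it compiles on its own and exercises the same dispatch flow:

#include <inttypes.h>
#include <stdio.h>

/* Reduced stand-in for struct tu_knl: one const table per kernel backend. */
struct knl_sketch {
   const char *name;
   int (*device_get_gpu_timestamp)(uint64_t *ts);
};

/* One backend's private implementation (a real one would ioctl the kernel). */
static int sketch_msm_get_gpu_timestamp(uint64_t *ts)
{
   *ts = 42;
   return 0;
}

static const struct knl_sketch sketch_msm_knl = {
   .name = "msm",
   .device_get_gpu_timestamp = sketch_msm_get_gpu_timestamp,
};

/* Reduced stand-in for tu_instance, which gains a knl pointer below. */
struct instance_sketch {
   const struct knl_sketch *knl;
};

/* The generic wrapper forwards through the selected table, exactly as the
 * new tu_knl.c wrappers do via dev->instance->knl. */
static int sketch_get_gpu_timestamp(struct instance_sketch *inst, uint64_t *ts)
{
   return inst->knl->device_get_gpu_timestamp(ts);
}

int main(void)
{
   struct instance_sketch inst = { .knl = &sketch_msm_knl };
   uint64_t ts = 0;
   sketch_get_gpu_timestamp(&inst, &ts);
   printf("%s timestamp: %" PRIu64 "\n", inst.knl->name, ts);
   return 0;
}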

src/freedreno/vulkan/meson.build

@@ -24,6 +24,7 @@ libtu_files = files(
'tu_dynamic_rendering.c',
'tu_formats.c',
'tu_image.c',
+'tu_knl.c',
'tu_lrz.c',
'tu_nir_lower_multiview.c',
'tu_pass.c',

src/freedreno/vulkan/tu_device.h

@@ -114,10 +114,14 @@ struct tu_physical_device
VK_DEFINE_HANDLE_CASTS(tu_physical_device, vk.base, VkPhysicalDevice,
VK_OBJECT_TYPE_PHYSICAL_DEVICE)
+struct tu_knl;
struct tu_instance
{
struct vk_instance vk;
+const struct tu_knl *knl;
uint32_t api_version;
struct driOptionCache dri_options;
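
The header change is deliberately minimal: struct tu_knl is only
forward-declared here, so tu_device.h never needs to see the vfunc table's
layout, and each instance carries a const pointer to whichever backend's
table was selected during physical-device enumeration (see the
instance->knl assignments in the last two files).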

src/freedreno/vulkan/tu_knl.c (new file)

@@ -0,0 +1,102 @@
/*
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
* SPDX-License-Identifier: MIT
*
* based in part on anv driver which is:
* Copyright © 2015 Intel Corporation
*/
#include "tu_device.h"
#include "tu_knl.h"
VkResult
tu_bo_init_new_explicit_iova(struct tu_device *dev,
struct tu_bo **out_bo,
uint64_t size,
uint64_t client_iova,
enum tu_bo_alloc_flags flags, const char *name)
{
return dev->instance->knl->bo_init(dev, out_bo, size, client_iova, flags, name);
}
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
struct tu_bo **bo,
uint64_t size,
int fd)
{
return dev->instance->knl->bo_init_dmabuf(dev, bo, size, fd);
}
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
return dev->instance->knl->bo_export_dmabuf(dev, bo);
}
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
dev->instance->knl->bo_finish(dev, bo);
}
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
return dev->instance->knl->bo_map(dev, bo);
}
void
tu_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo)
{
dev->instance->knl->bo_allow_dump(dev, bo);
}
int
tu_device_get_gpu_timestamp(struct tu_device *dev,
uint64_t *ts)
{
return dev->instance->knl->device_get_gpu_timestamp(dev, ts);
}
int
tu_device_get_suspend_count(struct tu_device *dev,
uint64_t *suspend_count)
{
return dev->instance->knl->device_get_suspend_count(dev, suspend_count);
}
VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
{
return dev->instance->knl->device_wait_u_trace(dev, syncobj);
}
VkResult
tu_device_check_status(struct vk_device *vk_device)
{
struct tu_device *dev = container_of(vk_device, struct tu_device, vk);
return dev->instance->knl->device_check_status(dev);
}
int
tu_drm_submitqueue_new(const struct tu_device *dev,
int priority,
uint32_t *queue_id)
{
return dev->instance->knl->submitqueue_new(dev, priority, queue_id);
}
void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
dev->instance->knl->submitqueue_close(dev, queue_id);
}
VkResult
tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit)
{
struct tu_queue *queue = container_of(vk_queue, struct tu_queue, vk);
return queue->device->instance->knl->queue_submit(queue, submit);
}
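
Every wrapper in this new file has the same shape: chase
dev->instance->knl and forward the arguments unchanged, so existing callers
of tu_bo_map() and friends keep working, at the cost of one indirect call.
The two entrypoints invoked through the common Vulkan runtime,
tu_device_check_status() and tu_queue_submit(), additionally recover the
turnip object from the vk_device/vk_queue handle with container_of() before
dispatching.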

src/freedreno/vulkan/tu_knl.h

@@ -12,13 +12,6 @@
#include "tu_common.h"
-/* Keep tu_syncobj until porting to common code for kgsl too */
-#ifdef TU_USE_KGSL
-struct tu_syncobj;
-/* for TU_FROM_HANDLE with both VkFence and VkSemaphore: */
-#define tu_syncobj_from_handle(x) ((struct tu_syncobj*) (uintptr_t) (x))
-#endif
struct tu_u_trace_syncobj;
enum tu_bo_alloc_flags
@@ -45,8 +38,7 @@ enum tu_timeline_sync_state {
TU_TIMELINE_SYNC_STATE_SIGNALED,
};
-struct tu_bo
-{
+struct tu_bo {
uint32_t gem_handle;
uint64_t size;
uint64_t iova;
@@ -61,6 +53,28 @@ struct tu_bo
bool implicit_sync : 1;
};
+struct tu_knl {
+   const char *name;
+   int (*device_get_gpu_timestamp)(struct tu_device *dev, uint64_t *ts);
+   int (*device_get_suspend_count)(struct tu_device *dev, uint64_t *suspend_count);
+   VkResult (*device_check_status)(struct tu_device *dev);
+   int (*submitqueue_new)(const struct tu_device *dev, int priority, uint32_t *queue_id);
+   void (*submitqueue_close)(const struct tu_device *dev, uint32_t queue_id);
+   VkResult (*bo_init)(struct tu_device *dev, struct tu_bo **out_bo, uint64_t size,
+                       uint64_t client_iova, enum tu_bo_alloc_flags flags, const char *name);
+   VkResult (*bo_init_dmabuf)(struct tu_device *dev, struct tu_bo **out_bo,
+                              uint64_t size, int prime_fd);
+   int (*bo_export_dmabuf)(struct tu_device *dev, struct tu_bo *bo);
+   VkResult (*bo_map)(struct tu_device *dev, struct tu_bo *bo);
+   void (*bo_allow_dump)(struct tu_device *dev, struct tu_bo *bo);
+   void (*bo_finish)(struct tu_device *dev, struct tu_bo *bo);
+   VkResult (*device_wait_u_trace)(struct tu_device *dev,
+                                   struct tu_u_trace_syncobj *syncobj);
+   VkResult (*queue_submit)(struct tu_queue *queue,
+                            struct vk_queue_submit *submit);
+};
struct tu_timeline_sync {
struct vk_sync base;
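
One point worth noting about the table design: the wrappers in tu_knl.c
dispatch unconditionally, with no NULL checks, so a backend must either fill
in every vfunc it can be reached through or guarantee the corresponding
wrapper is never called (the kgsl table below relies on the latter for
queue_submit). Adding a further kernel backend later only requires another
set of static functions and another const struct tu_knl instance; no generic
code has to change.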

src/freedreno/vulkan/tu_drm.c

@@ -140,23 +140,22 @@ tu_drm_get_priorities(const struct tu_physical_device *dev)
return val;
}
-int
-tu_device_get_gpu_timestamp(struct tu_device *dev, uint64_t *ts)
+static int
+msm_device_get_gpu_timestamp(struct tu_device *dev, uint64_t *ts)
{
return tu_drm_get_param(dev->physical_device, MSM_PARAM_TIMESTAMP, ts);
}
-int
-tu_device_get_suspend_count(struct tu_device *dev, uint64_t *suspend_count)
+static int
+msm_device_get_suspend_count(struct tu_device *dev, uint64_t *suspend_count)
{
int ret = tu_drm_get_param(dev->physical_device, MSM_PARAM_SUSPENDS, suspend_count);
return ret;
}
-VkResult
-tu_device_check_status(struct vk_device *vk_device)
+static VkResult
+msm_device_check_status(struct tu_device *device)
{
-struct tu_device *device = container_of(vk_device, struct tu_device, vk);
struct tu_physical_device *physical_device = device->physical_device;
uint64_t last_fault_count = physical_device->fault_count;
@@ -170,10 +169,10 @@ tu_device_check_status(struct vk_device *vk_device)
return VK_SUCCESS;
}
-int
-tu_drm_submitqueue_new(const struct tu_device *dev,
-                       int priority,
-                       uint32_t *queue_id)
+static int
+msm_submitqueue_new(const struct tu_device *dev,
+                    int priority,
+                    uint32_t *queue_id)
{
assert(priority >= 0 &&
priority < dev->physical_device->submitqueue_priority_count);
@@ -191,8 +190,8 @@ tu_drm_submitqueue_new(const struct tu_device *dev,
return 0;
}
-void
-tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
+static void
+msm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
drmCommandWrite(dev->fd, DRM_MSM_SUBMITQUEUE_CLOSE,
&queue_id, sizeof(uint32_t));
@@ -392,13 +391,13 @@ tu_bo_set_kernel_name(struct tu_device *dev, struct tu_bo *bo, const char *name)
}
}
-VkResult
-tu_bo_init_new_explicit_iova(struct tu_device *dev,
-                             struct tu_bo **out_bo,
-                             uint64_t size,
-                             uint64_t client_iova,
-                             enum tu_bo_alloc_flags flags,
-                             const char *name)
+static VkResult
+msm_bo_init(struct tu_device *dev,
+            struct tu_bo **out_bo,
+            uint64_t size,
+            uint64_t client_iova,
+            enum tu_bo_alloc_flags flags,
+            const char *name)
{
/* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
* always sets `flags = MSM_BO_WC`, and we copy that behavior here.
@@ -433,11 +432,11 @@ tu_bo_init_new_explicit_iova(struct tu_device *dev,
return result;
}
-VkResult
-tu_bo_init_dmabuf(struct tu_device *dev,
-                  struct tu_bo **out_bo,
-                  uint64_t size,
-                  int prime_fd)
+static VkResult
+msm_bo_init_dmabuf(struct tu_device *dev,
+                   struct tu_bo **out_bo,
+                   uint64_t size,
+                   int prime_fd)
{
/* lseek() to get the real size */
off_t real_size = lseek(prime_fd, 0, SEEK_END);
@@ -484,8 +483,8 @@ tu_bo_init_dmabuf(struct tu_device *dev,
return result;
}
-int
-tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
+static int
+msm_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
int prime_fd;
int ret = drmPrimeHandleToFD(dev->fd, bo->gem_handle,
@@ -494,8 +493,8 @@ tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
return ret == 0 ? prime_fd : -1;
}
-VkResult
-tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
+static VkResult
+msm_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
if (bo->map)
return VK_SUCCESS;
@@ -514,16 +513,16 @@ tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
return VK_SUCCESS;
}
-void
-tu_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo)
+static void
+msm_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo)
{
mtx_lock(&dev->bo_mutex);
dev->bo_list[bo->bo_list_idx].flags |= MSM_SUBMIT_BO_DUMP;
mtx_unlock(&dev->bo_mutex);
}
-void
-tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
+static void
+msm_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
assert(bo->gem_handle);
@@ -1074,8 +1073,8 @@ get_abs_timeout(struct drm_msm_timespec *tv, uint64_t ns)
tv->tv_nsec = t.tv_nsec + ns % 1000000000;
}
-VkResult
-tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
+static VkResult
+msm_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
{
struct drm_msm_wait_fence req = {
.fence = syncobj->fence,
@@ -1094,11 +1093,10 @@ tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj
return VK_SUCCESS;
}
-VkResult
-tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit)
+static VkResult
+msm_queue_submit(struct tu_queue *queue, struct vk_queue_submit *submit)
{
MESA_TRACE_FUNC();
-struct tu_queue *queue = container_of(vk_queue, struct tu_queue, vk);
uint32_t perf_pass_index = queue->device->perfcntrs_pass_cs ?
submit->perf_pass_index : ~0;
struct tu_queue_submit submit_req;
@@ -1155,6 +1153,24 @@ tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit)
return VK_SUCCESS;
}
+static const struct tu_knl msm_knl_funcs = {
+   .name = "msm",
+   .device_get_gpu_timestamp = msm_device_get_gpu_timestamp,
+   .device_get_suspend_count = msm_device_get_suspend_count,
+   .device_check_status = msm_device_check_status,
+   .submitqueue_new = msm_submitqueue_new,
+   .submitqueue_close = msm_submitqueue_close,
+   .bo_init = msm_bo_init,
+   .bo_init_dmabuf = msm_bo_init_dmabuf,
+   .bo_export_dmabuf = msm_bo_export_dmabuf,
+   .bo_map = msm_bo_map,
+   .bo_allow_dump = msm_bo_allow_dump,
+   .bo_finish = msm_bo_finish,
+   .device_wait_u_trace = msm_device_wait_u_trace,
+   .queue_submit = msm_queue_submit,
+};
const struct vk_sync_type tu_timeline_sync_type = {
.size = sizeof(struct tu_timeline_sync),
.features = VK_SYNC_FEATURE_BINARY |
@@ -1339,6 +1355,8 @@ tu_physical_device_try_create(struct vk_instance *vk_instance,
device->heap.used = 0u;
device->heap.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
+instance->knl = &msm_knl_funcs;
result = tu_physical_device_init(device, instance);
if (result == VK_SUCCESS) {
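
Backend selection is that single assignment, made where the msm physical
device is created. One C detail the tables lean on: with designated
initializers, any member not named is zero-initialized, so a vfunc a backend
omits comes out NULL rather than indeterminate. A standalone check of that
rule, with illustrative names:

#include <stdio.h>

struct ops_sketch {
   void (*a)(void);
   void (*b)(void);
};

static void say_a(void) { printf("a called\n"); }

/* .b is omitted, so it is zero-initialized (NULL), not garbage. */
static const struct ops_sketch partial = { .a = say_a };

int main(void)
{
   printf("b is %s\n", partial.b ? "set" : "NULL");
   return 0;
}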

src/freedreno/vulkan/tu_kgsl.c

@@ -7,6 +7,7 @@
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -25,6 +26,7 @@ struct tu_syncobj {
uint32_t timestamp;
bool timestamp_valid;
};
+#define tu_syncobj_from_handle(x) ((struct tu_syncobj*) (uintptr_t) (x))
static int
safe_ioctl(int fd, unsigned long request, void *arg)
@@ -38,10 +40,10 @@ safe_ioctl(int fd, unsigned long request, void *arg)
return ret;
}
-int
-tu_drm_submitqueue_new(const struct tu_device *dev,
-                       int priority,
-                       uint32_t *queue_id)
+static int
+kgsl_submitqueue_new(const struct tu_device *dev,
+                     int priority,
+                     uint32_t *queue_id)
{
struct kgsl_drawctxt_create req = {
.flags = KGSL_CONTEXT_SAVE_GMEM |
@@ -58,8 +60,8 @@ tu_drm_submitqueue_new(const struct tu_device *dev,
return 0;
}
-void
-tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
+static void
+kgsl_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
struct kgsl_drawctxt_destroy req = {
.drawctxt_id = queue_id,
@@ -68,13 +70,13 @@ tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
safe_ioctl(dev->physical_device->local_fd, IOCTL_KGSL_DRAWCTXT_DESTROY, &req);
}
-VkResult
-tu_bo_init_new_explicit_iova(struct tu_device *dev,
-                             struct tu_bo **out_bo,
-                             uint64_t size,
-                             uint64_t client_iova,
-                             enum tu_bo_alloc_flags flags,
-                             const char *name)
+static VkResult
+kgsl_bo_init(struct tu_device *dev,
+             struct tu_bo **out_bo,
+             uint64_t size,
+             uint64_t client_iova,
+             enum tu_bo_alloc_flags flags,
+             const char *name)
{
assert(client_iova == 0);
@@ -110,11 +112,11 @@ tu_bo_init_new_explicit_iova(struct tu_device *dev,
return VK_SUCCESS;
}
-VkResult
-tu_bo_init_dmabuf(struct tu_device *dev,
-                  struct tu_bo **out_bo,
-                  uint64_t size,
-                  int fd)
+static VkResult
+kgsl_bo_init_dmabuf(struct tu_device *dev,
+                    struct tu_bo **out_bo,
+                    uint64_t size,
+                    int fd)
{
struct kgsl_gpuobj_import_dma_buf import_dmabuf = {
.fd = fd,
@@ -159,16 +161,16 @@ tu_bo_init_dmabuf(struct tu_device *dev,
return VK_SUCCESS;
}
-int
-tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
+static int
+kgsl_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
tu_stub();
return -1;
}
-VkResult
-tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
+static VkResult
+kgsl_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
if (bo->map)
return VK_SUCCESS;
@@ -184,13 +186,13 @@ tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
return VK_SUCCESS;
}
-void
-tu_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo)
+static void
+kgsl_bo_allow_dump(struct tu_device *dev, struct tu_bo *bo)
{
}
-void
-tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
+static void
+kgsl_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
assert(bo->gem_handle);
@@ -683,33 +685,31 @@ tu_GetFenceStatus(VkDevice _device, VkFence _fence)
return VK_SUCCESS;
}
-VkResult
-tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
+static VkResult
+kgsl_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj)
{
tu_finishme("tu_device_wait_u_trace");
return VK_SUCCESS;
}
-int
-tu_device_get_gpu_timestamp(struct tu_device *dev, uint64_t *ts)
+static int
+kgsl_device_get_gpu_timestamp(struct tu_device *dev, uint64_t *ts)
{
tu_finishme("tu_device_get_gpu_timestamp");
return 0;
}
-int
-tu_device_get_suspend_count(struct tu_device *dev, uint64_t *suspend_count)
+static int
+kgsl_device_get_suspend_count(struct tu_device *dev, uint64_t *suspend_count)
{
/* kgsl doesn't have a way to get it */
*suspend_count = 0;
return 0;
}
-VkResult
-tu_device_check_status(struct vk_device *vk_device)
+static VkResult
+kgsl_device_check_status(struct tu_device *device)
{
-struct tu_device *device = container_of(vk_device, struct tu_device, vk);
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
for (unsigned q = 0; q < device->queue_count[i]; q++) {
/* KGSL's KGSL_PROP_GPU_RESET_STAT takes the u32 msm_queue_id and returns a
@@ -757,6 +757,22 @@ tu_QueueSignalReleaseImageANDROID(VkQueue _queue,
}
#endif
+static const struct tu_knl kgsl_knl_funcs = {
+   .name = "kgsl",
+   .device_get_gpu_timestamp = kgsl_device_get_gpu_timestamp,
+   .device_get_suspend_count = kgsl_device_get_suspend_count,
+   .device_check_status = kgsl_device_check_status,
+   .submitqueue_new = kgsl_submitqueue_new,
+   .submitqueue_close = kgsl_submitqueue_close,
+   .bo_init = kgsl_bo_init,
+   .bo_init_dmabuf = kgsl_bo_init_dmabuf,
+   .bo_export_dmabuf = kgsl_bo_export_dmabuf,
+   .bo_map = kgsl_bo_map,
+   .bo_allow_dump = kgsl_bo_allow_dump,
+   .bo_finish = kgsl_bo_finish,
+   .device_wait_u_trace = kgsl_device_wait_u_trace,
+};
VkResult
tu_enumerate_devices(struct vk_instance *vk_instance)
@@ -820,6 +836,8 @@ tu_enumerate_devices(struct vk_instance *vk_instance)
device->heap.used = 0u;
device->heap.flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
+instance->knl = &kgsl_knl_funcs;
if (tu_physical_device_init(device, instance) != VK_SUCCESS)
goto fail;
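
Note what the kgsl table does not contain: there is no .queue_submit entry,
because kgsl still implements its own vkQueueSubmit-level entrypoints rather
than going through the common vk_queue path. That is exactly the "different
vk entrypoints" caveat from the commit message, and the remaining reason the
msm and kgsl backends cannot yet be built into a single driver.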