turnip: add tu_drm.h

Also define tu_syncobj_from_handle only when TU_USE_KGSL.

(cherry picked from commit 4d9ac3d0df)

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17877>
This commit is contained in:
Chia-I Wu 2022-07-29 14:34:13 -07:00 committed by Dylan Baker
parent 1a1ded7d78
commit fe4bc64b9f
5 changed files with 138 additions and 110 deletions

View file

@ -112,4 +112,10 @@
#define TU_FROM_HANDLE(__tu_type, __name, __handle) \
VK_FROM_HANDLE(__tu_type, __name, __handle)
/* vk object types */
struct tu_device;
struct tu_instance;
struct tu_bo;
#endif /* TU_COMMON_H */

View file

@ -22,6 +22,8 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "tu_drm.h"
#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>

View file

@ -0,0 +1,126 @@
/*
* Copyright © 2016 Red Hat.
* Copyright © 2016 Bas Nieuwenhuizen
* SPDX-License-Identifier: MIT
*
* based in part on anv driver which is:
* Copyright © 2015 Intel Corporation
*/
#ifndef TU_DRM_H
#define TU_DRM_H
#include "tu_common.h"
/* Keep tu_syncobj until the kgsl backend is also ported to the common
 * vk_sync code; the drm backend no longer needs it.
 */
#ifdef TU_USE_KGSL
struct tu_syncobj;
/* for TU_FROM_HANDLE with both VkFence and VkSemaphore: */
/* NOTE(review): plain cast instead of VK_FROM_HANDLE because the same
 * handle macro must accept two different Vulkan handle types.
 */
#define tu_syncobj_from_handle(x) ((struct tu_syncobj*) (uintptr_t) (x))
#endif
struct tu_u_trace_syncobj;
/* Flags accepted by tu_bo_init_new() to tweak how a BO is allocated. */
enum tu_bo_alloc_flags
{
   TU_BO_ALLOC_NO_FLAGS = 0,
   /* presumably opts the BO's contents into GPU crash dumps — confirm in
    * the kernel-interface implementation */
   TU_BO_ALLOC_ALLOW_DUMP = 1 << 0,
   /* presumably maps the BO read-only from the GPU side — confirm in
    * the kernel-interface implementation */
   TU_BO_ALLOC_GPU_READ_ONLY = 1 << 1,
};
/* tu_timeline_sync is a drm-syncobj-backed point type for vk_sync_timeline.
 * The state-tracking logic is largely copied from anv_bo_sync, since the
 * same approach applies here.
 */
enum tu_timeline_sync_state {
   /** Indicates that this is a new (or newly reset fence) */
   TU_TIMELINE_SYNC_STATE_RESET,
   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   TU_TIMELINE_SYNC_STATE_SUBMITTED,
   /** Indicates that the GPU has signaled this fence. */
   TU_TIMELINE_SYNC_STATE_SIGNALED,
};
/* A GPU buffer object. Reference-counted; take refs with tu_bo_get_ref()
 * and drop them with tu_bo_finish().
 */
struct tu_bo
{
   /* kernel GEM handle identifying this BO */
   uint32_t gem_handle;
   /* size of the BO in bytes */
   uint64_t size;
   /* presumably the GPU virtual address of the BO — TODO confirm */
   uint64_t iova;
   /* CPU mapping, NULL until tu_bo_map() is called — TODO confirm NULL init */
   void *map;
   /* atomic reference count (see tu_bo_get_ref) */
   int32_t refcnt;
#ifndef TU_USE_KGSL
   /* index into the per-device BO list; drm backend only — verify against
    * the submit code */
   uint32_t bo_list_idx;
#endif
   /* presumably requests implicit sync on submission — TODO confirm */
   bool implicit_sync : 1;
};
/* A vk_sync implementation backed by a binary drm syncobj, used as the
 * point type for vk_sync_timeline (see tu_timeline_sync_state above).
 */
struct tu_timeline_sync {
   struct vk_sync base;

   /* current lifecycle state of the underlying syncobj */
   enum tu_timeline_sync_state state;
   /* kernel drm syncobj handle */
   uint32_t syncobj;
};
/* Allocate a new BO of at least `size` bytes; on success stores the BO in
 * *bo. Ownership: caller releases with tu_bo_finish() — TODO confirm.
 */
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo **bo, uint64_t size,
               enum tu_bo_alloc_flags flags);

/* Import a BO from a dma-buf fd of the given size; on success stores the
 * BO in *bo. NOTE(review): fd ownership semantics not visible here —
 * verify whether the fd is consumed.
 */
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo **bo,
                  uint64_t size,
                  int fd);

/* Export the BO as a dma-buf; returns the new fd (presumably negative on
 * error — confirm in the implementation).
 */
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);

/* Drop a reference to the BO, destroying it when the refcount hits zero —
 * TODO confirm refcount behavior in the implementation.
 */
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);

/* Create (or reuse) a CPU mapping for the BO, stored in bo->map. */
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
/* Take an additional reference on @bo and hand it back, allowing the
 * call to be used inline wherever a BO pointer is stored:
 *
 *    dst->bo = tu_bo_get_ref(src->bo);
 */
static inline struct tu_bo *
tu_bo_get_ref(struct tu_bo *bo)
{
   struct tu_bo *ref = bo;

   p_atomic_inc(&ref->refcnt);

   return ref;
}
/* Enumerate the physical GPUs available to this instance. */
VkResult
tu_enumerate_devices(struct tu_instance *instance);

/* Read the current GPU timestamp into *ts; returns 0 on success —
 * presumably a negative errno otherwise, confirm in the implementation.
 */
int
tu_device_get_gpu_timestamp(struct tu_device *dev,
                            uint64_t *ts);

/* Read the kernel's GPU suspend counter into *suspend_count — used to
 * detect power-collapse across timestamp reads; TODO confirm caller usage.
 */
int
tu_device_get_suspend_count(struct tu_device *dev,
                            uint64_t *suspend_count);

/* Block until the given u_trace syncobj has signaled. */
VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj);

/* vk_device::check_status hook: report device-lost state to common code. */
VkResult
tu_device_check_status(struct vk_device *vk_device);

/* Create a kernel submit queue with the given priority; on success stores
 * the queue id in *queue_id. Returns 0 on success — confirm error
 * convention in the implementation.
 */
int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

/* Destroy a kernel submit queue created by tu_drm_submitqueue_new(). */
void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);

/* Export the drm syncobj behind @sync as a file descriptor — presumably a
 * sync_file fd for cross-process sharing; confirm in the implementation.
 */
int
tu_syncobj_to_fd(struct tu_device *device, struct vk_sync *sync);

/* vk_queue::driver_submit hook: submit command buffers to the kernel. */
VkResult
tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit);
#endif /* TU_DRM_H */

View file

@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
#include "tu_drm.h"
#include "tu_private.h"
#include <errno.h>

View file

@ -29,10 +29,11 @@
#define TU_PRIVATE_H
#include "tu_common.h"
#include "tu_descriptor_set.h"
#include "tu_autotune.h"
#include "tu_util.h"
#include "tu_descriptor_set.h"
#include "tu_drm.h"
#include "tu_perfetto.h"
#include "tu_util.h"
/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
@ -46,7 +47,6 @@ typedef uint32_t xcb_window_t;
* propagating errors. Might be useful to plug in a stack trace here.
*/
struct tu_instance;
struct breadcrumbs_context;
VkResult
@ -198,13 +198,6 @@ bool
tu_physical_device_extension_supported(struct tu_physical_device *dev,
const char *name);
enum tu_bo_alloc_flags
{
TU_BO_ALLOC_NO_FLAGS = 0,
TU_BO_ALLOC_ALLOW_DUMP = 1 << 0,
TU_BO_ALLOC_GPU_READ_ONLY = 1 << 1,
};
struct cache_entry;
struct tu_pipeline_cache
@ -233,35 +226,6 @@ struct tu_pipeline_key
#define TU_MAX_QUEUE_FAMILIES 1
/* Keep tu_syncobj until porting to common code for kgsl too */
#ifdef TU_USE_KGSL
struct tu_syncobj;
#endif
struct tu_u_trace_syncobj;
/* Define tu_timeline_sync type based on drm syncobj for a point type
* for vk_sync_timeline, and the logic to handle is mostly copied from
* anv_bo_sync since it seems it can be used by similar way to anv.
*/
enum tu_timeline_sync_state {
/** Indicates that this is a new (or newly reset fence) */
TU_TIMELINE_SYNC_STATE_RESET,
/** Indicates that this fence has been submitted to the GPU but is still
* (as far as we know) in use by the GPU.
*/
TU_TIMELINE_SYNC_STATE_SUBMITTED,
TU_TIMELINE_SYNC_STATE_SIGNALED,
};
struct tu_timeline_sync {
struct vk_sync base;
enum tu_timeline_sync_state state;
uint32_t syncobj;
};
struct tu_queue
{
struct vk_queue vk;
@ -272,21 +236,6 @@ struct tu_queue
int fence;
};
struct tu_bo
{
uint32_t gem_handle;
uint64_t size;
uint64_t iova;
void *map;
int32_t refcnt;
#ifndef TU_USE_KGSL
uint32_t bo_list_idx;
#endif
bool implicit_sync : 1;
};
/* externally-synchronized BO suballocator. */
struct tu_suballocator
{
@ -554,43 +503,15 @@ VkResult tu_insert_dynamic_cmdbufs(struct tu_device *dev,
VkResult
tu_device_submit_deferred_locked(struct tu_device *dev);
VkResult
tu_device_wait_u_trace(struct tu_device *dev, struct tu_u_trace_syncobj *syncobj);
uint64_t
tu_device_ticks_to_ns(struct tu_device *dev, uint64_t ts);
VkResult
tu_device_check_status(struct vk_device *vk_device);
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo **bo, uint64_t size,
enum tu_bo_alloc_flags flags);
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
struct tu_bo **bo,
uint64_t size,
int fd);
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
static inline struct tu_bo *
tu_device_lookup_bo(struct tu_device *device, uint32_t handle)
{
return (struct tu_bo *) util_sparse_array_get(&device->bo_map, handle);
}
static inline struct tu_bo *
tu_bo_get_ref(struct tu_bo *bo)
{
p_atomic_inc(&bo->refcnt);
return bo;
}
/* Get a scratch bo for use inside a command buffer. This will always return
* the same bo given the same size or similar sizes, so only one scratch bo
* can be used at the same time. It's meant for short-lived things where we
@ -2192,31 +2113,6 @@ tu_update_descriptor_set_with_template(
VkResult
tu_physical_device_init(struct tu_physical_device *device,
struct tu_instance *instance);
VkResult
tu_enumerate_devices(struct tu_instance *instance);
int
tu_device_get_gpu_timestamp(struct tu_device *dev,
uint64_t *ts);
int
tu_device_get_suspend_count(struct tu_device *dev,
uint64_t *suspend_count);
int
tu_drm_submitqueue_new(const struct tu_device *dev,
int priority,
uint32_t *queue_id);
void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);
int
tu_syncobj_to_fd(struct tu_device *device, struct vk_sync *sync);
VkResult
tu_queue_submit(struct vk_queue *vk_queue, struct vk_queue_submit *submit);
void
tu_copy_timestamp_buffer(struct u_trace_context *utctx, void *cmdstream,
void *ts_from, uint32_t from_offset,
@ -2317,9 +2213,6 @@ VK_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, base, VkSampler,
VK_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler_ycbcr_conversion, base, VkSamplerYcbcrConversion,
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION)
/* for TU_FROM_HANDLE with both VkFence and VkSemaphore: */
#define tu_syncobj_from_handle(x) ((struct tu_syncobj*) (uintptr_t) (x))
void
update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask);