freedreno/drm/virtio: Switch to vdrm helper
Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24733>

parent 06e57e3231
commit 0e3584df44
10 changed files with 155 additions and 428 deletions
src/freedreno/drm/freedreno_bo.c

@@ -218,17 +218,27 @@ out_unlock:
    return bo;
 }
 
+uint32_t
+fd_handle_from_dmabuf_drm(struct fd_device *dev, int fd)
+{
+   uint32_t handle;
+   int ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
+   if (ret)
+      return 0;
+   return handle;
+}
+
 struct fd_bo *
 fd_bo_from_dmabuf_drm(struct fd_device *dev, int fd)
 {
-   int ret, size;
+   int size;
    uint32_t handle;
    struct fd_bo *bo;
 
 restart:
    simple_mtx_lock(&table_lock);
-   ret = drmPrimeFDToHandle(dev->fd, fd, &handle);
-   if (ret) {
+   handle = dev->funcs->handle_from_dmabuf(dev, fd);
+   if (!handle) {
       simple_mtx_unlock(&table_lock);
       return NULL;
    }
@@ -437,12 +447,12 @@ fd_bo_fini_fences(struct fd_bo *bo)
 }
 
 void
-fd_bo_close_handle_drm(struct fd_device *dev, uint32_t handle)
+fd_bo_close_handle_drm(struct fd_bo *bo)
 {
    struct drm_gem_close req = {
-         .handle = handle,
+         .handle = bo->handle,
    };
-   drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+   drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
 }
 
 /**
@@ -468,7 +478,7 @@ fd_bo_fini_common(struct fd_bo *bo)
 
    if (handle) {
       simple_mtx_lock(&table_lock);
-      dev->funcs->bo_close_handle(dev, handle);
+      dev->funcs->bo_close_handle(bo);
       _mesa_hash_table_remove_key(dev->handle_table, &handle);
      if (bo->name)
         _mesa_hash_table_remove_key(dev->name_table, &bo->name);
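
The import path above is the crux of the refactor: fd_bo_from_dmabuf_drm() now asks the backend's handle_from_dmabuf() hook for a GEM handle, so msm can keep using drmPrimeFDToHandle() while virtio routes the import through vdrm. A minimal standalone sketch of that dispatch shape, with toy_* stub types standing in for the real fd_device and its vfunc table (hypothetical names, not Mesa code):

#include <stdint.h>
#include <stdio.h>

struct toy_dev;

struct toy_dev_funcs {
   /* mirrors the new handle_from_dmabuf vfunc: 0 means failure */
   uint32_t (*handle_from_dmabuf)(struct toy_dev *dev, int fd);
};

struct toy_dev {
   const struct toy_dev_funcs *funcs;
};

/* msm-style backend: the real code calls drmPrimeFDToHandle() here */
static uint32_t
drm_handle_from_dmabuf(struct toy_dev *dev, int fd)
{
   (void)dev;
   return fd >= 0 ? 100 + (uint32_t)fd : 0; /* pretend GEM handle */
}

/* virtio-style backend: the real code calls vdrm_dmabuf_to_handle() */
static uint32_t
virtio_handle_from_dmabuf(struct toy_dev *dev, int fd)
{
   (void)dev;
   return fd >= 0 ? 200 + (uint32_t)fd : 0;
}

/* common import path, shaped like fd_bo_from_dmabuf_drm() after the change */
static uint32_t
import_dmabuf(struct toy_dev *dev, int fd)
{
   uint32_t handle = dev->funcs->handle_from_dmabuf(dev, fd);
   if (!handle)
      fprintf(stderr, "import failed\n");
   return handle;
}

int main(void)
{
   static const struct toy_dev_funcs msm_funcs = { drm_handle_from_dmabuf };
   static const struct toy_dev_funcs vio_funcs = { virtio_handle_from_dmabuf };
   struct toy_dev msm = { &msm_funcs }, vio = { &vio_funcs };

   printf("msm handle: %u\n", (unsigned)import_dmabuf(&msm, 3));
   printf("virtio handle: %u\n", (unsigned)import_dmabuf(&vio, 3));
   return 0;
}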
src/freedreno/drm/freedreno_drmif.h

@@ -145,7 +145,6 @@ int fd_fence_wait(struct fd_fence *f);
 #define FD_BO_SCANOUT BITSET_BIT(5)
 
 /* internal bo flags: */
 #define _FD_BO_VIRTIO_SHM BITSET_BIT(6)
-#define _FD_BO_NOSYNC BITSET_BIT(7) /* Avoid userspace fencing on control buffers */
 
 /*
@@ -286,7 +285,6 @@ struct fd_bo *fd_bo_from_handle(struct fd_device *dev, uint32_t handle,
                                 uint32_t size);
 struct fd_bo *fd_bo_from_name(struct fd_device *dev, uint32_t name);
 struct fd_bo *fd_bo_from_dmabuf(struct fd_device *dev, int fd);
-struct fd_bo *fd_bo_from_dmabuf_drm(struct fd_device *dev, int fd);
 void fd_bo_mark_for_dump(struct fd_bo *bo);
 
 static inline uint64_t
src/freedreno/drm/freedreno_priv.h

@@ -108,8 +108,9 @@ struct fd_device_funcs {
    */
    struct fd_bo *(*bo_from_handle)(struct fd_device *dev, uint32_t size,
                                    uint32_t handle);
+   uint32_t (*handle_from_dmabuf)(struct fd_device *dev, int fd);
    struct fd_bo *(*bo_from_dmabuf)(struct fd_device *dev, int fd);
-   void (*bo_close_handle)(struct fd_device *dev, uint32_t handle);
+   void (*bo_close_handle)(struct fd_bo *bo);
 
    struct fd_pipe *(*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
                                unsigned prio);
@@ -471,7 +472,10 @@ void fd_bo_fini_fences(struct fd_bo *bo);
 void fd_bo_fini_common(struct fd_bo *bo);
 
 struct fd_bo *fd_bo_new_ring(struct fd_device *dev, uint32_t size);
-void fd_bo_close_handle_drm(struct fd_device *dev, uint32_t handle);
+
+uint32_t fd_handle_from_dmabuf_drm(struct fd_device *dev, int fd);
+struct fd_bo *fd_bo_from_dmabuf_drm(struct fd_device *dev, int fd);
+void fd_bo_close_handle_drm(struct fd_bo *bo);
 
 #define enable_debug 0 /* TODO make dynamic */
src/freedreno/drm/meson.build

@@ -38,6 +38,10 @@ libfreedreno_drm_includes = [
   inc_include,
   inc_src,
 ]
+libfreedreno_drm_deps = [
+  dep_libdrm,
+  dep_valgrind,
+]
 
 libfreedreno_drm_msm_files = files(
   'msm/msm_bo.c',
@@ -59,7 +63,10 @@ libfreedreno_drm_virtio_files = files(
 if freedreno_kmds.contains('virtio')
   libfreedreno_drm_files += libfreedreno_drm_virtio_files
   libfreedreno_drm_flags += '-DHAVE_FREEDRENO_VIRTIO'
-  libfreedreno_drm_includes += inc_virtio_gpu
+  libfreedreno_drm_includes += [
+    inc_virtio_gpu,
+    inc_virtio_vdrm,
+  ]
 endif
 
 libfreedreno_drm = static_library(
@@ -71,10 +78,9 @@ libfreedreno_drm = static_library(
   include_directories : libfreedreno_drm_includes,
   c_args : [no_override_init_args, libfreedreno_drm_flags],
   gnu_symbol_visibility : 'hidden',
-  dependencies : [
-    dep_libdrm,
-    dep_valgrind,
-  ],
+  dependencies : libfreedreno_drm_deps,
+  # TODO
+  link_with : [libvdrm],
   build_by_default : false,
 )
src/freedreno/drm/msm/msm_device.c

@@ -38,6 +38,7 @@ msm_device_destroy(struct fd_device *dev)
 static const struct fd_device_funcs funcs = {
    .bo_new = msm_bo_new,
    .bo_from_handle = msm_bo_from_handle,
+   .handle_from_dmabuf = fd_handle_from_dmabuf_drm,
    .bo_from_dmabuf = fd_bo_from_dmabuf_drm,
    .bo_close_handle = fd_bo_close_handle_drm,
    .pipe_new = msm_pipe_new,
src/freedreno/drm/virtio/virtio_bo.c

@@ -25,73 +25,33 @@
 
 #include "virtio_priv.h"
 
-static int
-bo_allocate(struct virtio_bo *virtio_bo)
-{
-   struct fd_bo *bo = &virtio_bo->base;
-   if (!virtio_bo->offset) {
-      struct drm_virtgpu_map req = {
-            .handle = bo->handle,
-      };
-      int ret;
-
-      ret = virtio_ioctl(bo->dev->fd, VIRTGPU_MAP, &req);
-      if (ret) {
-         ERROR_MSG("alloc failed: %s", strerror(errno));
-         return ret;
-      }
-
-      virtio_bo->offset = req.offset;
-   }
-
-   return 0;
-}
-
-static int
-virtio_bo_offset(struct fd_bo *bo, uint64_t *offset)
+static void *
+virtio_bo_mmap(struct fd_bo *bo)
 {
+   struct vdrm_device *vdrm = to_virtio_device(bo->dev)->vdrm;
    struct virtio_bo *virtio_bo = to_virtio_bo(bo);
-   int ret = bo_allocate(virtio_bo);
-
-   if (ret)
-      return ret;
 
    /* If we have uploaded, we need to wait for host to handle that
    * before we can allow guest-side CPU access:
    */
   if (virtio_bo->has_upload_seqno) {
-
      virtio_bo->has_upload_seqno = false;
-      virtio_execbuf_flush(bo->dev);
-      virtio_host_sync(bo->dev, &(struct vdrm_ccmd_req) {
+
+      vdrm_flush(vdrm);
+      vdrm_host_sync(vdrm, &(struct vdrm_ccmd_req) {
         .seqno = virtio_bo->upload_seqno,
      });
   }
 
-   *offset = virtio_bo->offset;
-
-   return 0;
-}
-
-static int
-virtio_bo_cpu_prep_guest(struct fd_bo *bo)
-{
-   struct drm_virtgpu_3d_wait args = {
-         .handle = bo->handle,
-   };
-   int ret;
-
-   /* Side note, this ioctl is defined as IO_WR but should be IO_W: */
-   ret = virtio_ioctl(bo->dev->fd, VIRTGPU_WAIT, &args);
-   if (ret && errno == EBUSY)
-      return -EBUSY;
-
-   return 0;
+   return vdrm_bo_map(vdrm, bo->handle, bo->size);
 }
 
 static int
 virtio_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
 {
    MESA_TRACE_FUNC();
+   struct vdrm_device *vdrm = to_virtio_device(bo->dev)->vdrm;
    int ret;
 
    /*
@@ -101,7 +61,7 @@ virtio_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
    * know about usage of the bo in the host (or other guests).
    */
 
-   ret = virtio_bo_cpu_prep_guest(bo);
+   ret = vdrm_bo_wait(vdrm, bo->handle);
    if (ret)
       goto out;
 
@@ -120,9 +80,9 @@ virtio_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op)
 
    /* We can't do a blocking wait in the host, so we have to poll: */
    do {
-      rsp = virtio_alloc_rsp(bo->dev, &req.hdr, sizeof(*rsp));
+      rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));
 
-      ret = virtio_execbuf(bo->dev, &req.hdr, true);
+      ret = vdrm_send_req(vdrm, &req.hdr, true);
       if (ret)
          goto out;
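
virtio_bo_mmap() keeps the ordering rule the old offset-based path enforced: if the bo has a pending guest-to-host upload, the request queue is flushed and the host must pass upload_seqno before a CPU mapping is handed out. A toy sketch of that seqno gate, with stub functions in place of the real vdrm_flush()/vdrm_host_sync()/vdrm_bo_map() calls shown above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bo {
   bool has_upload_seqno;
   uint32_t upload_seqno;
};

static uint32_t host_seqno;      /* stand-in for the host progress counter */

static void toy_flush(void) {}   /* real code: vdrm_flush(vdrm) */

static void toy_host_sync(uint32_t seqno)
{
   /* real code busy-waits on a shared-memory seqno; here the "host"
    * simply catches up immediately */
   while (host_seqno < seqno)
      host_seqno++;
}

static void *toy_map(struct toy_bo *bo)
{
   if (bo->has_upload_seqno) {
      /* clear first so a second map doesn't wait again */
      bo->has_upload_seqno = false;
      toy_flush();
      toy_host_sync(bo->upload_seqno);
   }
   static char backing[64];      /* real code: vdrm_bo_map() */
   return backing;
}

int main(void)
{
   struct toy_bo bo = { .has_upload_seqno = true, .upload_seqno = 42 };
   void *p = toy_map(&bo);
   printf("mapped %p, host_seqno=%u\n", p, (unsigned)host_seqno);
   return 0;
}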
@@ -182,7 +142,15 @@ virtio_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap)
 
    memcpy(req->payload, name, sz);
 
-   virtio_execbuf(bo->dev, &req->hdr, false);
+   vdrm_send_req(to_virtio_device(bo->dev)->vdrm, &req->hdr, false);
+}
+
+static int
+virtio_bo_dmabuf(struct fd_bo *bo)
+{
+   struct virtio_device *virtio_dev = to_virtio_device(bo->dev);
+
+   return vdrm_bo_export_dmabuf(virtio_dev->vdrm, bo->handle);
 }
 
 static void
@@ -203,7 +171,7 @@ bo_upload(struct fd_bo *bo, unsigned off, void *src, unsigned len)
 
    memcpy(req->payload, src, len);
 
-   virtio_execbuf(bo->dev, &req->hdr, false);
+   vdrm_send_req(to_virtio_device(bo->dev)->vdrm, &req->hdr, false);
 
    virtio_bo->upload_seqno = req->hdr.seqno;
    virtio_bo->has_upload_seqno = true;
@@ -257,7 +225,7 @@ set_iova(struct fd_bo *bo, uint64_t iova)
       .iova = iova,
    };
 
-   virtio_execbuf(bo->dev, &req.hdr, false);
+   vdrm_send_req(to_virtio_device(bo->dev)->vdrm, &req.hdr, false);
 }
 
 static void
@@ -272,8 +240,7 @@ virtio_bo_finalize(struct fd_bo *bo)
 }
 
 static const struct fd_bo_funcs funcs = {
-   .offset = virtio_bo_offset,
-   .map = fd_bo_map_os_mmap,
+   .map = virtio_bo_mmap,
    .cpu_prep = virtio_bo_cpu_prep,
    .madvise = virtio_bo_madvise,
    .iova = virtio_bo_iova,
@@ -288,6 +255,7 @@ static const struct fd_bo_funcs funcs = {
 static struct fd_bo *
 bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
 {
+   struct virtio_device *virtio_dev = to_virtio_device(dev);
    struct virtio_bo *virtio_bo;
    struct fd_bo *bo;
 
@@ -312,19 +280,7 @@ bo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle)
    /* Don't assume we can mmap an imported bo: */
    bo->alloc_flags = FD_BO_NOMAP;
 
-   struct drm_virtgpu_resource_info args = {
-         .bo_handle = handle,
-   };
-   int ret;
-
-   ret = virtio_ioctl(dev->fd, VIRTGPU_RESOURCE_INFO, &args);
-   if (ret) {
-      INFO_MSG("failed to get resource info: %s", strerror(errno));
-      free(virtio_bo);
-      return NULL;
-   }
-
-   virtio_bo->res_id = args.res_handle;
+   virtio_bo->res_id = vdrm_handle_to_res_id(virtio_dev->vdrm, handle);
 
    fd_bo_init_common(bo, dev);
 
@@ -359,15 +315,10 @@ struct fd_bo *
 virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
 {
    struct virtio_device *virtio_dev = to_virtio_device(dev);
-   struct drm_virtgpu_resource_create_blob args = {
-         .blob_mem = VIRTGPU_BLOB_MEM_HOST3D,
-         .size = size,
-   };
    struct msm_ccmd_gem_new_req req = {
          .hdr = MSM_CCMD(GEM_NEW, sizeof(req)),
          .size = size,
    };
-   int ret;
 
    if (flags & FD_BO_SCANOUT)
       req.flags |= MSM_BO_SCANOUT;
@@ -381,56 +332,42 @@ virtio_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags)
       req.flags |= MSM_BO_WC;
    }
 
-   if (flags & _FD_BO_VIRTIO_SHM) {
-      args.blob_id = 0;
-      args.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
-   } else {
-      if (flags & (FD_BO_SHARED | FD_BO_SCANOUT)) {
-         args.blob_flags = VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE |
-                           VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
-      }
-
-      if (!(flags & FD_BO_NOMAP)) {
-         args.blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
-      }
-
-      args.blob_id = p_atomic_inc_return(&virtio_dev->next_blob_id);
-      args.cmd = VOID2U64(&req);
-      args.cmd_size = sizeof(req);
-
-      /* tunneled cmds are processed separately on host side,
-       * before the renderer->get_blob() callback.. the blob_id
-       * is used to like the created bo to the get_blob() call
-       */
-      req.blob_id = args.blob_id;
-      req.iova = virtio_dev_alloc_iova(dev, size);
-      if (!req.iova) {
-         ret = -ENOMEM;
-         goto fail;
-      }
+   uint32_t blob_flags = 0;
+   if (flags & (FD_BO_SHARED | FD_BO_SCANOUT)) {
+      blob_flags = VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE |
+                   VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
    }
 
-   simple_mtx_lock(&virtio_dev->eb_lock);
-   if (args.cmd) {
-      virtio_execbuf_flush_locked(dev);
-      req.hdr.seqno = ++virtio_dev->next_seqno;
+   if (!(flags & FD_BO_NOMAP)) {
+      blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
    }
-   ret = virtio_ioctl(dev->fd, VIRTGPU_RESOURCE_CREATE_BLOB, &args);
-   simple_mtx_unlock(&virtio_dev->eb_lock);
-   if (ret)
-      goto fail;
 
-   struct fd_bo *bo = bo_from_handle(dev, size, args.bo_handle);
+   uint32_t blob_id = p_atomic_inc_return(&virtio_dev->next_blob_id);
+
+   /* tunneled cmds are processed separately on host side,
+    * before the renderer->get_blob() callback.. the blob_id
+    * is used to link the created bo to the get_blob() call
+    */
+   req.blob_id = blob_id;
+   req.iova = virtio_dev_alloc_iova(dev, size);
+   if (!req.iova)
+      goto fail;
+
+   uint32_t handle =
+      vdrm_bo_create(virtio_dev->vdrm, size, blob_flags, blob_id, &req.hdr);
+   if (!handle)
+      goto fail;
+
+   struct fd_bo *bo = bo_from_handle(dev, size, handle);
    struct virtio_bo *virtio_bo = to_virtio_bo(bo);
 
-   virtio_bo->blob_id = args.blob_id;
+   virtio_bo->blob_id = blob_id;
    bo->iova = req.iova;
 
    return bo;
 
 fail:
-   if (req.iova) {
+   if (req.iova)
       virtio_dev_free_iova(dev, req.iova, size);
-   }
    return NULL;
 }
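
With the ioctl plumbing gone, allocation reduces to picking VIRTGPU blob flags from the fd_bo flags and letting vdrm_bo_create() carry the tunneled GEM_NEW request. A standalone sketch of just the flag-selection step; the VIRTGPU_BLOB_FLAG_* values match virtgpu_drm.h, while the TOY_BO_* stand-ins are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE     0x0001
#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE    0x0002
#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004

/* fd_bo flag stand-ins (the real ones are BITSET_BIT() values) */
#define TOY_BO_SHARED  (1u << 0)
#define TOY_BO_SCANOUT (1u << 1)
#define TOY_BO_NOMAP   (1u << 2)

static uint32_t pick_blob_flags(uint32_t flags)
{
   uint32_t blob_flags = 0;
   /* shared/scanout buffers must be visible to other devices */
   if (flags & (TOY_BO_SHARED | TOY_BO_SCANOUT))
      blob_flags = VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE |
                   VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
   /* everything except NOMAP buffers gets a guest mapping */
   if (!(flags & TOY_BO_NOMAP))
      blob_flags |= VIRTGPU_BLOB_FLAG_USE_MAPPABLE;
   return blob_flags;
}

int main(void)
{
   printf("scanout: 0x%x\n", (unsigned)pick_blob_flags(TOY_BO_SCANOUT));
   printf("nomap:   0x%x\n", (unsigned)pick_blob_flags(TOY_BO_NOMAP));
   printf("default: 0x%x\n", (unsigned)pick_blob_flags(0));
   return 0;
}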
src/freedreno/drm/virtio/virtio_device.c

@@ -30,55 +30,44 @@
 
 #include "virtio_priv.h"
 
+static int virtio_execbuf_flush(struct fd_device *dev);
+
 static void
 virtio_device_destroy(struct fd_device *dev)
 {
    struct virtio_device *virtio_dev = to_virtio_device(dev);
 
-   fd_bo_del(virtio_dev->shmem_bo);
    util_vma_heap_finish(&virtio_dev->address_space);
 }
 
+static uint32_t
+virtio_handle_from_dmabuf(struct fd_device *dev, int fd)
+{
+   struct virtio_device *virtio_dev = to_virtio_device(dev);
+
+   return vdrm_dmabuf_to_handle(virtio_dev->vdrm, fd);
+}
+
+static void
+virtio_close_handle(struct fd_bo *bo)
+{
+   struct virtio_device *virtio_dev = to_virtio_device(bo->dev);
+
+   vdrm_bo_close(virtio_dev->vdrm, bo->handle);
+}
+
 static const struct fd_device_funcs funcs = {
    .bo_new = virtio_bo_new,
    .bo_from_handle = virtio_bo_from_handle,
+   .handle_from_dmabuf = virtio_handle_from_dmabuf,
    .bo_from_dmabuf = fd_bo_from_dmabuf_drm,
-   .bo_close_handle = fd_bo_close_handle_drm,
+   .bo_close_handle = virtio_close_handle,
    .pipe_new = virtio_pipe_new,
    .flush = virtio_execbuf_flush,
    .destroy = virtio_device_destroy,
 };
 
-static int
-get_capset(int fd, struct virgl_renderer_capset_drm *caps)
-{
-   struct drm_virtgpu_get_caps args = {
-         .cap_set_id = VIRGL_RENDERER_CAPSET_DRM,
-         .cap_set_ver = 0,
-         .addr = VOID2U64(caps),
-         .size = sizeof(*caps),
-   };
-
-   memset(caps, 0, sizeof(*caps));
-
-   return virtio_ioctl(fd, VIRTGPU_GET_CAPS, &args);
-}
-
-static int
-set_context(int fd)
-{
-   struct drm_virtgpu_context_set_param params[] = {
-         { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, VIRGL_RENDERER_CAPSET_DRM },
-         { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 64 },
-   };
-   struct drm_virtgpu_context_init args = {
-         .num_params = ARRAY_SIZE(params),
-         .ctx_set_params = VOID2U64(params),
-   };
-
-   return virtio_ioctl(fd, VIRTGPU_CONTEXT_INIT, &args);
-}
-
 static void
 set_debuginfo(struct fd_device *dev)
 {
@@ -117,7 +106,7 @@ set_debuginfo(struct fd_device *dev)
    memcpy(&req->payload[0], comm, comm_len);
    memcpy(&req->payload[comm_len], cmdline, cmdline_len);
 
-   virtio_execbuf(dev, &req->hdr, false);
+   vdrm_send_req(to_virtio_device(dev)->vdrm, &req->hdr, false);
 
    free(req);
 }
@@ -127,8 +116,8 @@ virtio_device_new(int fd, drmVersionPtr version)
 {
    struct virgl_renderer_capset_drm caps;
    struct virtio_device *virtio_dev;
+   struct vdrm_device *vdrm;
    struct fd_device *dev;
-   int ret;
 
    STATIC_ASSERT(FD_BO_PREP_READ == MSM_PREP_READ);
    STATIC_ASSERT(FD_BO_PREP_WRITE == MSM_PREP_WRITE);
@@ -138,16 +127,13 @@ virtio_device_new(int fd, drmVersionPtr version)
    if (debug_get_bool_option("FD_NO_VIRTIO", false))
       return NULL;
 
-   ret = get_capset(fd, &caps);
-   if (ret) {
-      INFO_MSG("could not get caps: %s", strerror(errno));
+   vdrm = vdrm_device_connect(fd, VIRTGPU_DRM_CONTEXT_MSM);
+   if (!vdrm) {
+      INFO_MSG("could not connect vdrm");
      return NULL;
   }
 
-   if (caps.context_type != VIRTGPU_DRM_CONTEXT_MSM) {
-      INFO_MSG("wrong context_type: %u", caps.context_type);
-      return NULL;
-   }
+   caps = vdrm->caps;
 
    INFO_MSG("wire_format_version: %u", caps.wire_format_version);
    INFO_MSG("version_major: %u", caps.version_major);
@@ -164,29 +150,23 @@ virtio_device_new(int fd, drmVersionPtr version)
 
    if (caps.wire_format_version != 2) {
       ERROR_MSG("Unsupported protocol version: %u", caps.wire_format_version);
-      return NULL;
+      goto error;
    }
 
    if ((caps.version_major != 1) || (caps.version_minor < FD_VERSION_SOFTPIN)) {
       ERROR_MSG("unsupported version: %u.%u.%u", caps.version_major,
                 caps.version_minor, caps.version_patchlevel);
-      return NULL;
+      goto error;
   }
 
    if (!caps.u.msm.va_size) {
       ERROR_MSG("No address space");
-      return NULL;
+      goto error;
   }
 
-   ret = set_context(fd);
-   if (ret) {
-      INFO_MSG("Could not set context type: %s", strerror(errno));
-      return NULL;
-   }
-
    virtio_dev = calloc(1, sizeof(*virtio_dev));
    if (!virtio_dev)
-      return NULL;
+      goto error;
 
    dev = &virtio_dev->base;
    dev->funcs = &funcs;
@@ -195,16 +175,13 @@ virtio_device_new(int fd, drmVersionPtr version)
    dev->has_cached_coherent = caps.u.msm.has_cached_coherent;
 
-   p_atomic_set(&virtio_dev->next_blob_id, 1);
-
-   virtio_dev->caps = caps;
+   virtio_dev->shmem = to_msm_shmem(vdrm->shmem);
+   virtio_dev->vdrm = vdrm;
 
    util_queue_init(&dev->submit_queue, "sq", 8, 1, 0, NULL);
 
    dev->bo_size = sizeof(struct virtio_bo);
 
-   simple_mtx_init(&virtio_dev->rsp_lock, mtx_plain);
-   simple_mtx_init(&virtio_dev->eb_lock, mtx_plain);
 
    set_debuginfo(dev);
 
    util_vma_heap_init(&virtio_dev->address_space,
@@ -213,183 +190,16 @@ virtio_device_new(int fd, drmVersionPtr version)
    simple_mtx_init(&virtio_dev->address_space_lock, mtx_plain);
 
    return dev;
+
+error:
+   vdrm_device_close(vdrm);
+   return NULL;
 }
 
-void *
-virtio_alloc_rsp(struct fd_device *dev, struct vdrm_ccmd_req *req, uint32_t sz)
-{
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-   unsigned off;
-
-   simple_mtx_lock(&virtio_dev->rsp_lock);
-
-   sz = align(sz, 8);
-
-   if ((virtio_dev->next_rsp_off + sz) >= virtio_dev->rsp_mem_len)
-      virtio_dev->next_rsp_off = 0;
-
-   off = virtio_dev->next_rsp_off;
-   virtio_dev->next_rsp_off += sz;
-
-   simple_mtx_unlock(&virtio_dev->rsp_lock);
-
-   req->rsp_off = off;
-
-   struct vdrm_ccmd_rsp *rsp = (void *)&virtio_dev->rsp_mem[off];
-   rsp->len = sz;
-
-   return rsp;
-}
-
-static int execbuf_flush_locked(struct fd_device *dev, int *out_fence_fd);
-
-static int
-execbuf_locked(struct fd_device *dev, void *cmd, uint32_t cmd_size,
-               uint32_t *handles, uint32_t num_handles,
-               int in_fence_fd, int *out_fence_fd, int ring_idx)
-{
-#define COND(bool, val) ((bool) ? (val) : 0)
-   struct drm_virtgpu_execbuffer eb = {
-         .flags = COND(out_fence_fd, VIRTGPU_EXECBUF_FENCE_FD_OUT) |
-                  COND(in_fence_fd != -1, VIRTGPU_EXECBUF_FENCE_FD_IN) |
-                  VIRTGPU_EXECBUF_RING_IDX,
-         .fence_fd = in_fence_fd,
-         .size = cmd_size,
-         .command = VOID2U64(cmd),
-         .ring_idx = ring_idx,
-         .bo_handles = VOID2U64(handles),
-         .num_bo_handles = num_handles,
-   };
-
-   int ret = virtio_ioctl(dev->fd, VIRTGPU_EXECBUFFER, &eb);
-   if (ret) {
-      ERROR_MSG("EXECBUFFER failed: %s", strerror(errno));
-      return ret;
-   }
-
-   if (out_fence_fd)
-      *out_fence_fd = eb.fence_fd;
-
-   return 0;
-}
-
-/**
- * Helper for "execbuf" ioctl.. note that in virtgpu execbuf is just
- * a generic "send commands to host", not necessarily specific to
- * cmdstream execution.
- *
- * Note that ring_idx 0 is the "CPU ring", ie. for synchronizing btwn
- * guest and host CPU.
- */
-int
-virtio_execbuf_fenced(struct fd_device *dev, struct vdrm_ccmd_req *req,
-                      uint32_t *handles, uint32_t num_handles,
-                      int in_fence_fd, int *out_fence_fd, int ring_idx)
-{
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-   int ret;
-
-   simple_mtx_lock(&virtio_dev->eb_lock);
-   execbuf_flush_locked(dev, NULL);
-   req->seqno = ++virtio_dev->next_seqno;
-
-   ret = execbuf_locked(dev, req, req->len, handles, num_handles,
-                        in_fence_fd, out_fence_fd, ring_idx);
-
-   simple_mtx_unlock(&virtio_dev->eb_lock);
-
-   return ret;
-}
-
-static int
-execbuf_flush_locked(struct fd_device *dev, int *out_fence_fd)
-{
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-   int ret;
-
-   if (!virtio_dev->reqbuf_len)
-      return 0;
-
-   ret = execbuf_locked(dev, virtio_dev->reqbuf, virtio_dev->reqbuf_len,
-                        NULL, 0, -1, out_fence_fd, 0);
-   if (ret)
-      return ret;
-
-   virtio_dev->reqbuf_len = 0;
-   virtio_dev->reqbuf_cnt = 0;
-
-   return 0;
-}
-
-int
+static int
 virtio_execbuf_flush(struct fd_device *dev)
 {
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-   simple_mtx_lock(&virtio_dev->eb_lock);
-   int ret = execbuf_flush_locked(dev, NULL);
-   simple_mtx_unlock(&virtio_dev->eb_lock);
-   return ret;
-}
-
-int
-virtio_execbuf_flush_locked(struct fd_device *dev)
-{
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-   simple_mtx_assert_locked(&virtio_dev->eb_lock);
-   return execbuf_flush_locked(dev, NULL);
-}
-
-int
-virtio_execbuf(struct fd_device *dev, struct vdrm_ccmd_req *req, bool sync)
-{
-   MESA_TRACE_FUNC();
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-   int fence_fd, ret = 0;
-
-   simple_mtx_lock(&virtio_dev->eb_lock);
-   req->seqno = ++virtio_dev->next_seqno;
-
-   if ((virtio_dev->reqbuf_len + req->len) > sizeof(virtio_dev->reqbuf)) {
-      ret = execbuf_flush_locked(dev, NULL);
-      if (ret)
-         goto out_unlock;
-   }
-
-   memcpy(&virtio_dev->reqbuf[virtio_dev->reqbuf_len], req, req->len);
-   virtio_dev->reqbuf_len += req->len;
-   virtio_dev->reqbuf_cnt++;
-
-   if (!sync)
-      goto out_unlock;
-
-   ret = execbuf_flush_locked(dev, &fence_fd);
-
-out_unlock:
-   simple_mtx_unlock(&virtio_dev->eb_lock);
-
-   if (ret)
-      return ret;
-
-   if (sync) {
-      MESA_TRACE_SCOPE("virtio_execbuf sync");
-      sync_wait(fence_fd, -1);
-      close(fence_fd);
-      virtio_host_sync(dev, req);
-   }
-
-   return 0;
-}
-
-/**
- * Wait until host as processed the specified request.
- */
-void
-virtio_host_sync(struct fd_device *dev, const struct vdrm_ccmd_req *req)
-{
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-
-   while (fd_fence_before(virtio_dev->shmem->base.seqno, req->seqno))
-      sched_yield();
+   return vdrm_flush(to_virtio_device(dev)->vdrm);
 }
 
 /**
@@ -399,6 +209,7 @@ int
 virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *_req)
 {
    MESA_TRACE_FUNC();
+   struct vdrm_device *vdrm = to_virtio_device(dev)->vdrm;
    unsigned req_len = sizeof(struct msm_ccmd_ioctl_simple_req);
    unsigned rsp_len = sizeof(struct msm_ccmd_ioctl_simple_rsp);
 
@@ -414,9 +225,9 @@ virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *_req)
    req->cmd = cmd;
    memcpy(req->payload, _req, _IOC_SIZE(cmd));
 
-   rsp = virtio_alloc_rsp(dev, &req->hdr, rsp_len);
+   rsp = vdrm_alloc_rsp(vdrm, &req->hdr, rsp_len);
 
-   int ret = virtio_execbuf(dev, &req->hdr, true);
+   int ret = vdrm_send_req(vdrm, &req->hdr, true);
 
    if (cmd & IOC_OUT)
       memcpy(_req, rsp->payload, _IOC_SIZE(cmd));
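
Device probe now reduces to vdrm_device_connect() plus validation of the capset it hands back, with a single error path that closes the connection. A compressed sketch of that control flow, using toy_* stubs rather than the real vdrm_device and virgl_renderer_capset_drm:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_caps {
   uint32_t wire_format_version;
   uint32_t version_major, version_minor;
   uint64_t va_size;
};

struct toy_vdrm {
   struct toy_caps caps;
};

/* stands in for vdrm_device_connect(fd, context_type) */
static struct toy_vdrm *toy_connect(void)
{
   struct toy_vdrm *v = calloc(1, sizeof(*v));
   if (v)
      v->caps = (struct toy_caps){ 2, 1, 9, 1ull << 32 };
   return v;
}

/* stands in for vdrm_device_close() */
static void toy_close(struct toy_vdrm *v) { free(v); }

static struct toy_vdrm *probe(void)
{
   struct toy_vdrm *vdrm = toy_connect();
   if (!vdrm)
      return NULL;

   /* same shape as the real checks: every failure funnels to one
    * error label so the connection is always closed */
   if (vdrm->caps.wire_format_version != 2 ||
       vdrm->caps.version_major != 1 ||
       !vdrm->caps.va_size)
      goto error;

   return vdrm;

error:
   toy_close(vdrm);
   return NULL;
}

int main(void)
{
   struct toy_vdrm *v = probe();
   printf("probe %s\n", v ? "ok" : "failed");
   toy_close(v); /* free(NULL) is a no-op */
   return 0;
}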
src/freedreno/drm/virtio/virtio_pipe.c

@@ -91,12 +91,12 @@ virtio_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
       *value = virtio_pipe->chip_id;
       return 0;
    case FD_MAX_FREQ:
-      *value = virtio_dev->caps.u.msm.max_freq;
+      *value = virtio_dev->vdrm->caps.u.msm.max_freq;
       return 0;
    case FD_TIMESTAMP:
       return query_param(pipe, MSM_PARAM_TIMESTAMP, value);
    case FD_NR_PRIORITIES:
-      *value = virtio_dev->caps.u.msm.priorities;
+      *value = virtio_dev->vdrm->caps.u.msm.priorities;
       return 0;
    case FD_CTX_FAULTS:
    case FD_GLOBAL_FAULTS:
@@ -104,7 +104,7 @@ virtio_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param,
    case FD_SUSPEND_COUNT:
       return query_param(pipe, MSM_PARAM_SUSPENDS, value);
    case FD_VA_SIZE:
-      *value = virtio_dev->caps.u.msm.va_size;
+      *value = virtio_dev->vdrm->caps.u.msm.va_size;
       return 0;
    default:
       ERROR_MSG("invalid param id: %d", param);
@@ -116,7 +116,7 @@ static int
 virtio_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence, uint64_t timeout)
 {
    MESA_TRACE_FUNC();
-
+   struct vdrm_device *vdrm = to_virtio_device(pipe->dev)->vdrm;
    struct msm_ccmd_wait_fence_req req = {
          .hdr = MSM_CCMD(WAIT_FENCE, sizeof(req)),
          .queue_id = to_virtio_pipe(pipe)->queue_id,
@@ -129,20 +129,20 @@ virtio_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence, uint64_t timeout)
    /* Do a non-blocking wait to trigger host-side wait-boost,
     * if the host kernel is new enough
     */
-   rsp = virtio_alloc_rsp(pipe->dev, &req.hdr, sizeof(*rsp));
-   ret = virtio_execbuf(pipe->dev, &req.hdr, false);
+   rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));
+   ret = vdrm_send_req(vdrm, &req.hdr, false);
    if (ret)
       goto out;
 
-   virtio_execbuf_flush(pipe->dev);
+   vdrm_flush(vdrm);
 
    if (fence->use_fence_fd)
       return sync_wait(fence->fence_fd, timeout / 1000000);
 
    do {
-      rsp = virtio_alloc_rsp(pipe->dev, &req.hdr, sizeof(*rsp));
+      rsp = vdrm_alloc_rsp(vdrm, &req.hdr, sizeof(*rsp));
 
-      ret = virtio_execbuf(pipe->dev, &req.hdr, true);
+      ret = vdrm_send_req(vdrm, &req.hdr, true);
       if (ret)
          goto out;
 
@@ -213,30 +213,6 @@ static const struct fd_pipe_funcs funcs = {
    .destroy = virtio_pipe_destroy,
 };
 
-static void
-init_shmem(struct fd_device *dev)
-{
-   struct virtio_device *virtio_dev = to_virtio_device(dev);
-
-   simple_mtx_lock(&virtio_dev->rsp_lock);
-
-   /* One would like to do this in virtio_device_new(), but we'd
-    * have to bypass/reinvent fd_bo_new()..
-    */
-   if (unlikely(!virtio_dev->shmem)) {
-      virtio_dev->shmem_bo = fd_bo_new(dev, 0x4000,
-                                       _FD_BO_VIRTIO_SHM, "shmem");
-      virtio_dev->shmem = fd_bo_map(virtio_dev->shmem_bo);
-      virtio_dev->shmem_bo->bo_reuse = NO_CACHE;
-
-      uint32_t offset = virtio_dev->shmem->base.rsp_mem_offset;
-      virtio_dev->rsp_mem_len = fd_bo_size(virtio_dev->shmem_bo) - offset;
-      virtio_dev->rsp_mem = &((uint8_t *)virtio_dev->shmem)[offset];
-   }
-
-   simple_mtx_unlock(&virtio_dev->rsp_lock);
-}
-
 struct fd_pipe *
 virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
 {
@@ -245,11 +221,10 @@ virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
       [FD_PIPE_2D] = MSM_PIPE_2D0,
    };
    struct virtio_device *virtio_dev = to_virtio_device(dev);
+   struct vdrm_device *vdrm = virtio_dev->vdrm;
    struct virtio_pipe *virtio_pipe = NULL;
    struct fd_pipe *pipe = NULL;
 
-   init_shmem(dev);
-
    virtio_pipe = calloc(1, sizeof(*virtio_pipe));
    if (!virtio_pipe) {
       ERROR_MSG("allocation failed");
@@ -264,10 +239,10 @@ virtio_pipe_new(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
    pipe->dev = dev;
    virtio_pipe->pipe = pipe_id[id];
 
-   virtio_pipe->gpu_id = virtio_dev->caps.u.msm.gpu_id;
-   virtio_pipe->gmem = virtio_dev->caps.u.msm.gmem_size;
-   virtio_pipe->gmem_base = virtio_dev->caps.u.msm.gmem_base;
-   virtio_pipe->chip_id = virtio_dev->caps.u.msm.chip_id;
+   virtio_pipe->gpu_id = vdrm->caps.u.msm.gpu_id;
+   virtio_pipe->gmem = vdrm->caps.u.msm.gmem_size;
+   virtio_pipe->gmem_base = vdrm->caps.u.msm.gmem_base;
+   virtio_pipe->chip_id = vdrm->caps.u.msm.chip_id;
 
 
    if (!(virtio_pipe->gpu_id || virtio_pipe->chip_id))
src/freedreno/drm/virtio/virtio_priv.h

@@ -42,21 +42,15 @@
 #include "virglrenderer_hw.h"
 #include "msm_proto.h"
 
+#include "vdrm.h"
+
 struct virtio_device {
    struct fd_device base;
 
-   struct fd_bo *shmem_bo;
-   struct msm_shmem *shmem;
-   uint8_t *rsp_mem;
-   uint32_t rsp_mem_len;
-   uint32_t next_rsp_off;
-   simple_mtx_t rsp_lock;
-   simple_mtx_t eb_lock;
+   struct vdrm_device *vdrm;
 
    uint32_t next_blob_id;
-   uint32_t next_seqno;
 
-   struct virgl_renderer_capset_drm caps;
+   struct msm_shmem *shmem;
 
    /*
    * Notes on address space allocation:
@@ -78,19 +72,9 @@ struct virtio_device {
    */
    struct util_vma_heap address_space;
    simple_mtx_t address_space_lock;
-
-   uint32_t reqbuf_len;
-   uint32_t reqbuf_cnt;
-   uint8_t reqbuf[0x4000];
 };
 FD_DEFINE_CAST(fd_device, virtio_device);
 
-#define virtio_ioctl(fd, name, args) ({ \
-   MESA_TRACE_SCOPE(#name); \
-   int ret = drmIoctl((fd), DRM_IOCTL_ ## name, (args)); \
-   ret; \
-})
-
 struct fd_device *virtio_device_new(int fd, drmVersionPtr version);
 
 static inline void
@@ -177,14 +161,6 @@ struct fd_bo *virtio_bo_from_handle(struct fd_device *dev, uint32_t size,
 /*
  * Internal helpers:
  */
-void *virtio_alloc_rsp(struct fd_device *dev, struct vdrm_ccmd_req *hdr, uint32_t sz);
-int virtio_execbuf_fenced(struct fd_device *dev, struct vdrm_ccmd_req *req,
-                          uint32_t *handles, uint32_t num_handles,
-                          int in_fence_fd, int *out_fence_fd, int ring_idx);
-int virtio_execbuf_flush(struct fd_device *dev);
-int virtio_execbuf_flush_locked(struct fd_device *dev);
-int virtio_execbuf(struct fd_device *dev, struct vdrm_ccmd_req *req, bool sync);
-void virtio_host_sync(struct fd_device *dev, const struct vdrm_ccmd_req *req);
 int virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *req);
 
 #endif /* VIRTIO_PRIV_H_ */
src/freedreno/drm/virtio/virtio_ringbuffer.c

@@ -190,9 +190,18 @@ flush_submit_list(struct list_head *submit_list)
       nr_guest_handles = 0;
    }
 
-   virtio_execbuf_fenced(dev, &req->hdr, guest_handles, nr_guest_handles,
-                         fd_submit->in_fence_fd, &out_fence->fence_fd,
-                         virtio_pipe->ring_idx);
+   struct vdrm_execbuf_params p = {
+      .req = &req->hdr,
+      .handles = guest_handles,
+      .num_handles = nr_guest_handles,
+      .has_in_fence_fd = !!(fd_submit->in_fence_fd != -1),
+      .needs_out_fence_fd = true,
+      .fence_fd = fd_submit->in_fence_fd,
+      .ring_idx = virtio_pipe->ring_idx,
+   };
+   vdrm_execbuf(to_virtio_device(dev)->vdrm, &p);
+
+   out_fence->fence_fd = p.fence_fd;
 
    free(req);
 