virtio/drm: Split out common virtgpu drm structs
Since these all ended up the same with the (not yet merged) intel and amd
implementations of drm native context, split these out so they can be
shared.  This will also make it easier to extract a shared helper that can
be re-used across native-context drivers.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> # virtio-intel
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24733>
parent 9bdee8cd39
commit b90244776a

7 changed files with 110 additions and 94 deletions
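Note: before the diff, here is a minimal, self-contained sketch of the
pattern this commit applies (illustrative C, not the exact mesa sources):
the common request/response headers become vdrm_ccmd_req/vdrm_ccmd_rsp in
a shared header, each per-driver request embeds the request header as its
first member, and DEFINE_CAST() generates the downcast helper that driver
code uses to recover the full request.

#include <stdint.h>

/* Common request header, shared across native-context drivers: */
struct vdrm_ccmd_req {
	uint32_t cmd;
	uint32_t len;
	uint32_t seqno;
	uint32_t rsp_off;
};

#define DEFINE_CAST(parent, child) \
static inline struct child *to_##child(const struct parent *x) \
{ \
	return (struct child *)x; \
}

/* A driver-specific request embeds the common header as first member: */
struct msm_ccmd_gem_new_req {
	struct vdrm_ccmd_req hdr;
	uint64_t iova;
	uint64_t size;
	uint32_t flags;
	uint32_t blob_id;
};
DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_new_req)

/* Generic code sees only vdrm_ccmd_req; driver code recovers the full
 * request with the generated cast helper (illustrative dispatch):
 */
static uint64_t handle_gem_new(const struct vdrm_ccmd_req *hdr)
{
	struct msm_ccmd_gem_new_req *req = to_msm_ccmd_gem_new_req(hdr);
	return req->size;
}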
@@ -23,31 +23,7 @@
  * Defines the layout of shmem buffer used for host->guest communication.
  */
 struct msm_shmem {
-	/**
-	 * The sequence # of last cmd processed by the host
-	 */
-	uint32_t seqno;
-
-	/**
-	 * Offset to the start of rsp memory region in the shmem buffer. This
-	 * is set by the host when the shmem buffer is allocated, to allow for
-	 * extending the shmem buffer with new fields. The size of the rsp
-	 * memory region is the size of the shmem buffer (controlled by the
-	 * guest) minus rsp_mem_offset.
-	 *
-	 * The guest should use the msm_shmem_has_field() macro to determine
-	 * if the host supports a given field, ie. to handle compatibility of
-	 * newer guest vs older host.
-	 *
-	 * Making the guest userspace responsible for backwards compatibility
-	 * simplifies the host VMM.
-	 */
-	uint32_t rsp_mem_offset;
-
-#define msm_shmem_has_field(shmem, field) ({ \
-	struct msm_shmem *_shmem = (shmem); \
-	(_shmem->rsp_mem_offset > offsetof(struct msm_shmem, field)); \
-})
+	struct vdrm_shmem base;
 
 	/**
 	 * Counter that is incremented on asynchronous errors, like SUBMIT
@@ -61,12 +37,7 @@ struct msm_shmem {
 	 */
 	uint32_t global_faults;
 };
 
-#define DEFINE_CAST(parent, child) \
-static inline struct child *to_##child(const struct parent *x) \
-{ \
-	return (struct child *)x; \
-}
+DEFINE_CAST(vdrm_shmem, msm_shmem)
 
 /*
  * Possible cmd types for "command stream", ie. payload of EXECBUF ioctl:
@@ -86,30 +57,13 @@ enum msm_ccmd {
 	MSM_CCMD_LAST,
 };
 
-struct msm_ccmd_req {
-	uint32_t cmd;
-	uint32_t len;
-	uint32_t seqno;
-
-	/* Offset into shmem ctrl buffer to write response. The host ensures
-	 * that it doesn't write outside the bounds of the ctrl buffer, but
-	 * otherwise it is up to the guest to manage allocation of where responses
-	 * should be written in the ctrl buf.
-	 */
-	uint32_t rsp_off;
-};
-
-struct msm_ccmd_rsp {
-	uint32_t len;
-};
-
 #ifdef __cplusplus
 #define MSM_CCMD(_cmd, _len) { \
 	.cmd = MSM_CCMD_##_cmd, \
 	.len = (_len), \
 }
 #else
-#define MSM_CCMD(_cmd, _len) (struct msm_ccmd_req){ \
+#define MSM_CCMD(_cmd, _len) (struct vdrm_ccmd_req){ \
 	.cmd = MSM_CCMD_##_cmd, \
 	.len = (_len), \
 }
@@ -119,7 +73,7 @@ struct msm_ccmd_rsp {
  * MSM_CCMD_NOP
  */
 struct msm_ccmd_nop_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 };
 
 /*
@@ -128,15 +82,15 @@ struct msm_ccmd_nop_req {
  * Forward simple/flat IOC_RW or IOC_W ioctls.  Limited ioctls are supported.
  */
 struct msm_ccmd_ioctl_simple_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t cmd;
 	uint8_t payload[];
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_ioctl_simple_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_ioctl_simple_req)
 
 struct msm_ccmd_ioctl_simple_rsp {
-	struct msm_ccmd_rsp hdr;
+	struct vdrm_ccmd_rsp hdr;
 
 	/* ioctl return value, interrupted syscalls are handled on the host without
 	 * returning to the guest.
@@ -160,14 +114,14 @@ struct msm_ccmd_ioctl_simple_rsp {
  * No response.
 */
 struct msm_ccmd_gem_new_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint64_t iova;
 	uint64_t size;
 	uint32_t flags;
 	uint32_t blob_id;
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_new_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_new_req)
 
 /*
  * MSM_CCMD_GEM_SET_IOVA
@@ -176,12 +130,12 @@ DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_new_req)
  * (by setting it to zero) when a BO is freed.
  */
 struct msm_ccmd_gem_set_iova_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint64_t iova;
 	uint32_t res_id;
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_iova_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_set_iova_req)
 
 /*
  * MSM_CCMD_GEM_CPU_PREP
@@ -193,15 +147,15 @@ DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_iova_req)
  * should poll if needed.
  */
 struct msm_ccmd_gem_cpu_prep_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t res_id;
 	uint32_t op;
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_cpu_prep_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_cpu_prep_req)
 
 struct msm_ccmd_gem_cpu_prep_rsp {
-	struct msm_ccmd_rsp hdr;
+	struct vdrm_ccmd_rsp hdr;
 
 	int32_t ret;
 };
@@ -214,7 +168,7 @@ struct msm_ccmd_gem_cpu_prep_rsp {
  * No response.
 */
 struct msm_ccmd_gem_set_name_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t res_id;
 	/* Note: packet size aligned to 4 bytes, so the string name may
@@ -223,7 +177,7 @@ struct msm_ccmd_gem_set_name_req {
 	uint32_t len;
 	uint8_t payload[];
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_name_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_set_name_req)
 
 /*
  * MSM_CCMD_GEM_SUBMIT
@@ -241,7 +195,7 @@ DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_set_name_req)
  * No response.
 */
 struct msm_ccmd_gem_submit_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t flags;
 	uint32_t queue_id;
@@ -265,7 +219,7 @@ struct msm_ccmd_gem_submit_req {
 	 */
 	int8_t payload[];
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_submit_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_submit_req)
 
 /*
  * MSM_CCMD_GEM_UPLOAD
@@ -275,7 +229,7 @@ DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_submit_req)
  * No response.
 */
 struct msm_ccmd_gem_upload_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t res_id;
 	uint32_t pad;
@@ -287,7 +241,7 @@ struct msm_ccmd_gem_upload_req {
 	uint32_t len;
 	uint8_t payload[];
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_upload_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_gem_upload_req)
 
 /*
  * MSM_CCMD_SUBMITQUEUE_QUERY
@@ -295,16 +249,16 @@ DEFINE_CAST(msm_ccmd_req, msm_ccmd_gem_upload_req)
  * Maps to DRM_MSM_SUBMITQUEUE_QUERY
 */
 struct msm_ccmd_submitqueue_query_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t queue_id;
 	uint32_t param;
 	uint32_t len;   /* size of payload in rsp */
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_submitqueue_query_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_submitqueue_query_req)
 
 struct msm_ccmd_submitqueue_query_rsp {
-	struct msm_ccmd_rsp hdr;
+	struct vdrm_ccmd_rsp hdr;
 
 	int32_t ret;
 	uint32_t out_len;
@@ -321,15 +275,15 @@ struct msm_ccmd_submitqueue_query_rsp {
  * should poll if needed.
 */
 struct msm_ccmd_wait_fence_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t queue_id;
 	uint32_t fence;
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_wait_fence_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_wait_fence_req)
 
 struct msm_ccmd_wait_fence_rsp {
-	struct msm_ccmd_rsp hdr;
+	struct vdrm_ccmd_rsp hdr;
 
 	int32_t ret;
 };
@@ -346,7 +300,7 @@ struct msm_ccmd_wait_fence_rsp {
  * No response.
 */
 struct msm_ccmd_set_debuginfo_req {
-	struct msm_ccmd_req hdr;
+	struct vdrm_ccmd_req hdr;
 
 	uint32_t comm_len;
 	uint32_t cmdline_len;
@@ -357,6 +311,6 @@ struct msm_ccmd_set_debuginfo_req {
 	 */
 	int8_t payload[];
 };
-DEFINE_CAST(msm_ccmd_req, msm_ccmd_set_debuginfo_req)
+DEFINE_CAST(vdrm_ccmd_req, msm_ccmd_set_debuginfo_req)
 
 #endif /* MSM_PROTO_H_ */
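A usage note on the MSM_CCMD() initializer kept above: it fills in the
common header (cmd and len) for a driver request, which is then submitted
by passing a pointer to the embedded hdr.  A hedged sketch using only
names that appear in this diff; the wrapper function itself is
illustrative, not from mesa:

/* Build and submit a NOP request; sync=true waits for the host to
 * consume the command.
 */
static int send_nop(struct fd_device *dev)
{
	struct msm_ccmd_nop_req req = {
		.hdr = MSM_CCMD(NOP, sizeof(req)),
	};

	return virtio_execbuf(dev, &req.hdr, true);
}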
@@ -62,7 +62,7 @@ virtio_bo_offset(struct fd_bo *bo, uint64_t *offset)
 	if (virtio_bo->has_upload_seqno) {
 		virtio_bo->has_upload_seqno = false;
 		virtio_execbuf_flush(bo->dev);
-		virtio_host_sync(bo->dev, &(struct msm_ccmd_req) {
+		virtio_host_sync(bo->dev, &(struct vdrm_ccmd_req) {
 			.seqno = virtio_bo->upload_seqno,
 		});
 	}
@@ -214,7 +214,7 @@ virtio_device_new(int fd, drmVersionPtr version)
 }
 
 void *
-virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *req, uint32_t sz)
+virtio_alloc_rsp(struct fd_device *dev, struct vdrm_ccmd_req *req, uint32_t sz)
 {
 	struct virtio_device *virtio_dev = to_virtio_device(dev);
 	unsigned off;
@@ -233,7 +233,7 @@ virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *req, uint32_t sz)
 
 	req->rsp_off = off;
 
-	struct msm_ccmd_rsp *rsp = (void *)&virtio_dev->rsp_mem[off];
+	struct vdrm_ccmd_rsp *rsp = (void *)&virtio_dev->rsp_mem[off];
 	rsp->len = sz;
 
 	return rsp;
@@ -280,7 +280,7 @@ execbuf_locked(struct fd_device *dev, void *cmd, uint32_t cmd_size,
  * guest and host CPU.
 */
 int
-virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
+virtio_execbuf_fenced(struct fd_device *dev, struct vdrm_ccmd_req *req,
                       uint32_t *handles, uint32_t num_handles,
                       int in_fence_fd, int *out_fence_fd, int ring_idx)
 {
@@ -338,7 +338,7 @@ virtio_execbuf_flush_locked(struct fd_device *dev)
 }
 
 int
-virtio_execbuf(struct fd_device *dev, struct msm_ccmd_req *req, bool sync)
+virtio_execbuf(struct fd_device *dev, struct vdrm_ccmd_req *req, bool sync)
 {
 	MESA_TRACE_FUNC();
 	struct virtio_device *virtio_dev = to_virtio_device(dev);
@@ -382,11 +382,11 @@ out_unlock:
 * Wait until host as processed the specified request.
 */
 void
-virtio_host_sync(struct fd_device *dev, const struct msm_ccmd_req *req)
+virtio_host_sync(struct fd_device *dev, const struct vdrm_ccmd_req *req)
 {
 	struct virtio_device *virtio_dev = to_virtio_device(dev);
 
-	while (fd_fence_before(virtio_dev->shmem->seqno, req->seqno))
+	while (fd_fence_before(virtio_dev->shmem->base.seqno, req->seqno))
 		sched_yield();
 }
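One detail in virtio_host_sync() above: seqno is a free-running 32-bit
counter, so comparing it with a plain < would break at wraparound.
fd_fence_before() is assumed here to be the usual wraparound-safe
comparison; a sketch of that idiom (an assumption about its
implementation, shown only to explain the call):

#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "a comes before b": the unsigned subtraction is
 * reduced mod 2^32, so the sign of the result tells which counter is
 * behind, as long as the two values are within 2^31 of each other.
 */
static inline bool seqno_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}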
@@ -53,10 +53,10 @@ query_faults(struct fd_pipe *pipe, uint64_t *value)
 	uint32_t async_error = 0;
 	uint64_t global_faults;
 
-	if (msm_shmem_has_field(virtio_dev->shmem, async_error))
+	if (vdrm_shmem_has_field(virtio_dev->shmem, async_error))
 		async_error = virtio_dev->shmem->async_error;
 
-	if (msm_shmem_has_field(virtio_dev->shmem, global_faults)) {
+	if (vdrm_shmem_has_field(virtio_dev->shmem, global_faults)) {
 		global_faults = virtio_dev->shmem->global_faults;
 	} else {
 		int ret = query_param(pipe, MSM_PARAM_FAULTS, &global_faults);
@@ -229,7 +229,7 @@ init_shmem(struct fd_device *dev)
 	virtio_dev->shmem = fd_bo_map(virtio_dev->shmem_bo);
 	virtio_dev->shmem_bo->bo_reuse = NO_CACHE;
 
-	uint32_t offset = virtio_dev->shmem->rsp_mem_offset;
+	uint32_t offset = virtio_dev->shmem->base.rsp_mem_offset;
 	virtio_dev->rsp_mem_len = fd_bo_size(virtio_dev->shmem_bo) - offset;
 	virtio_dev->rsp_mem = &((uint8_t *)virtio_dev->shmem)[offset];
 }
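The rsp_mem bookkeeping in init_shmem() above follows directly from the
vdrm_shmem contract: the host-chosen base.rsp_mem_offset splits the
buffer into the structured shmem fields and the response region.  A
sketch of the resulting layout:

/* Layout of the shmem BO after init_shmem() (sketch):
 *
 *  offset 0                base.rsp_mem_offset           fd_bo_size(bo)
 *  |  struct msm_shmem ... |  rsp_mem (response buffer)  |
 *
 * so:  rsp_mem_len = fd_bo_size(shmem_bo) - base.rsp_mem_offset
 *      rsp_mem     = (uint8_t *)shmem + base.rsp_mem_offset
 */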
@@ -177,14 +177,14 @@ struct fd_bo *virtio_bo_from_handle(struct fd_device *dev, uint32_t size,
 /*
  * Internal helpers:
 */
-void *virtio_alloc_rsp(struct fd_device *dev, struct msm_ccmd_req *hdr, uint32_t sz);
-int virtio_execbuf_fenced(struct fd_device *dev, struct msm_ccmd_req *req,
+void *virtio_alloc_rsp(struct fd_device *dev, struct vdrm_ccmd_req *hdr, uint32_t sz);
+int virtio_execbuf_fenced(struct fd_device *dev, struct vdrm_ccmd_req *req,
                           uint32_t *handles, uint32_t num_handles,
                           int in_fence_fd, int *out_fence_fd, int ring_idx);
 int virtio_execbuf_flush(struct fd_device *dev);
 int virtio_execbuf_flush_locked(struct fd_device *dev);
-int virtio_execbuf(struct fd_device *dev, struct msm_ccmd_req *req, bool sync);
-void virtio_host_sync(struct fd_device *dev, const struct msm_ccmd_req *req);
+int virtio_execbuf(struct fd_device *dev, struct vdrm_ccmd_req *req, bool sync);
+void virtio_host_sync(struct fd_device *dev, const struct vdrm_ccmd_req *req);
 int virtio_simple_ioctl(struct fd_device *dev, unsigned cmd, void *req);
 
 #endif /* VIRTIO_PRIV_H_ */
@@ -204,7 +204,7 @@ execbuf_flush(struct tu_device *device)
 }
 
 static int
-send_ccmd(struct tu_device *device, struct msm_ccmd_req *req, bool sync)
+send_ccmd(struct tu_device *device, struct vdrm_ccmd_req *req, bool sync)
 {
 	MESA_TRACE_FUNC();
 	struct tu_virtio_device *vdev = device->vdev;
@@ -244,7 +244,7 @@ out_unlock:
 }
 
 static void *
-virtio_alloc_rsp(struct tu_device *dev, struct msm_ccmd_req *req, uint32_t sz)
+virtio_alloc_rsp(struct tu_device *dev, struct vdrm_ccmd_req *req, uint32_t sz)
 {
 	struct tu_virtio_device *vdev = dev->vdev;
 	unsigned off;
@@ -263,7 +263,7 @@ virtio_alloc_rsp(struct tu_device *dev, struct msm_ccmd_req *req, uint32_t sz)
 
 	req->rsp_off = off;
 
-	struct msm_ccmd_rsp *rsp = (struct msm_ccmd_rsp *)&vdev->rsp_mem[off];
+	struct vdrm_ccmd_rsp *rsp = (struct vdrm_ccmd_rsp *)&vdev->rsp_mem[off];
 	rsp->len = sz;
 
 	return rsp;
@@ -347,7 +347,7 @@ init_shmem(struct tu_device *dev, struct tu_virtio_device *vdev)
 		return vk_startup_errorf(instance, result, "failed to map shmem buffer");
 	}
 
-	uint32_t offset = vdev->shmem->rsp_mem_offset;
+	uint32_t offset = vdev->shmem->base.rsp_mem_offset;
 	vdev->rsp_mem_len = args.size - offset;
 	vdev->rsp_mem = &((uint8_t *)vdev->shmem)[offset];
 
@@ -361,10 +361,10 @@ query_faults(struct tu_device *dev, uint64_t *value)
 	uint32_t async_error = 0;
 	uint64_t global_faults;
 
-	if (msm_shmem_has_field(vdev->shmem, async_error))
+	if (vdrm_shmem_has_field(vdev->shmem, async_error))
 		async_error = vdev->shmem->async_error;
 
-	if (msm_shmem_has_field(vdev->shmem, global_faults)) {
+	if (vdrm_shmem_has_field(vdev->shmem, global_faults)) {
 		global_faults = vdev->shmem->global_faults;
 	} else {
 		int ret = tu_drm_get_param(dev, MSM_PARAM_FAULTS, &global_faults);
@@ -30,4 +30,66 @@ struct virgl_renderer_capset_drm {
 	} u;
 };
 
+/**
+ * Defines the layout of shmem buffer used for host->guest communication.
+ */
+struct vdrm_shmem {
+	/**
+	 * The sequence # of last cmd processed by the host
+	 */
+	uint32_t seqno;
+
+	/**
+	 * Offset to the start of rsp memory region in the shmem buffer. This
+	 * is set by the host when the shmem buffer is allocated, to allow for
+	 * extending the shmem buffer with new fields. The size of the rsp
+	 * memory region is the size of the shmem buffer (controlled by the
+	 * guest) minus rsp_mem_offset.
+	 *
+	 * The guest should use the vdrm_shmem_has_field() macro to determine
+	 * if the host supports a given field, ie. to handle compatibility of
+	 * newer guest vs older host.
+	 *
+	 * Making the guest userspace responsible for backwards compatibility
+	 * simplifies the host VMM.
+	 */
+	uint32_t rsp_mem_offset;
+
+#define vdrm_shmem_has_field(shmem, field) ({ \
+	struct vdrm_shmem *_shmem = &(shmem)->base; \
+	(_shmem->rsp_mem_offset > offsetof(__typeof__(*(shmem)), field)); \
+})
+};
+
+/**
+ * A Guest -> Host request header.
+ */
+struct vdrm_ccmd_req {
+	uint32_t cmd;
+	uint32_t len;
+	uint32_t seqno;
+
+	/* Offset into shmem ctrl buffer to write response. The host ensures
+	 * that it doesn't write outside the bounds of the ctrl buffer, but
+	 * otherwise it is up to the guest to manage allocation of where responses
+	 * should be written in the ctrl buf.
+	 *
+	 * Only applicable for cmds that have a response message.
+	 */
+	uint32_t rsp_off;
+};
+
+/**
+ * A Guest <- Host response header.
+ */
+struct vdrm_ccmd_rsp {
+	uint32_t len;
+};
+
+#define DEFINE_CAST(parent, child) \
+static inline struct child *to_##child(const struct parent *x) \
+{ \
+	return (struct child *)x; \
+}
+
 #endif /* DRM_HW_H_ */
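Note that vdrm_shmem_has_field() above is written against the embedding
struct rather than vdrm_shmem itself: it takes the driver's shmem
pointer, reaches through ->base for rsp_mem_offset, but computes
offsetof() on the driver type, so each driver can keep growing its own
shmem layout.  A hedged usage sketch (the struct and macro are from this
diff; the function and include name, taken from the DRM_HW_H_ guard, are
assumptions):

#include <stddef.h>
#include <stdint.h>
#include "drm_hw.h" /* the header added in this diff (name assumed) */

/* Driver shmem embedding the common base, as msm_shmem does above: */
struct msm_shmem {
	struct vdrm_shmem base;
	uint32_t async_error;
	uint32_t global_faults;
};

uint32_t read_global_faults(struct msm_shmem *shmem)
{
	/* The host places rsp_mem immediately after the fields it knows
	 * about, so any field at an offset below base.rsp_mem_offset is
	 * backed by host-written memory.
	 */
	if (vdrm_shmem_has_field(shmem, global_faults))
		return shmem->global_faults;

	return 0; /* older host: fall back to an ioctl query, as the
	           * drivers in this diff do */
}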