venus: use upstream DRM_IOCTL_VIRTGPU_CONTEXT_INIT

And remove SIMULATE_CONTEXT_INIT and VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT
now that we expect guest kernel support for CONTEXT_INIT, which comes
with standard support for up to 64 rings.

Signed-off-by: Ryan Neph <ryanneph@google.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19691>
Ryan Neph authored 2021-12-14 17:10:36 -08:00, committed by Marge Bot
parent a72f84c4a3
commit 6dce6012df
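
For reference, below is a minimal standalone sketch of the upstream CONTEXT_INIT flow this commit switches to, as seen from guest userspace and independent of the Venus wrappers changed in the diff below. It assumes the uapi definitions from the kernel's include/uapi/drm/virtgpu_drm.h (Linux 5.16 or newer) and libdrm's drmIoctl(); VENUS_CAPSET_ID, virtgpu_context_init_example(), and the render-node path are illustrative placeholders, not part of this commit.

/* Hypothetical standalone example; not part of this commit. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <drm/virtgpu_drm.h>

/* Placeholder for the Venus capset id the renderer advertises
 * (VIRGL_RENDERER_CAPSET_VENUS in virglrenderer). */
#define VENUS_CAPSET_ID 4

static int
virtgpu_context_init_example(int fd)
{
   /* Probe for CONTEXT_INIT support before relying on it. */
   uint64_t has_context_init = 0;
   struct drm_virtgpu_getparam getparam = {
      .param = VIRTGPU_PARAM_CONTEXT_INIT,
      .value = (uintptr_t)&has_context_init,
   };
   if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &getparam) ||
       !has_context_init)
      return -1; /* guest kernel too old */

   /* The same three params the Venus change below sets. */
   struct drm_virtgpu_context_set_param params[] = {
      { .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = VENUS_CAPSET_ID },
      { .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 64 },
      { .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 0 },
   };
   struct drm_virtgpu_context_init init = {
      .num_params = sizeof(params) / sizeof(params[0]),
      .ctx_set_params = (uintptr_t)params,
   };
   /* Must run before any other context-creating ioctl on this fd. */
   return drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}

int
main(void)
{
   int fd = open("/dev/dri/renderD128", O_RDWR); /* path varies per system */
   if (fd < 0 || virtgpu_context_init_example(fd))
      return 1;
   printf("virtio-gpu Venus context initialized\n");
   return 0;
}

Asking for NUM_RINGS = 64 at context-init time is what lets the driver drop the old MAX_SYNC_QUEUE_COUNT query: the ring count is now chosen by the guest rather than reported by the host.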

--- a/src/virtio/vulkan/vn_renderer_virtgpu.c
+++ b/src/virtio/vulkan/vn_renderer_virtgpu.c

@@ -26,28 +26,6 @@
 #include "vn_renderer_internal.h"
 
 /* XXX WIP kernel uapi */
-#ifndef VIRTGPU_PARAM_CONTEXT_INIT
-#define VIRTGPU_PARAM_CONTEXT_INIT 6
-#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
-struct drm_virtgpu_context_set_param {
-   __u64 param;
-   __u64 value;
-};
-struct drm_virtgpu_context_init {
-   __u32 num_params;
-   __u32 pad;
-   __u64 ctx_set_params;
-};
-#define DRM_VIRTGPU_CONTEXT_INIT 0xb
-#define DRM_IOCTL_VIRTGPU_CONTEXT_INIT \
-   DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_CONTEXT_INIT, \
-            struct drm_virtgpu_context_init)
-#endif /* VIRTGPU_PARAM_CONTEXT_INIT */
-
-#ifndef VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT
-#define VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT 100
-#endif /* VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT */
-
 #ifndef VIRTGPU_PARAM_GUEST_VRAM
 /* All guest allocations happen via virtgpu dedicated heap. */
 #define VIRTGPU_PARAM_GUEST_VRAM 9
@@ -59,7 +37,6 @@ struct drm_virtgpu_context_init {
 
 /* XXX comment these out to really use kernel uapi */
 #define SIMULATE_BO_SIZE_FIX 1
-// #define SIMULATE_CONTEXT_INIT 1
 #define SIMULATE_SYNCOBJ 1
 #define SIMULATE_SUBMIT 1
 
@@ -596,15 +573,6 @@ virtgpu_ioctl(struct virtgpu *gpu, unsigned long request, void *args)
 static uint64_t
 virtgpu_ioctl_getparam(struct virtgpu *gpu, uint64_t param)
 {
-#ifdef SIMULATE_CONTEXT_INIT
-   if (param == VIRTGPU_PARAM_CONTEXT_INIT)
-      return 1;
-#endif
-#ifdef SIMULATE_SUBMIT
-   if (param == VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT)
-      return 16;
-#endif
-
    /* val must be zeroed because kernel only writes the lower 32 bits */
    uint64_t val = 0;
    struct drm_virtgpu_getparam args = {
@@ -623,11 +591,6 @@ virtgpu_ioctl_get_caps(struct virtgpu *gpu,
                        void *capset,
                        size_t capset_size)
 {
-#ifdef SIMULATE_CONTEXT_INIT
-   if (id == VIRGL_RENDERER_CAPSET_VENUS && version == 0)
-      return 0;
-#endif
-
    struct drm_virtgpu_get_caps args = {
       .cap_set_id = id,
       .cap_set_ver = version,
@@ -642,18 +605,24 @@ static int
 virtgpu_ioctl_context_init(struct virtgpu *gpu,
                            enum virgl_renderer_capset capset_id)
 {
-#ifdef SIMULATE_CONTEXT_INIT
-   if (capset_id == VIRGL_RENDERER_CAPSET_VENUS)
-      return 0;
-#endif
-
+   struct drm_virtgpu_context_set_param ctx_set_params[3] = {
+      {
+         .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,
+         .value = capset_id,
+      },
+      {
+         .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS,
+         .value = 64,
+      },
+      {
+         .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK,
+         .value = 0, /* don't generate drm_events on fence signaling */
+      },
+   };
+
    struct drm_virtgpu_context_init args = {
-      .num_params = 1,
-      .ctx_set_params = (uintptr_t) &
-                        (struct drm_virtgpu_context_set_param){
-                           .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,
-                           .value = capset_id,
-                        },
+      .num_params = ARRAY_SIZE(ctx_set_params),
+      .ctx_set_params = (uintptr_t)&ctx_set_params,
    };
 
    return virtgpu_ioctl(gpu, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &args);
@@ -1559,13 +1528,8 @@ virtgpu_init_params(struct virtgpu *gpu)
       return VK_ERROR_INITIALIZATION_FAILED;
    }
 
-   val = virtgpu_ioctl_getparam(gpu, VIRTGPU_PARAM_MAX_SYNC_QUEUE_COUNT);
-   if (!val) {
-      if (VN_DEBUG(INIT))
-         vn_log(gpu->instance, "no sync queue support");
-      return VK_ERROR_INITIALIZATION_FAILED;
-   }
-   gpu->max_sync_queue_count = val;
+   /* implied by CONTEXT_INIT uapi */
+   gpu->max_sync_queue_count = 64;
 
    return VK_SUCCESS;
 }