venus: add vn_renderer_shmem

CPU BOs and GPU BOs are used differently enough that it makes sense to
treat them as distinct objects.  This commit adds vn_renderer_shmem to
represent CPU BOs.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/10437>
This commit is contained in:
Chia-I Wu 2021-04-22 13:33:55 -07:00
parent 2c2fb015cc
commit 452a49fe19
4 changed files with 185 additions and 0 deletions

View file

@ -78,7 +78,9 @@ struct vn_command_buffer;
struct vn_cs_encoder;
struct vn_cs_decoder;
struct vn_renderer;
struct vn_renderer_shmem;
struct vn_renderer_bo;
struct vn_renderer_sync;

View file

@ -8,6 +8,13 @@
#include "vn_common.h"
/* A CPU-visible shared memory allocation ("CPU BO") provided by the
 * renderer backend.  Reference-counted via vn_renderer_shmem_ref/unref;
 * created with refcount 1 (see vn_renderer_shmem_create asserts).
 */
struct vn_renderer_shmem {
   atomic_int refcount; /* starts at 1; object is destroyed when it hits 0 */
   uint32_t res_id; /* renderer resource id; always non-zero once created */
   size_t mmap_size; /* for internal use only (i.e., munmap) */
   void *mmap_ptr; /* CPU mapping of the shmem */
};
struct vn_renderer_bo_ops {
void (*destroy)(struct vn_renderer_bo *bo);
@ -192,8 +199,16 @@ struct vn_renderer_ops {
struct vn_renderer_sync *(*sync_create)(struct vn_renderer *renderer);
};
/* Backend entry points for shmem management; filled in by the concrete
 * renderer (virtgpu or vtest) at init time.
 */
struct vn_renderer_shmem_ops {
   /* allocate a CPU-mappable shmem of at least \p size bytes; NULL on
    * failure */
   struct vn_renderer_shmem *(*create)(struct vn_renderer *renderer,
                                       size_t size);
   /* free the shmem; called by vn_renderer_shmem_unref when the refcount
    * drops to zero */
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_shmem *shmem);
};
/* Abstract renderer; backends populate the op tables during init. */
struct vn_renderer {
   struct vn_renderer_ops ops; /* general renderer entry points */
   struct vn_renderer_shmem_ops shmem_ops; /* CPU shmem entry points */
};
VkResult
@ -264,6 +279,46 @@ vn_renderer_wait(struct vn_renderer *renderer,
return renderer->ops.wait(renderer, wait);
}
/* Allocate a CPU-visible shmem of at least \p size bytes through the
 * backend, or return NULL on failure.  A successful allocation starts
 * with a single reference and satisfies the asserted invariants below.
 */
static inline struct vn_renderer_shmem *
vn_renderer_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vn_renderer_shmem *shmem =
      renderer->shmem_ops.create(renderer, size);
   if (!shmem)
      return NULL;

   /* invariants every backend must establish on success */
   assert(atomic_load(&shmem->refcount) == 1);
   assert(shmem->res_id);
   assert(shmem->mmap_size >= size);
   assert(shmem->mmap_ptr);

   return shmem;
}
/* Acquire an additional reference and return \p shmem for convenience.
 *
 * The relaxed increment is sufficient: the caller must already hold a
 * reference, so the object cannot be destroyed concurrently.  \p renderer
 * is unused here; it is kept for symmetry with vn_renderer_shmem_unref.
 */
static inline struct vn_renderer_shmem *
vn_renderer_shmem_ref(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *shmem)
{
   const int old =
      atomic_fetch_add_explicit(&shmem->refcount, 1, memory_order_relaxed);
   assert(old >= 1);

   return shmem;
}
/* Release a reference; the backend's destroy op frees the shmem when the
 * last reference is dropped.
 *
 * Standard refcount ordering: the decrement uses memory_order_release so
 * prior accesses to the shmem by this thread are visible, and the thread
 * that observes the count reach zero issues an acquire fence before
 * destroying, pairing with all earlier release decrements.
 */
static inline void
vn_renderer_shmem_unref(struct vn_renderer *renderer,
                        struct vn_renderer_shmem *shmem)
{
   const int old =
      atomic_fetch_sub_explicit(&shmem->refcount, 1, memory_order_release);
   assert(old >= 1);

   if (old == 1) {
      /* pairs with the release decrements above */
      atomic_thread_fence(memory_order_acquire);
      renderer->shmem_ops.destroy(renderer, shmem);
   }
}
static inline VkResult
vn_renderer_bo_create_cpu(struct vn_renderer *renderer,
VkDeviceSize size,

View file

@ -13,6 +13,7 @@
#include <xf86drm.h>
#include "drm-uapi/virtgpu_drm.h"
#include "util/sparse_array.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"
@ -51,6 +52,11 @@ struct drm_virtgpu_context_init {
struct virtgpu;
/* A CPU shmem backed by a virtio-gpu guest blob resource. */
struct virtgpu_shmem {
   struct vn_renderer_shmem base; /* must be first: code casts between the
                                   * two pointer types */
   uint32_t gem_handle; /* GEM handle; also the shmem_array index */
};
struct virtgpu_bo {
struct vn_renderer_bo base;
struct virtgpu *gpu;
@ -100,6 +106,12 @@ struct virtgpu {
uint32_t version;
struct virgl_renderer_capset_venus data;
} capset;
/* note that we use gem_handle instead of res_id to index because
* res_id is monotonically increasing by default (see
* virtio_gpu_resource_id_get)
*/
struct util_sparse_array shmem_array;
};
#ifdef SIMULATE_SYNCOBJ
@ -1223,6 +1235,50 @@ virtgpu_bo_create(struct vn_renderer *renderer)
return &bo->base;
}
/* vn_renderer_shmem_ops::destroy for virtgpu: unmap the blob and close
 * the GEM handle.  The shmem_array slot itself is not reclaimed here. */
static void
virtgpu_shmem_destroy(struct vn_renderer *renderer,
                      struct vn_renderer_shmem *_shmem)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;
   struct virtgpu_shmem *shmem = (struct virtgpu_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);
   virtgpu_ioctl_gem_close(gpu, shmem->gem_handle);
}
/* vn_renderer_shmem_ops::create for virtgpu.
 *
 * Creates a mappable guest (CPU) blob resource and maps it.  Returns NULL
 * when blob creation or mapping fails; the blob is closed on the mapping
 * failure path so nothing leaks.
 */
static struct vn_renderer_shmem *
virtgpu_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct virtgpu *gpu = (struct virtgpu *)renderer;

   uint32_t res_id;
   const uint32_t gem_handle = virtgpu_ioctl_resource_create_blob(
      gpu, VIRTGPU_BLOB_MEM_GUEST, VIRTGPU_BLOB_FLAG_USE_MAPPABLE, size, 0,
      &res_id);
   if (!gem_handle)
      return NULL;

   void *map_ptr = virtgpu_ioctl_map(gpu, gem_handle, size);
   if (!map_ptr) {
      /* undo the blob creation before bailing */
      virtgpu_ioctl_gem_close(gpu, gem_handle);
      return NULL;
   }

   /* slots are indexed by gem_handle (see shmem_array) */
   struct virtgpu_shmem *shmem =
      util_sparse_array_get(&gpu->shmem_array, gem_handle);
   shmem->base.refcount = 1;
   shmem->base.res_id = res_id;
   shmem->base.mmap_size = size;
   shmem->base.mmap_ptr = map_ptr;
   shmem->gem_handle = gem_handle;

   return &shmem->base;
}
static VkResult
virtgpu_wait(struct vn_renderer *renderer,
const struct vn_renderer_wait *wait)
@ -1292,6 +1348,8 @@ virtgpu_destroy(struct vn_renderer *renderer,
if (gpu->fd >= 0)
close(gpu->fd);
util_sparse_array_finish(&gpu->shmem_array);
vk_free(alloc, gpu);
}
@ -1447,6 +1505,9 @@ virtgpu_open(struct virtgpu *gpu)
static VkResult
virtgpu_init(struct virtgpu *gpu)
{
util_sparse_array_init(&gpu->shmem_array, sizeof(struct virtgpu_shmem),
1024);
VkResult result = virtgpu_open(gpu);
if (result == VK_SUCCESS)
result = virtgpu_init_params(gpu);
@ -1464,6 +1525,9 @@ virtgpu_init(struct virtgpu *gpu)
gpu->base.ops.bo_create = virtgpu_bo_create;
gpu->base.ops.sync_create = virtgpu_sync_create;
gpu->base.shmem_ops.create = virtgpu_shmem_create;
gpu->base.shmem_ops.destroy = virtgpu_shmem_destroy;
return VK_SUCCESS;
}

View file

@ -16,6 +16,7 @@
#include <unistd.h>
#include "util/os_file.h"
#include "util/sparse_array.h"
#include "util/u_process.h"
#define VIRGL_RENDERER_UNSTABLE_APIS
#include "virtio-gpu/virglrenderer_hw.h"
@ -28,6 +29,10 @@
struct vtest;
/* A CPU shmem backed by a vtest blob resource. */
struct vtest_shmem {
   struct vn_renderer_shmem base; /* must be first: code casts between the
                                   * two pointer types */
};
struct vtest_bo {
struct vn_renderer_bo base;
struct vtest *vtest;
@ -61,6 +66,8 @@ struct vtest {
uint32_t version;
struct virgl_renderer_capset_venus data;
} capset;
struct util_sparse_array shmem_array;
};
static int
@ -822,6 +829,56 @@ vtest_bo_create(struct vn_renderer *renderer)
return &bo->base;
}
/* vn_renderer_shmem_ops::destroy for vtest: unmap the resource and tell
 * the vtest server to drop it.  sock_mutex serializes socket commands. */
static void
vtest_shmem_destroy(struct vn_renderer *renderer,
                    struct vn_renderer_shmem *_shmem)
{
   struct vtest *vtest = (struct vtest *)renderer;
   struct vtest_shmem *shmem = (struct vtest_shmem *)_shmem;

   munmap(shmem->base.mmap_ptr, shmem->base.mmap_size);

   mtx_lock(&vtest->sock_mutex);
   vtest_vcmd_resource_unref(vtest, shmem->base.res_id);
   mtx_unlock(&vtest->sock_mutex);
}
/* vn_renderer_shmem_ops::create for vtest.
 *
 * Asks the vtest server for a mappable guest (CPU) blob resource,
 * receives an fd for it, and mmaps that fd.  Returns NULL if the mapping
 * fails; the server-side resource is unref'ed on that path.
 *
 * NOTE(review): failure of vtest_vcmd_resource_create_blob is only caught
 * by the assert — in a release build a bad res_id/res_fd would be used
 * as-is.  Presumably the vcmd cannot fail in practice; confirm.
 */
static struct vn_renderer_shmem *
vtest_shmem_create(struct vn_renderer *renderer, size_t size)
{
   struct vtest *vtest = (struct vtest *)renderer;

   /* sock_mutex serializes commands on the vtest socket */
   mtx_lock(&vtest->sock_mutex);
   int res_fd;
   uint32_t res_id = vtest_vcmd_resource_create_blob(
      vtest, VCMD_BLOB_TYPE_GUEST, VCMD_BLOB_FLAG_MAPPABLE, size, 0, &res_fd);
   assert(res_id > 0 && res_fd >= 0);
   mtx_unlock(&vtest->sock_mutex);

   void *ptr =
      mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, res_fd, 0);
   /* the mapping keeps the memory alive; the fd is no longer needed */
   close(res_fd);
   if (ptr == MAP_FAILED) {
      /* tell the server to drop the resource again */
      mtx_lock(&vtest->sock_mutex);
      vtest_vcmd_resource_unref(vtest, res_id);
      mtx_unlock(&vtest->sock_mutex);
      return NULL;
   }

   /* slots are indexed by res_id (see shmem_array) */
   struct vtest_shmem *shmem =
      util_sparse_array_get(&vtest->shmem_array, res_id);
   *shmem = (struct vtest_shmem){
      .base = {
         .refcount = 1,
         .res_id = res_id,
         .mmap_size = size,
         .mmap_ptr = ptr,
      },
   };

   return &shmem->base;
}
static VkResult
sync_wait_poll(int fd, int poll_timeout)
{
@ -934,6 +991,7 @@ vtest_destroy(struct vn_renderer *renderer,
}
mtx_destroy(&vtest->sock_mutex);
util_sparse_array_finish(&vtest->shmem_array);
vk_free(alloc, vtest);
}
@ -989,6 +1047,9 @@ vtest_init_protocol_version(struct vtest *vtest)
static VkResult
vtest_init(struct vtest *vtest)
{
util_sparse_array_init(&vtest->shmem_array, sizeof(struct vtest_shmem),
1024);
mtx_init(&vtest->sock_mutex, mtx_plain);
vtest->sock_fd =
vtest_connect_socket(vtest->instance, VTEST_DEFAULT_SOCKET_NAME);
@ -1017,6 +1078,9 @@ vtest_init(struct vtest *vtest)
vtest->base.ops.bo_create = vtest_bo_create;
vtest->base.ops.sync_create = vtest_sync_create;
vtest->base.shmem_ops.create = vtest_shmem_create;
vtest->base.shmem_ops.destroy = vtest_shmem_destroy;
return VK_SUCCESS;
}