venus: prefer VIRTGPU_BLOB_MEM_HOST3D for shmems

They are logically contiguous in the host.  More importantly, they
enable host process isolation.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13800>
This commit is contained in:
Chia-I Wu 2021-12-07 15:04:16 -08:00 committed by Marge Bot
parent fe3a800ad3
commit b14dd3a866
5 changed files with 49 additions and 2 deletions

View file

@@ -32,6 +32,9 @@ struct virgl_renderer_capset_venus {
uint32_t vk_xml_version;
uint32_t vk_ext_command_serialization_spec_version;
uint32_t vk_mesa_venus_protocol_spec_version;
/* TODO revisit this when we bump up wire_format_version to 1 */
uint32_t supports_blob_id_0;
};
#endif

View file

@@ -293,6 +293,8 @@ vn_instance_init_renderer(struct vn_instance *instance)
instance->renderer_info.vk_ext_command_serialization_spec_version);
vn_log(instance, "VK_MESA_venus_protocol spec version %d",
instance->renderer_info.vk_mesa_venus_protocol_spec_version);
vn_log(instance, "supports blob id 0: %d",
instance->renderer_info.supports_blob_id_0);
}
return VK_SUCCESS;

View file

@@ -61,6 +61,7 @@ struct vn_renderer_info {
uint32_t vk_xml_version;
uint32_t vk_ext_command_serialization_spec_version;
uint32_t vk_mesa_venus_protocol_spec_version;
uint32_t supports_blob_id_0;
};
struct vn_renderer_submit_batch {

View file

@@ -101,6 +101,8 @@ struct virtgpu {
struct virgl_renderer_capset_venus data;
} capset;
uint32_t shmem_blob_mem;
/* note that we use gem_handle instead of res_id to index because
* res_id is monotonically increasing by default (see
* virtio_gpu_resource_id_get)
@@ -1286,7 +1288,7 @@ virtgpu_shmem_create(struct vn_renderer *renderer, size_t size)
uint32_t res_id;
uint32_t gem_handle = virtgpu_ioctl_resource_create_blob(
gpu, VIRTGPU_BLOB_MEM_GUEST, VIRTGPU_BLOB_FLAG_USE_MAPPABLE, size, 0,
gpu, gpu->shmem_blob_mem, VIRTGPU_BLOB_FLAG_USE_MAPPABLE, size, 0,
&res_id);
if (!gem_handle)
return NULL;
@@ -1370,6 +1372,7 @@ virtgpu_get_info(struct vn_renderer *renderer, struct vn_renderer_info *info)
capset->vk_ext_command_serialization_spec_version;
info->vk_mesa_venus_protocol_spec_version =
capset->vk_mesa_venus_protocol_spec_version;
info->supports_blob_id_0 = capset->supports_blob_id_0;
}
static void
@@ -1389,6 +1392,33 @@ virtgpu_destroy(struct vn_renderer *renderer,
vk_free(alloc, gpu);
}
static void
virtgpu_init_shmem_blob_mem(struct virtgpu *gpu)
{
/* VIRTGPU_BLOB_MEM_GUEST allocates from the guest system memory. They are
* logically contiguous in the guest but are sglists (iovecs) in the host.
* That makes them slower to process in the host. With host process
* isolation, it also becomes impossible for the host to access sglists
* directly.
*
* While there are ideas (and shipped code in some cases) such as creating
* udmabufs from sglists, or having a dedicated guest heap, it seems the
* easiest way is to reuse VIRTGPU_BLOB_MEM_HOST3D. That is, when the
* renderer sees a request to export a blob where
*
* - blob_mem is VIRTGPU_BLOB_MEM_HOST3D
* - blob_flags is VIRTGPU_BLOB_FLAG_USE_MAPPABLE
* - blob_id is 0
*
* it allocates a host shmem.
*
* TODO cache shmems as they are costly to set up and usually require syncs
*/
gpu->shmem_blob_mem = gpu->capset.data.supports_blob_id_0
? VIRTGPU_BLOB_MEM_HOST3D
: VIRTGPU_BLOB_MEM_GUEST;
}
static VkResult
virtgpu_init_context(struct virtgpu *gpu)
{
@@ -1557,6 +1587,8 @@ virtgpu_init(struct virtgpu *gpu)
if (result != VK_SUCCESS)
return result;
virtgpu_init_shmem_blob_mem(gpu);
gpu->base.ops.destroy = virtgpu_destroy;
gpu->base.ops.get_info = virtgpu_get_info;
gpu->base.ops.submit = virtgpu_submit;

View file

@@ -62,6 +62,8 @@ struct vtest {
struct virgl_renderer_capset_venus data;
} capset;
uint32_t shmem_blob_mem;
struct util_sparse_array shmem_array;
struct util_sparse_array bo_array;
};
@@ -796,7 +798,8 @@ vtest_shmem_create(struct vn_renderer *renderer, size_t size)
mtx_lock(&vtest->sock_mutex);
int res_fd;
uint32_t res_id = vtest_vcmd_resource_create_blob(
vtest, VCMD_BLOB_TYPE_GUEST, VCMD_BLOB_FLAG_MAPPABLE, size, 0, &res_fd);
vtest, vtest->shmem_blob_mem, VCMD_BLOB_FLAG_MAPPABLE, size, 0,
&res_fd);
assert(res_id > 0 && res_fd >= 0);
mtx_unlock(&vtest->sock_mutex);
@@ -922,6 +925,7 @@ vtest_get_info(struct vn_renderer *renderer, struct vn_renderer_info *info)
capset->vk_ext_command_serialization_spec_version;
info->vk_mesa_venus_protocol_spec_version =
capset->vk_mesa_venus_protocol_spec_version;
info->supports_blob_id_0 = capset->supports_blob_id_0;
}
static void
@@ -1016,6 +1020,11 @@ vtest_init(struct vtest *vtest)
if (result != VK_SUCCESS)
return result;
/* see virtgpu_init_shmem_blob_mem */
vtest->shmem_blob_mem = vtest->capset.data.supports_blob_id_0
? VCMD_BLOB_TYPE_HOST3D
: VCMD_BLOB_TYPE_GUEST;
vtest_vcmd_context_init(vtest, vtest->capset.id);
vtest->base.ops.destroy = vtest_destroy;