venus: mandate a few venus capsets long required before 1.0

Just a cleanup. The Venus driver can no longer work without those capset fields.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/27842>
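
The change applies the same pattern in every renderer backend: capset flags that used to be mirrored into struct vn_renderer_info and branched on at runtime become init-time asserts, and the fallback paths keyed on them go away. A minimal sketch of the before/after shape, with illustrative names rather than the actual Venus types:

#include <assert.h>
#include <stdint.h>

/* Illustrative stand-ins for the venus capset and renderer info. */
struct capset_sketch {
   uint32_t supports_multiple_timelines;
};

struct renderer_info_sketch {
   uint32_t supports_multiple_timelines; /* field removed by this commit */
};

/* Before: mirror the flag and keep a fallback path alive at each use. */
static void
init_info_before(struct renderer_info_sketch *info,
                 const struct capset_sketch *capset)
{
   info->supports_multiple_timelines = capset->supports_multiple_timelines;
}

/* After: the capability is mandatory, so check it once at init and drop
 * both the mirrored field and every runtime branch on it. */
static void
init_info_after(const struct capset_sketch *capset)
{
   assert(capset->supports_multiple_timelines);
}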

src/virtio/vulkan/vn_device.c

@@ -49,27 +49,20 @@ vn_queue_init(struct vn_device *dev,
    vn_cached_storage_init(&queue->storage, &dev->base.base.alloc);
 
-   VkDeviceQueueTimelineInfoMESA timeline_info;
-   const struct vn_renderer_info *renderer_info =
-      &dev->instance->renderer->info;
-   if (renderer_info->supports_multiple_timelines) {
-      int ring_idx = vn_instance_acquire_ring_idx(dev->instance);
-      if (ring_idx < 0) {
-         vn_log(dev->instance, "failed binding VkQueue to renderer timeline");
-         return VK_ERROR_INITIALIZATION_FAILED;
-      }
-      queue->ring_idx = (uint32_t)ring_idx;
-      timeline_info = (VkDeviceQueueTimelineInfoMESA){
-         .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA,
-         .ringIdx = queue->ring_idx,
-      };
-   }
+   const int ring_idx = vn_instance_acquire_ring_idx(dev->instance);
+   if (ring_idx < 0) {
+      vn_log(dev->instance, "failed binding VkQueue to renderer timeline");
+      return VK_ERROR_INITIALIZATION_FAILED;
+   }
+   queue->ring_idx = (uint32_t)ring_idx;
+
+   const VkDeviceQueueTimelineInfoMESA timeline_info = {
+      .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA,
+      .ringIdx = queue->ring_idx,
+   };
 
    const VkDeviceQueueInfo2 device_queue_info = {
       .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
-      .pNext =
-         renderer_info->supports_multiple_timelines ? &timeline_info : NULL,
+      .pNext = &timeline_info,
       .flags = queue_info->flags,
       .queueFamilyIndex = queue_info->queueFamilyIndex,
       .queueIndex = queue_index,
@@ -614,10 +607,8 @@ vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
     * Otherwise, another thread might reuse their ring_idx while they
     * are still bound to the queues in the renderer.
     */
-   if (dev->renderer->info.supports_multiple_timelines) {
-      for (uint32_t i = 0; i < dev->queue_count; i++) {
-         vn_instance_release_ring_idx(dev->instance, dev->queues[i].ring_idx);
-      }
-   }
+   for (uint32_t i = 0; i < dev->queue_count; i++) {
+      vn_instance_release_ring_idx(dev->instance, dev->queues[i].ring_idx);
+   }
 
    vk_free(alloc, dev->queues);
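
The comment in the hunk above is the invariant that shapes both sides of this change: a ring_idx acquired at queue init must stay reserved until the renderer has fully destroyed the queues bound to it. A minimal sketch of such an index pool, assuming a simple bitmask allocator with locking omitted (a sketch only; the real vn_instance_acquire_ring_idx/vn_instance_release_ring_idx implementation may differ):

#include <stdint.h>

/* Hypothetical ring-index pool; ring 0 is reserved for the CPU ring. */
struct ring_idx_pool {
   uint64_t used_mask;  /* bit N set => ring N is bound to a VkQueue */
   uint32_t ring_count;
};

static int
ring_idx_acquire(struct ring_idx_pool *pool)
{
   for (uint32_t i = 1; i < pool->ring_count; i++) {
      if (!(pool->used_mask & (1ull << i))) {
         pool->used_mask |= 1ull << i;
         return (int)i;
      }
   }
   return -1; /* caller fails queue init, as vn_queue_init does above */
}

static void
ring_idx_release(struct ring_idx_pool *pool, uint32_t idx)
{
   /* Safe only after the renderer-side device destruction completes;
    * releasing earlier would let another thread rebind the index while
    * the renderer still routes the old queue's work through it. */
   pool->used_mask &= ~(1ull << idx);
}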

src/virtio/vulkan/vn_instance.c

@@ -213,12 +213,6 @@ vn_instance_init_renderer(struct vn_instance *instance)
              renderer_info->vk_ext_command_serialization_spec_version);
       vn_log(instance, "VK_MESA_venus_protocol spec version %d",
              renderer_info->vk_mesa_venus_protocol_spec_version);
-      vn_log(instance, "supports blob id 0: %d",
-             renderer_info->supports_blob_id_0);
-      vn_log(instance, "allow_vk_wait_syncs: %d",
-             renderer_info->allow_vk_wait_syncs);
-      vn_log(instance, "supports_multiple_timelines: %d",
-             renderer_info->supports_multiple_timelines);
    }
 
    return VK_SUCCESS;

src/virtio/vulkan/vn_queue.c

@@ -1358,12 +1358,6 @@ vn_fence_feedback_init(struct vn_device *dev,
    if (fence->is_external)
       return VK_SUCCESS;
 
-   /* Fence feedback implementation relies on vkWaitForFences to cover the gap
-    * between feedback slot signaling and the actual fence signal operation.
-    */
-   if (unlikely(!dev->renderer->info.allow_vk_wait_syncs))
-      return VK_SUCCESS;
-
    if (VN_PERF(NO_FENCE_FEEDBACK))
       return VK_SUCCESS;

src/virtio/vulkan/vn_renderer.h

@@ -73,11 +73,9 @@ struct vn_renderer_info {
    uint32_t vk_xml_version;
    uint32_t vk_ext_command_serialization_spec_version;
    uint32_t vk_mesa_venus_protocol_spec_version;
-   uint32_t supports_blob_id_0;
    /* combined mask for vk_extension_mask1, 2,..., N */
    uint32_t vk_extension_mask[32];
-   uint32_t allow_vk_wait_syncs;
-   uint32_t supports_multiple_timelines;
 };
 
 struct vn_renderer_submit_batch {

src/virtio/vulkan/vn_renderer_virtgpu.c

@@ -161,10 +161,7 @@ sim_syncobj_create(struct virtgpu *gpu, bool signaled)
    util_idalloc_init(&sim.ida, 32);
 
    struct drm_virtgpu_execbuffer args = {
-      .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT |
-               (gpu->base.info.supports_multiple_timelines
-                   ? VIRTGPU_EXECBUF_RING_IDX
-                   : 0),
+      .flags = VIRTGPU_EXECBUF_RING_IDX | VIRTGPU_EXECBUF_FENCE_FD_OUT,
       .ring_idx = 0, /* CPU ring */
    };
    int ret = drmIoctl(gpu->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &args);
@@ -513,8 +510,6 @@ sim_submit_alloc_gem_handles(struct vn_renderer_bo *const *bos,
 static int
 sim_submit(struct virtgpu *gpu, const struct vn_renderer_submit *submit)
 {
-   const bool use_ring_idx = gpu->base.info.supports_multiple_timelines;
-
    /* TODO replace submit->bos by submit->gem_handles to avoid malloc/loop */
    uint32_t *gem_handles = NULL;
    if (submit->bo_count) {
@@ -531,13 +526,13 @@ sim_submit(struct virtgpu *gpu, const struct vn_renderer_submit *submit)
       const struct vn_renderer_submit_batch *batch = &submit->batches[i];
 
       struct drm_virtgpu_execbuffer args = {
-         .flags = (batch->sync_count ? VIRTGPU_EXECBUF_FENCE_FD_OUT : 0) |
-                  (use_ring_idx ? VIRTGPU_EXECBUF_RING_IDX : 0),
+         .flags = VIRTGPU_EXECBUF_RING_IDX |
+                  (batch->sync_count ? VIRTGPU_EXECBUF_FENCE_FD_OUT : 0),
          .size = batch->cs_size,
          .command = (uintptr_t)batch->cs_data,
          .bo_handles = (uintptr_t)gem_handles,
         .num_bo_handles = submit->bo_count,
-         .ring_idx = (use_ring_idx ? batch->ring_idx : 0),
+         .ring_idx = batch->ring_idx,
       };
 
       ret = drmIoctl(gpu->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &args);
@@ -1381,7 +1376,7 @@ virtgpu_init_renderer_info(struct virtgpu *gpu)
       capset->vk_ext_command_serialization_spec_version;
    info->vk_mesa_venus_protocol_spec_version =
       capset->vk_mesa_venus_protocol_spec_version;
-   info->supports_blob_id_0 = capset->supports_blob_id_0;
+   assert(capset->supports_blob_id_0);
 
    /* ensure vk_extension_mask is large enough to hold all capset masks */
    STATIC_ASSERT(sizeof(info->vk_extension_mask) >=
@@ -1389,9 +1384,9 @@ virtgpu_init_renderer_info(struct virtgpu *gpu)
    memcpy(info->vk_extension_mask, capset->vk_extension_mask1,
           sizeof(capset->vk_extension_mask1));
-   info->allow_vk_wait_syncs = capset->allow_vk_wait_syncs;
-   info->supports_multiple_timelines = capset->supports_multiple_timelines;
+   assert(capset->allow_vk_wait_syncs);
+   assert(capset->supports_multiple_timelines);
    info->max_timeline_count = gpu->max_timeline_count;
 
    if (gpu->bo_blob_mem == VIRTGPU_BLOB_MEM_GUEST_VRAM)
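
One detail worth noting in the virtgpu hunks: the kernel only honors .ring_idx when VIRTGPU_EXECBUF_RING_IDX is set in .flags, which is why the flag and the field used to be gated together. A minimal standalone submission sketch (error handling trimmed; assumes the virtgpu context was created with rings enabled via VIRTGPU_CONTEXT_PARAM_NUM_RINGS):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

/* Minimal virtgpu submission pinned to a ring; fd is an open
 * /dev/dri/renderD* node with a rings-capable context. */
static int
submit_on_ring(int fd, const void *cs, uint32_t cs_size, uint32_t ring_idx)
{
   struct drm_virtgpu_execbuffer args = {
      /* Without VIRTGPU_EXECBUF_RING_IDX the kernel ignores .ring_idx
       * and the submission lands on the context's default timeline. */
      .flags = VIRTGPU_EXECBUF_RING_IDX,
      .size = cs_size,
      .command = (uintptr_t)cs,
      .ring_idx = ring_idx,
   };
   return ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &args);
}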

src/virtio/vulkan/vn_renderer_vtest.c

@@ -536,15 +536,13 @@ vtest_vcmd_submit_cmd2(struct vtest *vtest,
    for (uint32_t i = 0; i < submit->batch_count; i++) {
       const struct vn_renderer_submit_batch *batch = &submit->batches[i];
       struct vcmd_submit_cmd2_batch dst = {
+         .flags = VCMD_SUBMIT_CMD2_FLAG_RING_IDX,
          .cmd_offset = cs_offset / sizeof(uint32_t),
          .cmd_size = batch->cs_size / sizeof(uint32_t),
          .sync_offset = sync_offset / sizeof(uint32_t),
          .sync_count = batch->sync_count,
+         .ring_idx = batch->ring_idx,
       };
-      if (vtest->base.info.supports_multiple_timelines) {
-         dst.flags = VCMD_SUBMIT_CMD2_FLAG_RING_IDX;
-         dst.ring_idx = batch->ring_idx;
-      }
       vtest_write(vtest, &dst, sizeof(dst));
 
       cs_offset += batch->cs_size;
@@ -948,7 +946,7 @@ vtest_init_renderer_info(struct vtest *vtest)
       capset->vk_ext_command_serialization_spec_version;
    info->vk_mesa_venus_protocol_spec_version =
       capset->vk_mesa_venus_protocol_spec_version;
-   info->supports_blob_id_0 = capset->supports_blob_id_0;
+   assert(capset->supports_blob_id_0);
 
    /* ensure vk_extension_mask is large enough to hold all capset masks */
    STATIC_ASSERT(sizeof(info->vk_extension_mask) >=
@@ -956,9 +954,9 @@ vtest_init_renderer_info(struct vtest *vtest)
    memcpy(info->vk_extension_mask, capset->vk_extension_mask1,
           sizeof(capset->vk_extension_mask1));
-   info->allow_vk_wait_syncs = capset->allow_vk_wait_syncs;
-   info->supports_multiple_timelines = capset->supports_multiple_timelines;
+   assert(capset->allow_vk_wait_syncs);
+   assert(capset->supports_multiple_timelines);
    info->max_timeline_count = vtest->max_timeline_count;
 }