venus: put android wsi on the sub-optimal path

Simplify Android wsi to only use the performant path when fixed sync_fd
fence support is enabled. This removes hacky code and allows us to
deprecate a special ring wait code path as well.

Signed-off-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17975>
Yiwei Zhang 2022-08-12 18:31:13 +00:00 committed by Marge Bot
parent 64f9fbb9ea
commit b21e4a7990
5 changed files with 17 additions and 41 deletions
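
Note on the mechanism: the "performant path" hinges on the renderer-side
fence being exportable as an Android sync_fd. As a minimal sketch (the helper
name and error handling are illustrative, not Venus code), creating such a
fence with stock Vulkan looks like this; it mirrors what
vn_android_sync_fence_create() below does when `external` is true:

   #include <vulkan/vulkan.h>

   /* Illustrative only: create a fence whose payload can later be exported
    * as a sync_fd via vkGetFenceFdKHR(). */
   static VkResult
   create_sync_fd_exportable_fence(VkDevice dev, VkFence *out_fence)
   {
      const VkExportFenceCreateInfo export_info = {
         .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
         .pNext = NULL,
         .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
      };
      const VkFenceCreateInfo create_info = {
         .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
         .pNext = &export_info,
         .flags = 0,
      };
      return vkCreateFence(dev, &create_info, NULL, out_fence);
   }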

src/virtio/vulkan/vn_android.c

@@ -797,7 +797,7 @@ vn_AcquireImageANDROID(VkDevice device,
 }
 
 static VkResult
-vn_android_sync_fence_create(struct vn_queue *queue)
+vn_android_sync_fence_create(struct vn_queue *queue, bool external)
 {
    struct vn_device *dev = queue->device;
@@ -808,8 +808,7 @@ vn_android_sync_fence_create(struct vn_queue *queue)
    };
    const VkFenceCreateInfo create_info = {
       .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
-      .pNext =
-         dev->instance->experimental.globalFencing ? &export_info : NULL,
+      .pNext = external ? &export_info : NULL,
       .flags = 0,
    };
    return vn_CreateFence(vn_device_to_handle(dev), &create_info, NULL,
@@ -827,7 +826,9 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
    struct vn_queue *queue = vn_queue_from_handle(_queue);
    struct vn_device *dev = queue->device;
    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
-   const bool has_global_fencing = dev->instance->experimental.globalFencing;
+   const bool has_sync_fd_fence_export =
+      dev->physical_device->renderer_sync_fd_fence_features &
+      VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
    VkDevice device = vn_device_to_handle(dev);
    VkPipelineStageFlags local_stage_masks[8];
    VkPipelineStageFlags *stage_masks = local_stage_masks;
@@ -841,7 +842,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
 
    /* lazily create sync fence for Android wsi */
    if (queue->sync_fence == VK_NULL_HANDLE) {
-      result = vn_android_sync_fence_create(queue);
+      result = vn_android_sync_fence_create(queue, has_sync_fd_fence_export);
       if (result != VK_SUCCESS)
          return result;
    }
@@ -868,13 +869,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
       .signalSemaphoreCount = 0,
       .pSignalSemaphores = NULL,
    };
-   /* XXX When globalFencing is supported, our implementation is not able to
-    * reset the fence during vn_GetFenceFdKHR currently. Thus to ensure proper
-    * host driver behavior, we pass VK_NULL_HANDLE here.
-    */
-   result =
-      vn_QueueSubmit(_queue, 1, &submit_info,
-                     has_global_fencing ? VK_NULL_HANDLE : queue->sync_fence);
+   result = vn_QueueSubmit(_queue, 1, &submit_info, queue->sync_fence);
 
    if (stage_masks != local_stage_masks)
       vk_free(alloc, stage_masks);
@@ -882,15 +877,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
    if (result != VK_SUCCESS)
       return vn_error(dev->instance, result);
 
-   if (has_global_fencing) {
-      /* With globalFencing, the external queue fence was not passed in the
-       * above vn_QueueSubmit to hint it to be synchronous. So we need to wait
-       * for the ring here before vn_GetFenceFdKHR which is pure kernel ops.
-       * Skip ring wait if async queue submit is disabled.
-       */
-      if (!VN_PERF(NO_ASYNC_QUEUE_SUBMIT))
-         vn_instance_ring_wait(dev->instance);
-
+   if (has_sync_fd_fence_export) {
       const VkFenceGetFdInfoKHR fd_info = {
          .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
          .pNext = NULL,
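
On the performant path, the fence passed to vn_QueueSubmit() above is then
exported as a sync_fd for the Android framework. A hedged sketch of that
export step using the standard VK_KHR_external_fence_fd entry point (the
helper name is illustrative; real code obtains vkGetFenceFdKHR through
vkGetDeviceProcAddr):

   #include <vulkan/vulkan.h>

   /* Illustrative only: SYNC_FD export has copy transference, so per the
    * Vulkan spec a successful export also resets the fence payload,
    * letting the same fence be reused on the next frame. */
   static VkResult
   export_fence_sync_fd(VkDevice dev, VkFence fence, int *out_fd)
   {
      const VkFenceGetFdInfoKHR fd_info = {
         .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
         .pNext = NULL,
         .fence = fence,
         .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
      };
      return vkGetFenceFdKHR(dev, &fd_info, out_fd);
   }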

src/virtio/vulkan/vn_device.c

@@ -232,8 +232,16 @@ vn_device_fix_create_info(const struct vn_device *dev,
          VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
    }
 
-   if (app_exts->ANDROID_native_buffer)
+   if (app_exts->ANDROID_native_buffer) {
+      if (!app_exts->KHR_external_fence_fd &&
+          (physical_dev->renderer_sync_fd_fence_features &
+           VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT)) {
+         extra_exts[extra_count++] =
+            VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
+      }
+
       block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;
+   }
 
    if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
       block_exts[block_count++] =
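
How renderer_sync_fd_fence_features gets populated is outside this diff, but
the standard way to discover whether sync_fd fences are exportable is
vkGetPhysicalDeviceExternalFenceProperties() (core since Vulkan 1.1). A
sketch, with an illustrative helper name:

   #include <stdbool.h>
   #include <vulkan/vulkan.h>

   /* Illustrative only: query whether fences with the SYNC_FD handle type
    * are exportable on this physical device. */
   static bool
   sync_fd_fence_is_exportable(VkPhysicalDevice physical_dev)
   {
      const VkPhysicalDeviceExternalFenceInfo fence_info = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO,
         .pNext = NULL,
         .handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
      };
      VkExternalFenceProperties fence_props = {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES,
      };
      vkGetPhysicalDeviceExternalFenceProperties(physical_dev, &fence_info,
                                                 &fence_props);
      return fence_props.externalFenceFeatures &
             VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
   }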

src/virtio/vulkan/vn_instance.h

@@ -110,13 +110,6 @@ VkResult
 vn_instance_ring_submit(struct vn_instance *instance,
                         const struct vn_cs_encoder *cs);
 
-static inline void
-vn_instance_ring_wait(struct vn_instance *instance)
-{
-   struct vn_ring *ring = &instance->ring.ring;
-   vn_ring_wait_all(ring);
-}
-
 struct vn_instance_submit_command {
    /* empty command implies errors */
    struct vn_cs_encoder command;

src/virtio/vulkan/vn_ring.c

@@ -259,12 +259,3 @@ vn_ring_wait(const struct vn_ring *ring, uint32_t seqno)
 {
    vn_ring_wait_seqno(ring, seqno);
 }
-
-void
-vn_ring_wait_all(const struct vn_ring *ring)
-{
-   /* load from tail rather than ring->cur for atomicity */
-   const uint32_t pending_seqno =
-      atomic_load_explicit(ring->shared.tail, memory_order_relaxed);
-   vn_ring_wait(ring, pending_seqno);
-}
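
The deleted vn_ring_wait_all() captured a "drain everything pending" pattern:
take one atomic snapshot of the producer's progress, then wait for the
consumer to catch up to it. A self-contained sketch of that pattern with C11
atomics (the ring layout and names are illustrative, not Venus's actual
vn_ring; seqno wraparound is ignored for brevity):

   #include <stdatomic.h>
   #include <stdint.h>

   struct ring {
      const _Atomic uint32_t *head; /* consumer progress, renderer-written */
      const _Atomic uint32_t *tail; /* producer progress, driver-written */
   };

   static void
   ring_wait(const struct ring *ring, uint32_t seqno)
   {
      /* Spin until the consumer has executed up to seqno; real code would
       * back off or block on a doorbell instead of busy-waiting. */
      while (atomic_load_explicit(ring->head, memory_order_acquire) < seqno)
         ;
   }

   static void
   ring_wait_all(const struct ring *ring)
   {
      /* Snapshot the shared tail rather than a driver-side cached cursor,
       * so "everything pending" is captured in a single atomic read. */
      const uint32_t pending_seqno =
         atomic_load_explicit(ring->tail, memory_order_relaxed);
      ring_wait(ring, pending_seqno);
   }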

src/virtio/vulkan/vn_ring.h

@@ -99,7 +99,4 @@ vn_ring_submit(struct vn_ring *ring,
 void
 vn_ring_wait(const struct vn_ring *ring, uint32_t seqno);
 
-void
-vn_ring_wait_all(const struct vn_ring *ring);
-
 #endif /* VN_RING_H */