diff --git a/src/virtio/vulkan/vn_android.c b/src/virtio/vulkan/vn_android.c
index f1ba3ce68f1..53ce3ecd6e6 100644
--- a/src/virtio/vulkan/vn_android.c
+++ b/src/virtio/vulkan/vn_android.c
@@ -797,7 +797,7 @@ vn_AcquireImageANDROID(VkDevice device,
 }
 
 static VkResult
-vn_android_sync_fence_create(struct vn_queue *queue)
+vn_android_sync_fence_create(struct vn_queue *queue, bool external)
 {
    struct vn_device *dev = queue->device;
 
@@ -808,8 +808,7 @@ vn_android_sync_fence_create(struct vn_queue *queue)
    };
    const VkFenceCreateInfo create_info = {
       .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
-      .pNext =
-         dev->instance->experimental.globalFencing ? &export_info : NULL,
+      .pNext = external ? &export_info : NULL,
       .flags = 0,
    };
    return vn_CreateFence(vn_device_to_handle(dev), &create_info, NULL,
@@ -827,7 +826,9 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
    struct vn_queue *queue = vn_queue_from_handle(_queue);
    struct vn_device *dev = queue->device;
    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
-   const bool has_global_fencing = dev->instance->experimental.globalFencing;
+   const bool has_sync_fd_fence_export =
+      dev->physical_device->renderer_sync_fd_fence_features &
+      VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT;
    VkDevice device = vn_device_to_handle(dev);
    VkPipelineStageFlags local_stage_masks[8];
    VkPipelineStageFlags *stage_masks = local_stage_masks;
@@ -841,7 +842,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
 
    /* lazily create sync fence for Android wsi */
    if (queue->sync_fence == VK_NULL_HANDLE) {
-      result = vn_android_sync_fence_create(queue);
+      result = vn_android_sync_fence_create(queue, has_sync_fd_fence_export);
       if (result != VK_SUCCESS)
         return result;
    }
@@ -868,13 +869,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
       .signalSemaphoreCount = 0,
       .pSignalSemaphores = NULL,
    };
-   /* XXX When globalFencing is supported, our implementation is not able to
-    * reset the fence during vn_GetFenceFdKHR currently. Thus to ensure proper
-    * host driver behavior, we pass VK_NULL_HANDLE here.
-    */
-   result =
-      vn_QueueSubmit(_queue, 1, &submit_info,
-                     has_global_fencing ? VK_NULL_HANDLE : queue->sync_fence);
+   result = vn_QueueSubmit(_queue, 1, &submit_info, queue->sync_fence);
 
    if (stage_masks != local_stage_masks)
       vk_free(alloc, stage_masks);
@@ -882,15 +877,7 @@ vn_QueueSignalReleaseImageANDROID(VkQueue _queue,
    if (result != VK_SUCCESS)
       return vn_error(dev->instance, result);
 
-   if (has_global_fencing) {
-      /* With globalFencing, the external queue fence was not passed in the
-       * above vn_QueueSubmit to hint it to be synchronous. So we need to wait
-       * for the ring here before vn_GetFenceFdKHR which is pure kernel ops.
-       * Skip ring wait if async queue submit is disabled.
-       */
-      if (!VN_PERF(NO_ASYNC_QUEUE_SUBMIT))
-         vn_instance_ring_wait(dev->instance);
-
+   if (has_sync_fd_fence_export) {
       const VkFenceGetFdInfoKHR fd_info = {
         .sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR,
         .pNext = NULL,
diff --git a/src/virtio/vulkan/vn_device.c b/src/virtio/vulkan/vn_device.c
index faa06512279..ffa27e027b3 100644
--- a/src/virtio/vulkan/vn_device.c
+++ b/src/virtio/vulkan/vn_device.c
@@ -232,8 +232,16 @@ vn_device_fix_create_info(const struct vn_device *dev,
          VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
    }
 
-   if (app_exts->ANDROID_native_buffer)
+   if (app_exts->ANDROID_native_buffer) {
+      if (!app_exts->KHR_external_fence_fd &&
+          (physical_dev->renderer_sync_fd_fence_features &
+           VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT)) {
+         extra_exts[extra_count++] =
+            VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
+      }
+
       block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;
+   }
 
    if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
       block_exts[block_count++] =
diff --git a/src/virtio/vulkan/vn_instance.h b/src/virtio/vulkan/vn_instance.h
index 78cf1ed0b42..f9d6ca5e657 100644
--- a/src/virtio/vulkan/vn_instance.h
+++ b/src/virtio/vulkan/vn_instance.h
@@ -110,13 +110,6 @@ VkResult
 vn_instance_ring_submit(struct vn_instance *instance,
                         const struct vn_cs_encoder *cs);
 
-static inline void
-vn_instance_ring_wait(struct vn_instance *instance)
-{
-   struct vn_ring *ring = &instance->ring.ring;
-   vn_ring_wait_all(ring);
-}
-
 struct vn_instance_submit_command {
    /* empty command implies errors */
    struct vn_cs_encoder command;
diff --git a/src/virtio/vulkan/vn_ring.c b/src/virtio/vulkan/vn_ring.c
index 39f77925780..6ed64a7b7dc 100644
--- a/src/virtio/vulkan/vn_ring.c
+++ b/src/virtio/vulkan/vn_ring.c
@@ -259,12 +259,3 @@ vn_ring_wait(const struct vn_ring *ring, uint32_t seqno)
 {
    vn_ring_wait_seqno(ring, seqno);
 }
-
-void
-vn_ring_wait_all(const struct vn_ring *ring)
-{
-   /* load from tail rather than ring->cur for atomicity */
-   const uint32_t pending_seqno =
-      atomic_load_explicit(ring->shared.tail, memory_order_relaxed);
-   vn_ring_wait(ring, pending_seqno);
-}
diff --git a/src/virtio/vulkan/vn_ring.h b/src/virtio/vulkan/vn_ring.h
index caf6c8e89b0..033203f6f8c 100644
--- a/src/virtio/vulkan/vn_ring.h
+++ b/src/virtio/vulkan/vn_ring.h
@@ -99,7 +99,4 @@ vn_ring_submit(struct vn_ring *ring,
 void
 vn_ring_wait(const struct vn_ring *ring, uint32_t seqno);
 
-void
-vn_ring_wait_all(const struct vn_ring *ring);
-
 #endif /* VN_RING_H */