Merge 'Fix issues with shared present modes' into 'main'

See merge request mesa/vulkan-wsi-layer!220
Iason Paraskevopoulos 2025-12-10 11:22:49 +00:00
commit fbe3b9e592
8 changed files with 156 additions and 135 deletions

View file

@@ -80,28 +80,42 @@ wsi_ext_present_timing::~wsi_ext_present_timing()
}
}
VkResult wsi_ext_present_timing::init_timing_resources()
VkResult wsi_ext_present_timing::init(util::unique_ptr<wsi::vulkan_time_domain> *domains, size_t domain_count)
{
for (size_t i = 0; i < domain_count; i++)
{
if (!get_swapchain_time_domains().add_time_domain(std::move(domains[i])))
{
WSI_LOG_ERROR("Failed to add a time domain.");
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
if (is_present_stage_supported(VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT))
{
if (!m_present_semaphore.try_resize(m_num_images))
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
for (auto &semaphore : m_present_semaphore)
{
semaphore = VK_NULL_HANDLE;
VkSemaphoreCreateInfo semaphore_info = {};
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
if (m_device.disp.CreateSemaphore(m_device.device, &semaphore_info, m_allocator.get_original_callbacks(),
&semaphore) != VK_SUCCESS)
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
TRY_LOG_CALL(m_queue_family_resources.init(m_device.get_best_queue_family_index(), m_num_images));
}
if (!m_scheduled_present_targets.try_resize(m_num_images))
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
if (!m_present_semaphore.try_resize(m_num_images))
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
for (auto &semaphore : m_present_semaphore)
{
semaphore = VK_NULL_HANDLE;
VkSemaphoreCreateInfo semaphore_info = {};
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
if (m_device.disp.CreateSemaphore(m_device.device, &semaphore_info, m_allocator.get_original_callbacks(),
&semaphore) != VK_SUCCESS)
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
TRY_LOG_CALL(m_queue_family_resources.init(m_device.get_best_queue_family_index(), m_num_images));
return VK_SUCCESS;
}
@@ -136,6 +150,11 @@ swapchain_presentation_entry *wsi_ext_present_timing::get_pending_stage_entry(ui
return nullptr;
}
bool wsi_ext_present_timing::is_present_stage_supported(VkPresentStageFlagBitsEXT present_stage)
{
return stages_supported() & present_stage;
}
VkResult wsi_ext_present_timing::write_pending_results()
{
for (auto &slot : m_queue)
@@ -263,7 +282,8 @@ VkResult wsi_ext_present_timing::add_presentation_query_entry(VkQueue queue, uin
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
if (present_stage_queries & VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT)
if ((present_stage_queries & VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT) &&
is_present_stage_supported(VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT))
{
TRY_LOG_CALL(queue_submit_queue_end_timing(m_device, queue, image_index));
}
@@ -322,6 +342,7 @@ swapchain_time_domains &wsi_ext_present_timing::get_swapchain_time_domains()
VkSemaphore wsi_ext_present_timing::get_image_present_semaphore(uint32_t image_index)
{
assert(is_present_stage_supported(VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT));
return m_present_semaphore[image_index];
}
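
The init() rework and the getter above are two halves of one contract: per-image semaphores exist only when VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT is part of stages_supported(). A minimal caller-side sketch, where `ext` is a hypothetical wsi_ext_present_timing pointer and `image_index` an already-acquired image:

/* Illustrative only; `ext` and `image_index` are assumed to be in scope. */
if (ext->is_present_stage_supported(VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT))
{
   /* init() created one semaphore per swapchain image, so this lookup is valid. */
   VkSemaphore timing_semaphore = ext->get_image_present_semaphore(image_index);
   /* ... add timing_semaphore to the signal semaphores of the present submission ... */
}
/* Otherwise no semaphore was created and queue-end timing submits are skipped entirely. */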

View file

@@ -446,15 +446,7 @@ public:
VkDevice device, uint32_t num_images, arg_types &&...args)
{
auto present_timing = allocator.make_unique<T>(allocator, device, num_images, std::forward<arg_types>(args)...);
for (size_t i = 0; i < domain_count; i++)
{
if (!present_timing->get_swapchain_time_domains().add_time_domain(std::move(domains[i])))
{
WSI_LOG_ERROR("Failed to add a time domain.");
return nullptr;
}
}
if (present_timing->init_timing_resources() != VK_SUCCESS)
if (present_timing->wsi_ext_present_timing::init(domains, domain_count) != VK_SUCCESS)
{
WSI_LOG_ERROR("Failed to initialize present timing.");
return nullptr;
@@ -645,6 +637,14 @@ public:
*/
virtual VkPresentStageFlagsEXT stages_supported() = 0;
/**
* @brief Check if a present stage is supported.
*
* @param present_stage Present stage to check.
* @return true if stage is supported, false otherwise.
*/
bool is_present_stage_supported(VkPresentStageFlagBitsEXT present_stage);
protected:
/**
* @brief User provided memory allocation callbacks.
@@ -721,11 +721,13 @@ private:
uint32_t image_index);
/**
* @brief Initialize resources for timing queries.
* @brief Initialize the present timing extension.
*
* @return VK_SUCCESS if the initialization is successful and error if otherwise.
* @param domains Array of time domains.
* @param domain_count Size of the @p domains array.
* @return VK_SUCCESS when the initialization is successful and error otherwise.
*/
VkResult init_timing_resources();
VkResult init(util::unique_ptr<wsi::vulkan_time_domain> *domains, size_t domain_count);
/**
* @brief Get all the pending results that are available to the queue.

View file

@@ -36,14 +36,17 @@
wsi_ext_present_timing_headless::wsi_ext_present_timing_headless(const util::allocator &allocator, VkDevice device,
uint32_t num_images,
std::optional<VkTimeDomainEXT> monotonic_domain)
std::optional<VkTimeDomainEXT> monotonic_domain,
bool is_swapchain_using_shared_present_mode)
: wsi::wsi_ext_present_timing(allocator, device, num_images)
, m_monotonic_domain(monotonic_domain)
, m_is_swapchain_using_shared_present_mode(is_swapchain_using_shared_present_mode)
{
}
util::unique_ptr<wsi_ext_present_timing_headless> wsi_ext_present_timing_headless::create(
const util::allocator &allocator, const VkDevice &device, uint32_t num_images)
const util::allocator &allocator, const VkDevice &device, uint32_t num_images,
bool is_swapchain_using_shared_present_mode)
{
auto &dev_data = layer::device_private_data::get(device);
@@ -73,33 +76,37 @@ util::unique_ptr<wsi_ext_present_timing_headless> wsi_ext_present_timing_headles
}
util::vector<util::unique_ptr<wsi::vulkan_time_domain>> domains(allocator);
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT, VK_TIME_DOMAIN_DEVICE_KHR)))
if (!is_swapchain_using_shared_present_mode)
{
return nullptr;
}
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT, VK_TIME_DOMAIN_DEVICE_KHR)))
{
return nullptr;
}
if (monotonic_domain)
{
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_REQUEST_DEQUEUED_BIT_EXT, *monotonic_domain)))
if (monotonic_domain)
{
return nullptr;
}
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_OUT_BIT_EXT, *monotonic_domain)))
{
return nullptr;
}
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_VISIBLE_BIT_EXT, *monotonic_domain)))
{
return nullptr;
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_REQUEST_DEQUEUED_BIT_EXT, *monotonic_domain)))
{
return nullptr;
}
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_OUT_BIT_EXT, *monotonic_domain)))
{
return nullptr;
}
if (!domains.try_push_back(allocator.make_unique<wsi::vulkan_time_domain>(
VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_VISIBLE_BIT_EXT, *monotonic_domain)))
{
return nullptr;
}
}
}
return wsi_ext_present_timing::create<wsi_ext_present_timing_headless>(allocator, domains.data(), domains.size(),
device, num_images, monotonic_domain);
device, num_images, monotonic_domain,
is_swapchain_using_shared_present_mode);
}
VkResult wsi_ext_present_timing_headless::get_swapchain_timing_properties(
@@ -166,9 +173,14 @@ void wsi_ext_present_timing_headless::set_first_pixel_visible_timestamp_for_last
VkPresentStageFlagsEXT wsi_ext_present_timing_headless::stages_supported()
{
VkPresentStageFlagsEXT stages =
VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT | VK_PRESENT_STAGE_REQUEST_DEQUEUED_BIT_EXT |
VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_OUT_BIT_EXT | VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_VISIBLE_BIT_EXT;
VkPresentStageFlagsEXT stages = {};
/* Do not expose any stage when using shared present modes. */
if (!m_is_swapchain_using_shared_present_mode)
{
stages |= VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT | VK_PRESENT_STAGE_REQUEST_DEQUEUED_BIT_EXT |
VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_OUT_BIT_EXT | VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_VISIBLE_BIT_EXT;
}
return stages;
}
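
With a shared present mode the headless backend now reports an empty stage mask, so every is_present_stage_supported() query returns false and the timing paths keyed on it are bypassed. A stand-alone restatement, with a hypothetical helper name and the flag values taken from this function:

/* Hypothetical restatement of stages_supported() for the headless backend. */
VkPresentStageFlagsEXT headless_stages(bool is_shared_present_mode)
{
   if (is_shared_present_mode)
   {
      /* No stages exposed: no per-image semaphores and no queue-end timing submits. */
      return 0;
   }
   return VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT | VK_PRESENT_STAGE_REQUEST_DEQUEUED_BIT_EXT |
          VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_OUT_BIT_EXT | VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_VISIBLE_BIT_EXT;
}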

View file

@@ -43,7 +43,8 @@ class wsi_ext_present_timing_headless : public wsi::wsi_ext_present_timing
{
public:
static util::unique_ptr<wsi_ext_present_timing_headless> create(const util::allocator &allocator,
const VkDevice &device, uint32_t num_images);
const VkDevice &device, uint32_t num_images,
bool is_swapchain_using_shared_present_mode);
VkResult get_swapchain_timing_properties(uint64_t &timing_properties_counter,
VkSwapchainTimingPropertiesEXT &timing_properties) override;
@@ -89,7 +90,8 @@ public:
private:
wsi_ext_present_timing_headless(const util::allocator &allocator, VkDevice device, uint32_t num_images,
std::optional<VkTimeDomainEXT> monotonic_domain);
std::optional<VkTimeDomainEXT> monotonic_domain,
bool is_swapchain_using_shared_present_mode);
/* Allow util::allocator to access the private constructor */
friend util::allocator;
@@ -101,6 +103,11 @@ private:
* Timestamp for the last VK_PRESENT_STAGE_IMAGE_FIRST_PIXEL_VISIBLE_BIT_EXT stage.
*/
std::optional<uint64_t> m_first_pixel_visible_timestamp_for_last_image;
/**
* @brief Indicates whether the swapchain is using shared present mode.
*/
bool m_is_swapchain_using_shared_present_mode;
};
#endif

View file

@@ -103,8 +103,8 @@ VkResult swapchain::add_required_extensions(VkDevice device, const VkSwapchainCr
bool swapchain_support_enabled = swapchain_create_info->flags & VK_SWAPCHAIN_CREATE_PRESENT_TIMING_BIT_EXT;
if (swapchain_support_enabled)
{
if (!add_swapchain_extension(
wsi_ext_present_timing_headless::create(m_allocator, device, swapchain_create_info->minImageCount)))
if (!add_swapchain_extension(wsi_ext_present_timing_headless::create(
m_allocator, device, swapchain_create_info->minImageCount, is_using_shared_present_mode())))
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
@@ -132,15 +132,8 @@ VkResult swapchain::init_platform(VkDevice device, const VkSwapchainCreateInfoKH
bool &use_presentation_thread)
{
UNUSED(device);
if (swapchain_create_info->presentMode == VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR)
{
use_presentation_thread = false;
}
else
{
use_presentation_thread = true;
}
use_presentation_thread = !is_using_shared_present_mode();
return init_image_factory(*swapchain_create_info);
}
@@ -161,7 +154,11 @@ VkResult swapchain::init_image_factory(const VkSwapchainCreateInfoKHR &swapchain
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
m_image_factory.init(std::move(image_handle_creator), std::move(backing_memory_creator), false, true);
/* On shared present modes we don't want to wait on the present fence, as the image is re-used and never released. */
bool wait_on_present_fence = !is_using_shared_present_mode();
m_image_factory.init(std::move(image_handle_creator), std::move(backing_memory_creator), false,
wait_on_present_fence);
return VK_SUCCESS;
}
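
Both platform decisions in this file reduce to the same predicate. A hedged restatement of the intent, using the names from the hunks above:

/* Sketch only: the real code calls is_using_shared_present_mode() in each function. */
const bool shared_present = is_using_shared_present_mode();
use_presentation_thread = !shared_present;    /* shared present images are presented from the calling thread */
bool wait_on_present_fence = !shared_present; /* the image is re-used and never released, so never wait on its fence */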

View file

@@ -54,6 +54,12 @@
namespace wsi
{
bool swapchain_base::is_using_shared_present_mode()
{
return (m_present_mode == VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR ||
m_present_mode == VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR);
}
void swapchain_base::page_flip_thread()
{
auto &sc_images = m_swapchain_images;
@@ -69,40 +75,20 @@ void swapchain_base::page_flip_thread()
while (m_page_flip_thread_run)
{
pending_present_request submit_info{};
if (m_present_mode == VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR)
/* Waiting for the page_flip_semaphore which will be signalled once there is an
* image to display.*/
{
/* In continuous mode the application will only make one presentation request,
* therefore the page flip semaphore will only be signalled once. */
if (m_first_present)
VkResult wait_res = m_page_flip_semaphore.wait(SEMAPHORE_TIMEOUT);
if (wait_res == VK_TIMEOUT)
{
VkResult wait_res = m_page_flip_semaphore.wait(SEMAPHORE_TIMEOUT);
if (wait_res == VK_TIMEOUT)
{
/* Image is not ready yet. */
continue;
}
assert(wait_res == VK_SUCCESS);
/* Image is not ready yet. */
continue;
}
/* For continuous mode there will be only one image in the swapchain.
* This image will always be used, and there is no pending state in this case. */
submit_info.image_index = 0;
}
else
{
/* Waiting for the page_flip_semaphore which will be signalled once there is an
* image to display.*/
{
VkResult wait_res = m_page_flip_semaphore.wait(SEMAPHORE_TIMEOUT);
if (wait_res == VK_TIMEOUT)
{
/* Image is not ready yet. */
continue;
}
}
/* We want to present the oldest queued for present image from our present queue,
* which we can find at the sc->pending_buffer_pool.head index. */
/* We want to present the oldest queued for present image from our present queue,
* which we can find at the sc->pending_buffer_pool.head index. */
{
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
@@ -199,20 +185,12 @@ void swapchain_base::unpresent_image(uint32_t presented_index)
abort();
}
if (m_present_mode == VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR ||
m_present_mode == VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR)
{
m_swapchain_images[presented_index].set_status(swapchain_image::ACQUIRED);
}
else
{
m_swapchain_images[presented_index].set_status(swapchain_image::FREE);
}
const bool is_shared_present = is_using_shared_present_mode();
m_swapchain_images[presented_index].set_status(is_shared_present ? swapchain_image::ACQUIRED :
swapchain_image::FREE);
image_status_lock.unlock();
if (m_present_mode != VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR &&
m_present_mode != VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR)
if (!is_shared_present)
{
m_free_image_semaphore.post();
}
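
The checks above, the reworked page flip wait and the queue_present gating below all funnel through the new helper. A self-contained sketch of the same predicate, using only core Vulkan enums:

#include <vulkan/vulkan.h>

/* Stand-alone equivalent of swapchain_base::is_using_shared_present_mode(). */
static bool is_shared_present_mode(VkPresentModeKHR mode)
{
   return mode == VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR ||
          mode == VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR;
}

/* In shared modes the single image stays owned by the application: after a flip it returns to
 * ACQUIRED and the free-image semaphore is not posted; otherwise it becomes FREE again. */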
@@ -635,13 +613,6 @@ VkResult swapchain_base::queue_present(VkQueue queue, const VkPresentInfoKHR *pr
sem_count = present_info->waitSemaphoreCount;
}
if (!m_page_flip_thread_run)
{
/* If the page flip thread is not running, we need to wait for any present payload here, before setting a new present payload. */
constexpr uint64_t WAIT_PRESENT_TIMEOUT = 1000000000; /* 1 second */
TRY_LOG_CALL(m_swapchain_images[submit_info.pending_present.image_index].wait_present(WAIT_PRESENT_TIMEOUT));
}
void *submission_pnext = nullptr;
uint32_t count_signal_semaphores = 0;
std::optional<VkFrameBoundaryEXT> frame_boundary;
@@ -668,8 +639,11 @@ VkResult swapchain_base::queue_present(VkQueue queue, const VkPresentInfoKHR *pr
(present_timing_info->presentStageQueries & VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT))
{
auto *ext_present_timing = get_swapchain_extension<wsi::wsi_ext_present_timing>(true);
signal_semaphores[count_signal_semaphores++] =
ext_present_timing->get_image_present_semaphore(submit_info.pending_present.image_index);
if (ext_present_timing->is_present_stage_supported(VK_PRESENT_STAGE_QUEUE_OPERATIONS_END_BIT_EXT))
{
signal_semaphores[count_signal_semaphores++] =
ext_present_timing->get_image_present_semaphore(submit_info.pending_present.image_index);
}
}
#endif
queue_submit_semaphores semaphores = {

View file

@@ -592,6 +592,13 @@ protected:
}
}
/**
* @brief Check if the swapchain is using a shared present mode.
*
* @return true if using a shared present mode, false otherwise.
*/
bool is_using_shared_present_mode();
private:
util::mutex m_image_acquire_lock;
/**

View file

@@ -54,7 +54,7 @@ std::variant<VkResult, swapchain_image> swapchain_image::create(create_args &cre
return result;
}
util::unique_ptr<fence_sync> present_fence;
util::unique_ptr<fence_sync> present_fence{ nullptr };
if (create_args.m_exportable_fence)
{
auto present_fence_opt = sync_fd_fence_sync::create(*device_data);
@@ -70,23 +70,20 @@ std::variant<VkResult, swapchain_image> swapchain_image::create(create_args &cre
}
else
{
auto present_fence_opt = fence_sync::create(*device_data);
if (!present_fence_opt.has_value())
/* If we are neither exporting a fence nor waiting on one, skip creating it. */
if (create_args.m_wait_on_present_fence)
{
device_data->disp.DestroySemaphore(device, present_semaphore,
create_args.m_allocator.get_original_callbacks());
device_data->disp.DestroySemaphore(device, present_fence_wait,
create_args.m_allocator.get_original_callbacks());
return VK_ERROR_OUT_OF_HOST_MEMORY;
auto present_fence_opt = fence_sync::create(*device_data);
if (!present_fence_opt.has_value())
{
device_data->disp.DestroySemaphore(device, present_semaphore,
create_args.m_allocator.get_original_callbacks());
device_data->disp.DestroySemaphore(device, present_fence_wait,
create_args.m_allocator.get_original_callbacks());
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
present_fence = create_args.m_allocator.make_unique<fence_sync>(std::move(present_fence_opt.value()));
}
present_fence = create_args.m_allocator.make_unique<fence_sync>(std::move(present_fence_opt.value()));
}
if (present_fence == nullptr)
{
device_data->disp.DestroySemaphore(device, present_semaphore, create_args.m_allocator.get_original_callbacks());
device_data->disp.DestroySemaphore(device, present_fence_wait, create_args.m_allocator.get_original_callbacks());
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
return swapchain_image(create_args.m_image_handle, present_semaphore, present_fence_wait, std::move(present_fence),
@@ -144,12 +141,16 @@ VkResult swapchain_image::bind(const VkBindImageMemoryInfo *bind_image_mem_info)
VkResult swapchain_image::set_present_payload(VkQueue queue, const queue_submit_semaphores &semaphores,
const void *submission_pnext)
{
if (!m_present_fence)
{
return sync_queue_submit(*m_device_data, queue, VK_NULL_HANDLE, semaphores, submission_pnext);
}
return m_present_fence->set_payload(queue, semaphores, submission_pnext);
}
VkResult swapchain_image::wait_present(uint64_t timeout_ns)
{
if (m_wait_on_present_fence)
if (m_wait_on_present_fence && m_present_fence)
{
return m_present_fence->wait_payload(timeout_ns);
}
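
With the fence now optional, swapchain_image ends up with three creation paths; a hedged restatement of the selection logic, keeping only the member names that appear in the hunks above:

/* Sketch of the fence choice made in swapchain_image::create(). */
if (create_args.m_exportable_fence)
{
   /* sync_fd_fence_sync: a fence whose payload can be exported is required. */
}
else if (create_args.m_wait_on_present_fence)
{
   /* fence_sync: the swapchain waits on this fence before the image is re-used. */
}
else
{
   /* Shared present mode: no fence is created, set_present_payload() falls back to
    * sync_queue_submit() and wait_present() becomes a no-op. */
}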