Adding exception-safe custom mutex to the WSI layer

Introduce util::mutex, util::recursive_mutex
and util::unique_lock; switch WSI layer call sites to it.
Locks are acquired through the exception-neutral lock()/try_lock() wrappers, so no std::system_error can leak to callers.

Signed-off-by: Maged Elnaggar <maged.elnaggar@arm.com>
Change-Id: Ide9ef4318be7cc47e9577059695cc298f8b8e579
This commit is contained in:
Maged Elnaggar 2025-09-05 15:06:38 +00:00 committed by Rosen Zhelev
parent ef9b134054
commit 78b21da4ef
20 changed files with 599 additions and 59 deletions

View file

@ -311,6 +311,7 @@ add_library(${PROJECT_NAME} SHARED
layer/present_wait_api.cpp
util/timed_semaphore.cpp
util/custom_allocator.cpp
util/custom_mutex.cpp
util/extension_list.cpp
util/log.cpp
util/format_modifiers.cpp

View file

@ -32,11 +32,12 @@
#include "util/log.hpp"
#include "util/helpers.hpp"
#include "util/macros.hpp"
#include "util/custom_mutex.hpp"
namespace layer
{
static std::mutex g_data_lock;
static util::mutex g_data_lock;
/* The dictionaries below use plain pointers to store the instance/device private data objects.
* This means that these objects are leaked if the application terminates without calling vkDestroyInstance
@ -217,7 +218,12 @@ VkResult instance_private_data::associate(VkInstance instance, instance_dispatch
}
const auto key = get_key(instance);
scoped_mutex lock(g_data_lock);
util::unique_lock<util::mutex> lock(g_data_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire instance data lock in associate.");
return VK_ERROR_INITIALIZATION_FAILED;
}
auto it = g_instance_data.find(key);
if (it != g_instance_data.end())
@ -248,7 +254,13 @@ void instance_private_data::disassociate(VkInstance instance)
assert(instance != VK_NULL_HANDLE);
instance_private_data *instance_data = nullptr;
{
scoped_mutex lock(g_data_lock);
util::unique_lock<util::mutex> lock(g_data_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire instance data lock in disassociate.");
abort();
}
auto it = g_instance_data.find(get_key(instance));
if (it == g_instance_data.end())
{
@ -266,7 +278,12 @@ void instance_private_data::disassociate(VkInstance instance)
template <typename dispatchable_type>
static instance_private_data &get_instance_private_data(dispatchable_type dispatchable_object)
{
scoped_mutex lock(g_data_lock);
util::unique_lock<util::mutex> lock(g_data_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire instance data lock in get_instance_private_data.");
abort();
}
return *g_instance_data.at(get_key(dispatchable_object));
}
@ -282,7 +299,12 @@ instance_private_data &instance_private_data::get(VkPhysicalDevice phys_dev)
VkResult instance_private_data::add_surface(VkSurfaceKHR vk_surface, util::unique_ptr<wsi::surface> &wsi_surface)
{
scoped_mutex lock(surfaces_lock);
util::unique_lock<util::mutex> lock(surfaces_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire surfaces lock in add_surface.");
return VK_ERROR_INITIALIZATION_FAILED;
}
auto it = surfaces.find(vk_surface);
if (it != surfaces.end())
@ -305,7 +327,13 @@ VkResult instance_private_data::add_surface(VkSurfaceKHR vk_surface, util::uniqu
wsi::surface *instance_private_data::get_surface(VkSurfaceKHR vk_surface)
{
scoped_mutex lock(surfaces_lock);
util::unique_lock<util::mutex> lock(surfaces_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire surfaces lock in get_surface.");
abort();
}
auto it = surfaces.find(vk_surface);
if (it != surfaces.end())
{
@ -317,7 +345,13 @@ wsi::surface *instance_private_data::get_surface(VkSurfaceKHR vk_surface)
void instance_private_data::remove_surface(VkSurfaceKHR vk_surface, const util::allocator &alloc)
{
scoped_mutex lock(surfaces_lock);
util::unique_lock<util::mutex> lock(surfaces_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire surfaces lock in remove_surface.");
abort();
}
auto it = surfaces.find(vk_surface);
if (it != surfaces.end())
{
@ -331,7 +365,13 @@ void instance_private_data::remove_surface(VkSurfaceKHR vk_surface, const util::
bool instance_private_data::does_layer_support_surface(VkSurfaceKHR surface)
{
scoped_mutex lock(surfaces_lock);
util::unique_lock<util::mutex> lock(surfaces_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire surfaces lock in does_layer_support_surface.");
abort();
}
auto it = surfaces.find(surface);
return it != surfaces.end();
}
@ -456,7 +496,12 @@ VkResult device_private_data::associate(VkDevice dev, instance_private_data &ins
}
const auto key = get_key(dev);
scoped_mutex lock(g_data_lock);
util::unique_lock<util::mutex> lock(g_data_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire device data lock in associate.");
return VK_ERROR_INITIALIZATION_FAILED;
}
auto it = g_device_data.find(key);
if (it != g_device_data.end())
@ -486,7 +531,13 @@ void device_private_data::disassociate(VkDevice dev)
assert(dev != VK_NULL_HANDLE);
device_private_data *device_data = nullptr;
{
scoped_mutex lock(g_data_lock);
util::unique_lock<util::mutex> lock(g_data_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire device data lock in disassociate.");
abort();
}
auto it = g_device_data.find(get_key(dev));
if (it == g_device_data.end())
{
@ -504,7 +555,12 @@ void device_private_data::disassociate(VkDevice dev)
template <typename dispatchable_type>
static device_private_data &get_device_private_data(dispatchable_type dispatchable_object)
{
scoped_mutex lock(g_data_lock);
util::unique_lock<util::mutex> lock(g_data_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire device data lock in get_device_private_data.");
abort();
}
return *g_device_data.at(get_key(dispatchable_object));
}
@ -521,14 +577,26 @@ device_private_data &device_private_data::get(VkQueue queue)
VkResult device_private_data::add_layer_swapchain(VkSwapchainKHR swapchain)
{
scoped_mutex lock(swapchains_lock);
util::unique_lock<util::mutex> lock(swapchains_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire swapchains lock in add_layer_swapchain.");
return VK_ERROR_INITIALIZATION_FAILED;
}
auto result = swapchains.try_insert(swapchain);
return result.has_value() ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
}
void device_private_data::remove_layer_swapchain(VkSwapchainKHR swapchain)
{
scoped_mutex lock(swapchains_lock);
util::unique_lock<util::mutex> lock(swapchains_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire swapchains lock in remove_layer_swapchain.");
abort();
}
auto it = swapchains.find(swapchain);
if (it != swapchains.end())
{
@ -538,7 +606,13 @@ void device_private_data::remove_layer_swapchain(VkSwapchainKHR swapchain)
bool device_private_data::layer_owns_all_swapchains(const VkSwapchainKHR *swapchain, uint32_t swapchain_count) const
{
scoped_mutex lock(swapchains_lock);
util::unique_lock<util::mutex> lock(swapchains_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire swapchains lock in layer_owns_all_swapchains.");
abort();
}
for (uint32_t i = 0; i < swapchain_count; i++)
{
if (swapchains.find(swapchain[i]) == swapchains.end())

View file

@ -28,6 +28,7 @@
#include <util/platform_set.hpp>
#include <util/custom_allocator.hpp>
#include <util/custom_mutex.hpp>
#include <util/unordered_set.hpp>
#include <util/unordered_map.hpp>
#include <util/extension_list.hpp>
@ -40,10 +41,8 @@
#include <memory>
#include <unordered_set>
#include <cassert>
#include <mutex>
#include <limits>
#include <cstring>
using scoped_mutex = std::lock_guard<std::mutex>;
/** Forward declare stored objects */
namespace wsi
@ -770,7 +769,7 @@ private:
/**
* @brief Lock for thread safe access to @ref surfaces
*/
std::mutex surfaces_lock;
util::mutex surfaces_lock;
/**
* @brief List with the names of the enabled instance extensions.
@ -1026,7 +1025,7 @@ private:
const util::allocator allocator;
util::unordered_set<VkSwapchainKHR> swapchains;
mutable std::mutex swapchains_lock;
mutable util::mutex swapchains_lock;
/**
* @brief List with the names of the enabled device extensions.

106
util/custom_mutex.cpp Normal file
View file

@ -0,0 +1,106 @@
/*
* Copyright (c) 2025 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
 * @file custom_mutex.cpp
 * @brief Implementation of the exception-neutral mutex helpers declared in
 * @ref custom_mutex.hpp.
 */
#include "custom_mutex.hpp"
namespace util
{
/**
 * @brief Blocking acquire of the wrapped std::mutex.
 *
 * Any std::system_error thrown by the STL is absorbed here so callers never
 * need a try/catch of their own; failure is signalled via the return value.
 *
 * @return true when the lock was taken, false on an underlying OS error.
 */
bool mutex::lock() noexcept
{
   bool acquired = false;
   try
   {
      m_mtx.lock();
      acquired = true;
   }
   catch (const std::system_error &)
   {
      /* Deliberately swallowed: the caller sees a plain boolean failure. */
   }
   return acquired;
}
/**
 * @brief Non-blocking attempt to acquire the wrapped std::mutex.
 *
 * @return true when the lock was taken; false when it was already held or
 *         the STL reported a std::system_error.
 */
bool mutex::try_lock() noexcept
{
   bool acquired = false;
   try
   {
      acquired = m_mtx.try_lock();
   }
   catch (const std::system_error &)
   {
      /* Report the error as an ordinary failed attempt. */
   }
   return acquired;
}
/* Release the mutex. Precondition: the calling thread owns it (the guard
 * classes guarantee this); std::mutex::unlock does not throw in that case. */
void mutex::unlock() noexcept
{
m_mtx.unlock();
}
/* Expose the wrapped std::mutex, e.g. for std::condition_variable interop.
 * Direct use bypasses the exception-neutral contract of this wrapper. */
std::mutex &mutex::native() noexcept
{
return m_mtx;
}
/**
 * @brief Blocking acquire of the wrapped std::recursive_mutex.
 *
 * Mirrors util::mutex::lock(): any std::system_error from the STL is caught
 * and turned into a boolean failure instead of propagating.
 *
 * @return true when (another level of) ownership was taken, false on error.
 */
bool recursive_mutex::lock() noexcept
{
   bool acquired = false;
   try
   {
      m_mtx.lock();
      acquired = true;
   }
   catch (const std::system_error &)
   {
      /* Deliberately swallowed: the caller sees a plain boolean failure. */
   }
   return acquired;
}
/**
 * @brief Non-blocking attempt to acquire the wrapped std::recursive_mutex.
 *
 * @return true when ownership (or another level of it) was taken; false when
 *         the attempt failed or the STL reported a std::system_error.
 */
bool recursive_mutex::try_lock() noexcept
{
   bool acquired = false;
   try
   {
      acquired = m_mtx.try_lock();
   }
   catch (const std::system_error &)
   {
      /* Report the error as an ordinary failed attempt. */
   }
   return acquired;
}
/* Release one ownership level of the recursive mutex. Precondition: the
 * calling thread owns it; std::recursive_mutex::unlock does not throw then. */
void recursive_mutex::unlock() noexcept
{
m_mtx.unlock();
}
/* Expose the wrapped std::recursive_mutex for callers that need the raw STL
 * type. Direct use bypasses the exception-neutral contract of this wrapper. */
std::recursive_mutex &recursive_mutex::native() noexcept
{
return m_mtx;
}
} /* namespace util */

229
util/custom_mutex.hpp Normal file
View file

@ -0,0 +1,229 @@
/*
* Copyright (c) 2025 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
 * @file custom_mutex.hpp
 * @brief Exception-neutral mutex wrappers and RAII guards for the Vulkan WSI layer.
 *
 * Provides the following helpers. All locking is exception-neutral: the
 * wrappers catch any `std::system_error` from the STL and expose boolean
 * success/failure. Immediate guards use blocking `lock()` by default;
 * `try_lock()` remains available for deferred cases.
 * - util::mutex           wraps `std::mutex`
 * - util::recursive_mutex wraps `std::recursive_mutex`
 * - util::unique_lock     flexible RAII guard (defer/adopt/retry)
 */
#pragma once
#include <mutex>
#include <system_error>
#include <utility>
#include <cassert>
#include "helpers.hpp"
namespace util
{
/**
 * @class util::mutex
 * @brief Exception-neutral wrapper around @c std::mutex.
 *
 * Provides non-throwing @c lock() and @c try_lock() plus @c unlock(). Any
 * @c std::system_error raised by the STL is caught internally and reported as
 * a simple boolean failure.
 */
class mutex : private noncopyable
{
public:
mutex() = default;
~mutex() = default;
/**
 * @brief Block until the mutex is acquired.
 * @retval true Lock acquired successfully.
 * @retval false Underlying OS error prevented locking.
 *
 * Never throws; any @c std::system_error is caught internally.
 */
bool lock() noexcept;
/**
 * @brief Attempt to acquire the mutex without blocking.
 * @retval true Lock acquired successfully.
 * @retval false The mutex was already owned or an OS error occurred.
 *
 * Never throws; any @c std::system_error is caught internally.
 */
bool try_lock() noexcept;
/**
 * @brief Release the mutex.
 *
 * Behaviour is undefined if the current thread does not own the lock.
 * This function is @c noexcept because the guard classes guarantee they
 * call it only when ownership is held.
 */
void unlock() noexcept;
/**
 * @brief Access the wrapped STL mutex (e.g. for condition-variable interop).
 * @warning Bypassing the wrapper forfeits the exception-neutral contract.
 */
std::mutex &native() noexcept;
private:
std::mutex m_mtx;
};
/**
 * @class util::recursive_mutex
 * @brief Re-entrant variant of @ref util::mutex.
 *
 * Retains the same exception-neutral contract while permitting the same
 * thread to acquire the lock multiple times.
 */
class recursive_mutex : private noncopyable
{
public:
recursive_mutex() = default;
~recursive_mutex() = default;
/**
 * @brief Block until the mutex is acquired.
 * @retval true Lock acquired successfully.
 * @retval false Underlying OS error prevented locking.
 *
 * Never throws; any @c std::system_error is caught internally.
 */
bool lock() noexcept;
/**
 * @brief Non-blocking attempt to acquire the mutex.
 * @return See util::mutex::try_lock().
 */
bool try_lock() noexcept;
/**
 * @brief Release one ownership level of the recursive mutex.
 */
void unlock() noexcept;
/**
 * @brief Access the underlying `std::recursive_mutex`.
 * @warning Direct use bypasses the exception-neutral guarantee.
 */
std::recursive_mutex &native() noexcept;
private:
std::recursive_mutex m_mtx;
};
/**
 * @class util::unique_lock
 * @tparam Mutex Mutex-like type (defaults to @ref util::mutex).
 * @brief Flexible RAII guard supporting defer-lock, adopt-lock, unlock, and retry.
 *
 * All lock attempts are exception-neutral: failure is reported by returning
 * @c false (and by the guard converting to @c false), never by throwing.
 * The immediate constructor and lock() block until the mutex is acquired or
 * an OS error occurs; try_lock() never blocks.
 */
template <typename Mutex = util::mutex>
class unique_lock : private noncopyable
{
public:
   /**
    * @brief Acquire @p m immediately (blocking).
    *
    * Check @ref owns_lock() / @c operator bool afterwards: on an underlying
    * OS error the guard is left not owning instead of throwing.
    */
   explicit unique_lock(Mutex &m) noexcept
      : m_mtx(&m)
   {
      m_owns = m_mtx->lock();
   }
   /** @brief Associate with @p m without locking; call lock()/try_lock() later. */
   unique_lock(Mutex &m, std::defer_lock_t) noexcept
      : m_mtx(&m)
   {
   }
   /** @brief Adopt a mutex the caller has already locked. */
   unique_lock(Mutex &m, std::adopt_lock_t) noexcept
      : m_mtx(&m)
      , m_owns(true)
   {
   }
   /** @brief Release the mutex if this guard still owns it. */
   ~unique_lock() noexcept
   {
      if (m_owns)
      {
         m_mtx->unlock();
      }
   }
   /** Block until the mutex is acquired (for defer-lock cases). */
   bool lock() noexcept
   {
      assert(m_mtx && !m_owns && "unique_lock::lock: already owns or no mutex");
      m_owns = m_mtx->lock();
      return m_owns;
   }
   /* Non-blocking retry after defer-lock. */
   bool try_lock() noexcept
   {
      assert(m_mtx && !m_owns && "unique_lock::try_lock: already owns or no mutex");
      m_owns = m_mtx->try_lock();
      return m_owns;
   }
   /** @brief Release the mutex early; harmless no-op if not owned. */
   void unlock() noexcept
   {
      if (m_owns)
      {
         m_mtx->unlock();
         m_owns = false;
      }
   }
   /** @return true if this guard currently owns the mutex. */
   bool owns_lock() const noexcept
   {
      return m_owns;
   }
   /** @return owns_lock(); allows `if (!lock)` style checks at call sites. */
   explicit operator bool() const noexcept
   {
      return owns_lock();
   }
   /** Disown without unlocking - caller becomes responsible. */
   Mutex *release() noexcept
   {
      m_owns = false;
      return std::exchange(m_mtx, nullptr);
   }
   /** Return a reference to the wrapped std::mutex (requires Mutex::native()). */
   auto &native_mutex() noexcept
   {
      /* Guard against use after release(): m_mtx is nullptr then, and the
       * unchecked dereference would be silent UB. */
      assert(m_mtx != nullptr && "unique_lock::native_mutex: no associated mutex");
      return m_mtx->native();
   }
private:
   Mutex *m_mtx{ nullptr };
   bool m_owns{ false };
};
} /* namespace util */

View file

@ -33,7 +33,6 @@
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <mutex>
#include <drm_fourcc.h>
namespace wsi
{

View file

@ -405,7 +405,12 @@ VkResult swapchain::create_framebuffer(const VkImageCreateInfo &image_create_inf
VkResult swapchain::allocate_and_bind_swapchain_image(VkImageCreateInfo image_create_info, swapchain_image &image)
{
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire image status lock in allocate_and_bind_swapchain_image.");
return VK_ERROR_INITIALIZATION_FAILED;
}
image.status = swapchain_image::FREE;
assert(image.data != nullptr);
auto image_data = static_cast<display_image_data *>(image.data);
@ -608,7 +613,12 @@ VkResult swapchain::image_wait_present(swapchain_image &image, uint64_t timeout)
void swapchain::destroy_image(swapchain_image &image)
{
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire image status lock in destroy_image.");
abort();
}
if (image.status != swapchain_image::INVALID)
{

View file

@ -30,6 +30,8 @@
#include "present_id.hpp"
#include <system_error>
#include <cassert>
namespace wsi
{
@ -38,7 +40,12 @@ void wsi_ext_present_id::mark_delivered(uint64_t present_id)
/* Stale reads are acceptable as we only care that the ID is increasing */
if (present_id > m_last_delivered_id.load(std::memory_order_relaxed))
{
std::unique_lock lock(m_mutex);
util::unique_lock lock(m_mutex);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire mutex lock in mark_delivered.\n");
abort();
}
m_last_delivered_id.store(present_id, std::memory_order_relaxed);
}
m_present_state_changed.notify_all();
@ -46,7 +53,12 @@ void wsi_ext_present_id::mark_delivered(uint64_t present_id)
void wsi_ext_present_id::set_error_state(VkResult error_code)
{
std::unique_lock lock(m_mutex);
util::unique_lock lock(m_mutex);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire mutex lock in set_error_state.\n");
abort();
}
m_error_state.store(error_code);
m_present_state_changed.notify_all();
}
@ -65,12 +77,26 @@ VkResult wsi_ext_present_id::wait_for_present_id(uint64_t present_id, uint64_t t
try
{
std::unique_lock lock(m_mutex);
util::unique_lock lock(m_mutex);
if (!lock)
{
return VK_ERROR_UNKNOWN;
}
/* Move ownership into a std::unique_lock so we can call the
 * std::condition_variable APIs, which accept only
 * std::unique_lock<std::mutex>. The mutex is already held by
 * util::unique_lock, which acquired it via its exception-neutral
 * blocking lock() (any std::system_error is converted into a simple
 * false). We then release the util::unique_lock to avoid
 * double-unlocking.
 */
std::unique_lock<std::mutex> wait_lock(lock.native_mutex(), std::adopt_lock);
lock.release();
if (timeout_in_ns == UINT64_MAX)
{
/* Infinite wait */
m_present_state_changed.wait(
lock, [&]() { return (m_last_delivered_id.load() >= present_id || m_error_state.load() != VK_SUCCESS); });
m_present_state_changed.wait(wait_lock, [&]() {
return (m_last_delivered_id.load() >= present_id || m_error_state.load() != VK_SUCCESS);
});
/* The condition can either return when present_id condition has been reached or there has been an error */
return m_error_state;
@ -80,9 +106,10 @@ VkResult wsi_ext_present_id::wait_for_present_id(uint64_t present_id, uint64_t t
/* Note: With very long timeouts it is possible that the clock in condition_variable will overflow.
* This will result in wait_for immediately returning with a failed result. Considering the
* duration needed to overflow the clock, we can probably ignore this. */
bool wait_success = m_present_state_changed.wait_for(lock, std::chrono::nanoseconds(timeout_in_ns), [&]() {
return (m_last_delivered_id.load() >= present_id || m_error_state.load() != VK_SUCCESS);
});
bool wait_success =
m_present_state_changed.wait_for(wait_lock, std::chrono::nanoseconds(timeout_in_ns), [&]() {
return (m_last_delivered_id.load() >= present_id || m_error_state.load() != VK_SUCCESS);
});
if (!wait_success)
{

View file

@ -31,6 +31,7 @@
#pragma once
#include <util/custom_allocator.hpp>
#include <util/custom_mutex.hpp>
#include <util/macros.hpp>
#include <util/log.hpp>
#include <atomic>
@ -112,7 +113,7 @@ private:
/**
* @brief Mutex for m_present_state_changed conditional variable.
*/
std::mutex m_mutex;
util::mutex m_mutex;
};
} /* namespace wsi */

View file

@ -212,7 +212,11 @@ VkResult wsi_ext_present_timing::write_pending_results()
VkResult wsi_ext_present_timing::present_timing_queue_set_size(size_t queue_size)
{
const std::lock_guard<std::mutex> lock(m_queue_mutex);
const util::unique_lock<util::mutex> lock(m_queue_mutex);
if (!lock)
{
return VK_ERROR_UNKNOWN;
}
if (m_queue.size() > queue_size)
{
return VK_NOT_READY;
@ -279,7 +283,11 @@ VkResult wsi_ext_present_timing::add_presentation_query_entry(VkQueue queue, uin
uint64_t target_time,
VkPresentStageFlagsEXT present_stage_queries)
{
const std::lock_guard<std::mutex> lock(m_queue_mutex);
const util::unique_lock<util::mutex> lock(m_queue_mutex);
if (!lock)
{
return VK_ERROR_UNKNOWN;
}
TRY_LOG_CALL(write_pending_results());
/* Keep the internal queue to the limit defined by the application. */
@ -366,8 +374,11 @@ uint32_t wsi_ext_present_timing::get_num_available_results(VkPastPresentationTim
VkResult wsi_ext_present_timing::get_past_presentation_results(
VkPastPresentationTimingPropertiesEXT *past_present_timing_properties, VkPastPresentationTimingFlagsEXT flags)
{
const std::lock_guard<std::mutex> lock(m_queue_mutex);
const util::unique_lock<util::mutex> lock(m_queue_mutex);
if (!lock)
{
return VK_ERROR_UNKNOWN;
}
assert(past_present_timing_properties != nullptr);
/* Get any outstanding timings to the internal queue. */
TRY_LOG_CALL(write_pending_results());
@ -460,7 +471,12 @@ VkResult wsi_ext_present_timing::get_past_presentation_results(
bool wsi_ext_present_timing::is_stage_pending_for_image_index(uint32_t image_index,
VkPresentStageFlagBitsEXT present_stage)
{
const std::lock_guard<std::mutex> lock(m_queue_mutex);
const util::unique_lock<util::mutex> lock(m_queue_mutex);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire queue mutex in is_stage_pending_for_image_index.");
abort();
}
return (get_pending_stage_timing(image_index, present_stage) != nullptr);
}

View file

@ -34,6 +34,7 @@
#include <layer/wsi_layer_experimental.hpp>
#include <layer/private_data.hpp>
#include <util/custom_allocator.hpp>
#include <util/custom_mutex.hpp>
#include <util/macros.hpp>
#include <atomic>
@ -43,6 +44,7 @@
#include <tuple>
#include <optional>
#include <functional>
#include <cassert>
#include "wsi_extension.hpp"
@ -433,7 +435,12 @@ public:
*/
void set_pending_stage_time(uint32_t image_index, VkPresentStageFlagBitsEXT stage, uint64_t time)
{
const std::lock_guard<std::mutex> lock(m_queue_mutex);
const util::unique_lock<util::mutex> lock(m_queue_mutex);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire queue mutex in set_pending_stage_time.");
abort();
}
if (auto timing = get_pending_stage_timing(image_index, stage))
{
timing->set_time(time);
@ -536,7 +543,7 @@ private:
* Private helpers assume **the caller already holds the lock**; that
* pre-condition must be met before invoking them.
*/
std::mutex m_queue_mutex;
util::mutex m_queue_mutex;
/**
* @brief The presentation timing queue.

View file

@ -28,7 +28,7 @@
#include <cstdlib>
#include <cstring>
#include <map>
#include <mutex>
#include <util/custom_mutex.hpp>
#include <vulkan/vk_icd.h>
#include <vulkan/vulkan.h>

View file

@ -150,7 +150,11 @@ VkResult swapchain::allocate_and_bind_swapchain_image(VkImageCreateInfo image_cr
{
UNUSED(image_create);
VkResult res = VK_SUCCESS;
const std::lock_guard<std::recursive_mutex> lock(m_image_status_mutex);
const util::unique_lock<util::recursive_mutex> lock(m_image_status_mutex);
if (!lock)
{
return VK_ERROR_INITIALIZATION_FAILED;
}
VkMemoryRequirements memory_requirements = {};
m_device_data.disp.GetImageMemoryRequirements(m_device, image.image, &memory_requirements);
@ -282,7 +286,12 @@ void swapchain::present_image(const pending_present_request &pending_present)
void swapchain::destroy_image(wsi::swapchain_image &image)
{
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire image status lock in destroy_image.");
abort();
}
if (image.status != wsi::swapchain_image::INVALID)
{
if (image.image != VK_NULL_HANDLE)

View file

@ -99,7 +99,12 @@ void swapchain_base::page_flip_thread()
/* We want to present the oldest queued for present image from our present queue,
* which we can find at the sc->pending_buffer_pool.head index. */
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire image status lock in page flip thread.");
abort();
}
auto pending_submission = m_pending_buffer_pool.pop_front();
assert(pending_submission.has_value());
@ -183,7 +188,12 @@ VkResult swapchain_base::init_page_flip_thread()
void swapchain_base::unpresent_image(uint32_t presented_index)
{
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire image status lock in unpresent_image.");
abort();
}
if (m_present_mode == VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR ||
m_present_mode == VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR)
@ -418,7 +428,12 @@ void swapchain_base::teardown()
VkResult swapchain_base::acquire_next_image(uint64_t timeout, VkSemaphore semaphore, VkFence fence,
uint32_t *image_index)
{
std::unique_lock<std::mutex> acquire_lock(m_image_acquire_lock);
util::unique_lock<util::mutex> acquire_lock(m_image_acquire_lock);
if (!acquire_lock)
{
WSI_LOG_ERROR("Failed to acquire image acquire lock.");
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
TRY(wait_and_get_free_buffer(timeout));
if (error_has_occured())
@ -426,7 +441,12 @@ VkResult swapchain_base::acquire_next_image(uint64_t timeout, VkSemaphore semaph
return get_error_state();
}
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire image status lock in acquire_next_image.");
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
size_t i;
for (i = 0; i < m_swapchain_images.size(); ++i)
@ -553,7 +573,11 @@ VkResult swapchain_base::get_swapchain_status()
VkResult swapchain_base::notify_presentation_engine(const pending_present_request &pending_present)
{
const std::lock_guard<std::recursive_mutex> lock(m_image_status_mutex);
const util::unique_lock<util::recursive_mutex> lock(m_image_status_mutex);
if (!lock)
{
return VK_ERROR_UNKNOWN;
}
/* If the descendant has started presenting, we should release the image
* however we do not want to block inside the main thread so we mark it
@ -698,8 +722,13 @@ void swapchain_base::deprecate(VkSwapchainKHR descendant)
void swapchain_base::wait_for_pending_buffers()
{
std::unique_lock<std::mutex> acquire_lock(m_image_acquire_lock);
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::mutex> acquire_lock(m_image_acquire_lock);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!acquire_lock || !image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire mutex lock in wait_for_pending_buffers.\n");
abort();
}
uint64_t non_pending_images = 0;
for (auto &img : m_swapchain_images)

View file

@ -37,6 +37,7 @@
#include <array>
#include <util/custom_allocator.hpp>
#include <util/custom_mutex.hpp>
#include <util/helpers.hpp>
#include <util/ring_buffer.hpp>
#include <util/timed_semaphore.hpp>
@ -346,7 +347,7 @@ protected:
* these functions to be called both with and without the mutex already locked in the
* same thread.
*/
std::recursive_mutex m_image_status_mutex;
util::recursive_mutex m_image_status_mutex;
/**
* @brief Defines if the pthread_t and sem_t members of the class are defined.
@ -641,7 +642,7 @@ protected:
}
private:
std::mutex m_image_acquire_lock;
util::mutex m_image_acquire_lock;
/**
* @brief In case we encounter threading or drm errors we need a way to
* notify the user of the failure. While no error has occurred its value

View file

@ -29,6 +29,7 @@
*/
#if VULKAN_WSI_LAYER_EXPERIMENTAL
#include <util/custom_mutex.hpp>
#include "present_id_wayland.hpp"
namespace wsi
@ -39,7 +40,12 @@ namespace wayland
presentation_feedback *wsi_ext_present_id_wayland::insert_into_pending_present_feedback_list(
uint64_t present_id, struct wp_presentation_feedback *feedback_obj)
{
scoped_mutex lock(m_pending_presents_lock);
util::unique_lock<util::mutex> lock(m_pending_presents_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire pending presents lock in insert_into_pending_present_feedback_list.\n");
abort();
}
bool ret = m_pending_presents.push_back(presentation_feedback(feedback_obj, this, present_id));
if (!ret)
{
@ -50,7 +56,12 @@ presentation_feedback *wsi_ext_present_id_wayland::insert_into_pending_present_f
void wsi_ext_present_id_wayland::remove_from_pending_present_feedback_list(uint64_t present_id)
{
scoped_mutex lock(m_pending_presents_lock);
util::unique_lock<util::mutex> lock(m_pending_presents_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire pending presents lock in remove_from_pending_present_feedback_list.\n");
abort();
}
while (m_pending_presents.size() > 0 && m_pending_presents.front()->get_present_id() <= present_id)
{
m_pending_presents.pop_front();

View file

@ -35,7 +35,7 @@
#include <util/ring_buffer.hpp>
#include "surface_properties.hpp"
#include "wp_presentation_feedback.hpp"
#include <mutex>
#include <util/custom_mutex.hpp>
namespace wsi
{
@ -65,7 +65,7 @@ private:
/**
* @brief Mutex for synchronising accesses to the pending present id list.
*/
std::mutex m_pending_presents_lock;
util::mutex m_pending_presents_lock;
/**
* @brief Stores the presentation feedbacks that have been queued.

View file

@ -28,6 +28,7 @@
* @brief Contains the functionality to implement features for present timing extension.
*/
#include <util/custom_mutex.hpp>
#include "present_timing_handler.hpp"
#include "surface.hpp"
@ -100,7 +101,12 @@ VkResult wsi_ext_present_timing_wayland::get_swapchain_timing_properties(
presentation_feedback *wsi_ext_present_timing_wayland::insert_into_pending_present_feedback_list(
uint32_t image_index, struct wp_presentation_feedback *feedback_obj)
{
scoped_mutex lock(m_pending_presents_lock);
util::unique_lock<util::mutex> lock(m_pending_presents_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire pending presents lock in insert_into_pending_present_feedback_list.\n");
abort();
}
presentation_feedback fb(feedback_obj, this, image_index);
size_t position = m_pending_presents.size();
if (!m_pending_presents.try_push_back(std::move(fb)))
@ -112,7 +118,12 @@ presentation_feedback *wsi_ext_present_timing_wayland::insert_into_pending_prese
void wsi_ext_present_timing_wayland::remove_from_pending_present_feedback_list(uint32_t image_index)
{
scoped_mutex lock(m_pending_presents_lock);
util::unique_lock<util::mutex> lock(m_pending_presents_lock);
if (!lock)
{
WSI_LOG_ERROR("Failed to acquire pending presents lock in remove_from_pending_present_feedback_list.\n");
abort();
}
auto it = std::find_if(m_pending_presents.begin(), m_pending_presents.end(),
[image_index](const presentation_feedback &p) { return p.get_image_index() == image_index; });

View file

@ -36,6 +36,7 @@
#include "surface_properties.hpp"
#include "wp_presentation_feedback.hpp"
#include "wl_helpers.hpp"
#include <util/custom_mutex.hpp>
namespace wsi
{
@ -112,7 +113,7 @@ private:
/**
* @brief Mutex for synchronising accesses to the pending present id list.
*/
std::mutex m_pending_presents_lock;
util::mutex m_pending_presents_lock;
/**
* @brief Stores the presentation feedbacks that have been queued.

View file

@ -471,7 +471,12 @@ VkResult swapchain::create_wl_buffer(const VkImageCreateInfo &image_create_info,
VkResult swapchain::allocate_and_bind_swapchain_image(VkImageCreateInfo image_create_info, swapchain_image &image)
{
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire mutex lock in allocate_and_bind_swapchain_image.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
image.status = swapchain_image::FREE;
assert(image.data != nullptr);
@ -653,8 +658,12 @@ void swapchain::present_image(const pending_present_request &pending_present)
void swapchain::destroy_image(swapchain_image &image)
{
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
util::unique_lock<util::recursive_mutex> image_status_lock(m_image_status_mutex);
if (!image_status_lock)
{
WSI_LOG_ERROR("Failed to acquire mutex lock in destroy_image.\n");
abort();
}
if (image.status != swapchain_image::INVALID)
{
if (image.image != VK_NULL_HANDLE)