Add ring buffer utility

Add a new utility class, ring_buffer, and replace the existing
struct implementation with it.

Change-Id: I725033738bff73d33f938e64cc96ca9acb8a910c
Signed-off-by: Dennis Tsiang <dennis.tsiang@arm.com>
Signed-off-by: David Harvey-Macaulay <david.harvey-macaulay@arm.com>
This commit is contained in:
Dennis Tsiang 2021-08-16 12:22:25 +01:00
parent 6282185808
commit 03bc497ea4
7 changed files with 171 additions and 46 deletions

View file

@ -100,6 +100,26 @@ public:
/**
 * @brief Clear the optional, leaving it without a value.
 */
void reset() noexcept
{
   m_value = T{};
   m_has_value = false;
}
/**
 * @brief Reassign/assign the value in the optional.
 *
 * @param val Value to move into the optional.
 * @return optional& This optional object with the value.
 */
optional &set(T &&val) noexcept
{
   m_has_value = true;
   m_value = std::move(val);
   return *this;
}
/**
 * @brief Reassign/assign the value in the optional from a copy of @p val.
 *
 * @return optional& This optional object with the value.
 */
optional &set(const T &val) noexcept
{
   m_has_value = true;
   m_value = val;
   return *this;
}
/**

132
util/ring_buffer.hpp Normal file
View file

@ -0,0 +1,132 @@
/*
* Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once

#include <array>
#include <cstddef>
#include <memory>
#include <utility>

#include "optional.hpp"

namespace util
{

/**
 * @brief Fixed-capacity FIFO ring buffer.
 *
 * Stores up to @p N items of type @p T in a circular array. Each slot is a
 * util::optional so that empty slots can be distinguished from occupied ones.
 */
template <typename T, std::size_t N>
class ring_buffer
{
public:
   /**
    * @brief Return maximum capacity of the ring buffer.
    */
   /* const so it is callable on a const ring_buffer; since C++14 constexpr
    * member functions are no longer implicitly const. */
   constexpr std::size_t capacity() const noexcept
   {
      return N;
   }

   /**
    * @brief Return current size of the ring buffer.
    */
   std::size_t size() const noexcept
   {
      return m_size;
   }

   /**
    * @brief Places item into next slot of the ring buffer.
    *
    * @param item Value to copy or move into the buffer.
    * @return Boolean to indicate success or failure (buffer full).
    */
   template <typename U>
   bool push_back(U &&item)
   {
      if (size() == capacity())
      {
         return false;
      }
      m_data[(m_begin + m_size) % N].set(std::forward<U>(item));
      ++m_size;
      return true;
   }

   /**
    * @brief Gets a pointer to the item at the starting index of the ring buffer.
    *
    * @return Pointer to the oldest item, or nullptr if the buffer is empty.
    */
   T *front()
   {
      return get(m_begin);
   }

   /**
    * @brief Gets a pointer to the item that was last placed into the ring buffer.
    *
    * @return Pointer to the newest item, or nullptr if the buffer is empty.
    */
   T *back()
   {
      /* When m_size == 0 this maps to the (empty) slot just before m_begin,
       * which get() reports as nullptr. */
      return get((m_begin + m_size + N - 1) % N);
   }

   /**
    * @brief Pop the front of the ring buffer.
    *
    * Item at the starting index of the ring buffer is returned. The slot is
    * subsequently emptied. The starting index of the ring buffer increments
    * by 1.
    *
    * @return Item wrapped in an optional; an empty optional if the buffer
    *         held no items.
    */
   util::optional<T> pop_front()
   {
      if (size() == 0)
      {
         return util::optional<T>{};
      }
      util::optional<T> value = std::move(m_data[m_begin]);
      /* Moving from the slot's optional does not necessarily disengage it;
       * reset it explicitly so front()/back() cannot return a pointer to a
       * stale, moved-from value. */
      m_data[m_begin].reset();
      m_begin = (m_begin + 1) % N;
      --m_size;
      return value;
   }

private:
   /**
    * @brief Return a pointer to the value stored at @p index, or nullptr if
    *        that slot is empty.
    */
   T *get(std::size_t index)
   {
      if (m_data[index].has_value())
      {
         return std::addressof(m_data[index].value());
      }
      return nullptr;
   }

   /* Slots for the stored items; empty slots hold a disengaged optional. */
   std::array<util::optional<T>, N> m_data{};
   /* Marks the start index of the ring buffer. */
   std::size_t m_begin{};
   /* The number of entries in the ring buffer from the start index. */
   std::size_t m_size{};
};

} /* namespace util */

View file

@ -57,8 +57,7 @@ VkResult surface_properties::get_surface_capabilities(VkPhysicalDevice physical_
UNUSED(surface);
/* Image count limits */
surface_capabilities->minImageCount = 1;
/* There is no maximum theoretically speaking */
surface_capabilities->maxImageCount = UINT32_MAX;
surface_capabilities->maxImageCount = MAX_SWAPCHAIN_IMAGE_COUNT;
/* Surface extents */
surface_capabilities->currentExtent = { 0xffffffff, 0xffffffff };

View file

@ -75,6 +75,9 @@ public:
* At least the specific VkSurface creation entrypoint must be intercepted.
*/
virtual PFN_vkVoidFunction get_proc_addr(const char *name) = 0;
/* There is no maximum theoretically speaking however we choose 3 for practicality */
static constexpr uint32_t MAX_SWAPCHAIN_IMAGE_COUNT = 3;
};
} /* namespace wsi */

View file

@ -71,11 +71,11 @@ void swapchain_base::page_flip_thread()
/* We want to present the oldest queued for present image from our present queue,
* which we can find at the sc->pending_buffer_pool.head index. */
uint32_t pending_index = m_pending_buffer_pool.ring[m_pending_buffer_pool.head];
m_pending_buffer_pool.head = (m_pending_buffer_pool.head + 1) % m_pending_buffer_pool.size;
auto pending_index = m_pending_buffer_pool.pop_front();
assert(pending_index.has_value());
/* We wait for the fence of the oldest pending image to be signalled. */
vk_res = m_device_data.disp.WaitForFences(m_device, 1, &sc_images[pending_index].present_fence, VK_TRUE,
vk_res = m_device_data.disp.WaitForFences(m_device, 1, &sc_images[*pending_index].present_fence, VK_TRUE,
timeout);
if (vk_res != VK_SUCCESS)
{
@ -88,9 +88,9 @@ void swapchain_base::page_flip_thread()
/* If the descendant has started presenting the queue_present operation has marked the image
* as FREE so we simply release it and continue. */
if (sc_images[pending_index].status == swapchain_image::FREE)
if (sc_images[*pending_index].status == swapchain_image::FREE)
{
destroy_image(sc_images[pending_index]);
destroy_image(sc_images[*pending_index]);
m_free_image_semaphore.post();
continue;
}
@ -108,14 +108,14 @@ void swapchain_base::page_flip_thread()
sem_post(&m_start_present_semaphore);
present_image(pending_index);
present_image(*pending_index);
m_first_present = false;
}
/* The swapchain has already started presenting. */
else
{
present_image(pending_index);
present_image(*pending_index);
}
}
}
@ -139,7 +139,7 @@ swapchain_base::swapchain_base(layer::device_private_data &dev_data, const VkAll
, m_page_flip_thread_run(true)
, m_thread_sem_defined(false)
, m_first_present(true)
, m_pending_buffer_pool{ nullptr, 0, 0, 0 }
, m_pending_buffer_pool{}
, m_allocator(dev_data.get_allocator(), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT, callbacks)
, m_swapchain_images(m_allocator)
, m_surface(VK_NULL_HANDLE)
@ -185,17 +185,6 @@ VkResult swapchain_base::init(VkDevice device, const VkSwapchainCreateInfoKHR *s
if (!m_swapchain_images.try_resize(swapchain_create_info->minImageCount))
return VK_ERROR_OUT_OF_HOST_MEMORY;
/* Initialize ring buffer. */
m_pending_buffer_pool.ring = m_allocator.create<uint32_t>(m_swapchain_images.size(), 0);
if (m_pending_buffer_pool.ring == nullptr)
{
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
m_pending_buffer_pool.head = 0;
m_pending_buffer_pool.tail = 0;
m_pending_buffer_pool.size = m_swapchain_images.size();
/* We have allocated images, we can call the platform init function if something needs to be done. */
result = init_platform(device, swapchain_create_info);
if (result != VK_SUCCESS)
@ -364,8 +353,6 @@ void swapchain_base::teardown()
/* Call implementation specific release */
destroy_image(img);
}
m_allocator.destroy(m_swapchain_images.size(), m_pending_buffer_pool.ring);
}
VkResult swapchain_base::acquire_next_image(uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *image_index)
@ -457,7 +444,7 @@ VkResult swapchain_base::get_swapchain_images(uint32_t *swapchain_image_count, V
}
}
VkResult swapchain_base::queue_present(VkQueue queue, const VkPresentInfoKHR *present_info, const uint32_t image_index)
VkResult swapchain_base::queue_present(VkQueue queue, const VkPresentInfoKHR *present_info, uint32_t image_index)
{
VkResult result;
bool descendent_started_presenting = false;
@ -512,19 +499,13 @@ VkResult swapchain_base::queue_present(VkQueue queue, const VkPresentInfoKHR *pr
if (descendent_started_presenting)
{
m_swapchain_images[image_index].status = swapchain_image::FREE;
m_pending_buffer_pool.ring[m_pending_buffer_pool.tail] = image_index;
m_pending_buffer_pool.tail = (m_pending_buffer_pool.tail + 1) % m_pending_buffer_pool.size;
m_pending_buffer_pool.push_back(image_index);
m_page_flip_semaphore.post();
return VK_ERROR_OUT_OF_DATE_KHR;
}
m_swapchain_images[image_index].status = swapchain_image::PENDING;
m_pending_buffer_pool.ring[m_pending_buffer_pool.tail] = image_index;
m_pending_buffer_pool.tail = (m_pending_buffer_pool.tail + 1) % m_pending_buffer_pool.size;
m_pending_buffer_pool.push_back(image_index);
m_page_flip_semaphore.post();
return VK_SUCCESS;
}

View file

@ -38,6 +38,8 @@
#include <layer/private_data.hpp>
#include <util/timed_semaphore.hpp>
#include <util/custom_allocator.hpp>
#include <util/ring_buffer.hpp>
#include "surface_properties.hpp"
namespace wsi
{
@ -164,17 +166,6 @@ protected:
*/
bool m_is_valid;
struct ring_buffer
{
/* Ring buffer to hold the image indexes. */
uint32_t *ring;
/* Head of the ring. */
uint32_t head;
/* End of the ring. */
uint32_t tail;
/* Size of the ring. */
uint32_t size;
};
/**
* @brief A semaphore to be signalled once a page flip event occurs.
*/
@ -215,7 +206,7 @@ protected:
* threads and we do not allow the application to acquire more images
* than we have we eliminate race conditions.
*/
ring_buffer m_pending_buffer_pool;
util::ring_buffer<uint32_t, wsi::surface_properties::MAX_SWAPCHAIN_IMAGE_COUNT> m_pending_buffer_pool;
/**
* @brief User provided memory allocation callbacks.

View file

@ -70,8 +70,7 @@ VkResult surface_properties::get_surface_capabilities(VkPhysicalDevice physical_
{
/* Image count limits */
pSurfaceCapabilities->minImageCount = 2;
/* There is no maximum theoretically speaking */
pSurfaceCapabilities->maxImageCount = UINT32_MAX;
pSurfaceCapabilities->maxImageCount = MAX_SWAPCHAIN_IMAGE_COUNT;
/* Surface extents */
pSurfaceCapabilities->currentExtent = { 0xffffffff, 0xffffffff };