Adds initial support for VK_KHR_wayland_surface.

Very basic Wayland support is implemented by importing externally
allocated dma-buf memory into images created with
VK_EXT_image_drm_format_modifiers.
The current implementation requires an external system
memory allocator. An API for this allocator is defined
in util/wsialloc/wsialloc.h and an implementation using
the ION memory allocator is included.
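
As an illustration only (not part of this change), a minimal sketch of
how a client could drive that allocator API, using the signatures
declared in util/wsialloc/wsialloc.h; the format and dimensions are
arbitrary and the ION backend is assumed to be available at runtime:

```
/* Hypothetical usage sketch of the wsialloc interface; not committed code. */
#include <stdio.h>
#include <unistd.h>
#include <drm_fourcc.h>
#include "util/wsialloc/wsialloc.h"

int main(void)
{
    wsialloc_allocator allocator;

    /* The ION implementation ignores the external fd, so -1 is acceptable here. */
    if (wsialloc_new(-1, &allocator) != 0)
    {
        fprintf(stderr, "failed to create wsialloc allocator\n");
        return 1;
    }

    /* DRM_FORMAT_ARGB8888 is single-plane, so one element per output array suffices. */
    int stride = 0;
    int buffer_fd = -1;
    uint32_t offset = 0;
    int res = wsialloc_alloc(&allocator, DRM_FORMAT_ARGB8888, 256, 256,
                             &stride, &buffer_fd, &offset, NULL);
    if (res == 0)
    {
        printf("dma-buf fd %d, stride %d, offset %u\n", buffer_fd, stride, offset);
        close(buffer_fd);
    }

    wsialloc_delete(&allocator);
    return res;
}
```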

Outstanding issues:
 * This is an initial prototype for Wayland support and
   has many outstanding TODOs which need addressing to
   properly use the Wayland protocol.
 * Using ICD-exported memory instead of an external system
   allocator is not implemented.

Wayland support is still experimental and outstanding issues
will be fixed in future commits.

Change-Id: I1b0d5991e15ff1cf25ebbab3392a631b021e8c17
Signed-off-by: Rosen Zhelev <rosen.zhelev@arm.com>
Signed-off-by: Iason Paraskevopoulos <iason.paraskevopoulos@arm.com>
commit 8dc4d923ff (parent 950cdd406a)
Iason Paraskevopoulos, 2021-01-31 20:09:53 +00:00
23 changed files with 2177 additions and 48 deletions


@ -20,16 +20,18 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
-cmake_minimum_required(VERSION 2.8.11)
+cmake_minimum_required(VERSION 3.4.3)
-project(VkLayer_window_system_integration CXX)
+project(VkLayer_window_system_integration)
find_package(PkgConfig REQUIRED)
pkg_check_modules(VULKAN_PKG_CONFIG vulkan)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -pthread")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -pthread -fPIC")
if (DEFINED DEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0")
endif()
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
+set(CMAKE_CXX_STANDARD 11)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
@ -44,6 +46,101 @@ else()
message(FATAL_ERROR "Either vulkan.pc must be available or VULKAN_CXX_INCLUDE must be defined")
endif()
# Build Configuration options
option(BUILD_WSI_WAYLAND "Build with support for VK_KHR_wayland_surface" OFF)
set(SELECT_EXTERNAL_ALLOCATOR "none" CACHE STRING "Select an external system allocator (none, ion)")
if(BUILD_WSI_WAYLAND)
if(SELECT_EXTERNAL_ALLOCATOR STREQUAL "none")
message(FATAL_ERROR "Wayland only supported with an external allocator.")
endif()
set(BUILD_DRM_UTILS True)
endif()
# DRM Utilities
if(BUILD_DRM_UTILS)
add_library(drm_utils STATIC util/drm/drm_utils.cpp)
pkg_check_modules(LIBDRM REQUIRED libdrm)
message(STATUS "Using libdrm include directories: ${LIBDRM_INCLUDE_DIRS}")
message(STATUS "Using libdrm cflags: ${LIBDRM_CFLAGS}")
target_sources(drm_utils PRIVATE util/drm/format_table.c)
target_include_directories(drm_utils PRIVATE ${VULKAN_CXX_INCLUDE})
target_include_directories(drm_utils PUBLIC ${LIBDRM_INCLUDE_DIRS})
target_compile_options(drm_utils PUBLIC ${LIBDRM_CFLAGS})
endif()
# External WSI Alloctator
if(NOT SELECT_EXTERNAL_ALLOCATOR STREQUAL "none")
add_library(wsialloc STATIC)
set_target_properties(wsialloc PROPERTIES C_STANDARD 99)
if(SELECT_EXTERNAL_ALLOCATOR STREQUAL "ion")
target_sources(wsialloc PRIVATE util/wsialloc/wsialloc_ion.c)
target_link_libraries(wsialloc drm_utils)
if(DEFINED KERNEL_DIR)
target_include_directories(wsialloc PRIVATE "${KERNEL_DIR}/drivers/staging/android/uapi")
else()
message(FATAL_ERROR "KERNEL_DIR must be defined as the root of the Linux kernel source.")
endif()
else()
message(FATAL_ERROR "Invalid external allocator selected: ${SELECT_EXTERNAL_ALLOCATOR}")
endif()
target_include_directories(wsialloc PRIVATE ${VULKAN_CXX_INCLUDE})
target_include_directories(wsialloc PRIVATE util/drm)
endif()
# Wayland WSI
if(BUILD_WSI_WAYLAND)
add_library(wayland_wsi STATIC
wsi/wayland/surface_properties.cpp
wsi/wayland/swapchain_wl_helpers.cpp
wsi/wayland/swapchain.cpp)
pkg_check_modules(WAYLAND_CLIENT REQUIRED wayland-client)
message(STATUS "Using Wayland client include directories: ${WAYLAND_CLIENT_INCLUDE_DIRS}")
message(STATUS "Using Wayland client cflags: ${WAYLAND_CLIENT_CFLAGS}")
message(STATUS "Using Wayland client ldflags: ${WAYLAND_CLIENT_LDFLAGS}")
pkg_check_modules(WAYLAND_SCANNER REQUIRED wayland-scanner)
pkg_get_variable(WAYLAND_SCANNER_EXEC wayland-scanner wayland_scanner)
message(STATUS "Using wayland-scanner : ${WAYLAND_SCANNER_EXEC}")
pkg_check_modules(WAYLAND_PROTOCOLS REQUIRED wayland-protocols)
pkg_get_variable(WAYLAND_PROTOCOLS_DIR wayland-protocols pkgdatadir)
message(STATUS "Using wayland protocols dir : ${WAYLAND_PROTOCOLS_DIR}")
add_custom_target(wayland_generated_files
COMMAND ${WAYLAND_SCANNER_EXEC} client-header
${WAYLAND_PROTOCOLS_DIR}/unstable/linux-dmabuf/linux-dmabuf-unstable-v1.xml
${CMAKE_CURRENT_BINARY_DIR}/linux-dmabuf-unstable-v1-client-protocol.h
COMMAND ${WAYLAND_SCANNER_EXEC} code
${WAYLAND_PROTOCOLS_DIR}/unstable/linux-dmabuf/linux-dmabuf-unstable-v1.xml
${CMAKE_CURRENT_BINARY_DIR}/linux-dmabuf-unstable-v1-protocol.c
BYPRODUCTS linux-dmabuf-unstable-v1-protocol.c linux-dmabuf-unstable-v1-client-protocol.h)
target_sources(wayland_wsi PRIVATE
${CMAKE_CURRENT_BINARY_DIR}/linux-dmabuf-unstable-v1-protocol.c
${CMAKE_CURRENT_BINARY_DIR}/linux-dmabuf-unstable-v1-client-protocol.h)
add_dependencies(wayland_wsi wayland_generated_files)
target_include_directories(wayland_wsi PRIVATE
${PROJECT_SOURCE_DIR}
${VULKAN_CXX_INCLUDE}
${WAYLAND_CLIENT_INCLUDE_DIRS}
${CMAKE_CURRENT_BINARY_DIR})
target_compile_options(wayland_wsi PRIVATE ${WAYLAND_CLIENT_CFLAGS})
target_compile_options(wayland_wsi INTERFACE "-DBUILD_WSI_WAYLAND=1")
target_link_libraries(wayland_wsi drm_utils wsialloc ${WAYLAND_CLIENT_LDFLAGS})
list(APPEND LINK_WSI_LIBS wayland_wsi)
else()
list(APPEND JSON_COMMANDS COMMAND sed -i '/VK_KHR_wayland_surface/d' ${CMAKE_CURRENT_BINARY_DIR}/VkLayer_window_system_integration.json)
endif()
# Layer
add_library(${PROJECT_NAME} SHARED
layer/layer.cpp
layer/private_data.cpp
@ -56,5 +153,12 @@ add_library(${PROJECT_NAME} SHARED
wsi/wsi_factory.cpp
wsi/headless/surface_properties.cpp
wsi/headless/swapchain.cpp)
-set_target_properties(${PROJECT_NAME} PROPERTIES CXX_STANDARD 11)
-target_include_directories(${PROJECT_NAME} PRIVATE ${PROJECT_SOURCE_DIR} ${VULKAN_CXX_INCLUDE})
+target_compile_definitions(${PROJECT_NAME} PRIVATE ${WSI_DEFINES})
+target_include_directories(${PROJECT_NAME} PRIVATE
+    ${PROJECT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${VULKAN_CXX_INCLUDE})
+target_link_libraries(${PROJECT_NAME} ${LINK_WSI_LIBS})
+add_custom_target(manifest_json ALL COMMAND
+    cp ${PROJECT_SOURCE_DIR}/layer/VkLayer_window_system_integration.json ${CMAKE_CURRENT_BINARY_DIR}
+    ${JSON_COMMANDS})


@ -12,14 +12,14 @@ ICD; instead, the implementation of these extensions are shared across vendors
for mutual benefit.
The project currently implements support for `VK_EXT_headless_surface` and
-its dependencies. We hope to extend support for further platforms such as Wayland
-and direct-to-display rendering in the future.
+its dependencies. Experimental support for `VK_KHR_wayland_surface` can be
+enabled via a build option [as explained below](#building-with-wayland-support).
## Building
### Dependencies
-* [CMake](https://cmake.org) version 2.8.11 or above.
+* [CMake](https://cmake.org) version 3.4.3 or above.
* C++11 compiler.
* Vulkan® loader and associated headers with support for the
  `VK_EXT_headless_surface` extension.
@ -59,6 +59,23 @@ cmake . -Bbuild
make -C build
```
#### Building with Wayland support
In order to build with Wayland support the `BUILD_WSI_WAYLAND` build option
must be used, the `SELECT_EXTERNAL_ALLOCATOR` option has to be set to
an allocator (currently only ion is supported) and the `KERNEL_DIR` option must
be defined as the root of the Linux kernel source.
```
cmake . -DVULKAN_CXX_INCLUDE="path/to/vulkan-header" \
-DBUILD_WSI_WAYLAND=1 \
-DSELECT_EXTERNAL_ALLOCATOR=ion \
-DKERNEL_DIR="path/to/linux-kernel-source"
```
Wayland support is still **EXPERIMENTAL**. What this means in practice is that
the support is incomplete and not ready for prime time.
## Installation
Copy the shared library `libVkLayer_window_system_integration.so` and JSON


@ -1,5 +1,5 @@
{
-"file_format_version" : "1.0.0",
+"file_format_version" : "1.1.2",
"layer" : {
"name": "VK_LAYER_window_system_integration",
"type": "GLOBAL",
@ -11,15 +11,13 @@
"vkGetInstanceProcAddr": "wsi_layer_vkGetInstanceProcAddr", "vkGetInstanceProcAddr": "wsi_layer_vkGetInstanceProcAddr",
"vkGetDeviceProcAddr": "wsi_layer_vkGetDeviceProcAddr" "vkGetDeviceProcAddr": "wsi_layer_vkGetDeviceProcAddr"
}, },
"pre_instance_functions" : {
"vkEnumerateInstanceExtensionProperties" : "wsi_layer_vkEnumerateInstanceExtensionProperties"
},
"instance_extensions": [ "instance_extensions": [
{ {"name" : "VK_EXT_headless_surface", "spec_version" : "1"},
"name" : "VK_EXT_headless_surface", {"name" : "VK_KHR_wayland_surface", "spec_version" : "1"},
"spec_version" : "1" {"name" : "VK_KHR_surface", "spec_version" : "1"}
},
{
"name" : "VK_KHR_surface",
"spec_version" : "1"
}
], ],
"device_extensions": [ "device_extensions": [
{ {
@ -28,10 +26,10 @@
}
],
"enable_environment": {
-"ENABLE_HEADLESS_SURFACE": "1"
+"ENABLE_WSI_LAYER": "1"
},
"disable_environment": {
-"DISABLE_HEADLESS_SURFACE": "1"
+"DISABLE_WSI_LAYER": "1"
}
}
}


@ -60,7 +60,7 @@ VKAPI_ATTR VkResult extension_properties(const uint32_t count, const VkExtension
}
size = *pCount < count ? *pCount : count;
-memcpy(pProp, layer_ext, size * sizeof(VkExtensionProperties));
+memcpy(pProp, layer_ext, size * sizeof(*pProp));
*pCount = size;
if (size < count)
{
@ -82,7 +82,7 @@ VKAPI_ATTR VkResult layer_properties(const uint32_t count, const VkLayerProperti
}
size = *pCount < count ? *pCount : count;
-memcpy(pProp, layer_prop, size * sizeof(VkLayerProperties));
+memcpy(pProp, layer_prop, size * sizeof(*pProp));
*pCount = size;
if (size < count)
{
@ -333,12 +333,14 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkEnumerateDeviceExtens
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkEnumerateInstanceExtensionProperties(
-const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties)
+const VkEnumerateInstanceExtensionPropertiesChain *chain, const char *pLayerName,
+uint32_t *pCount, VkExtensionProperties *pProperties)
{
if (pLayerName && !strcmp(pLayerName, layer::global_layer.layerName))
return layer::extension_properties(1, layer::instance_extension, pCount, pProperties);
-return VK_ERROR_LAYER_NOT_PRESENT;
+assert(chain);
+return chain->CallDown(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
@ -365,6 +367,12 @@ VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL wsi_layer_vkGetDeviceProcAddr(VkDe
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL wsi_layer_vkGetInstanceProcAddr(VkInstance instance,
const char *funcName)
{
PFN_vkVoidFunction wsi_func = wsi::get_proc_addr(funcName);
if (wsi_func)
{
return wsi_func;
}
GET_PROC_ADDR(vkGetDeviceProcAddr);
GET_PROC_ADDR(vkGetInstanceProcAddr);
GET_PROC_ADDR(vkCreateInstance);
@ -376,7 +384,6 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL wsi_layer_vkGetInstance
GET_PROC_ADDR(vkGetPhysicalDeviceSurfaceFormatsKHR);
GET_PROC_ADDR(vkGetPhysicalDeviceSurfacePresentModesKHR);
GET_PROC_ADDR(vkEnumerateDeviceExtensionProperties);
-GET_PROC_ADDR(vkEnumerateInstanceExtensionProperties);
GET_PROC_ADDR(vkEnumerateInstanceLayerProperties);
return layer::instance_private_data::get(instance).disp.GetInstanceProcAddr(instance, funcName);


@ -214,4 +214,4 @@ void device_private_data::destroy(VkDevice dev)
scoped_mutex lock(g_data_lock);
g_device_data.erase(get_key(dev));
}
} /* namespace layer */


@ -45,16 +45,17 @@ namespace layer
* guarantee than we can safely call them. We still mark the entrypoints with REQUIRED() and OPTIONAL(). The layer
* fails if vkGetInstanceProcAddr returns null for entrypoints that are REQUIRED().
*/
#define INSTANCE_ENTRYPOINTS_LIST(REQUIRED, OPTIONAL) \
REQUIRED(GetInstanceProcAddr) \
REQUIRED(DestroyInstance) \
REQUIRED(GetPhysicalDeviceProperties) \
REQUIRED(GetPhysicalDeviceImageFormatProperties) \
REQUIRED(EnumerateDeviceExtensionProperties) \
OPTIONAL(GetPhysicalDeviceSurfaceCapabilitiesKHR) \
OPTIONAL(GetPhysicalDeviceSurfaceFormatsKHR) \
OPTIONAL(GetPhysicalDeviceSurfacePresentModesKHR) \
-OPTIONAL(GetPhysicalDeviceSurfaceSupportKHR)
+OPTIONAL(GetPhysicalDeviceSurfaceSupportKHR) \
+OPTIONAL(GetPhysicalDeviceImageFormatProperties2KHR)
struct instance_dispatch_table
{
@ -100,7 +101,8 @@ struct instance_dispatch_table
OPTIONAL(DestroySwapchainKHR) \
OPTIONAL(GetSwapchainImagesKHR) \
OPTIONAL(AcquireNextImageKHR) \
-OPTIONAL(QueuePresentKHR)
+OPTIONAL(QueuePresentKHR) \
+OPTIONAL(GetMemoryFdPropertiesKHR)
struct device_dispatch_table
{


@ -34,30 +34,30 @@
extern "C" { extern "C" {
/** /**
* @brief Implements vkGetPhysicalDeviceSurfaceCapabilitiesKHR Vulkan entrypoint. * @brief Implements vkGetPhysicalDeviceSurfaceCapabilitiesKHR Vulkan entrypoint.
*/ */
VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface, VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities); VkSurfaceCapabilitiesKHR *pSurfaceCapabilities);
/** /**
* @brief Implements vkGetPhysicalDeviceSurfaceFormatsKHR Vulkan entrypoint. * @brief Implements vkGetPhysicalDeviceSurfaceFormatsKHR Vulkan entrypoint.
*/ */
VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats); VkSurfaceFormatKHR *pSurfaceFormats);
/** /**
* @brief Implements vkGetPhysicalDeviceSurfacePresentModesKHR Vulkan entrypoint. * @brief Implements vkGetPhysicalDeviceSurfacePresentModesKHR Vulkan entrypoint.
*/ */
VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface, VkSurfaceKHR surface,
uint32_t *pPresentModeCount, uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes); VkPresentModeKHR *pPresentModes);
/** /**
* @brief Implements vkGetPhysicalDeviceSurfaceSupportKHR Vulkan entrypoint. * @brief Implements vkGetPhysicalDeviceSurfaceSupportKHR Vulkan entrypoint.
*/ */
VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, VKAPI_ATTR VkResult wsi_layer_vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, VkSurfaceKHR surface, uint32_t queueFamilyIndex, VkSurfaceKHR surface,
VkBool32 *pSupported); VkBool32 *pSupported);

util/drm/drm_utils.cpp (new file, 81 lines)

@ -0,0 +1,81 @@
/*
* Copyright (c) 2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "drm_utils.hpp"
#include "format_table.h"
namespace util
{
namespace drm
{
uint32_t vk_to_drm_format(VkFormat vk_format)
{
for (size_t i = 0; i < fourcc_format_table_len; i++)
{
if (vk_format == fourcc_format_table[i].vk_format)
{
return fourcc_format_table[i].drm_format;
}
}
for (size_t i = 0; i < srgb_fourcc_format_table_len; i++)
{
if (vk_format == srgb_fourcc_format_table[i].vk_format)
{
return srgb_fourcc_format_table[i].drm_format;
}
}
return 0;
}
VkFormat drm_to_vk_format(uint32_t drm_format)
{
for (size_t i = 0; i < fourcc_format_table_len; i++)
{
if (drm_format == fourcc_format_table[i].drm_format)
{
return fourcc_format_table[i].vk_format;
}
}
return VK_FORMAT_UNDEFINED;
}
VkFormat drm_to_vk_srgb_format(uint32_t drm_format)
{
for (size_t i = 0; i < srgb_fourcc_format_table_len; i++)
{
if (drm_format == srgb_fourcc_format_table[i].drm_format)
{
return srgb_fourcc_format_table[i].vk_format;
}
}
return VK_FORMAT_UNDEFINED;
}
} // namespace drm
} // namespace util

util/drm/drm_utils.hpp (new file, 39 lines)

@ -0,0 +1,39 @@
/*
* Copyright (c) 2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include <vulkan/vulkan.h>
namespace util
{
namespace drm
{
uint32_t vk_to_drm_format(VkFormat vk_format);
VkFormat drm_to_vk_format(uint32_t drm_format);
VkFormat drm_to_vk_srgb_format(uint32_t drm_format);
} // namespace drm
} // namespace util

util/drm/format_table.c (new file, 67 lines)

@ -0,0 +1,67 @@
/*
* Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "format_table.h"
const fmt_spec fourcc_format_table[] = {
/* Supported R,G,B,A formats */
{ DRM_FORMAT_RGB332, 1, { 8, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_BGR233, 1, { 8, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_XRGB4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_XBGR4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_RGBX4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_BGRX4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_ARGB4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_ABGR4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_RGBA4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_R4G4B4A4_UNORM_PACK16 },
{ DRM_FORMAT_BGRA4444, 1, { 16, 0, 0, 0 }, VK_FORMAT_B4G4R4A4_UNORM_PACK16 },
{ DRM_FORMAT_XRGB1555, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_XBGR1555, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_RGBX5551, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_BGRX5551, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_ARGB1555, 1, { 16, 0, 0, 0 }, VK_FORMAT_A1R5G5B5_UNORM_PACK16 },
{ DRM_FORMAT_ABGR1555, 1, { 16, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_RGBA5551, 1, { 16, 0, 0, 0 }, VK_FORMAT_R5G5B5A1_UNORM_PACK16 },
{ DRM_FORMAT_BGRA5551, 1, { 16, 0, 0, 0 }, VK_FORMAT_B5G5R5A1_UNORM_PACK16 },
{ DRM_FORMAT_RGB565, 1, { 16, 0, 0, 0 }, VK_FORMAT_R5G6B5_UNORM_PACK16 },
{ DRM_FORMAT_BGR565, 1, { 16, 0, 0, 0 }, VK_FORMAT_B5G6R5_UNORM_PACK16 },
{ DRM_FORMAT_RGB888, 1, { 24, 0, 0, 0 }, VK_FORMAT_B8G8R8_UNORM },
{ DRM_FORMAT_BGR888, 1, { 24, 0, 0, 0 }, VK_FORMAT_R8G8B8_UNORM },
{ DRM_FORMAT_XRGB8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_XBGR8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_RGBX8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_BGRX8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_ARGB8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_B8G8R8A8_UNORM },
{ DRM_FORMAT_ABGR8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_R8G8B8A8_UNORM },
{ DRM_FORMAT_RGBA8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
{ DRM_FORMAT_BGRA8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_UNDEFINED },
};
const fmt_spec srgb_fourcc_format_table[] = {
{ DRM_FORMAT_ARGB8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_B8G8R8A8_SRGB },
{ DRM_FORMAT_ABGR8888, 1, { 32, 0, 0, 0 }, VK_FORMAT_R8G8B8A8_SRGB },
};
const size_t fourcc_format_table_len = NELEMS(fourcc_format_table);
const size_t srgb_fourcc_format_table_len = NELEMS(srgb_fourcc_format_table);

util/drm/format_table.h (new file, 52 lines)

@ -0,0 +1,52 @@
/*
* Copyright (c) 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include <vulkan/vulkan.h>
#include <drm_fourcc.h>
/* Define DRM linear modifier for compatibility with older DRM header versions. */
#ifndef DRM_FORMAT_MOD_LINEAR
#define DRM_FORMAT_MOD_LINEAR 0
#endif
/* Maximum number of planes that can be returned */
#define WSIALLOCP_MAX_PLANES 4
#define NELEMS(x) (sizeof(x) / sizeof(x[0]))
typedef struct fmt_spec
{
uint32_t drm_format;
uint32_t nr_planes;
uint8_t bpp[WSIALLOCP_MAX_PLANES];
VkFormat vk_format;
} fmt_spec;
extern const fmt_spec fourcc_format_table[];
extern const fmt_spec srgb_fourcc_format_table[];
extern size_t const fourcc_format_table_len;
extern size_t const srgb_fourcc_format_table_len;

util/wsialloc/wsialloc.h (new file, 175 lines)

@ -0,0 +1,175 @@
/*
* Copyright (c) 2017, 2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**
* @file
* Window System Integration (WSI) Buffer Allocation Interface
*/
#ifndef _WSIALLOC_H_
#define _WSIALLOC_H_
#include <stdint.h>
#include <drm_fourcc.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
* @page wsialloc_page Window System Integration (WSI) Buffer Allocation
*
* wsialloc is a standalone module for doing window system/platform agnostic multi-plane buffer allocation.
*
* The underlying implementation will allocate sufficient space for the desired buffer format in a way that is
* compatible with the window system and the GPU (e.g. accounting for buffer row-start alignment requirements).
*
* @note The bare minimum work is done to provide the buffer. For example, it is up to the client to initialize the data
* if that is required for the desired buffer format.
*
* The client may have already obtained a file descriptor (fd) for a Direct Rendering Manager (DRM) driver so it may
* perform other operations (e.g. presenting a buffer). Hence, such an fd is passed in on Allocator Construction so that
* the underlying allocator may allocate from the existing fd. However, the underlying implementation is also free to
* ignore that fd and use some other allocation mechanism, even in a production system.
*
* The underlying allocator implementation is chosen at compile time.
*
* All API Public entry points are implemented in a way that is thread safe, and the client does not need to be
* concerned with locking when using these APIs. If implementers make use of non-threadsafe functions, writable global
* data etc they must make appropriate use of locks.
*/
/**
* @brief Union for allocator type.
*
* Allocators are usually file descriptors of the allocating device. However,
* this interface provides the possibility to define private allocator structures.
*/
typedef union wsialloc_allocator
{
void *ptr;
intptr_t fd;
} wsialloc_allocator;
/**
* @brief Allocate and initialize a new WSI Allocator from an existing file descriptor.
*
* The allocator from the function is used in subsequent calls to wsialloc_alloc() to allocate new buffers.
*
* This function will be implemented as thread-safe. The implementer must ensure they use thread-safe functions, or use
* appropriate locking for non-threadsafe functions/writable global data.
*
* @note The underlying implementation may choose to use @p external_fd or its own platform-specific allocation method.
*
* @param external_fd file descriptor that the WSI Allocator could use for allocating new buffers
* @param[out] allocator a valid allocator for use in wsialloc functions
* @retval 0 indicates success in creating the allocator
* @retval non-zero indicates an error
*/
int wsialloc_new(int external_fd, wsialloc_allocator *allocator);
/**
* @brief Close down and free resources associated with a WSI Allocator
*
* It is acceptable for buffer allocations from @p allocator to still exist and be in use. In this case, this function
* will return without waiting for the client to free the other allocations. However, the actual closing down and
* freeing of @p allocator will be deferred until the last allocation has been closed by the client.
*
* @note For the implementer: there is usually no need to track this explicitly, since kernel side allocators
* automatically defer their closing until all their allocations are also freed.
*
* This function will be implemented as thread-safe. The implementer must ensure they use thread-safe functions, or use
* appropriate locking for non-threadsafe functions/writable global data.
*
* @post no more new allocations should be made on @p allocator even if previous allocations still exist.
*
* @param allocator The allocator to close down and free
* @retval 0 on success
* @retval non-zero on failure with errno
*/
int wsialloc_delete(wsialloc_allocator *allocator);
/**
* @brief Allocate a buffer from the WSI Allocator
*
* Allocate a buffer of size @p width x @p height of format @p fourcc_fmt in a way that is suitable for the underlying
* window system and GPU.
*
* Each plane is returned as a file descriptor. All other information returned about the buffer is also per-plane. It is
* assumed the caller already knows how many planes are implied by @p fourcc_fmt and @p modifiers.
*
* Each row in the buffer may be larger than @p width to account for buffer alignment requirements in the underlying
* window system. @p strides must be examined to determine the number of bytes between subsequent rows in each of the
* buffer's planes. Only positive strides are allowed.
*
* The client may free the buffer's planes by invoking close() on some or all of the elements of @p buffer_fds
*
* The same file descriptor ('fd') may be written to different elements of @p buffer_fds more than once, for some or all
* of the planes. In this case:
* - @p offsets @b must be used to determine where each plane starts in the file descriptor
* - When the client frees the buffer, each unique fd in @p buffer_fds must only be closed once
*
* Even if @p buffer_fds are all different or @p fourcc_fmt is for a single plane, then the client must inspect @p
* offsets in case it contains non-zero values.
*
* @note The implementation might not export the file descriptors in @p buffer_fds in such a way that allows the client
* to directly map them on the CPU as writable (PROT_WRITE).
*
* @p modifiers allows for a fourcc_mod_code() (as defined in drm_fourcc.h) to be passed in
* per-plane to request that allocation account for a reordering or other modification of the data in the buffer's
* planes (e.g. compression, change in number of planes, etc).
*
* @p strides and @p offsets may have modifier-specific meaning when @p modifiers are in use.
*
* This function will be implemented as thread-safe. The implementer must ensure they use thread-safe functions, or use
* appropriate locking for non-threadsafe functions/writable global data.
*
* @pre @p strides, @p buffer_fds, @p offsets are pointers to storage large enough to hold per-plane information
* @pre @p width >=1 && @p height >= 1
* @pre @p allocator is a currently valid WSI Allocator from wsialloc_new()
* @post The allocated buffer will be zeroed.
*
* @param allocator The WSI Allocator to allocate from
* @param fourcc_fmt The DRM_FORMAT_<...> code
* @param width The number of pixel columns required in the buffer
* @param height The number of pixel rows required in the buffer
* @param[out] strides Per-plane number of bytes between successive rows in the buffer.
* @param[out] buffer_fds Per-plane file descriptors for the buffer
* @param[out] offsets Per-plane offset into the file descriptor for the start of the plane
* @param[in] modifiers Per-plane modifiers or NULL if no modifiers required
* @retval 0 on successful buffer allocation
* @retval Non-zero indicates some error listed below and/or a window-system/allocator specific (usually negative)
* error
* @retval -EINVAL is also returned for invalid parameters
* @retval -ENOTSUP is also returned for unsupported parameters, such as a modifier or recognized format
* not supported by the underlying window-system/allocator
*/
int wsialloc_alloc(wsialloc_allocator *allocator, uint32_t fourcc_fmt, uint32_t width, uint32_t height, int *strides, int *buffer_fds,
uint32_t *offsets, const uint64_t *modifiers);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _WSIALLOC_H_ */


@ -0,0 +1,263 @@
/*
* Copyright (c) 2017, 2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "wsialloc.h"
#include "format_table.h"
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <ion.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#define UNUSED(x) ((void)(x))
/** Default alignment */
#define WSIALLOCP_MIN_ALIGN_SZ (64u)
struct ion_allocator {
/* File descriptor of /dev/ion. */
int fd;
/* Allocator heap id. */
uint32_t alloc_heap_id;
};
static int find_alloc_heap_id(int fd)
{
assert(fd != -1);
struct ion_heap_data heaps[ION_NUM_HEAP_IDS];
struct ion_heap_query query = {
.cnt = ION_NUM_HEAP_IDS,
.heaps = (uint64_t)(uintptr_t)heaps,
};
int ret = ioctl(fd, ION_IOC_HEAP_QUERY, &query);
if (ret < 0)
{
return ret;
}
int alloc_heap_id = -1;
for (uint32_t i = 0; i < query.cnt; ++i)
{
if (ION_HEAP_TYPE_DMA == heaps[i].type)
{
alloc_heap_id = heaps[i].heap_id;
break;
}
}
return alloc_heap_id;
}
static int allocate(int fd, uint64_t size, uint32_t heap_id)
{
assert(size > 0);
assert(fd != -1);
struct ion_allocation_data alloc = {
.len = size,
.heap_id_mask = 1u << heap_id,
.flags = 0,
};
int ret = ioctl(fd, ION_IOC_ALLOC, &alloc);
if (ret < 0)
{
return ret;
}
return alloc.fd;
}
static uint64_t round_size_up_to_align(uint64_t size)
{
return (size + WSIALLOCP_MIN_ALIGN_SZ - 1) & ~(WSIALLOCP_MIN_ALIGN_SZ - 1);
}
int wsialloc_new(int external_fd, wsialloc_allocator *allocator)
{
UNUSED(external_fd);
assert(allocator != NULL);
struct ion_allocator *ion = NULL;
int ret = 0;
allocator->ptr = ion = malloc(sizeof(*ion));
if (NULL == ion)
{
ret = -ENOMEM;
goto fail;
}
ion->fd = open("/dev/ion", O_RDONLY);
if (ion->fd < 0)
{
ret = -errno;
goto fail;
}
/* Keep the result signed so errors from find_alloc_heap_id() are not lost
 * when stored in the unsigned alloc_heap_id member. */
int alloc_heap_id = find_alloc_heap_id(ion->fd);
if (alloc_heap_id < 0)
{
ret = alloc_heap_id;
goto fail;
}
ion->alloc_heap_id = alloc_heap_id;
return 0;
fail:
wsialloc_delete(allocator);
return ret;
}
int wsialloc_delete(wsialloc_allocator *allocator)
{
assert(allocator != NULL);
struct ion_allocator *ion = allocator->ptr;
int ret = 0;
if (NULL == ion)
{
return 0;
}
if (ion->fd != -1)
{
if (close(ion->fd) != 0)
{
ret = -errno;
}
}
free(ion);
allocator->ptr = NULL;
return ret;
}
static int wsiallocp_get_fmt_info(uint32_t fourcc_fmt, uint32_t *nr_planes, uint32_t *plane_bpp)
{
unsigned int fmt_idx;
const fmt_spec *found_fmt;
unsigned int plane_idx;
assert(nr_planes != NULL && plane_bpp != NULL);
/* Mask off any bits not necessary for allocation size */
fourcc_fmt = fourcc_fmt & (~(uint32_t)DRM_FORMAT_BIG_ENDIAN);
/* Search table for the format*/
for (fmt_idx = 0; fmt_idx < fourcc_format_table_len; ++fmt_idx)
{
if (fourcc_fmt == fourcc_format_table[fmt_idx].drm_format)
{
break;
}
}
if (fmt_idx >= fourcc_format_table_len)
{
return -ENOTSUP;
}
/* fmt_idx is now a correct index into the table */
found_fmt = &fourcc_format_table[fmt_idx];
assert(found_fmt->nr_planes <= WSIALLOCP_MAX_PLANES);
*nr_planes = found_fmt->nr_planes;
/* Only write out as many bpp as there are planes */
for (plane_idx = 0; plane_idx < found_fmt->nr_planes; ++plane_idx)
{
plane_bpp[plane_idx] = (uint32_t)found_fmt->bpp[plane_idx];
}
return 0;
}
int wsialloc_alloc(
wsialloc_allocator *allocator,
uint32_t fourcc,
uint32_t width,
uint32_t height,
int *stride,
int *new_fd,
uint32_t *offset,
const uint64_t *modifier)
{
assert(allocator != NULL);
assert(fourcc != 0);
assert(width > 0);
assert(height > 0);
assert(stride != NULL);
assert(new_fd != NULL);
assert(offset != NULL);
int ret = 0;
struct ion_allocator *ion = allocator->ptr;
if(modifier != NULL && *modifier != 0)
{
return -ENOTSUP;
}
size_t size = 0;
/* Validate format and determine per-plane bits per pixel. */
uint32_t nr_planes, bits_per_pixel[WSIALLOCP_MAX_PLANES];
ret = wsiallocp_get_fmt_info(fourcc, &nr_planes, bits_per_pixel);
if (ret != 0)
{
return ret;
}
/* Only single plane formats supported. */
if (nr_planes != 1)
{
return -ENOTSUP;
}
/* Assumes multiple of 8--rework otherwise. */
uint32_t plane0_bytes_per_pixel = bits_per_pixel[0] / 8;
assert(plane0_bytes_per_pixel * 8 == bits_per_pixel[0]);
*stride = round_size_up_to_align(width * plane0_bytes_per_pixel);
size = *stride * height;
*new_fd = allocate(ion->fd, size, ion->alloc_heap_id);
if (*new_fd < 0)
{
return -errno;
}
*offset = 0;
return 0;
}


@ -31,7 +31,7 @@
#pragma once
#include <vulkan/vulkan.h>
-#include <util/extension_list.hpp>
+#include "util/extension_list.hpp"
namespace wsi
{
@ -68,6 +68,14 @@ public:
static const util::extension_list empty{util::allocator::get_generic()};
return empty;
}
/**
* @brief Implements vkGetProcAddr for entrypoints specific to the surface type.
*/
virtual PFN_vkVoidFunction get_proc_addr(const char *name)
{
return nullptr;
}
};
} /* namespace wsi */


@ -179,6 +179,8 @@ VkResult swapchain_base::init(VkDevice device, const VkSwapchainCreateInfoKHR *s
return VK_ERROR_INITIALIZATION_FAILED;
}
m_present_mode = swapchain_create_info->presentMode;
/* Init image to invalid values. */
if (!m_swapchain_images.try_resize(swapchain_create_info->minImageCount))
return VK_ERROR_OUT_OF_HOST_MEMORY;


@ -0,0 +1,206 @@
/*
* Copyright (c) 2017-2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
extern "C" {
#include <wayland-client.h>
#include <linux-dmabuf-unstable-v1-client-protocol.h>
}
#include <cassert>
#include <cstdlib>
#include <algorithm>
#include <array>
#include <string.h>
#include "surface_properties.hpp"
#include "layer/private_data.hpp"
#define NELEMS(x) (sizeof(x) / sizeof(x[0]))
namespace wsi
{
namespace wayland
{
surface_properties &surface_properties::get_instance()
{
static surface_properties instance;
return instance;
}
VkResult surface_properties::get_surface_capabilities(VkPhysicalDevice physical_device, VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
{
/* Image count limits */
pSurfaceCapabilities->minImageCount = 2;
/* There is no maximum theoretically speaking */
pSurfaceCapabilities->maxImageCount = UINT32_MAX;
/* Surface extents */
pSurfaceCapabilities->currentExtent = { 0xffffffff, 0xffffffff };
pSurfaceCapabilities->minImageExtent = { 1, 1 };
/* TODO: Ask the device for max - for now setting the max from the GPU, may be ask the display somehow*/
VkPhysicalDeviceProperties dev_props;
layer::instance_private_data::get(physical_device).disp.GetPhysicalDeviceProperties(physical_device, &dev_props);
pSurfaceCapabilities->maxImageExtent = { dev_props.limits.maxImageDimension2D,
dev_props.limits.maxImageDimension2D };
pSurfaceCapabilities->maxImageArrayLayers = 1;
/* Surface transforms */
pSurfaceCapabilities->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
pSurfaceCapabilities->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
/* TODO: Composite alpha */
pSurfaceCapabilities->supportedCompositeAlpha = static_cast<VkCompositeAlphaFlagBitsKHR>(
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR | VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR |
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR | VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR);
/* Image usage flags */
pSurfaceCapabilities->supportedUsageFlags =
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
return VK_SUCCESS;
}
VkResult surface_properties::get_surface_formats(VkPhysicalDevice physical_device, VkSurfaceKHR surface,
uint32_t *surfaceFormatCount, VkSurfaceFormatKHR *surfaceFormats)
{
VkResult res = VK_SUCCESS;
/* TODO: Hardcoding a list of sensible formats, may be query it from compositor later. */
static std::array<const VkFormat, 2> formats = { VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_SRGB };
assert(surfaceFormatCount != nullptr);
res = VK_SUCCESS;
if (nullptr == surfaceFormats)
{
*surfaceFormatCount = formats.size();
}
else
{
if (formats.size() > *surfaceFormatCount)
{
res = VK_INCOMPLETE;
}
*surfaceFormatCount = std::min(*surfaceFormatCount, static_cast<uint32_t>(formats.size()));
for (uint32_t i = 0; i < *surfaceFormatCount; ++i)
{
surfaceFormats[i].format = formats[i];
surfaceFormats[i].colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
}
}
return res;
}
VkResult surface_properties::get_surface_present_modes(VkPhysicalDevice physical_device, VkSurfaceKHR surface,
uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes)
{
VkResult res = VK_SUCCESS;
/* TODO: Check that FIFO is okay on Wayland */
static std::array<const VkPresentModeKHR, 2> modes = {
VK_PRESENT_MODE_FIFO_KHR,
VK_PRESENT_MODE_MAILBOX_KHR,
};
assert(pPresentModeCount != nullptr);
if (nullptr == pPresentModes)
{
*pPresentModeCount = modes.size();
}
else
{
if (modes.size() > *pPresentModeCount)
{
res = VK_INCOMPLETE;
}
*pPresentModeCount = std::min(*pPresentModeCount, static_cast<uint32_t>(modes.size()));
for (uint32_t i = 0; i < *pPresentModeCount; ++i)
{
pPresentModes[i] = modes[i];
}
}
return res;
}
static const char *required_device_extensions[] = {
VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
VK_KHR_MAINTENANCE1_EXTENSION_NAME,
VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
};
static std::unique_ptr<util::extension_list> populate_device_extensions()
{
/* Construct the list before use; a default-constructed unique_ptr would be null here. */
std::unique_ptr<util::extension_list> ret(new util::extension_list(util::allocator::get_generic()));
ret->add(required_device_extensions, NELEMS(required_device_extensions));
return ret;
}
const util::extension_list &surface_properties::get_required_device_extensions()
{
static std::unique_ptr<util::extension_list> device_extensions = populate_device_extensions();
return *device_extensions;
}
bool surface_properties::physical_device_supported(VkPhysicalDevice dev)
{
static util::extension_list device_extensions{util::allocator::get_generic()};
device_extensions.add(dev);
static util::extension_list required_extensions{util::allocator::get_generic()};
required_extensions.add(required_device_extensions, NELEMS(required_device_extensions));
return device_extensions.contains(required_extensions);
}
/* TODO: Check for zwp_linux_dmabuf_v1 protocol in display */
VkBool32 GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physical_device, uint32_t queue_index,
struct wl_display *display)
{
return VK_TRUE;
}
PFN_vkVoidFunction surface_properties::get_proc_addr(const char *name)
{
if (strcmp(name, "vkGetPhysicalDeviceWaylandPresentationSupportKHR") == 0)
{
return reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceWaylandPresentationSupportKHR);
}
return nullptr;
}
} // namespace wayland
} // namespace wsi


@ -0,0 +1,54 @@
/*
* Copyright (c) 2017-2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "wsi/surface_properties.hpp"
namespace wsi
{
namespace wayland
{
class surface_properties : public wsi::surface_properties
{
public:
static surface_properties &get_instance();
VkResult get_surface_capabilities(VkPhysicalDevice physical_device, VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) override;
VkResult get_surface_formats(VkPhysicalDevice physical_device, VkSurfaceKHR surface, uint32_t *surfaceFormatCount,
VkSurfaceFormatKHR *surfaceFormats) override;
VkResult get_surface_present_modes(VkPhysicalDevice physical_device, VkSurfaceKHR surface,
uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes) override;
const util::extension_list &get_required_device_extensions() override;
bool physical_device_supported(VkPhysicalDevice dev);
PFN_vkVoidFunction get_proc_addr(const char *name) override;
};
} // namespace wayland
} // namespace wsi

wsi/wayland/swapchain.cpp (new file, 691 lines)

@ -0,0 +1,691 @@
/*
* Copyright (c) 2017-2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define VK_USE_PLATFORM_WAYLAND_KHR 1
#include "swapchain.hpp"
#include "swapchain_wl_helpers.hpp"
#include <cstring>
#include <cassert>
#include <unistd.h>
#include <cstdlib>
#include <cerrno>
#include <cstdio>
#include <climits>
#include "util/drm/drm_utils.hpp"
#if VULKAN_WSI_DEBUG > 0
#define WSI_PRINT_ERROR(fmt, ...) fprintf(stderr, "%s:%d in %s: " fmt, __FILE__, __LINE__, __func__, ##__VA_ARGS__)
#else
#define WSI_PRINT_ERROR(...) (void)0
#endif
namespace wsi
{
namespace wayland
{
struct swapchain::wayland_image_data
{
int buffer_fd;
int stride;
uint32_t offset;
wl_buffer *buffer;
VkDeviceMemory memory;
};
swapchain::swapchain(layer::device_private_data &dev_data, const VkAllocationCallbacks *pAllocator)
: swapchain_base(dev_data, pAllocator)
, m_display(nullptr)
, m_surface(nullptr)
, m_dmabuf_interface(nullptr)
, m_surface_queue(nullptr)
, m_buffer_queue(nullptr)
, m_present_pending(false)
{
}
swapchain::~swapchain()
{
int res;
teardown();
if (m_dmabuf_interface != nullptr)
{
zwp_linux_dmabuf_v1_destroy(m_dmabuf_interface);
}
res = wsialloc_delete(&m_wsi_allocator);
if (res != 0)
{
WSI_PRINT_ERROR("error deleting the allocator: %d\n", res);
}
if (m_surface_queue != nullptr)
{
wl_event_queue_destroy(m_surface_queue);
}
if (m_buffer_queue != nullptr)
{
wl_event_queue_destroy(m_buffer_queue);
}
}
static void roundtrip_cb_done(void *data, wl_callback *cb, uint32_t cb_data)
{
(void)cb_data;
bool *cb_recvd = reinterpret_cast<bool *>(data);
assert(cb_recvd);
*cb_recvd = true;
}
int swapchain::roundtrip()
{
int res;
const wl_callback_listener listener = { roundtrip_cb_done };
bool cb_recvd = false;
wl_callback *cb = wl_display_sync(m_display);
if (!cb)
{
WSI_PRINT_ERROR("failed to create wl_display::sync callback\n");
res = -1;
goto exit;
}
wl_proxy_set_queue((wl_proxy *)cb, m_surface_queue);
res = wl_callback_add_listener(cb, &listener, &cb_recvd);
if (res == -1)
{
WSI_PRINT_ERROR("error setting wl_display::sync callback listener\n");
goto exit;
}
res = wl_display_flush(m_display);
if (res == -1)
{
WSI_PRINT_ERROR("error performing a flush on the display\n");
goto exit;
}
do
{
res = dispatch_queue(m_display, m_surface_queue, 1000);
} while (res > 0 && !cb_recvd);
if (res < 0)
{
WSI_PRINT_ERROR("error dispatching on the surface queue\n");
goto exit;
}
else if (res == 0)
{
WSI_PRINT_ERROR("timeout waiting for roundtrip callback\n");
goto exit;
}
exit:
if (cb)
{
wl_callback_destroy(cb);
}
return res;
}
struct display_queue
{
wl_display *display;
wl_event_queue *queue;
};
VkResult swapchain::init_platform(VkDevice device, const VkSwapchainCreateInfoKHR *pSwapchainCreateInfo)
{
VkIcdSurfaceWayland *vk_surf = reinterpret_cast<VkIcdSurfaceWayland *>(pSwapchainCreateInfo->surface);
m_display = vk_surf->display;
m_surface = vk_surf->surface;
m_surface_queue = wl_display_create_queue(m_display);
if (m_surface_queue == nullptr)
{
WSI_PRINT_ERROR("Failed to create wl surface display_queue.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
m_buffer_queue = wl_display_create_queue(m_display);
if (m_buffer_queue == nullptr)
{
WSI_PRINT_ERROR("Failed to create wl buffer display_queue.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
wl_registry *registry = wl_display_get_registry(m_display);
if (registry == nullptr)
{
WSI_PRINT_ERROR("Failed to get wl display registry.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
wl_proxy_set_queue((struct wl_proxy *)registry, m_surface_queue);
const wl_registry_listener registry_listener = { registry_handler };
int res = wl_registry_add_listener(registry, &registry_listener, &m_dmabuf_interface);
if (res < 0)
{
WSI_PRINT_ERROR("Failed to add registry listener.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
res = roundtrip();
if (res < 0)
{
WSI_PRINT_ERROR("Roundtrip failed.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
/* we should have the dma_buf interface by now */
assert(m_dmabuf_interface);
wl_registry_destroy(registry);
res = wsialloc_new(-1, &m_wsi_allocator);
if (res != 0)
{
WSI_PRINT_ERROR("Failed to create wsi allocator.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
return VK_SUCCESS;
}
static void create_succeeded(void *data, struct zwp_linux_buffer_params_v1 *params, struct wl_buffer *buffer)
{
struct wl_buffer **wayland_buffer = (struct wl_buffer **)data;
*wayland_buffer = buffer;
}
static const struct zwp_linux_buffer_params_v1_listener params_listener = { create_succeeded, NULL };
static void buffer_release(void *data, struct wl_buffer *wayl_buffer)
{
swapchain *sc = (swapchain *)data;
sc->release_buffer(wayl_buffer);
}
void swapchain::release_buffer(struct wl_buffer *wayl_buffer)
{
uint32_t i;
for (i = 0; i < m_swapchain_images.size(); i++)
{
wayland_image_data *data;
data = (wayland_image_data *)m_swapchain_images[i].data;
if (data->buffer == wayl_buffer)
{
unpresent_image(i);
break;
}
}
/* check we found a buffer to unpresent */
assert(i < m_swapchain_images.size());
}
static const struct wl_buffer_listener buffer_listener = { buffer_release };
VkResult swapchain::allocate_image(const VkImageCreateInfo &image_create_info, wayland_image_data *image_data,
VkImage *image)
{
VkResult result = VK_SUCCESS;
image_data->buffer = nullptr;
image_data->buffer_fd = -1;
image_data->memory = VK_NULL_HANDLE;
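/* Query whether the physical device can create this image with a linear DRM
 * format modifier and import dma_buf memory for it. */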
VkExternalImageFormatPropertiesKHR external_props = {};
external_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR;
VkImageFormatProperties2KHR format_props = {};
format_props.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
format_props.pNext = &external_props;
{
VkPhysicalDeviceExternalImageFormatInfoKHR external_info = {};
external_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR;
external_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
VkPhysicalDeviceImageDrmFormatModifierInfoEXT drm_mod_info = {};
drm_mod_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT;
drm_mod_info.pNext = &external_info;
drm_mod_info.drmFormatModifier = DRM_FORMAT_MOD_LINEAR;
drm_mod_info.sharingMode = image_create_info.sharingMode;
drm_mod_info.queueFamilyIndexCount = image_create_info.queueFamilyIndexCount;
drm_mod_info.pQueueFamilyIndices = image_create_info.pQueueFamilyIndices;
VkPhysicalDeviceImageFormatInfo2KHR info = {};
info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
info.pNext = &drm_mod_info;
info.format = image_create_info.format;
info.type = image_create_info.imageType;
info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
info.usage = image_create_info.usage;
info.flags = image_create_info.flags;
result = m_device_data.instance_data.disp.GetPhysicalDeviceImageFormatProperties2KHR(m_device_data.physical_device,
&info, &format_props);
}
if (result != VK_SUCCESS)
{
WSI_PRINT_ERROR("Failed to get physical device format support.\n");
return result;
}
if (format_props.imageFormatProperties.maxExtent.width < image_create_info.extent.width ||
format_props.imageFormatProperties.maxExtent.height < image_create_info.extent.height ||
format_props.imageFormatProperties.maxExtent.depth < image_create_info.extent.depth)
{
WSI_PRINT_ERROR("Physical device does not support required extent.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
if (format_props.imageFormatProperties.maxMipLevels < image_create_info.mipLevels ||
format_props.imageFormatProperties.maxArrayLayers < image_create_info.arrayLayers)
{
WSI_PRINT_ERROR("Physical device does not support required array layers or mip levels.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
if ((format_props.imageFormatProperties.sampleCounts & image_create_info.samples) != image_create_info.samples)
{
WSI_PRINT_ERROR("Physical device does not support required sample count.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
if (external_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR)
{
/* TODO: Handle exportable images which use ICD allocated memory in preference to an external allocator. */
}
if (!(external_props.externalMemoryProperties.externalMemoryFeatures &
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR))
{
WSI_PRINT_ERROR("Export/Import not supported.\n");
return VK_ERROR_INITIALIZATION_FAILED;
}
else
{
/* TODO: Handle Dedicated allocation bit. */
uint32_t fourcc = util::drm::vk_to_drm_format(image_create_info.format);
int res =
wsialloc_alloc(&m_wsi_allocator, fourcc, image_create_info.extent.width, image_create_info.extent.height,
&image_data->stride, &image_data->buffer_fd, &image_data->offset, nullptr);
if (res != 0)
{
WSI_PRINT_ERROR("Failed allocation of DMA Buffer.\n");
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
{
assert(image_data->stride >= 0);
VkSubresourceLayout image_layout = {};
image_layout.offset = image_data->offset;
image_layout.rowPitch = static_cast<uint32_t>(image_data->stride);
VkImageDrmFormatModifierExplicitCreateInfoEXT drm_mod_info = {};
drm_mod_info.sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT;
drm_mod_info.pNext = image_create_info.pNext;
drm_mod_info.drmFormatModifier = DRM_FORMAT_MOD_LINEAR;
drm_mod_info.drmFormatModifierPlaneCount = 1;
drm_mod_info.pPlaneLayouts = &image_layout;
VkExternalMemoryImageCreateInfoKHR external_info = {};
external_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
external_info.pNext = &drm_mod_info;
external_info.handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
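/* Chain the external memory and explicit DRM modifier info into the application's
 * image create info, and switch to DRM format modifier tiling so the explicit
 * plane layout above is honoured. */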
VkImageCreateInfo image_info = image_create_info;
image_info.pNext = &external_info;
image_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
result = m_device_data.disp.CreateImage(m_device, &image_info, get_allocation_callbacks(), image);
}
if (result != VK_SUCCESS)
{
WSI_PRINT_ERROR("Image creation failed.\n");
return result;
}
{
VkMemoryFdPropertiesKHR mem_props = {};
mem_props.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR;
result = m_device_data.disp.GetMemoryFdPropertiesKHR(m_device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
image_data->buffer_fd, &mem_props);
if (result != VK_SUCCESS)
{
WSI_PRINT_ERROR("Error querying Fd properties.\n");
return result;
}
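/* Pick the first memory type that the imported dma_buf can be bound to. */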
uint32_t mem_idx;
for (mem_idx = 0; mem_idx < VK_MAX_MEMORY_TYPES; mem_idx++)
{
if (mem_props.memoryTypeBits & (1 << mem_idx))
{
break;
}
}
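/* Determine the size of the dma_buf by seeking to its end; this is the size we
 * import below. */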
off_t dma_buf_size = lseek(image_data->buffer_fd, 0, SEEK_END);
if (dma_buf_size < 0)
{
WSI_PRINT_ERROR("Failed to get DMA Buf size.\n");
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
VkImportMemoryFdInfoKHR import_mem_info = {};
import_mem_info.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR;
import_mem_info.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT;
import_mem_info.fd = image_data->buffer_fd;
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = &import_mem_info;
alloc_info.allocationSize = static_cast<uint64_t>(dma_buf_size);
alloc_info.memoryTypeIndex = mem_idx;
result = m_device_data.disp.AllocateMemory(m_device, &alloc_info, get_allocation_callbacks(), &image_data->memory);
}
if (result != VK_SUCCESS)
{
WSI_PRINT_ERROR("Failed to import memory.\n");
return result;
}
result = m_device_data.disp.BindImageMemory(m_device, *image, image_data->memory, 0);
}
return result;
}
VkResult swapchain::create_image(const VkImageCreateInfo &image_create_info, swapchain_image &image)
{
uint32_t fourcc = util::drm::vk_to_drm_format(image_create_info.format);
int res;
VkResult result = VK_SUCCESS;
wayland_image_data *image_data = nullptr;
VkFenceCreateInfo fenceInfo = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0 };
/* Create image_data */
if (get_allocation_callbacks() != nullptr)
{
image_data = static_cast<wayland_image_data *>(
get_allocation_callbacks()->pfnAllocation(get_allocation_callbacks()->pUserData, sizeof(wayland_image_data),
alignof(wayland_image_data), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));
}
else
{
image_data = static_cast<wayland_image_data *>(malloc(sizeof(wayland_image_data)));
}
if (image_data == nullptr)
{
result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
image.data = reinterpret_cast<void *>(image_data);
image.status = swapchain_image::FREE;
result = allocate_image(image_create_info, image_data, &image.image);
if (result != VK_SUCCESS)
{
WSI_PRINT_ERROR("Failed to allocate image.\n");
goto out;
}
/* create a wl_buffer using the dma_buf protocol */
struct zwp_linux_buffer_params_v1 *params;
params = zwp_linux_dmabuf_v1_create_params(m_dmabuf_interface);
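/* Add the single plane: plane index 0, offset and stride from the allocator,
 * and a zero (linear) format modifier. */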
zwp_linux_buffer_params_v1_add(params, image_data->buffer_fd, 0, image_data->offset, image_data->stride, 0, 0);
wl_proxy_set_queue((struct wl_proxy *)params, m_surface_queue);
res = zwp_linux_buffer_params_v1_add_listener(params, &params_listener, &image_data->buffer);
if (res < 0)
{
result = VK_ERROR_INITIALIZATION_FAILED;
goto out;
}
zwp_linux_buffer_params_v1_create(params, image_create_info.extent.width, image_create_info.extent.height, fourcc,
0);
/* TODO: don't roundtrip - we should be able to send the create request now,
* and only wait for it on first present. only do this once, not for all buffers created */
res = roundtrip();
if (res < 0)
{
result = VK_ERROR_INITIALIZATION_FAILED;
goto out;
}
/* should now have a wl_buffer */
assert(image_data->buffer);
zwp_linux_buffer_params_v1_destroy(params);
wl_proxy_set_queue((struct wl_proxy *)image_data->buffer, m_buffer_queue);
res = wl_buffer_add_listener(image_data->buffer, &buffer_listener, this);
if (res < 0)
{
result = VK_ERROR_INITIALIZATION_FAILED;
goto out;
}
/* Initialize presentation fence. */
result = m_device_data.disp.CreateFence(m_device, &fenceInfo, get_allocation_callbacks(), &image.present_fence);
out:
if (result != VK_SUCCESS)
{
destroy_image(image);
return result;
}
return result;
}
static void frame_done(void *data, wl_callback *cb, uint32_t cb_data)
{
(void)cb_data;
bool *present_pending = reinterpret_cast<bool *>(data);
assert(present_pending);
*present_pending = false;
wl_callback_destroy(cb);
}
void swapchain::present_image(uint32_t pendingIndex)
{
int res;
wayland_image_data *image_data = reinterpret_cast<wayland_image_data *>(m_swapchain_images[pendingIndex].data);
/* if a frame is already pending, wait for a hint to present again */
if (m_present_pending)
{
assert(m_present_mode == VK_PRESENT_MODE_FIFO_KHR);
do
{
/* block waiting for the compositor to return the wl_surface::frame
* callback. We may want to change this to timeout after a period of
* time if the compositor isn't responding (perhaps because the
* window is hidden).
*/
res = dispatch_queue(m_display, m_surface_queue, -1);
} while (res > 0 && m_present_pending);
if (res <= 0)
{
WSI_PRINT_ERROR("error waiting for Wayland compositor frame hint\n");
m_is_valid = false;
/* try to present anyway */
}
}
wl_surface_attach(m_surface, image_data->buffer, 0, 0);
/* TODO: work out damage */
wl_surface_damage(m_surface, 0, 0, INT32_MAX, INT32_MAX);
if (m_present_mode == VK_PRESENT_MODE_FIFO_KHR)
{
/* request a hint when we can present the _next_ frame */
wl_callback *cb = wl_surface_frame(m_surface);
if (cb)
{
wl_proxy_set_queue((wl_proxy *)cb, m_surface_queue);
static const wl_callback_listener frame_listener = { frame_done };
m_present_pending = true;
wl_callback_add_listener(cb, &frame_listener, &m_present_pending);
}
}
else
{
assert(m_present_mode == VK_PRESENT_MODE_MAILBOX_KHR);
/* weston only _queues_ wl_buffer::release events. This means when the
* compositor flushes the client it only sends the events if some other events
* have been posted.
*
* As such we have to request a sync callback - we discard it straight away
* as we don't actually need the callback, but it means the
* wl_buffer::release event is actually sent.
*/
wl_callback *cb = wl_display_sync(m_display);
assert(cb);
if (cb)
{
wl_callback_destroy(cb);
}
}
wl_surface_commit(m_surface);
res = wl_display_flush(m_display);
if (res < 0)
{
WSI_PRINT_ERROR("error flushing the display\n");
/* Setting the swapchain as invalid */
m_is_valid = false;
}
}
void swapchain::destroy_image(swapchain_image &image)
{
if (image.status != swapchain_image::INVALID)
{
if (image.present_fence != VK_NULL_HANDLE)
{
m_device_data.disp.DestroyFence(m_device, image.present_fence, get_allocation_callbacks());
image.present_fence = VK_NULL_HANDLE;
}
if (image.image != VK_NULL_HANDLE)
{
m_device_data.disp.DestroyImage(m_device, image.image, get_allocation_callbacks());
image.image = VK_NULL_HANDLE;
}
}
if (image.data != nullptr)
{
auto image_data = reinterpret_cast<wayland_image_data *>(image.data);
if (image_data->buffer != nullptr)
{
wl_buffer_destroy(image_data->buffer);
}
if (image_data->memory != VK_NULL_HANDLE)
{
m_device_data.disp.FreeMemory(m_device, image_data->memory, get_allocation_callbacks());
}
else if (image_data->buffer_fd >= 0)
{
close(image_data->buffer_fd);
}
if (get_allocation_callbacks() != nullptr)
{
get_allocation_callbacks()->pfnFree(get_allocation_callbacks()->pUserData, image_data);
}
else
{
free(image_data);
}
image.data = nullptr;
}
image.status = swapchain_image::INVALID;
}
bool swapchain::free_image_found()
{
for (auto &img : m_swapchain_images)
{
if (img.status == swapchain_image::FREE)
{
return true;
}
}
return false;
}
VkResult swapchain::get_free_buffer(uint64_t *timeout)
{
int ms_timeout, res;
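/* Convert the timeout from nanoseconds to milliseconds for dispatch_queue(),
 * clamping to INT_MAX. */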
if (*timeout >= INT_MAX * 1000llu * 1000llu)
{
ms_timeout = INT_MAX;
}
else
{
ms_timeout = *timeout / 1000llu / 1000llu;
}
/* The current dispatch_queue implementation will return if any
* events are returned, even if no events are dispatched to the buffer
* queue. Therefore dispatch repeatedly until a buffer has been freed.
*/
do
{
res = dispatch_queue(m_display, m_buffer_queue, ms_timeout);
} while (!free_image_found() && res > 0);
if (res > 0)
{
*timeout = 0;
return VK_SUCCESS;
}
else if (res == 0)
{
if (*timeout == 0)
{
return VK_NOT_READY;
}
else
{
return VK_TIMEOUT;
}
}
else
{
return VK_ERROR_DEVICE_LOST;
}
}
} // namespace wayland
} // namespace wsi

wsi/wayland/swapchain.hpp Normal file
View file

@ -0,0 +1,136 @@
/*
* Copyright (c) 2017-2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "wsi/swapchain_base.hpp"
extern "C" {
#include <vulkan/vk_icd.h>
#include <util/wsialloc/wsialloc.h>
#include <wayland-client.h>
#include <linux-dmabuf-unstable-v1-client-protocol.h>
}
namespace wsi
{
namespace wayland
{
class swapchain : public wsi::swapchain_base
{
public:
explicit swapchain(layer::device_private_data &dev_data, const VkAllocationCallbacks *allocator);
~swapchain();
/* TODO: make the buffer destructor a friend? so this can be protected */
void release_buffer(struct wl_buffer *wl_buffer);
protected:
/**
* @brief Initialize platform specifics.
*/
VkResult init_platform(VkDevice device, const VkSwapchainCreateInfoKHR *pSwapchainCreateInfo) override;
/**
* @brief Creates a new swapchain image.
*
* @param image_create_info Data to be used to create the image.
*
* @param image Handle to the image.
*
* @return If image creation is successful returns VK_SUCCESS, otherwise
* will return an error such as VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_INITIALIZATION_FAILED
* depending on the error that occurred.
*/
VkResult create_image(const VkImageCreateInfo &image_create_info, swapchain_image &image) override;
/**
* @brief Method to present an image
*
* @param pendingIndex Index of the pending image to be presented.
*/
void present_image(uint32_t pendingIndex) override;
/**
* @brief Method to release a swapchain image
*
* @param image Handle to the image about to be released.
*/
void destroy_image(swapchain_image &image) override;
/**
* @brief Method to perform a roundtrip to the Wayland compositor
*
* @return -1 on error. Otherwise non-negative.
*/
int roundtrip();
/**
* @brief Method to check if there are any free images
*
* @return true if any images are free, otherwise false.
*/
bool free_image_found();
/**
* @brief Hook for any actions to free up a buffer for acquire
*
* @param[in,out] timeout Time to wait, in nanoseconds. 0 doesn't block,
*                        UINT64_MAX waits indefinitely. The timeout should
*                        be updated if a sleep is required - it can be set
*                        to 0 if the wait is no longer expected to block.
*/
VkResult get_free_buffer(uint64_t *timeout) override;
private:
struct wayland_image_data;
VkResult allocate_image(const VkImageCreateInfo &image_create_info, wayland_image_data *image_data, VkImage *image);
struct wl_display *m_display;
struct wl_surface *m_surface;
struct zwp_linux_dmabuf_v1 *m_dmabuf_interface;
/* The queue on which we dispatch the swapchain related events, mostly frame completion */
struct wl_event_queue *m_surface_queue;
/* The queue on which we dispatch buffer related events, mostly buffer_release */
struct wl_event_queue *m_buffer_queue;
/**
* @brief Handle to the WSI allocator.
*/
wsialloc_allocator m_wsi_allocator;
/**
* @brief true when waiting for the server hint to present a buffer
*
* true if a buffer has been presented and we've not had a wl_surface::frame
* callback to indicate the server is ready for the next buffer.
*/
bool m_present_pending;
};
} // namespace wayland
} // namespace wsi

View file

@ -0,0 +1,130 @@
/*
* Copyright (c) 2017-2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "swapchain_wl_helpers.hpp"
#include <wayland-client.h>
#include <linux-dmabuf-unstable-v1-client-protocol.h>
#include <string.h>
#include <assert.h>
#include <poll.h>
#include <errno.h>
extern "C" {
void registry_handler(void *data, struct wl_registry *wl_registry, uint32_t name, const char *interface,
uint32_t version)
{
zwp_linux_dmabuf_v1 **dmabuf_interface = (zwp_linux_dmabuf_v1 **)data;
if (!strcmp(interface, "zwp_linux_dmabuf_v1"))
{
*dmabuf_interface =
(zwp_linux_dmabuf_v1 *)wl_registry_bind(wl_registry, name, &zwp_linux_dmabuf_v1_interface, version);
assert(*dmabuf_interface);
}
}
int dispatch_queue(struct wl_display *display, struct wl_event_queue *queue, int timeout)
{
int err;
struct pollfd pfd = {};
int retval;
/* Before we sleep, dispatch any pending events. prepare_read_queue will return 0 whilst there are pending
* events to dispatch on the queue. */
while (0 != wl_display_prepare_read_queue(display, queue))
{
/* dispatch_queue_pending returns -1 on error, or the number of events dispatched otherwise. If we
* already dispatched some events, then we might not need to sleep, as we might have just dispatched
* the event we want, so return immediately. */
err = wl_display_dispatch_queue_pending(display, queue);
if (err)
{
return (0 > err) ? -1 : 1;
}
}
/* wl_display_read_events performs a non-blocking read. */
pfd.fd = wl_display_get_fd(display);
pfd.events = POLLIN;
while (true)
{
/* Timeout is given in milliseconds. A return value of 0, or -1 with errno set to EINTR means that we
* should retry as the timeout was exceeded or we were interrupted by a signal, respectively. A
* return value of 1 means that something happened, and we should inspect the pollfd structure to see
* just what that was.
*/
err = poll(&pfd, 1, timeout);
if (0 == err)
{
/* Timeout. */
wl_display_cancel_read(display);
return 0;
}
else if (-1 == err)
{
if (EINTR == errno)
{
/* Interrupted by a signal; restart. This resets the timeout. */
continue;
}
else
{
/* Something else bad happened; abort. */
wl_display_cancel_read(display);
return -1;
}
}
else
{
if (POLLIN == pfd.revents)
{
/* We have data to read, and no errors; proceed to read_events. */
break;
}
else
{
/* An error occurred, e.g. file descriptor was closed from underneath us. */
wl_display_cancel_read(display);
return -1;
}
}
}
/* Actually read the events from the display. A failure in read_events calls cancel_read internally for us,
* so we don't need to do that here. */
err = wl_display_read_events(display);
if (0 != err)
{
return -1;
}
/* Finally, if we read any events relevant to our queue, we can dispatch them. */
err = wl_display_dispatch_queue_pending(display, queue);
retval = err < 0 ? -1 : 1;
return retval;
}
}

View file

@ -0,0 +1,49 @@
/*
* Copyright (c) 2017-2019, 2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include <wayland-client.h>
extern "C" {
void registry_handler(void *data, struct wl_registry *wl_registry, uint32_t name, const char *interface,
uint32_t version);
/**
* @brief Dispatch events from a Wayland event queue
*
* Dispatch events from a given Wayland display event queue, including calling event handlers, and flush out any
* requests the event handlers may have written. Specification of a timeout allows the wait to be bounded. If any
* events are already pending dispatch (have been read from the display by another thread or event queue), they
* will be dispatched and the function will return immediately, without waiting for new events to arrive.
*
* @param display Wayland display to dispatch events from
* @param queue Event queue to dispatch events from; other event queues will not have their handlers called from
* within this function
* @param timeout Maximum time to wait for events to arrive, in milliseconds
* @return 1 if one or more events were dispatched on this queue, 0 if the timeout was reached without any
* events being dispatched, or -1 on error.
*/
int dispatch_queue(struct wl_display *display, struct wl_event_queue *queue, int timeout);
}

View file

@ -35,8 +35,15 @@
#include <cstdlib>
#include <cstring>
#include <new>
#include <vulkan/vk_icd.h>
#if BUILD_WSI_WAYLAND
#include <vulkan/vulkan_wayland.h>
#include "wayland/surface_properties.hpp"
#include "wayland/swapchain.hpp"
#endif
namespace wsi
{
@ -45,7 +52,10 @@ static struct wsi_extension
VkExtensionProperties extension;
VkIcdWsiPlatform platform;
} const supported_wsi_extensions[] = {
{ { VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME, VK_EXT_HEADLESS_SURFACE_SPEC_VERSION }, VK_ICD_WSI_PLATFORM_HEADLESS },
#if BUILD_WSI_WAYLAND
{ { VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME, VK_KHR_WAYLAND_SURFACE_SPEC_VERSION }, VK_ICD_WSI_PLATFORM_WAYLAND },
#endif
};
static surface_properties *get_surface_properties(VkIcdWsiPlatform platform)
@ -54,6 +64,10 @@ static surface_properties *get_surface_properties(VkIcdWsiPlatform platform)
{
case VK_ICD_WSI_PLATFORM_HEADLESS:
return &headless::surface_properties::get_instance();
#if BUILD_WSI_WAYLAND
case VK_ICD_WSI_PLATFORM_WAYLAND:
return &wayland::surface_properties::get_instance();
#endif
default:
return nullptr;
}
@ -87,6 +101,10 @@ swapchain_base *allocate_surface_swapchain(VkSurfaceKHR surface, layer::device_p
{
case VK_ICD_WSI_PLATFORM_HEADLESS:
return allocate_swapchain<wsi::headless::swapchain>(dev_data, pAllocator);
#if BUILD_WSI_WAYLAND
case VK_ICD_WSI_PLATFORM_WAYLAND:
return allocate_swapchain<wsi::wayland::swapchain>(dev_data, pAllocator);
#endif
default:
return nullptr;
}
@ -165,4 +183,21 @@ void destroy_surface_swapchain(swapchain_base *swapchain, const VkAllocationCall
}
}
PFN_vkVoidFunction get_proc_addr(const char *name)
{
/*
* Note that this assumes no two platforms' get_proc_addr implementations
* handle the same function name.
*/
for (const auto &wsi_ext : supported_wsi_extensions)
{
PFN_vkVoidFunction func = get_surface_properties(wsi_ext.platform)->get_proc_addr(name);
if (func)
{
return func;
}
}
return nullptr;
}
} // namespace wsi

View file

@ -95,4 +95,17 @@ util::wsi_platform_set find_enabled_layer_platforms(const VkInstanceCreateInfo *
VkResult add_extensions_required_by_layer(VkPhysicalDevice phys_dev, const util::wsi_platform_set enabled_platforms,
util::extension_list &extensions_to_enable);
/**
* @brief Return a function pointer for surface specific functions.
*
* @details This function iterates through the supported platforms and queries them for the
* implementation of the @p name function.
*
* @param name The name of the target function
*
* @return A pointer to the implementation of the @p name function, or a null
* pointer if no platform implements it.
*/
PFN_vkVoidFunction get_proc_addr(const char *name);
} // namespace wsi