vulkan: Add a vk_sync base class

This doesn't map directly to any particular Vulkan object but is,
instead, a base class for the internal implementations of both VkFence
and VkSemaphore.  Its utility will become evident in later patches.

The design of vk_sync will look familiar to anyone with significant
experience in DRM.  The base object itself is just a pointer to a vfunc
table with function pointers providing the implementation of the various
operations.  Depending on how the vk_sync will be used, some of those
vfuncs are optional.  If it's only going to be used for VkSemaphore, for
instance, there's no need for reset().
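
As a rough sketch, a driver providing a timeline-capable primitive fills
out the table along these lines (the fake_* names are hypothetical, not
part of this patch):

   static const struct vk_sync_type fake_timeline_sync_type = {
      .size = sizeof(struct fake_timeline_sync),
      .features = VK_SYNC_FEATURE_TIMELINE |
                  VK_SYNC_FEATURE_GPU_WAIT |
                  VK_SYNC_FEATURE_CPU_WAIT |
                  VK_SYNC_FEATURE_CPU_SIGNAL |
                  VK_SYNC_FEATURE_WAIT_PENDING,
      .init = fake_timeline_sync_init,
      .finish = fake_timeline_sync_finish,
      .signal = fake_timeline_sync_signal,
      .get_value = fake_timeline_sync_get_value,
      .wait = fake_timeline_sync_wait,
   };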

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Acked-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13427>
Author: Jason Ekstrand
Date:   2021-10-18 16:24:49 -05:00
Parent: 236ca76376
Commit: 3cf5fced4c

3 changed files with 772 additions and 0 deletions

src/vulkan/runtime/meson.build

@@ -52,6 +52,8 @@ files_vulkan_runtime = files(
   'vk_render_pass.c',
   'vk_shader_module.c',
   'vk_shader_module.h',
+  'vk_sync.c',
+  'vk_sync.h',
   'vk_synchronization2.c',
 )

src/vulkan/runtime/vk_sync.c

@@ -0,0 +1,405 @@
/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "vk_sync.h"
#include <assert.h>
#include <string.h>
#include "util/debug.h"
#include "util/macros.h"
#include "util/os_time.h"
#include "vk_alloc.h"
#include "vk_device.h"
#include "vk_log.h"
static void
vk_sync_type_validate(const struct vk_sync_type *type)
{
   assert(type->init);
   assert(type->finish);

   assert(type->features & (VK_SYNC_FEATURE_BINARY |
                            VK_SYNC_FEATURE_TIMELINE));

   if (type->features & VK_SYNC_FEATURE_TIMELINE) {
      assert(type->features & VK_SYNC_FEATURE_GPU_WAIT);
      assert(type->features & VK_SYNC_FEATURE_CPU_WAIT);
      assert(type->features & VK_SYNC_FEATURE_CPU_SIGNAL);
      assert(type->features & (VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL |
                               VK_SYNC_FEATURE_WAIT_PENDING));
      assert(type->signal);
      assert(type->get_value);
   } else {
      assert(!type->get_value);
   }

   if (!(type->features & VK_SYNC_FEATURE_BINARY)) {
      assert(!(type->features & (VK_SYNC_FEATURE_GPU_MULTI_WAIT |
                                 VK_SYNC_FEATURE_CPU_RESET)));
      assert(!type->import_sync_file);
      assert(!type->export_sync_file);
   }

   if (type->features & VK_SYNC_FEATURE_CPU_WAIT) {
      assert(type->wait || type->wait_many);
   } else {
      assert(!(type->features & (VK_SYNC_FEATURE_WAIT_ANY |
                                 VK_SYNC_FEATURE_WAIT_PENDING)));
   }

   if (type->features & VK_SYNC_FEATURE_GPU_MULTI_WAIT)
      assert(type->features & VK_SYNC_FEATURE_GPU_WAIT);

   if (type->features & VK_SYNC_FEATURE_CPU_RESET)
      assert(type->reset);
   else
      assert(!type->reset);

   if (type->features & VK_SYNC_FEATURE_CPU_SIGNAL)
      assert(type->signal);
   else
      assert(!type->signal);
}

VkResult
vk_sync_init(struct vk_device *device,
             struct vk_sync *sync,
             const struct vk_sync_type *type,
             enum vk_sync_flags flags,
             uint64_t initial_value)
{
   vk_sync_type_validate(type);

   if (flags & VK_SYNC_IS_TIMELINE)
      assert(type->features & VK_SYNC_FEATURE_TIMELINE);
   else
      assert(type->features & VK_SYNC_FEATURE_BINARY);

   assert(type->size >= sizeof(*sync));
   memset(sync, 0, type->size);
   sync->type = type;
   sync->flags = flags;

   return type->init(device, sync, initial_value);
}

void
vk_sync_finish(struct vk_device *device,
               struct vk_sync *sync)
{
   sync->type->finish(device, sync);
}

VkResult
vk_sync_create(struct vk_device *device,
               const struct vk_sync_type *type,
               enum vk_sync_flags flags,
               uint64_t initial_value,
               struct vk_sync **sync_out)
{
   struct vk_sync *sync;

   sync = vk_alloc(&device->alloc, type->size, 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (sync == NULL)
      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

   VkResult result = vk_sync_init(device, sync, type, flags, initial_value);
   if (result != VK_SUCCESS) {
      vk_free(&device->alloc, sync);
      return result;
   }

   *sync_out = sync;

   return VK_SUCCESS;
}

void
vk_sync_destroy(struct vk_device *device,
                struct vk_sync *sync)
{
   vk_sync_finish(device, sync);
   vk_free(&device->alloc, sync);
}

VkResult
vk_sync_signal(struct vk_device *device,
               struct vk_sync *sync,
               uint64_t value)
{
   if (sync->flags & VK_SYNC_IS_TIMELINE)
      assert(value > 0);
   else
      assert(value == 0);

   return sync->type->signal(device, sync, value);
}
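
/* Illustrative only: per the asserts above, timeline payloads are signaled
 * with a nonzero time point while binary payloads always pass value == 0:
 *
 *    vk_sync_signal(device, timeline_sync, 42);
 *    vk_sync_signal(device, binary_sync, 0);
 */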

VkResult
vk_sync_get_value(struct vk_device *device,
                  struct vk_sync *sync,
                  uint64_t *value)
{
   assert(sync->flags & VK_SYNC_IS_TIMELINE);
   return sync->type->get_value(device, sync, value);
}

VkResult
vk_sync_reset(struct vk_device *device,
              struct vk_sync *sync)
{
   assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
   return sync->type->reset(device, sync);
}

VkResult
vk_sync_move(struct vk_device *device,
             struct vk_sync *dst,
             struct vk_sync *src)
{
   assert(!(dst->flags & VK_SYNC_IS_TIMELINE));
   assert(!(src->flags & VK_SYNC_IS_TIMELINE));
   assert(dst->type == src->type);

   return src->type->move(device, dst, src);
}

static void
assert_valid_wait(struct vk_sync *sync,
                  uint64_t wait_value,
                  enum vk_sync_wait_flags wait_flags)
{
   assert(sync->type->features & VK_SYNC_FEATURE_CPU_WAIT);

   if (!(sync->flags & VK_SYNC_IS_TIMELINE))
      assert(wait_value == 0);

   if (wait_flags & VK_SYNC_WAIT_PENDING)
      assert(sync->type->features & VK_SYNC_FEATURE_WAIT_PENDING);
}
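
/* Returns the largest absolute timeout (in nanoseconds) any CPU wait may
 * use.  If the MESA_VK_MAX_TIMEOUT environment variable is set to a
 * nonzero number of milliseconds, every wait is clamped to that long from
 * now; a clamped wait that still times out marks the device lost (see
 * vk_sync_wait() below).
 */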
static uint64_t
get_max_abs_timeout_ns(void)
{
   static int max_timeout_ms = -1;
   if (max_timeout_ms < 0)
      max_timeout_ms = env_var_as_unsigned("MESA_VK_MAX_TIMEOUT", 0);

   if (max_timeout_ms == 0)
      return UINT64_MAX;
   else
      return os_time_get_absolute_timeout(max_timeout_ms * 1000000ull);
}

static VkResult
__vk_sync_wait(struct vk_device *device,
               struct vk_sync *sync,
               uint64_t wait_value,
               enum vk_sync_wait_flags wait_flags,
               uint64_t abs_timeout_ns)
{
   assert_valid_wait(sync, wait_value, wait_flags);

   /* This doesn't make sense for a single wait */
   assert(!(wait_flags & VK_SYNC_WAIT_ANY));

   if (sync->type->wait) {
      return sync->type->wait(device, sync, wait_value,
                              wait_flags, abs_timeout_ns);
   } else {
      struct vk_sync_wait wait = {
         .sync = sync,
         .stage_mask = ~(VkPipelineStageFlags2KHR)0,
         .wait_value = wait_value,
      };
      return sync->type->wait_many(device, 1, &wait, wait_flags,
                                   abs_timeout_ns);
   }
}

VkResult
vk_sync_wait(struct vk_device *device,
             struct vk_sync *sync,
             uint64_t wait_value,
             enum vk_sync_wait_flags wait_flags,
             uint64_t abs_timeout_ns)
{
   uint64_t max_abs_timeout_ns = get_max_abs_timeout_ns();
   if (abs_timeout_ns > max_abs_timeout_ns) {
      VkResult result =
         __vk_sync_wait(device, sync, wait_value, wait_flags,
                        max_abs_timeout_ns);
      if (unlikely(result == VK_TIMEOUT))
         return vk_device_set_lost(device, "Maximum timeout exceeded!");
      return result;
   } else {
      return __vk_sync_wait(device, sync, wait_value, wait_flags,
                            abs_timeout_ns);
   }
}
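
/* Example (illustrative): a CPU wait for timeline point 42 with a one
 * second relative timeout, using the same os_time helper as above:
 *
 *    uint64_t abs_ns = os_time_get_absolute_timeout(1000000000ull);
 *    VkResult result = vk_sync_wait(device, sync, 42,
 *                                   VK_SYNC_WAIT_COMPLETE, abs_ns);
 */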

static bool
can_wait_many(uint32_t wait_count,
              const struct vk_sync_wait *waits,
              enum vk_sync_wait_flags wait_flags)
{
   if (waits[0].sync->type->wait_many == NULL)
      return false;

   if ((wait_flags & VK_SYNC_WAIT_ANY) &&
       !(waits[0].sync->type->features & VK_SYNC_FEATURE_WAIT_ANY))
      return false;

   for (uint32_t i = 0; i < wait_count; i++) {
      assert_valid_wait(waits[i].sync, waits[i].wait_value, wait_flags);
      if (waits[i].sync->type != waits[0].sync->type)
         return false;
   }

   return true;
}

static VkResult
__vk_sync_wait_many(struct vk_device *device,
                    uint32_t wait_count,
                    const struct vk_sync_wait *waits,
                    enum vk_sync_wait_flags wait_flags,
                    uint64_t abs_timeout_ns)
{
   if (wait_count == 0)
      return VK_SUCCESS;

   if (wait_count == 1) {
      return __vk_sync_wait(device, waits[0].sync, waits[0].wait_value,
                            wait_flags & ~VK_SYNC_WAIT_ANY, abs_timeout_ns);
   }

   if (can_wait_many(wait_count, waits, wait_flags)) {
      return waits[0].sync->type->wait_many(device, wait_count, waits,
                                            wait_flags, abs_timeout_ns);
   } else if (wait_flags & VK_SYNC_WAIT_ANY) {
      /* If we have multiple syncs and they don't support wait_any or
       * they're not all the same type, there's nothing better we can do
       * than spin.
       */
      do {
         for (uint32_t i = 0; i < wait_count; i++) {
            VkResult result = __vk_sync_wait(device, waits[i].sync,
                                             waits[i].wait_value,
                                             wait_flags & ~VK_SYNC_WAIT_ANY,
                                             0 /* abs_timeout_ns */);
            if (result != VK_TIMEOUT)
               return result;
         }
      } while (os_time_get_nano() < abs_timeout_ns);

      return VK_TIMEOUT;
   } else {
      for (uint32_t i = 0; i < wait_count; i++) {
         VkResult result = __vk_sync_wait(device, waits[i].sync,
                                          waits[i].wait_value,
                                          wait_flags, abs_timeout_ns);
         if (result != VK_SUCCESS)
            return result;
      }
      return VK_SUCCESS;
   }
}

VkResult
vk_sync_wait_many(struct vk_device *device,
                  uint32_t wait_count,
                  const struct vk_sync_wait *waits,
                  enum vk_sync_wait_flags wait_flags,
                  uint64_t abs_timeout_ns)
{
   uint64_t max_abs_timeout_ns = get_max_abs_timeout_ns();
   if (abs_timeout_ns > max_abs_timeout_ns) {
      VkResult result =
         __vk_sync_wait_many(device, wait_count, waits, wait_flags,
                             max_abs_timeout_ns);
      if (unlikely(result == VK_TIMEOUT))
         return vk_device_set_lost(device, "Maximum timeout exceeded!");
      return result;
   } else {
      return __vk_sync_wait_many(device, wait_count, waits, wait_flags,
                                 abs_timeout_ns);
   }
}

VkResult
vk_sync_import_opaque_fd(struct vk_device *device,
                         struct vk_sync *sync,
                         int fd)
{
   VkResult result = sync->type->import_opaque_fd(device, sync, fd);
   if (unlikely(result != VK_SUCCESS))
      return result;

   sync->flags |= VK_SYNC_IS_SHAREABLE |
                  VK_SYNC_IS_SHARED;

   return VK_SUCCESS;
}

VkResult
vk_sync_export_opaque_fd(struct vk_device *device,
                         struct vk_sync *sync,
                         int *fd)
{
   assert(sync->flags & VK_SYNC_IS_SHAREABLE);

   VkResult result = sync->type->export_opaque_fd(device, sync, fd);
   if (unlikely(result != VK_SUCCESS))
      return result;

   sync->flags |= VK_SYNC_IS_SHARED;

   return VK_SUCCESS;
}

VkResult
vk_sync_import_sync_file(struct vk_device *device,
                         struct vk_sync *sync,
                         int sync_file)
{
   assert(!(sync->flags & VK_SYNC_IS_TIMELINE));

   /* Handle negative file descriptors here by signaling immediately so
    * that drivers which implement signal() never see sync_file == -1 in
    * their import_sync_file() hook.
    */
   if (sync_file < 0 && sync->type->signal)
      return sync->type->signal(device, sync, 0);

   return sync->type->import_sync_file(device, sync, sync_file);
}

VkResult
vk_sync_export_sync_file(struct vk_device *device,
                         struct vk_sync *sync,
                         int *sync_file)
{
   assert(!(sync->flags & VK_SYNC_IS_TIMELINE));
   return sync->type->export_sync_file(device, sync, sync_file);
}

src/vulkan/runtime/vk_sync.h

@@ -0,0 +1,365 @@
/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef VK_SYNC_H
#define VK_SYNC_H

#include <stdbool.h>
#include <vulkan/vulkan_core.h>

#include "util/macros.h"

#ifdef __cplusplus
extern "C" {
#endif

struct vk_device;
struct vk_sync;

enum vk_sync_features {
   /** Set if a sync type supports the binary mode of operation
    *
    * In binary mode, a vk_sync has two modes: signaled and unsignaled.  If
    * it supports CPU_RESET, it can be changed from signaled to unsignaled
    * on the CPU via vk_sync_reset().  If it supports CPU_SIGNAL, it can be
    * changed from unsignaled to signaled on the CPU via vk_sync_signal().
    *
    * Binary vk_sync types may also support WAIT_PENDING, in which case they
    * have a third hidden pending state.  Once such a vk_sync has been
    * submitted to the kernel driver for signaling, it is in the pending
    * state and remains there until the work is complete, at which point it
    * enters the signaled state.  This pending state is visible across
    * processes for shared vk_sync types.  This is used by the threaded
    * submit mode to ensure that everything gets submitted to the kernel
    * driver in-order.
    *
    * A vk_sync operates in binary mode if VK_SYNC_IS_TIMELINE is not set
    * in vk_sync::flags.
    */
   VK_SYNC_FEATURE_BINARY = (1 << 0),

   /** Set if a sync type supports the timeline mode of operation
    *
    * In timeline mode, a vk_sync has a monotonically increasing 64-bit
    * value which represents the most recently signaled time point.  Waits
    * are relative to time points.  Instead of waiting for the vk_sync to
    * enter a signaled state, you wait for its 64-bit value to be at least
    * some wait value.
    *
    * Timeline vk_sync types can also support WAIT_PENDING.  In this case,
    * the wait is not for a pending state, as such, but rather for someone
    * to have submitted a kernel request which will signal a time point with
    * at least that value.  Logically, you can think of this as having two
    * timelines, the real timeline and a pending timeline which runs
    * slightly ahead of the real one.  As with binary vk_sync types, this is
    * used by threaded submit to re-order things so that the kernel requests
    * happen in a valid linear order.
    *
    * A vk_sync operates in timeline mode if VK_SYNC_IS_TIMELINE is set in
    * vk_sync::flags.
    */
   VK_SYNC_FEATURE_TIMELINE = (1 << 1),

   /** Set if this sync supports GPU waits */
   VK_SYNC_FEATURE_GPU_WAIT = (1 << 2),

   /** Set if a sync type supports multiple GPU waits on one signal state
    *
    * The Vulkan spec for VkSemaphore requires GPU wait and signal
    * operations to have a one-to-one relationship.  This is formally
    * described by saying that the VkSemaphore gets implicitly reset on
    * wait.  However, it is often useful to have well-defined multi-wait.
    * If a binary vk_sync supports multi-wait, then any number of kernel
    * requests can be submitted which wait on one signal operation.  This
    * also implies that you can signal twice back-to-back (there are 0 waits
    * on the first signal).
    *
    * This feature only applies to binary vk_sync objects.
    */
   VK_SYNC_FEATURE_GPU_MULTI_WAIT = (1 << 3),

   /** Set if a sync type supports vk_sync_wait() and vk_sync_wait_many() */
   VK_SYNC_FEATURE_CPU_WAIT = (1 << 4),

   /** Set if a sync type supports vk_sync_reset()
    *
    * This feature only applies to binary vk_sync objects.
    */
   VK_SYNC_FEATURE_CPU_RESET = (1 << 5),

   /** Set if a sync type supports vk_sync_signal() */
   VK_SYNC_FEATURE_CPU_SIGNAL = (1 << 6),

   /** Set if sync_type::wait_many supports the VK_SYNC_WAIT_ANY bit
    *
    * vk_sync_wait_many() will support the bit regardless.  If the sync type
    * doesn't support it natively, it will be emulated.
    */
   VK_SYNC_FEATURE_WAIT_ANY = (1 << 7),

   /** Set if a sync type supports the VK_SYNC_WAIT_PENDING bit
    *
    * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for
    * descriptions of what this does in each case.
    */
   VK_SYNC_FEATURE_WAIT_PENDING = (1 << 8),

   /** Set if a sync type natively supports wait-before-signal
    *
    * If this is set, then the underlying OS primitive supports submitting
    * kernel requests which wait on the vk_sync before submitting a kernel
    * request which would cause that wait to unblock.
    */
   VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL = (1 << 9),
};
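
/* Illustrative only: a simple binary-only primitive might advertise
 *
 *    VK_SYNC_FEATURE_BINARY |
 *    VK_SYNC_FEATURE_GPU_WAIT |
 *    VK_SYNC_FEATURE_CPU_WAIT |
 *    VK_SYNC_FEATURE_CPU_RESET,
 *
 * while a DRM syncobj-like primitive could additionally set
 * VK_SYNC_FEATURE_TIMELINE and VK_SYNC_FEATURE_WAIT_BEFORE_SIGNAL.
 */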

struct vk_sync_wait;

enum vk_sync_wait_flags {
   /** Placeholder for 0 to make vk_sync_wait() calls more clear */
   VK_SYNC_WAIT_COMPLETE = 0,

   /** If set, only wait for the vk_sync operation to be pending
    *
    * See VK_SYNC_FEATURE_BINARY and VK_SYNC_FEATURE_TIMELINE for
    * descriptions of what this does in each case.
    */
   VK_SYNC_WAIT_PENDING = (1 << 0),

   /** If set, wait for any of the vk_sync operations to complete
    *
    * This is as opposed to waiting for all of them.  There is no guarantee
    * that vk_sync_wait_many() will return immediately after the first
    * operation completes, but it will make a best effort to return as soon
    * as possible.
    */
   VK_SYNC_WAIT_ANY = (1 << 1),
};
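
/* Illustrative only: wake up when either of two binary payloads signals
 * (wait_value == 0 for binary vk_syncs, per above):
 *
 *    struct vk_sync_wait waits[2] = {
 *       { .sync = sync_a, .wait_value = 0 },
 *       { .sync = sync_b, .wait_value = 0 },
 *    };
 *    vk_sync_wait_many(device, 2, waits, VK_SYNC_WAIT_ANY, abs_timeout_ns);
 */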

struct vk_sync_type {
   /** Size of this sync type */
   size_t size;

   /** Features supported by this sync type */
   enum vk_sync_features features;

   /** Initialize a vk_sync
    *
    * The base vk_sync will already be initialized and the sync type set
    * before this function is called.  If any OS primitives need to be
    * allocated, that should be done here.
    */
   VkResult (*init)(struct vk_device *device,
                    struct vk_sync *sync,
                    uint64_t initial_value);

   /** Finish a vk_sync
    *
    * This should free any internal data stored in this vk_sync.
    */
   void (*finish)(struct vk_device *device,
                  struct vk_sync *sync);

   /** Signal a vk_sync
    *
    * For non-timeline sync types, value == 0.
    */
   VkResult (*signal)(struct vk_device *device,
                      struct vk_sync *sync,
                      uint64_t value);

   /** Get the timeline value for a vk_sync */
   VkResult (*get_value)(struct vk_device *device,
                         struct vk_sync *sync,
                         uint64_t *value);

   /** Reset a non-timeline vk_sync */
   VkResult (*reset)(struct vk_device *device,
                     struct vk_sync *sync);

   /** Moves the guts of one binary vk_sync to another
    *
    * This moves the current binary vk_sync event from src to dst and resets
    * src.  If dst contained an event, it is discarded.
    *
    * This is required for all binary vk_sync types that can be used for a
    * semaphore wait in conjunction with real timeline semaphores.
    */
   VkResult (*move)(struct vk_device *device,
                    struct vk_sync *dst,
                    struct vk_sync *src);

   /** Wait on a vk_sync
    *
    * For a timeline vk_sync, wait_value is the timeline value to wait for.
    * This function should not return VK_SUCCESS until get_value on that
    * vk_sync would return a value >= wait_value.  A wait_value of zero is
    * allowed, in which case the wait is a no-op.  For a non-timeline
    * vk_sync, wait_value should be ignored.
    *
    * This function is optional.  If the sync type needs to support CPU
    * waits, at least one of wait or wait_many must be provided.  If one is
    * missing, it will be implemented in terms of the other.
    */
   VkResult (*wait)(struct vk_device *device,
                    struct vk_sync *sync,
                    uint64_t wait_value,
                    enum vk_sync_wait_flags wait_flags,
                    uint64_t abs_timeout_ns);

   /** Wait for multiple vk_sync events
    *
    * If VK_SYNC_WAIT_ANY is set, it will return after at least one of the
    * wait events is complete instead of waiting for all of them.
    *
    * See wait for more details.
    */
   VkResult (*wait_many)(struct vk_device *device,
                         uint32_t wait_count,
                         const struct vk_sync_wait *waits,
                         enum vk_sync_wait_flags wait_flags,
                         uint64_t abs_timeout_ns);

   /** Permanently imports the given FD into this vk_sync
    *
    * This replaces the guts of the given vk_sync with whatever is in the
    * FD.  In a sense, this vk_sync now aliases whatever vk_sync the FD was
    * exported from.
    */
   VkResult (*import_opaque_fd)(struct vk_device *device,
                                struct vk_sync *sync,
                                int fd);

   /** Export the guts of this vk_sync to an FD */
   VkResult (*export_opaque_fd)(struct vk_device *device,
                                struct vk_sync *sync,
                                int *fd);

   /** Imports a sync file into this binary vk_sync
    *
    * If this completes successfully, the vk_sync will now signal whenever
    * the sync file signals.
    *
    * If sync_file == -1, the vk_sync should be signaled immediately.  If
    * the vk_sync_type implements signal, sync_file will never be -1.
    */
   VkResult (*import_sync_file)(struct vk_device *device,
                                struct vk_sync *sync,
                                int sync_file);

   /** Exports the current binary vk_sync state as a sync file
    *
    * The resulting sync file will contain the current event stored in this
    * binary vk_sync.  If the vk_sync is later modified to contain a new
    * event, the sync file is unaffected.
    */
   VkResult (*export_sync_file)(struct vk_device *device,
                                struct vk_sync *sync,
                                int *sync_file);
};
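
/* Illustrative only (not part of this header): a binary-only implementation
 * would fill out the table roughly like so, keeping the vfuncs consistent
 * with the advertised features (hypothetical names):
 *
 *    static const struct vk_sync_type fake_binary_sync_type = {
 *       .size = sizeof(struct fake_binary_sync),
 *       .features = VK_SYNC_FEATURE_BINARY |
 *                   VK_SYNC_FEATURE_GPU_WAIT |
 *                   VK_SYNC_FEATURE_CPU_WAIT |
 *                   VK_SYNC_FEATURE_CPU_RESET |
 *                   VK_SYNC_FEATURE_CPU_SIGNAL,
 *       .init = fake_binary_sync_init,
 *       .finish = fake_binary_sync_finish,
 *       .signal = fake_binary_sync_signal,
 *       .reset = fake_binary_sync_reset,
 *       .wait = fake_binary_sync_wait,
 *    };
 */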

enum vk_sync_flags {
   /** Set if the vk_sync is a timeline */
   VK_SYNC_IS_TIMELINE = (1 << 0),

   /** Set if the vk_sync can have its payload shared */
   VK_SYNC_IS_SHAREABLE = (1 << 1),

   /** Set if the vk_sync has a shared payload */
   VK_SYNC_IS_SHARED = (1 << 2),
};

struct vk_sync {
   const struct vk_sync_type *type;
   enum vk_sync_flags flags;
};

/* See VkSemaphoreSubmitInfoKHR */
struct vk_sync_wait {
   struct vk_sync *sync;
   VkPipelineStageFlags2KHR stage_mask;
   uint64_t wait_value;
};

VkResult MUST_CHECK vk_sync_init(struct vk_device *device,
                                 struct vk_sync *sync,
                                 const struct vk_sync_type *type,
                                 enum vk_sync_flags flags,
                                 uint64_t initial_value);

void vk_sync_finish(struct vk_device *device,
                    struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_create(struct vk_device *device,
                                   const struct vk_sync_type *type,
                                   enum vk_sync_flags flags,
                                   uint64_t initial_value,
                                   struct vk_sync **sync_out);

void vk_sync_destroy(struct vk_device *device,
                     struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_signal(struct vk_device *device,
                                   struct vk_sync *sync,
                                   uint64_t value);

VkResult MUST_CHECK vk_sync_get_value(struct vk_device *device,
                                      struct vk_sync *sync,
                                      uint64_t *value);

VkResult MUST_CHECK vk_sync_reset(struct vk_device *device,
                                  struct vk_sync *sync);

VkResult MUST_CHECK vk_sync_wait(struct vk_device *device,
                                 struct vk_sync *sync,
                                 uint64_t wait_value,
                                 enum vk_sync_wait_flags wait_flags,
                                 uint64_t abs_timeout_ns);

VkResult MUST_CHECK vk_sync_wait_many(struct vk_device *device,
                                      uint32_t wait_count,
                                      const struct vk_sync_wait *waits,
                                      enum vk_sync_wait_flags wait_flags,
                                      uint64_t abs_timeout_ns);

VkResult MUST_CHECK vk_sync_import_opaque_fd(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int fd);

VkResult MUST_CHECK vk_sync_export_opaque_fd(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int *fd);

VkResult MUST_CHECK vk_sync_import_sync_file(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int sync_file);

VkResult MUST_CHECK vk_sync_export_sync_file(struct vk_device *device,
                                             struct vk_sync *sync,
                                             int *sync_file);

VkResult MUST_CHECK vk_sync_move(struct vk_device *device,
                                 struct vk_sync *dst,
                                 struct vk_sync *src);

#ifdef __cplusplus
}
#endif

#endif /* VK_SYNC_H */