vulkan: Use syncobj shim

This will allow syncobj use in cases where the process does not have
direct rendernode access (e.g., vtest).

An alternative would be an alternate vk_sync_type implementation, but
the WSI code was also directly using drm syncobjs.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33433>
Author: Rob Clark <robdclark@chromium.org>
Date: 2024-11-05 13:46:29 -08:00
Committed by: Marge Bot
Parent: 3f5dc6329a
Commit: d2ea532528
5 changed files with 70 additions and 67 deletions
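
The "syncobj shim" is the util_sync_provider interface from
"util/u_sync_provider.h" (introduced earlier in this series): a table of
function pointers mirroring the drmSyncobj*() libdrm calls. Every direct
drmSyncobj*() call below becomes an indirect call through device->sync, so an
environment without a render node can plug in a proxying implementation. A
minimal sketch of the provider, reconstructed from the call sites in this
diff (the real header may differ in signatures and carry extra members):

   /* Sketch only -- reconstructed from usage in this diff, not the actual
    * header. Optional ops may be NULL; e.g.
    * vk_drm_syncobj_get_type_from_provider() below checks timeline_wait to
    * decide whether to advertise VK_SYNC_FEATURE_TIMELINE. */
   struct util_sync_provider {
      int (*create)(struct util_sync_provider *p, uint32_t flags, uint32_t *handle);
      int (*destroy)(struct util_sync_provider *p, uint32_t handle);
      int (*signal)(struct util_sync_provider *p, uint32_t *handles, uint32_t count);
      int (*timeline_signal)(struct util_sync_provider *p, uint32_t *handles,
                             uint64_t *points, uint32_t count);
      int (*reset)(struct util_sync_provider *p, uint32_t *handles, uint32_t count);
      int (*query)(struct util_sync_provider *p, uint32_t *handles,
                   uint64_t *points, uint32_t count, uint32_t flags);
      int (*wait)(struct util_sync_provider *p, uint32_t *handles, uint32_t count,
                  int64_t timeout_nsec, unsigned flags, uint32_t *first_signaled);
      int (*timeline_wait)(struct util_sync_provider *p, uint32_t *handles,
                           uint64_t *points, uint32_t count, int64_t timeout_nsec,
                           unsigned flags, uint32_t *first_signaled);
      int (*fd_to_handle)(struct util_sync_provider *p, int fd, uint32_t *handle);
      int (*handle_to_fd)(struct util_sync_provider *p, uint32_t handle, int *fd);
      int (*import_sync_file)(struct util_sync_provider *p, uint32_t handle, int fd);
      int (*export_sync_file)(struct util_sync_provider *p, uint32_t handle, int *fd);
      int (*transfer)(struct util_sync_provider *p, uint32_t dst_handle,
                      uint64_t dst_point, uint32_t src_handle, uint64_t src_point,
                      uint32_t flags);
      void (*finalize)(struct util_sync_provider *p);
   };

   /* Wraps an open DRM fd; each op forwards to the matching drmSyncobj*() call. */
   struct util_sync_provider *util_sync_provider_drm(int drm_fd);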

src/vulkan/runtime/vk_device.c

@@ -187,7 +187,6 @@ vk_device_init(struct vk_device *device,
    list_inithead(&device->queues);
    device->drm_fd = -1;
    device->mem_cache = NULL;
    device->timeline_mode = get_timeline_mode(physical_device);
@@ -278,6 +277,9 @@ vk_device_finish(struct vk_device *device)
    /* Drivers should tear down their own queues */
    assert(list_is_empty(&device->queues));
+   if (device->sync)
+      device->sync->finalize(device->sync);
    vk_device_memory_report_finish(device);
    vk_memory_trace_finish(device);

src/vulkan/runtime/vk_device.h

@@ -32,6 +32,7 @@
 #include "util/list.h"
 #include "util/simple_mtx.h"
 #include "util/u_atomic.h"
+#include "util/u_sync_provider.h"
 #ifdef __cplusplus
 extern "C" {
@@ -241,7 +242,7 @@ struct vk_device {
                                       struct vk_sync **sync_out);
    /* Set by vk_device_set_drm_fd() */
    int drm_fd;
+   struct util_sync_provider *sync;
    /** Implicit pipeline cache, or NULL */
    struct vk_pipeline_cache *mem_cache;
@@ -355,7 +356,7 @@ vk_device_init(struct vk_device *device,
 static inline void
 vk_device_set_drm_fd(struct vk_device *device, int drm_fd)
 {
    device->drm_fd = drm_fd;
+   device->sync = util_sync_provider_drm(drm_fd);
 }
 /** Tears down a vk_device

src/vulkan/runtime/vk_drm_syncobj.c

@@ -29,6 +29,7 @@
 #include "drm-uapi/drm.h"
 #include "util/os_time.h"
+#include "util/u_sync_provider.h"
 #include "vk_device.h"
 #include "vk_log.h"
@@ -52,16 +53,15 @@ vk_drm_syncobj_init(struct vk_device *device,
    if (!(sync->flags & VK_SYNC_IS_TIMELINE) && initial_value)
       flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
-   assert(device->drm_fd >= 0);
-   int err = drmSyncobjCreate(device->drm_fd, flags, &sobj->syncobj);
+   int err = device->sync->create(device->sync, flags, &sobj->syncobj);
    if (err < 0) {
       return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
                        "DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
    }
    if ((sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) {
-      err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj,
-                                     &initial_value, 1);
+      err = device->sync->timeline_signal(device->sync, &sobj->syncobj,
+                                          &initial_value, 1);
       if (err < 0) {
          vk_drm_syncobj_finish(device, sync);
          return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
@@ -78,8 +78,7 @@ vk_drm_syncobj_finish(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
-   ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
+   ASSERTED int err = device->sync->destroy(device->sync, sobj->syncobj);
    assert(err == 0);
 }
@@ -90,12 +89,11 @@ vk_drm_syncobj_signal(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
    int err;
    if (sync->flags & VK_SYNC_IS_TIMELINE)
-      err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj, &value, 1);
+      err = device->sync->timeline_signal(device->sync, &sobj->syncobj, &value, 1);
    else
-      err = drmSyncobjSignal(device->drm_fd, &sobj->syncobj, 1);
+      err = device->sync->signal(device->sync, &sobj->syncobj, 1);
    if (err) {
       return vk_errorf(device, VK_ERROR_UNKNOWN,
                        "DRM_IOCTL_SYNCOBJ_SIGNAL failed: %m");
@@ -111,8 +109,7 @@ vk_drm_syncobj_get_value(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
-   int err = drmSyncobjQuery(device->drm_fd, &sobj->syncobj, value, 1);
+   int err = device->sync->query(device->sync, &sobj->syncobj, value, 1, 0);
    if (err) {
       return vk_errorf(device, VK_ERROR_UNKNOWN,
                        "DRM_IOCTL_SYNCOBJ_QUERY failed: %m");
@@ -127,8 +124,7 @@ vk_drm_syncobj_reset(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
-   int err = drmSyncobjReset(device->drm_fd, &sobj->syncobj, 1);
+   int err = device->sync->reset(device->sync, &sobj->syncobj, 1);
    if (err) {
       return vk_errorf(device, VK_ERROR_UNKNOWN,
                        "DRM_IOCTL_SYNCOBJ_RESET failed: %m");
@@ -143,7 +139,7 @@ sync_has_sync_file(struct vk_device *device, struct vk_sync *sync)
    uint32_t handle = to_drm_syncobj(sync)->syncobj;
    int fd = -1;
-   int err = drmSyncobjExportSyncFile(device->drm_fd, handle, &fd);
+   int err = device->sync->export_sync_file(device->sync, handle, &fd);
    if (!err) {
       close(fd);
       return VK_SUCCESS;
@@ -153,9 +149,9 @@ sync_has_sync_file(struct vk_device *device, struct vk_sync *sync)
     * unexpected reason, we want to ensure this function will return success
     * eventually. Do a zero-time syncobj wait if the export failed.
     */
-   err = drmSyncobjWait(device->drm_fd, &handle, 1, 0 /* timeout */,
-                        DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
-                        NULL /* first_signaled */);
+   err = device->sync->wait(device->sync, &handle, 1, 0 /* timeout */,
+                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+                            NULL /* first_signaled */);
    if (!err) {
       return VK_SUCCESS;
    } else if (errno == ETIME) {
@@ -251,7 +247,6 @@ vk_drm_syncobj_wait_many(struct vk_device *device,
    if (!(wait_flags & VK_SYNC_WAIT_ANY))
       syncobj_wait_flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
-   assert(device->drm_fd >= 0);
    int err;
    if (wait_count == 0) {
       err = 0;
@@ -260,21 +255,21 @@ vk_drm_syncobj_wait_many(struct vk_device *device,
        * syncobjs because the non-timeline wait doesn't support
        * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE.
        */
-      err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values,
-                                   wait_count, abs_timeout_ns,
-                                   syncobj_wait_flags |
-                                   DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
-                                   NULL /* first_signaled */);
+      err = device->sync->timeline_wait(device->sync, handles, wait_values,
+                                        wait_count, abs_timeout_ns,
+                                        syncobj_wait_flags |
+                                        DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
+                                        NULL /* first_signaled */);
    } else if (has_timeline) {
-      err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values,
-                                   wait_count, abs_timeout_ns,
-                                   syncobj_wait_flags,
-                                   NULL /* first_signaled */);
+      err = device->sync->timeline_wait(device->sync, handles, wait_values,
+                                        wait_count, abs_timeout_ns,
+                                        syncobj_wait_flags,
+                                        NULL /* first_signaled */);
    } else {
-      err = drmSyncobjWait(device->drm_fd, handles,
-                           wait_count, abs_timeout_ns,
-                           syncobj_wait_flags,
-                           NULL /* first_signaled */);
+      err = device->sync->wait(device->sync, handles,
+                               wait_count, abs_timeout_ns,
+                               syncobj_wait_flags,
+                               NULL /* first_signaled */);
    }
    STACK_ARRAY_FINISH(handles);
@@ -297,15 +292,14 @@ vk_drm_syncobj_import_opaque_fd(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
    uint32_t new_handle;
-   int err = drmSyncobjFDToHandle(device->drm_fd, fd, &new_handle);
+   int err = device->sync->fd_to_handle(device->sync, fd, &new_handle);
    if (err) {
       return vk_errorf(device, VK_ERROR_UNKNOWN,
                        "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
    }
-   err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
+   err = device->sync->destroy(device->sync, sobj->syncobj);
    assert(!err);
    sobj->syncobj = new_handle;
@@ -320,8 +314,7 @@ vk_drm_syncobj_export_opaque_fd(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
-   int err = drmSyncobjHandleToFD(device->drm_fd, sobj->syncobj, fd);
+   int err = device->sync->handle_to_fd(device->sync, sobj->syncobj, fd);
    if (err) {
       return vk_errorf(device, VK_ERROR_UNKNOWN,
                        "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
@@ -337,8 +330,7 @@ vk_drm_syncobj_import_sync_file(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
-   int err = drmSyncobjImportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
+   int err = device->sync->import_sync_file(device->sync, sobj->syncobj, sync_file);
    if (err) {
       return vk_errorf(device, VK_ERROR_UNKNOWN,
                        "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
@@ -354,8 +346,7 @@ vk_drm_syncobj_export_sync_file(struct vk_device *device,
 {
    struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
-   assert(device->drm_fd >= 0);
-   int err = drmSyncobjExportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
+   int err = device->sync->export_sync_file(device->sync, sobj->syncobj, sync_file);
    if (err) {
       return vk_errorf(device, VK_ERROR_UNKNOWN,
                        "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
@@ -401,10 +392,10 @@ vk_drm_syncobj_move(struct vk_device *device,
 }
 struct vk_sync_type
-vk_drm_syncobj_get_type(int drm_fd)
+vk_drm_syncobj_get_type_from_provider(struct util_sync_provider *sync)
 {
    uint32_t syncobj = 0;
-   int err = drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
+   int err = sync->create(sync, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
    if (err < 0)
       return (struct vk_sync_type) { .features = 0 };
@@ -426,24 +417,31 @@ vk_drm_syncobj_get_type(int drm_fd)
       .export_sync_file = vk_drm_syncobj_export_sync_file,
    };
-   err = drmSyncobjWait(drm_fd, &syncobj, 1, 0,
-                        DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
-                        NULL /* first_signaled */);
+   err = sync->wait(sync, &syncobj, 1, 0,
+                    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
+                    NULL /* first_signaled */);
    if (err == 0) {
       type.wait_many = vk_drm_syncobj_wait_many;
       type.features |= VK_SYNC_FEATURE_CPU_WAIT |
                        VK_SYNC_FEATURE_WAIT_ANY;
    }
-   uint64_t cap;
-   err = drmGetCap(drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap);
-   if (err == 0 && cap != 0) {
+   if (sync->timeline_wait) {
       type.get_value = vk_drm_syncobj_get_value;
       type.features |= VK_SYNC_FEATURE_TIMELINE;
    }
-   err = drmSyncobjDestroy(drm_fd, syncobj);
+   err = sync->destroy(sync, syncobj);
    assert(err == 0);
    return type;
 }
+
+struct vk_sync_type
+vk_drm_syncobj_get_type(int drm_fd)
+{
+   struct util_sync_provider *sync = util_sync_provider_drm(drm_fd);
+   struct vk_sync_type ret = vk_drm_syncobj_get_type_from_provider(sync);
+   sync->finalize(sync);
+   return ret;
+}
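
Keeping vk_drm_syncobj_get_type() as a thin wrapper preserves the existing
drm_fd entry point, while the new _from_provider variant is what an
environment without render-node access would use. A hypothetical sketch (the
vtest_sync_provider_create() name is invented for illustration, not part of
this change):

   /* Driver setup without a DRM fd, assuming some backend-specific provider: */
   struct util_sync_provider *p = vtest_sync_provider_create(); /* hypothetical */
   device->sync = p; /* in place of vk_device_set_drm_fd() */
   struct vk_sync_type syncobj_type = vk_drm_syncobj_get_type_from_provider(p);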

src/vulkan/runtime/vk_drm_syncobj.h

@@ -54,6 +54,8 @@ vk_sync_as_drm_syncobj(struct vk_sync *sync)
    return container_of(sync, struct vk_drm_syncobj, base);
 }
+struct util_sync_provider;
+struct vk_sync_type vk_drm_syncobj_get_type_from_provider(struct util_sync_provider *sync);
 struct vk_sync_type vk_drm_syncobj_get_type(int drm_fd);
#ifdef __cplusplus

src/vulkan/wsi/wsi_common_drm.c

@@ -280,7 +280,7 @@ wsi_create_image_explicit_sync_drm(const struct wsi_swapchain *chain,
    }
    for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
-      ret = drmSyncobjFDToHandle(device->drm_fd, image->explicit_sync[i].fd, &image->explicit_sync[i].handle);
+      ret = device->sync->fd_to_handle(device->sync, image->explicit_sync[i].fd, &image->explicit_sync[i].handle);
       if (ret != 0)
          return VK_ERROR_FEATURE_NOT_PRESENT;
    }
@@ -297,7 +297,7 @@ wsi_destroy_image_explicit_sync_drm(const struct wsi_swapchain *chain,
    for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
       if (image->explicit_sync[i].handle != 0) {
-         drmSyncobjDestroy(device->drm_fd, image->explicit_sync[i].handle);
+         device->sync->destroy(device->sync, image->explicit_sync[i].handle);
          image->explicit_sync[i].handle = 0;
       }
    }
@@ -364,17 +364,17 @@ wsi_create_sync_for_image_syncobj(const struct wsi_swapchain *chain,
     * surrogate handle.
     */
    for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
-      if (drmSyncobjCreate(device->drm_fd, 0, &tmp_handles[i])) {
+      if (device->sync->create(device->sync, 0, &tmp_handles[i])) {
         result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to create temp syncobj. Errno: %d - %s", errno, strerror(errno));
         goto fail;
      }
-      if (drmSyncobjTransfer(device->drm_fd, tmp_handles[i], 0,
-                             image->explicit_sync[i].handle, image->explicit_sync[i].timeline, 0)) {
+      if (device->sync->transfer(device->sync, tmp_handles[i], 0,
+                                 image->explicit_sync[i].handle, image->explicit_sync[i].timeline, 0)) {
         result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to transfer syncobj. Was the timeline point materialized? Errno: %d - %s", errno, strerror(errno));
         goto fail;
      }
-      if (drmSyncobjExportSyncFile(device->drm_fd, tmp_handles[i], &sync_file_fds[i])) {
+      if (device->sync->export_sync_file(device->sync, tmp_handles[i], &sync_file_fds[i])) {
         result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to export sync file. Errno: %d - %s", errno, strerror(errno));
         goto fail;
      }
@@ -403,7 +403,7 @@ fail:
 done:
    for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
       if (tmp_handles[i])
-         drmSyncobjDestroy(device->drm_fd, tmp_handles[i]);
+         device->sync->destroy(device->sync, tmp_handles[i]);
    }
    for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
       if (sync_file_fds[i] >= 0)
@@ -885,7 +885,7 @@ wsi_drm_images_explicit_sync_state(struct vk_device *device, int count, uint32_t
       handles[i * WSI_ES_COUNT + WSI_ES_RELEASE] = image->explicit_sync[WSI_ES_RELEASE].handle;
    }
-   int ret = drmSyncobjQuery(device->drm_fd, handles, points, count * WSI_ES_COUNT);
+   int ret = device->sync->query(device->sync, handles, points, count * WSI_ES_COUNT, 0);
    if (ret)
       goto done;
@@ -899,9 +899,9 @@ wsi_drm_images_explicit_sync_state(struct vk_device *device, int count, uint32_t
          flags[i] |= WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED;
       } else {
          uint32_t first_signalled;
-         ret = drmSyncobjTimelineWait(device->drm_fd, &handles[i * WSI_ES_COUNT + WSI_ES_RELEASE],
-                                      &image->explicit_sync[WSI_ES_RELEASE].timeline, 1, 0,
-                                      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, &first_signalled);
+         ret = device->sync->timeline_wait(device->sync, &handles[i * WSI_ES_COUNT + WSI_ES_RELEASE],
+                                           &image->explicit_sync[WSI_ES_RELEASE].timeline, 1, 0,
+                                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, &first_signalled);
          if (ret == 0)
            flags[i] |= WSI_ES_STATE_RELEASE_MATERIALIZED;
       }
@@ -989,10 +989,10 @@ wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain *chain,
     * We will forward the GPU signal to the VkSemaphore/VkFence of the acquire.
     */
    uint32_t first_signalled;
-   ret = drmSyncobjTimelineWait(device->drm_fd, handles, points, unacquired_image_count,
-                                wsi_drm_rel_timeout_to_abs(rel_timeout_ns),
-                                DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
-                                &first_signalled);
+   ret = device->sync->timeline_wait(device->sync, handles, points, unacquired_image_count,
+                                     wsi_drm_rel_timeout_to_abs(rel_timeout_ns),
+                                     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
+                                     &first_signalled);
    /* Return the first image that materialized. */
    if (ret != 0)