vulkan: Use syncobj shim

This will allow syncobj use in cases where the process does not have
direct rendernode access (e.g., vtest).

An alternative would be an alternate vk_sync_type implementation, but
the WSI code was also directly using drm syncobjs.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/33433>
This commit is contained in:
Rob Clark 2024-11-05 13:46:29 -08:00 committed by Marge Bot
parent 3f5dc6329a
commit d2ea532528
5 changed files with 70 additions and 67 deletions

View file

@ -187,7 +187,6 @@ vk_device_init(struct vk_device *device,
list_inithead(&device->queues); list_inithead(&device->queues);
device->drm_fd = -1;
device->mem_cache = NULL; device->mem_cache = NULL;
device->timeline_mode = get_timeline_mode(physical_device); device->timeline_mode = get_timeline_mode(physical_device);
@ -278,6 +277,9 @@ vk_device_finish(struct vk_device *device)
/* Drivers should tear down their own queues */ /* Drivers should tear down their own queues */
assert(list_is_empty(&device->queues)); assert(list_is_empty(&device->queues));
if (device->sync)
device->sync->finalize(device->sync);
vk_device_memory_report_finish(device); vk_device_memory_report_finish(device);
vk_memory_trace_finish(device); vk_memory_trace_finish(device);

View file

@ -32,6 +32,7 @@
#include "util/list.h" #include "util/list.h"
#include "util/simple_mtx.h" #include "util/simple_mtx.h"
#include "util/u_atomic.h" #include "util/u_atomic.h"
#include "util/u_sync_provider.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@ -241,7 +242,7 @@ struct vk_device {
struct vk_sync **sync_out); struct vk_sync **sync_out);
/* Set by vk_device_set_drm_fd() */ /* Set by vk_device_set_drm_fd() */
int drm_fd; struct util_sync_provider *sync;
/** Implicit pipeline cache, or NULL */ /** Implicit pipeline cache, or NULL */
struct vk_pipeline_cache *mem_cache; struct vk_pipeline_cache *mem_cache;
@ -355,7 +356,7 @@ vk_device_init(struct vk_device *device,
static inline void static inline void
vk_device_set_drm_fd(struct vk_device *device, int drm_fd) vk_device_set_drm_fd(struct vk_device *device, int drm_fd)
{ {
device->drm_fd = drm_fd; device->sync = util_sync_provider_drm(drm_fd);
} }
/** Tears down a vk_device /** Tears down a vk_device

View file

@ -29,6 +29,7 @@
#include "drm-uapi/drm.h" #include "drm-uapi/drm.h"
#include "util/os_time.h" #include "util/os_time.h"
#include "util/u_sync_provider.h"
#include "vk_device.h" #include "vk_device.h"
#include "vk_log.h" #include "vk_log.h"
@ -52,15 +53,14 @@ vk_drm_syncobj_init(struct vk_device *device,
if (!(sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) if (!(sync->flags & VK_SYNC_IS_TIMELINE) && initial_value)
flags |= DRM_SYNCOBJ_CREATE_SIGNALED; flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
assert(device->drm_fd >= 0); int err = device->sync->create(device->sync, flags, &sobj->syncobj);
int err = drmSyncobjCreate(device->drm_fd, flags, &sobj->syncobj);
if (err < 0) { if (err < 0) {
return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY, return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
"DRM_IOCTL_SYNCOBJ_CREATE failed: %m"); "DRM_IOCTL_SYNCOBJ_CREATE failed: %m");
} }
if ((sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) { if ((sync->flags & VK_SYNC_IS_TIMELINE) && initial_value) {
err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj, err = device->sync->timeline_signal(device->sync, &sobj->syncobj,
&initial_value, 1); &initial_value, 1);
if (err < 0) { if (err < 0) {
vk_drm_syncobj_finish(device, sync); vk_drm_syncobj_finish(device, sync);
@ -78,8 +78,7 @@ vk_drm_syncobj_finish(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0); ASSERTED int err = device->sync->destroy(device->sync, sobj->syncobj);
ASSERTED int err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj);
assert(err == 0); assert(err == 0);
} }
@ -90,12 +89,11 @@ vk_drm_syncobj_signal(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0);
int err; int err;
if (sync->flags & VK_SYNC_IS_TIMELINE) if (sync->flags & VK_SYNC_IS_TIMELINE)
err = drmSyncobjTimelineSignal(device->drm_fd, &sobj->syncobj, &value, 1); err = device->sync->timeline_signal(device->sync, &sobj->syncobj, &value, 1);
else else
err = drmSyncobjSignal(device->drm_fd, &sobj->syncobj, 1); err = device->sync->signal(device->sync, &sobj->syncobj, 1);
if (err) { if (err) {
return vk_errorf(device, VK_ERROR_UNKNOWN, return vk_errorf(device, VK_ERROR_UNKNOWN,
"DRM_IOCTL_SYNCOBJ_SIGNAL failed: %m"); "DRM_IOCTL_SYNCOBJ_SIGNAL failed: %m");
@ -111,8 +109,7 @@ vk_drm_syncobj_get_value(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0); int err = device->sync->query(device->sync, &sobj->syncobj, value, 1, 0);
int err = drmSyncobjQuery(device->drm_fd, &sobj->syncobj, value, 1);
if (err) { if (err) {
return vk_errorf(device, VK_ERROR_UNKNOWN, return vk_errorf(device, VK_ERROR_UNKNOWN,
"DRM_IOCTL_SYNCOBJ_QUERY failed: %m"); "DRM_IOCTL_SYNCOBJ_QUERY failed: %m");
@ -127,8 +124,7 @@ vk_drm_syncobj_reset(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0); int err = device->sync->reset(device->sync, &sobj->syncobj, 1);
int err = drmSyncobjReset(device->drm_fd, &sobj->syncobj, 1);
if (err) { if (err) {
return vk_errorf(device, VK_ERROR_UNKNOWN, return vk_errorf(device, VK_ERROR_UNKNOWN,
"DRM_IOCTL_SYNCOBJ_RESET failed: %m"); "DRM_IOCTL_SYNCOBJ_RESET failed: %m");
@ -143,7 +139,7 @@ sync_has_sync_file(struct vk_device *device, struct vk_sync *sync)
uint32_t handle = to_drm_syncobj(sync)->syncobj; uint32_t handle = to_drm_syncobj(sync)->syncobj;
int fd = -1; int fd = -1;
int err = drmSyncobjExportSyncFile(device->drm_fd, handle, &fd); int err = device->sync->export_sync_file(device->sync, handle, &fd);
if (!err) { if (!err) {
close(fd); close(fd);
return VK_SUCCESS; return VK_SUCCESS;
@ -153,7 +149,7 @@ sync_has_sync_file(struct vk_device *device, struct vk_sync *sync)
* unexpected reason, we want to ensure this function will return success * unexpected reason, we want to ensure this function will return success
* eventually. Do a zero-time syncobj wait if the export failed. * eventually. Do a zero-time syncobj wait if the export failed.
*/ */
err = drmSyncobjWait(device->drm_fd, &handle, 1, 0 /* timeout */, err = device->sync->wait(device->sync, &handle, 1, 0 /* timeout */,
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
NULL /* first_signaled */); NULL /* first_signaled */);
if (!err) { if (!err) {
@ -251,7 +247,6 @@ vk_drm_syncobj_wait_many(struct vk_device *device,
if (!(wait_flags & VK_SYNC_WAIT_ANY)) if (!(wait_flags & VK_SYNC_WAIT_ANY))
syncobj_wait_flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL; syncobj_wait_flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
assert(device->drm_fd >= 0);
int err; int err;
if (wait_count == 0) { if (wait_count == 0) {
err = 0; err = 0;
@ -260,18 +255,18 @@ vk_drm_syncobj_wait_many(struct vk_device *device,
* syncobjs because the non-timeline wait doesn't support * syncobjs because the non-timeline wait doesn't support
* DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE. * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE.
*/ */
err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values, err = device->sync->timeline_wait(device->sync, handles, wait_values,
wait_count, abs_timeout_ns, wait_count, abs_timeout_ns,
syncobj_wait_flags | syncobj_wait_flags |
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
NULL /* first_signaled */); NULL /* first_signaled */);
} else if (has_timeline) { } else if (has_timeline) {
err = drmSyncobjTimelineWait(device->drm_fd, handles, wait_values, err = device->sync->timeline_wait(device->sync, handles, wait_values,
wait_count, abs_timeout_ns, wait_count, abs_timeout_ns,
syncobj_wait_flags, syncobj_wait_flags,
NULL /* first_signaled */); NULL /* first_signaled */);
} else { } else {
err = drmSyncobjWait(device->drm_fd, handles, err = device->sync->wait(device->sync, handles,
wait_count, abs_timeout_ns, wait_count, abs_timeout_ns,
syncobj_wait_flags, syncobj_wait_flags,
NULL /* first_signaled */); NULL /* first_signaled */);
@ -297,15 +292,14 @@ vk_drm_syncobj_import_opaque_fd(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0);
uint32_t new_handle; uint32_t new_handle;
int err = drmSyncobjFDToHandle(device->drm_fd, fd, &new_handle); int err = device->sync->fd_to_handle(device->sync, fd, &new_handle);
if (err) { if (err) {
return vk_errorf(device, VK_ERROR_UNKNOWN, return vk_errorf(device, VK_ERROR_UNKNOWN,
"DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m"); "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
} }
err = drmSyncobjDestroy(device->drm_fd, sobj->syncobj); err = device->sync->destroy(device->sync, sobj->syncobj);
assert(!err); assert(!err);
sobj->syncobj = new_handle; sobj->syncobj = new_handle;
@ -320,8 +314,7 @@ vk_drm_syncobj_export_opaque_fd(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0); int err = device->sync->handle_to_fd(device->sync, sobj->syncobj, fd);
int err = drmSyncobjHandleToFD(device->drm_fd, sobj->syncobj, fd);
if (err) { if (err) {
return vk_errorf(device, VK_ERROR_UNKNOWN, return vk_errorf(device, VK_ERROR_UNKNOWN,
"DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m"); "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
@ -337,8 +330,7 @@ vk_drm_syncobj_import_sync_file(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0); int err = device->sync->import_sync_file(device->sync, sobj->syncobj, sync_file);
int err = drmSyncobjImportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
if (err) { if (err) {
return vk_errorf(device, VK_ERROR_UNKNOWN, return vk_errorf(device, VK_ERROR_UNKNOWN,
"DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m"); "DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE failed: %m");
@ -354,8 +346,7 @@ vk_drm_syncobj_export_sync_file(struct vk_device *device,
{ {
struct vk_drm_syncobj *sobj = to_drm_syncobj(sync); struct vk_drm_syncobj *sobj = to_drm_syncobj(sync);
assert(device->drm_fd >= 0); int err = device->sync->export_sync_file(device->sync, sobj->syncobj, sync_file);
int err = drmSyncobjExportSyncFile(device->drm_fd, sobj->syncobj, sync_file);
if (err) { if (err) {
return vk_errorf(device, VK_ERROR_UNKNOWN, return vk_errorf(device, VK_ERROR_UNKNOWN,
"DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m"); "DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD failed: %m");
@ -401,10 +392,10 @@ vk_drm_syncobj_move(struct vk_device *device,
} }
struct vk_sync_type struct vk_sync_type
vk_drm_syncobj_get_type(int drm_fd) vk_drm_syncobj_get_type_from_provider(struct util_sync_provider *sync)
{ {
uint32_t syncobj = 0; uint32_t syncobj = 0;
int err = drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj); int err = sync->create(sync, DRM_SYNCOBJ_CREATE_SIGNALED, &syncobj);
if (err < 0) if (err < 0)
return (struct vk_sync_type) { .features = 0 }; return (struct vk_sync_type) { .features = 0 };
@ -426,7 +417,7 @@ vk_drm_syncobj_get_type(int drm_fd)
.export_sync_file = vk_drm_syncobj_export_sync_file, .export_sync_file = vk_drm_syncobj_export_sync_file,
}; };
err = drmSyncobjWait(drm_fd, &syncobj, 1, 0, err = sync->wait(sync, &syncobj, 1, 0,
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
NULL /* first_signaled */); NULL /* first_signaled */);
if (err == 0) { if (err == 0) {
@ -435,15 +426,22 @@ vk_drm_syncobj_get_type(int drm_fd)
VK_SYNC_FEATURE_WAIT_ANY; VK_SYNC_FEATURE_WAIT_ANY;
} }
uint64_t cap; if (sync->timeline_wait) {
err = drmGetCap(drm_fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap);
if (err == 0 && cap != 0) {
type.get_value = vk_drm_syncobj_get_value; type.get_value = vk_drm_syncobj_get_value;
type.features |= VK_SYNC_FEATURE_TIMELINE; type.features |= VK_SYNC_FEATURE_TIMELINE;
} }
err = drmSyncobjDestroy(drm_fd, syncobj); err = sync->destroy(sync, syncobj);
assert(err == 0); assert(err == 0);
return type; return type;
} }
struct vk_sync_type
vk_drm_syncobj_get_type(int drm_fd)
{
struct util_sync_provider *sync = util_sync_provider_drm(drm_fd);
struct vk_sync_type ret = vk_drm_syncobj_get_type_from_provider(sync);
sync->finalize(sync);
return ret;
}

View file

@ -54,6 +54,8 @@ vk_sync_as_drm_syncobj(struct vk_sync *sync)
return container_of(sync, struct vk_drm_syncobj, base); return container_of(sync, struct vk_drm_syncobj, base);
} }
struct util_sync_provider;
struct vk_sync_type vk_drm_syncobj_get_type_from_provider(struct util_sync_provider *sync);
struct vk_sync_type vk_drm_syncobj_get_type(int drm_fd); struct vk_sync_type vk_drm_syncobj_get_type(int drm_fd);
#ifdef __cplusplus #ifdef __cplusplus

View file

@ -280,7 +280,7 @@ wsi_create_image_explicit_sync_drm(const struct wsi_swapchain *chain,
} }
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) { for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
ret = drmSyncobjFDToHandle(device->drm_fd, image->explicit_sync[i].fd, &image->explicit_sync[i].handle); ret = device->sync->fd_to_handle(device->sync, image->explicit_sync[i].fd, &image->explicit_sync[i].handle);
if (ret != 0) if (ret != 0)
return VK_ERROR_FEATURE_NOT_PRESENT; return VK_ERROR_FEATURE_NOT_PRESENT;
} }
@ -297,7 +297,7 @@ wsi_destroy_image_explicit_sync_drm(const struct wsi_swapchain *chain,
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) { for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (image->explicit_sync[i].handle != 0) { if (image->explicit_sync[i].handle != 0) {
drmSyncobjDestroy(device->drm_fd, image->explicit_sync[i].handle); device->sync->destroy(device->sync, image->explicit_sync[i].handle);
image->explicit_sync[i].handle = 0; image->explicit_sync[i].handle = 0;
} }
@ -364,17 +364,17 @@ wsi_create_sync_for_image_syncobj(const struct wsi_swapchain *chain,
* surrogate handle. * surrogate handle.
*/ */
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) { for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (drmSyncobjCreate(device->drm_fd, 0, &tmp_handles[i])) { if (device->sync->create(device->sync, 0, &tmp_handles[i])) {
result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to create temp syncobj. Errno: %d - %s", errno, strerror(errno)); result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to create temp syncobj. Errno: %d - %s", errno, strerror(errno));
goto fail; goto fail;
} }
if (drmSyncobjTransfer(device->drm_fd, tmp_handles[i], 0, if (device->sync->transfer(device->sync, tmp_handles[i], 0,
image->explicit_sync[i].handle, image->explicit_sync[i].timeline, 0)) { image->explicit_sync[i].handle, image->explicit_sync[i].timeline, 0)) {
result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to transfer syncobj. Was the timeline point materialized? Errno: %d - %s", errno, strerror(errno)); result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to transfer syncobj. Was the timeline point materialized? Errno: %d - %s", errno, strerror(errno));
goto fail; goto fail;
} }
if (drmSyncobjExportSyncFile(device->drm_fd, tmp_handles[i], &sync_file_fds[i])) { if (device->sync->export_sync_file(device->sync, tmp_handles[i], &sync_file_fds[i])) {
result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to export sync file. Errno: %d - %s", errno, strerror(errno)); result = vk_errorf(NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY, "Failed to export sync file. Errno: %d - %s", errno, strerror(errno));
goto fail; goto fail;
} }
@ -403,7 +403,7 @@ fail:
done: done:
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) { for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (tmp_handles[i]) if (tmp_handles[i])
drmSyncobjDestroy(device->drm_fd, tmp_handles[i]); device->sync->destroy(device->sync, tmp_handles[i]);
} }
for (uint32_t i = 0; i < WSI_ES_COUNT; i++) { for (uint32_t i = 0; i < WSI_ES_COUNT; i++) {
if (sync_file_fds[i] >= 0) if (sync_file_fds[i] >= 0)
@ -885,7 +885,7 @@ wsi_drm_images_explicit_sync_state(struct vk_device *device, int count, uint32_t
handles[i * WSI_ES_COUNT + WSI_ES_RELEASE] = image->explicit_sync[WSI_ES_RELEASE].handle; handles[i * WSI_ES_COUNT + WSI_ES_RELEASE] = image->explicit_sync[WSI_ES_RELEASE].handle;
} }
int ret = drmSyncobjQuery(device->drm_fd, handles, points, count * WSI_ES_COUNT); int ret = device->sync->query(device->sync, handles, points, count * WSI_ES_COUNT, 0);
if (ret) if (ret)
goto done; goto done;
@ -899,7 +899,7 @@ wsi_drm_images_explicit_sync_state(struct vk_device *device, int count, uint32_t
flags[i] |= WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED; flags[i] |= WSI_ES_STATE_RELEASE_SIGNALLED | WSI_ES_STATE_RELEASE_MATERIALIZED;
} else { } else {
uint32_t first_signalled; uint32_t first_signalled;
ret = drmSyncobjTimelineWait(device->drm_fd, &handles[i * WSI_ES_COUNT + WSI_ES_RELEASE], ret = device->sync->timeline_wait(device->sync, &handles[i * WSI_ES_COUNT + WSI_ES_RELEASE],
&image->explicit_sync[WSI_ES_RELEASE].timeline, 1, 0, &image->explicit_sync[WSI_ES_RELEASE].timeline, 1, 0,
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, &first_signalled); DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, &first_signalled);
if (ret == 0) if (ret == 0)
@ -989,7 +989,7 @@ wsi_drm_wait_for_explicit_sync_release(struct wsi_swapchain *chain,
* We will forward the GPU signal to the VkSemaphore/VkFence of the acquire. * We will forward the GPU signal to the VkSemaphore/VkFence of the acquire.
*/ */
uint32_t first_signalled; uint32_t first_signalled;
ret = drmSyncobjTimelineWait(device->drm_fd, handles, points, unacquired_image_count, ret = device->sync->timeline_wait(device->sync, handles, points, unacquired_image_count,
wsi_drm_rel_timeout_to_abs(rel_timeout_ns), wsi_drm_rel_timeout_to_abs(rel_timeout_ns),
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
&first_signalled); &first_signalled);