From 8f56d03d4b0f77da7af921ad9cf607e78d3f5953 Mon Sep 17 00:00:00 2001 From: Erico Nunes Date: Sat, 28 Dec 2024 14:41:19 +0100 Subject: [PATCH] libweston: Vulkan renderer A Vulkan renderer for weston, based on the GL renderer. The goal is to impose the least requirements as possible on Vulkan implementations, as to allow even Vulkan 1.0 (or early development) drivers to run a Wayland compositor. Any additional features or extensions are made optional if possible. Currently supports drm, wayland, x11 and headless backends. As of this implementation, this is still considered an experimental renderer. Signed-off-by: Erico Nunes --- doc/sphinx/toc/running-weston.rst | 15 +- frontend/config-helpers.c | 1 + frontend/main.c | 14 +- include/libweston/libweston.h | 3 +- libweston/backend-drm/drm-gbm.c | 273 ++ libweston/backend-drm/drm-internal.h | 40 + libweston/backend-drm/drm.c | 34 +- libweston/backend-drm/fb.c | 3 +- libweston/backend-drm/kms.c | 3 + libweston/backend-drm/meson.build | 10 + libweston/backend-headless/headless.c | 92 + libweston/backend-wayland/meson.build | 4 + libweston/backend-wayland/wayland.c | 129 +- libweston/backend-x11/x11.c | 61 + libweston/compositor.c | 25 + libweston/libweston-internal.h | 1 + libweston/meson.build | 2 + libweston/pixel-formats.c | 20 + libweston/pixel-formats.h | 3 + libweston/renderer-vulkan/meson.build | 59 + libweston/renderer-vulkan/vulkan-pipeline.c | 399 ++ .../renderer-vulkan/vulkan-pixel-format.c | 237 + .../vulkan-renderer-internal.h | 203 + libweston/renderer-vulkan/vulkan-renderer.c | 4195 +++++++++++++++++ libweston/renderer-vulkan/vulkan-renderer.h | 97 + .../vulkan_fragment_shader.frag | 57 + .../vulkan_vertex_shader_surface.vert | 16 + .../vulkan_vertex_shader_texcoord.vert | 16 + man/weston-drm.man | 6 +- meson.build | 19 + meson_options.txt | 7 + shared/meson.build | 2 +- 32 files changed, 6029 insertions(+), 17 deletions(-) create mode 100644 libweston/renderer-vulkan/meson.build create mode 100644 libweston/renderer-vulkan/vulkan-pipeline.c create mode 100644 libweston/renderer-vulkan/vulkan-pixel-format.c create mode 100644 libweston/renderer-vulkan/vulkan-renderer-internal.h create mode 100644 libweston/renderer-vulkan/vulkan-renderer.c create mode 100644 libweston/renderer-vulkan/vulkan-renderer.h create mode 100644 libweston/renderer-vulkan/vulkan_fragment_shader.frag create mode 100644 libweston/renderer-vulkan/vulkan_vertex_shader_surface.vert create mode 100644 libweston/renderer-vulkan/vulkan_vertex_shader_texcoord.vert diff --git a/doc/sphinx/toc/running-weston.rst b/doc/sphinx/toc/running-weston.rst index babda55ae..84815369b 100644 --- a/doc/sphinx/toc/running-weston.rst +++ b/doc/sphinx/toc/running-weston.rst @@ -35,12 +35,15 @@ stitching them together is performed by a *renderer*. By doing so, it is compositing all surfaces into a single image, which is being handed out to a back-end, and finally, displayed on the screen. -libweston provides two useful renderers. One uses -`OpenGL ES `_, which will often be accelerated -by your GPU when suitable drivers are installed. The other uses the -`Pixman `_ library which is entirely CPU (software) -rendered. You can select between these with the ``--renderer=gl`` and -``--renderer=pixman`` arguments when starting Weston. +libweston provides multiple useful renderers. There are +`OpenGL ES `_ and +`Vulkan `_ renderers, which will often be accelerated +by your GPU when suitable drivers are installed. 
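+The Vulkan renderer is, as of this writing, still considered experimental.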
+Another uses the `Pixman `_ library which is entirely +CPU (software) rendered. + +You can select between these with the ``--renderer=gl``, ``--renderer=vulkan`` +and ``--renderer=pixman`` arguments when starting Weston. Multi-back-end support ---------------------- diff --git a/frontend/config-helpers.c b/frontend/config-helpers.c index 21a0512eb..8c4a1d771 100644 --- a/frontend/config-helpers.c +++ b/frontend/config-helpers.c @@ -70,6 +70,7 @@ struct { } renderer_name_map[] = { { "auto", WESTON_RENDERER_AUTO }, { "gl", WESTON_RENDERER_GL }, + { "vulkan", WESTON_RENDERER_VULKAN }, { "noop", WESTON_RENDERER_NOOP }, { "pixman", WESTON_RENDERER_PIXMAN }, }; diff --git a/frontend/main.c b/frontend/main.c index 54d7b7bca..d2909a254 100644 --- a/frontend/main.c +++ b/frontend/main.c @@ -710,6 +710,9 @@ usage(int error_code) "\t\t\t\tauto\tAutomatic selection of one of the below renderers\n" #if defined(ENABLE_EGL) "\t\t\t\tgl\tOpenGL ES\n" +#endif +#if defined(ENABLE_VULKAN) + "\t\t\t\tvulkan\tVulkan\n" #endif "\t\t\t\tnoop\tNo-op renderer for testing only\n" "\t\t\t\tpixman\tPixman software renderer\n" @@ -755,6 +758,7 @@ usage(int error_code) "\tflipped-rotate-270\n" " --use-pixman\t\tUse the pixman (CPU) renderer (deprecated alias for --renderer=pixman)\n" " --use-gl\t\tUse the GL renderer (deprecated alias for --renderer=gl)\n" + " --use-vulkan\t\tUse the Vulkan renderer (deprecated alias for --renderer=vulkan)\n" " --no-outputs\t\tDo not create any virtual outputs\n" " --refresh-rate=RATE\tThe output refresh rate (in mHz)\n" "\n"); @@ -3532,6 +3536,7 @@ load_headless_backend(struct weston_compositor *c, struct wet_backend *wb; bool force_pixman; bool force_gl; + bool force_vulkan; bool no_outputs = false; char *transform = NULL; @@ -3544,6 +3549,8 @@ load_headless_backend(struct weston_compositor *c, false); weston_config_section_get_bool(section, "use-gl", &force_gl, false); + weston_config_section_get_bool(section, "use-vulkan", &force_vulkan, + false); weston_config_section_get_bool(section, "output-decorations", &config.decorate, false); @@ -3553,6 +3560,7 @@ load_headless_backend(struct weston_compositor *c, { WESTON_OPTION_INTEGER, "scale", 0, &parsed_options->scale }, { WESTON_OPTION_BOOLEAN, "use-pixman", 0, &force_pixman }, { WESTON_OPTION_BOOLEAN, "use-gl", 0, &force_gl }, + { WESTON_OPTION_BOOLEAN, "use-vulkan", 0, &force_vulkan }, { WESTON_OPTION_STRING, "transform", 0, &transform }, { WESTON_OPTION_BOOLEAN, "no-outputs", 0, &no_outputs }, { WESTON_OPTION_INTEGER, "refresh-rate", 0, &config.refresh }, @@ -3562,13 +3570,17 @@ load_headless_backend(struct weston_compositor *c, parse_options(options, ARRAY_LENGTH(options), argc, argv); if ((force_pixman && force_gl) || - (renderer != WESTON_RENDERER_AUTO && (force_pixman || force_gl))) { + (force_pixman && force_vulkan) || + (force_gl && force_vulkan) || + (renderer != WESTON_RENDERER_AUTO && (force_pixman || force_gl || force_vulkan))) { weston_log("Conflicting renderer specifications\n"); return -1; } else if (force_pixman) { config.renderer = WESTON_RENDERER_PIXMAN; } else if (force_gl) { config.renderer = WESTON_RENDERER_GL; + } else if (force_vulkan) { + config.renderer = WESTON_RENDERER_VULKAN; } else { config.renderer = renderer; } diff --git a/include/libweston/libweston.h b/include/libweston/libweston.h index 74d265cd8..d66b40d8d 100644 --- a/include/libweston/libweston.h +++ b/include/libweston/libweston.h @@ -201,7 +201,7 @@ enum weston_hdcp_protection { * \ingroup testharness */ struct weston_testsuite_quirks { - /** 
Force GL-renderer to do a full upload of wl_shm buffers. */ + /** Force GL/Vulkan-renderer to do a full upload of wl_shm buffers. */ bool force_full_upload; /** Ensure GL shadow fb is used, and always repaint it fully. */ bool gl_force_full_redraw_of_shadow_fb; @@ -2278,6 +2278,7 @@ enum weston_renderer_type { WESTON_RENDERER_NOOP = 1, WESTON_RENDERER_PIXMAN = 2, WESTON_RENDERER_GL = 3, + WESTON_RENDERER_VULKAN = 4, }; struct weston_backend * diff --git a/libweston/backend-drm/drm-gbm.c b/libweston/backend-drm/drm-gbm.c index 921c952ed..617d62f67 100644 --- a/libweston/backend-drm/drm-gbm.c +++ b/libweston/backend-drm/drm-gbm.c @@ -41,6 +41,7 @@ #include "pixel-formats.h" #include "renderer-gl/gl-renderer.h" #include "shared/weston-egl-ext.h" +#include "renderer-vulkan/vulkan-renderer.h" #include "linux-dmabuf.h" #include "linux-explicit-synchronization.h" @@ -82,6 +83,27 @@ drm_backend_create_gl_renderer(struct drm_backend *b) &options.base); } +static int +drm_backend_create_vulkan_renderer(struct drm_backend *b) +{ + const struct pixel_format_info *format[3] = { + b->format, + fallback_format_for(b->format), + }; + struct vulkan_renderer_display_options options = { + .gbm_device = b->gbm, + .formats = format, + .formats_count = 1, + }; + + if (format[1]) + options.formats_count = 2; + + return weston_compositor_init_renderer(b->compositor, + WESTON_RENDERER_VULKAN, + &options.base); +} + int init_egl(struct drm_backend *b) { @@ -100,6 +122,24 @@ init_egl(struct drm_backend *b) return 0; } +int +init_vulkan(struct drm_backend *b) +{ + struct drm_device *device = b->drm; + + b->gbm = gbm_create_device(device->drm.fd); + if (!b->gbm) + return -1; + + if (drm_backend_create_vulkan_renderer(b) < 0) { + gbm_device_destroy(b->gbm); + b->gbm = NULL; + return -1; + } + + return 0; +} + static void drm_output_fini_cursor_egl(struct drm_output *output) { unsigned int i; @@ -113,6 +153,19 @@ static void drm_output_fini_cursor_egl(struct drm_output *output) } } +static void drm_output_fini_cursor_vulkan(struct drm_output *output) +{ + unsigned int i; + + for (i = 0; i < ARRAY_LENGTH(output->gbm_cursor_fb); i++) { + /* This cursor does not have a GBM device */ + if (output->gbm_cursor_fb[i] && !output->gbm_cursor_fb[i]->bo) + output->gbm_cursor_fb[i]->type = BUFFER_PIXMAN_DUMB; + drm_fb_unref(output->gbm_cursor_fb[i]); + output->gbm_cursor_fb[i] = NULL; + } +} + static int drm_output_init_cursor_egl(struct drm_output *output, struct drm_backend *b) { @@ -162,6 +215,55 @@ err: return -1; } +static int +drm_output_init_cursor_vulkan(struct drm_output *output, struct drm_backend *b) +{ + struct drm_device *device = output->device; + unsigned int i; + + /* No point creating cursors if we don't have a plane for them. 
*/ + if (!output->cursor_plane) + return 0; + + for (i = 0; i < ARRAY_LENGTH(output->gbm_cursor_fb); i++) { + struct gbm_bo *bo; + + if (gbm_device_get_fd(b->gbm) != output->device->drm.fd) { + output->gbm_cursor_fb[i] = + drm_fb_create_dumb(output->device, + device->cursor_width, + device->cursor_height, + DRM_FORMAT_ARGB8888); + /* Override buffer type, since we know it is a cursor */ + output->gbm_cursor_fb[i]->type = BUFFER_CURSOR; + output->gbm_cursor_handle[i] = + output->gbm_cursor_fb[i]->handles[0]; + } else { + bo = gbm_bo_create(b->gbm, device->cursor_width, device->cursor_height, + GBM_FORMAT_ARGB8888, + GBM_BO_USE_CURSOR | GBM_BO_USE_WRITE); + if (!bo) + goto err; + + output->gbm_cursor_fb[i] = + drm_fb_get_from_bo(bo, device, false, BUFFER_CURSOR); + if (!output->gbm_cursor_fb[i]) { + gbm_bo_destroy(bo); + goto err; + } + output->gbm_cursor_handle[i] = gbm_bo_get_handle(bo).s32; + } + } + + return 0; + +err: + weston_log("cursor buffers unavailable, using vulkan cursors\n"); + device->cursors_are_broken = true; + drm_output_fini_cursor_vulkan(output); + return -1; +} + static void create_gbm_surface(struct gbm_device *gbm, struct drm_output *output) { @@ -256,6 +358,123 @@ drm_output_init_egl(struct drm_output *output, struct drm_backend *b) return 0; } +static void +create_gbm_bos(struct gbm_device *gbm, struct drm_output *output, unsigned int n) +{ + struct weston_mode *mode = output->base.current_mode; + struct drm_plane *plane = output->scanout_plane; + struct weston_drm_format *fmt; + const uint64_t *modifiers; + unsigned int num_modifiers; + + fmt = weston_drm_format_array_find_format(&plane->formats, + output->format->format); + if (!fmt) { + weston_log("format %s not supported by output %s\n", + output->format->drm_format_name, + output->base.name); + return; + } + + if (!weston_drm_format_has_modifier(fmt, DRM_FORMAT_MOD_INVALID)) { + modifiers = weston_drm_format_get_modifiers(fmt, &num_modifiers); + for (unsigned int i = 0; i < n; i++) { + output->gbm_bos[i] = + gbm_bo_create_with_modifiers(gbm, + mode->width, mode->height, + output->format->format, + modifiers, num_modifiers); + } + } + + /* + * If we cannot use modifiers to allocate the GBM surface and + * the GBM device differs from the KMS display device, try to + * use linear buffers and hope that the allocated GBM surface + * is correctly displayed on the KMS device. 
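+	 * A linear layout is the most likely layout to be displayable by
+	 * both devices, at the cost of some memory bandwidth.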
+ */ + if (gbm_device_get_fd(gbm) != output->device->drm.fd) + output->gbm_bo_flags |= GBM_BO_USE_LINEAR; + + if (!output->gbm_bos[0]) { + for (unsigned int i = 0; i < n; i++) { + output->gbm_bos[i] = gbm_bo_create(gbm, + mode->width, mode->height, + output->format->format, + output->gbm_bo_flags); + } + } + + struct drm_device *device = output->device; + for (unsigned int i = 0; i < n; i++) { + assert(output->gbm_bos[i]); + drm_fb_get_from_bo(output->gbm_bos[i], device, !output->format->opaque_substitute, + BUFFER_GBM_BO); + } + + assert(output->gbm_surface == NULL); +} + +/* Init output state that depends on vulkan or gbm */ +int +drm_output_init_vulkan(struct drm_output *output, struct drm_backend *b) +{ + const struct weston_renderer *renderer = b->compositor->renderer; + const struct weston_mode *mode = output->base.current_mode; + const struct pixel_format_info *format[2] = { + output->format, + fallback_format_for(output->format), + }; + struct vulkan_renderer_output_options options = { + .formats = format, + .formats_count = 1, + .area.x = 0, + .area.y = 0, + .area.width = mode->width, + .area.height = mode->height, + .fb_size.width = mode->width, + .fb_size.height = mode->height, + }; + + assert(output->gbm_surface == NULL); + + /* + * TODO: This method for BO allocation needs to be reworked. + * Currently, it allocates a buffer based on the list of acceptable + * modifiers received from the DRM backend but does not check it + * against formats renderable by the renderer (and there is no + * straightforward way to do so yet). + * Most likely this should be replaced by sending the acceptable + * modifiers list from the DRM backend to the renderer and doing the + * optimal dmabuf allocation in the renderer. But as of this writing, + * this API for dmabuf allocation is not yet implemented in the + * Vulkan renderer. + */ + create_gbm_bos(b->gbm, output, NUM_GBM_BOS); + if (!output->gbm_bos[0]) { + weston_log("failed to create gbm bos\n"); + return -1; + } + options.num_gbm_bos = NUM_GBM_BOS; + + if (options.formats[1]) + options.formats_count = 2; + + for (unsigned int i = 0; i < options.num_gbm_bos; i++) + options.gbm_bos[i] = output->gbm_bos[i]; + + if (renderer->vulkan->output_window_create(&output->base, &options) < 0) { + weston_log("failed to create vulkan renderer output state\n"); + gbm_surface_destroy(output->gbm_surface); + output->gbm_surface = NULL; + return -1; + } + + drm_output_init_cursor_vulkan(output, b); + + return 0; +} + void drm_output_fini_egl(struct drm_output *output) { @@ -276,6 +495,27 @@ drm_output_fini_egl(struct drm_output *output) drm_output_fini_cursor_egl(output); } +void +drm_output_fini_vulkan(struct drm_output *output) +{ + struct drm_backend *b = output->backend; + const struct weston_renderer *renderer = b->compositor->renderer; + + /* Destroying the GBM surface will destroy all our GBM buffers, + * regardless of refcount. Ensure we destroy them here. 
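+	 * In the Vulkan path these are standalone gbm_bos rather than a
+	 * gbm_surface, but the same reasoning applies.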
*/ + if (!b->compositor->shutting_down && + output->scanout_plane->state_cur->fb && + output->scanout_plane->state_cur->fb->type == BUFFER_GBM_BO) { + drm_plane_reset_state(output->scanout_plane); + } + + renderer->vulkan->output_destroy(&output->base); + for (unsigned int i = 0; i < NUM_GBM_BOS; i++) + gbm_bo_destroy(output->gbm_bos[i]); + output->gbm_surface = NULL; + drm_output_fini_cursor_vulkan(output); +} + struct drm_fb * drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage) { @@ -307,3 +547,36 @@ drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage) return ret; } + +struct drm_fb * +drm_output_render_vulkan(struct drm_output_state *state, pixman_region32_t *damage) +{ + struct drm_output *output = state->output; + struct drm_device *device = output->device; + struct gbm_bo *bo; + struct drm_fb *ret; + + output->base.compositor->renderer->repaint_output(&output->base, + damage, NULL); + + bo = output->gbm_bos[output->current_bo]; + if (!bo) { + weston_log("failed to get gbm_bo\n"); + return NULL; + } + + /* Output transparent/opaque image according to the format required by + * the client. */ + ret = drm_fb_get_from_bo(bo, device, !output->format->opaque_substitute, + BUFFER_GBM_BO); + if (!ret) { + weston_log("failed to get drm_fb for bo\n"); + return NULL; + } + ret->bo = bo; + + ret->gbm_surface = NULL; + output->current_bo = (output->current_bo + 1) % NUM_GBM_BOS; + + return ret; +} diff --git a/libweston/backend-drm/drm-internal.h b/libweston/backend-drm/drm-internal.h index 442f0c027..0b3e44ce8 100644 --- a/libweston/backend-drm/drm-internal.h +++ b/libweston/backend-drm/drm-internal.h @@ -293,6 +293,7 @@ enum drm_fb_type { BUFFER_PIXMAN_DUMB, /**< internal Pixman rendering */ BUFFER_GBM_SURFACE, /**< internal EGL rendering */ BUFFER_CURSOR, /**< internal cursor buffer */ + BUFFER_GBM_BO, /**< internal Vulkan rendering */ }; struct drm_fb { @@ -574,6 +575,8 @@ struct drm_output { int current_cursor; struct gbm_surface *gbm_surface; + struct gbm_bo *gbm_bos[2]; + int current_bo; const struct pixel_format_info *format; uint32_t gbm_bo_flags; @@ -946,6 +949,18 @@ drm_output_fini_egl(struct drm_output *output); struct drm_fb * drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage); +int +init_vulkan(struct drm_backend *b); + +int +drm_output_init_vulkan(struct drm_output *output, struct drm_backend *b); + +void +drm_output_fini_vulkan(struct drm_output *output); + +struct drm_fb * +drm_output_render_vulkan(struct drm_output_state *state, pixman_region32_t *damage); + #else inline static int init_egl(struct drm_backend *b) @@ -970,4 +985,29 @@ drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage) { return NULL; } + +inline static int +init_vulkan(struct drm_backend *b) +{ + weston_log("Compiled without GBM support\n"); + return -1; +} + +inline static int +drm_output_init_vulkan(struct drm_output *output, struct drm_backend *b) +{ + return -1; +} + +inline static void +drm_output_fini_vulkan(struct drm_output *output) +{ +} + +inline static struct drm_fb * +drm_output_render_vulkan(struct drm_output_state *state, pixman_region32_t *damage) +{ + return NULL; +} + #endif diff --git a/libweston/backend-drm/drm.c b/libweston/backend-drm/drm.c index fb15cac27..14635c149 100644 --- a/libweston/backend-drm/drm.c +++ b/libweston/backend-drm/drm.c @@ -520,13 +520,16 @@ drm_output_render(struct drm_output_state *state) !weston_output_has_renderer_capture_tasks(&output->base) && 
scanout_plane->state_cur->fb && (scanout_plane->state_cur->fb->type == BUFFER_GBM_SURFACE || + scanout_plane->state_cur->fb->type == BUFFER_GBM_BO || scanout_plane->state_cur->fb->type == BUFFER_PIXMAN_DUMB)) { fb = drm_fb_ref(scanout_plane->state_cur->fb); } else if (c->renderer->type == WESTON_RENDERER_PIXMAN) { fb = drm_output_render_pixman(state, &damage); - } else { + } else if (c->renderer->type == WESTON_RENDERER_GL) { fb = drm_output_render_gl(state, &damage); - } + } else if (c->renderer->type == WESTON_RENDERER_VULKAN) { + fb = drm_output_render_vulkan(state, &damage); + } else assert(0); if (!fb) { drm_plane_state_put_back(scanout_state); @@ -1195,6 +1198,13 @@ drm_output_apply_mode(struct drm_output *output) "new mode"); return -1; } + } else if (b->compositor->renderer->type == WESTON_RENDERER_VULKAN) { + drm_output_fini_vulkan(output); + if (drm_output_init_vulkan(output, b) < 0) { + weston_log("failed to init output vulkan state with " + "new mode"); + return -1; + } } if (device->atomic_modeset) @@ -2482,6 +2492,11 @@ drm_output_enable(struct weston_output *base) weston_log("Failed to init output pixman state\n"); goto err_planes; } + } else if (b->compositor->renderer->type == WESTON_RENDERER_VULKAN) { + if (drm_output_init_vulkan(output, b) < 0) { + weston_log("Failed to init output vulkan state\n"); + goto err_planes; + } } else if (drm_output_init_egl(output, b) < 0) { weston_log("Failed to init output gl state\n"); goto err_planes; @@ -2532,6 +2547,8 @@ drm_output_deinit(struct weston_output *base) if (b->compositor->renderer->type == WESTON_RENDERER_PIXMAN) drm_output_fini_pixman(output); + else if (b->compositor->renderer->type == WESTON_RENDERER_VULKAN) + drm_output_fini_vulkan(output); else drm_output_fini_egl(output); @@ -4218,9 +4235,14 @@ drm_backend_create(struct weston_compositor *compositor, if (config->additional_devices) open_additional_devices(b, config->additional_devices); + /* GL renderer is the default whenever it is enabled. + * Only on a build without GL but with Vulkan, Vulkan is picked + * as the default. 
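+	 * (ENABLE_EGL and ENABLE_VULKAN come from the renderer-gl and
+	 * renderer-vulkan meson options, respectively.)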
Otherwise, pick pixman as the default */ if (config->renderer == WESTON_RENDERER_AUTO) { -#ifdef BUILD_DRM_GBM +#if defined(ENABLE_EGL) config->renderer = WESTON_RENDERER_GL; +#elif defined(ENABLE_VULKAN) + config->renderer = WESTON_RENDERER_VULKAN; #else config->renderer = WESTON_RENDERER_PIXMAN; #endif @@ -4239,6 +4261,12 @@ drm_backend_create(struct weston_compositor *compositor, goto err_udev_dev; } break; + case WESTON_RENDERER_VULKAN: + if (init_vulkan(b) < 0) { + weston_log("failed to initialize vulkan\n"); + goto err_udev_dev; + } + break; default: weston_log("unsupported renderer for DRM backend\n"); goto err_udev_dev; diff --git a/libweston/backend-drm/fb.c b/libweston/backend-drm/fb.c index 7feef482c..3c89142a2 100644 --- a/libweston/backend-drm/fb.c +++ b/libweston/backend-drm/fb.c @@ -366,7 +366,7 @@ drm_fb_destroy_gbm(struct gbm_bo *bo, void *data) struct drm_fb *fb = data; assert(fb->type == BUFFER_GBM_SURFACE || fb->type == BUFFER_CLIENT || - fb->type == BUFFER_CURSOR); + fb->type == BUFFER_CURSOR || fb->type == BUFFER_GBM_BO); drm_fb_destroy(fb); } @@ -622,6 +622,7 @@ drm_fb_unref(struct drm_fb *fb) #ifdef BUILD_DRM_GBM case BUFFER_CURSOR: case BUFFER_CLIENT: + case BUFFER_GBM_BO: gbm_bo_destroy(fb->bo); break; case BUFFER_GBM_SURFACE: diff --git a/libweston/backend-drm/kms.c b/libweston/backend-drm/kms.c index 0dc97cc36..2096774c4 100644 --- a/libweston/backend-drm/kms.c +++ b/libweston/backend-drm/kms.c @@ -1791,6 +1791,9 @@ drm_pending_state_apply(struct drm_pending_state *pending_state) if (b->compositor->renderer->type == WESTON_RENDERER_GL) { drm_output_fini_egl(output); drm_output_init_egl(output, b); + } else if (b->compositor->renderer->type == WESTON_RENDERER_VULKAN) { + drm_output_fini_vulkan(output); + drm_output_init_vulkan(output, b); } } } diff --git a/libweston/backend-drm/meson.build b/libweston/backend-drm/meson.build index d7db8097a..9d87be89f 100644 --- a/libweston/backend-drm/meson.build +++ b/libweston/backend-drm/meson.build @@ -34,6 +34,7 @@ srcs_drm = [ deps_drm = [ dep_egl, # optional + dep_vulkan, # optional dep_libm, dep_libdl, dep_libshared, @@ -55,6 +56,15 @@ if get_option('renderer-gl') config_h.set('BUILD_DRM_GBM', '1') endif +if get_option('renderer-vulkan') + if not dep_gbm.found() + error('drm-backend with Vulkan renderer requires gbm which was not found. 
Or, you can use \'-Drenderer-vulkan=false\'.') + endif + deps_drm += dep_gbm + srcs_drm += 'drm-gbm.c' + config_h.set('BUILD_DRM_GBM', '1') +endif + if get_option('backend-drm-screencast-vaapi') foreach name : [ 'libva', 'libva-drm' ] d = dependency(name, version: '>= 0.34.0', required: false) diff --git a/libweston/backend-headless/headless.c b/libweston/backend-headless/headless.c index 408992651..e0425c17d 100644 --- a/libweston/backend-headless/headless.c +++ b/libweston/backend-headless/headless.c @@ -42,6 +42,7 @@ #include "pixel-formats.h" #include "pixman-renderer.h" #include "renderer-gl/gl-renderer.h" +#include "renderer-vulkan/vulkan-renderer.h" #include "renderer-borders.h" #include "shared/weston-drm-fourcc.h" #include "shared/weston-egl-ext.h" @@ -198,6 +199,24 @@ headless_output_disable_gl(struct headless_output *output) } } +static void +headless_output_disable_vulkan(struct headless_output *output) +{ + struct weston_compositor *compositor = output->base.compositor; + const struct weston_renderer *renderer = compositor->renderer; + + weston_renderer_borders_fini(&output->borders, &output->base); + + renderer->destroy_renderbuffer(output->renderbuffer); + output->renderbuffer = NULL; + renderer->vulkan->output_destroy(&output->base); + + if (output->frame) { + frame_destroy(output->frame); + output->frame = NULL; + } +} + static void headless_output_disable_pixman(struct headless_output *output) { @@ -227,6 +246,9 @@ headless_output_disable(struct weston_output *base) case WESTON_RENDERER_GL: headless_output_disable_gl(output); break; + case WESTON_RENDERER_VULKAN: + headless_output_disable_vulkan(output); + break; case WESTON_RENDERER_PIXMAN: headless_output_disable_pixman(output); break; @@ -310,6 +332,63 @@ err_renderbuffer: return -1; } +static int +headless_output_enable_vulkan(struct headless_output *output) +{ + struct headless_backend *b = output->backend; + const struct weston_renderer *renderer = b->compositor->renderer; + const struct weston_mode *mode = output->base.current_mode; + struct vulkan_renderer_fbo_options options = { 0 }; + + if (b->decorate) { + /* + * Start with a dummy exterior size and then resize, because + * there is no frame_create() with interior size. 
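+		 * (This mirrors the decorated GL path earlier in this file.)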
+ */ + output->frame = frame_create(b->theme, 100, 100, + FRAME_BUTTON_CLOSE, NULL, NULL); + if (!output->frame) { + weston_log("failed to create frame for output\n"); + return -1; + } + frame_resize_inside(output->frame, mode->width, mode->height); + + options.fb_size.width = frame_width(output->frame); + options.fb_size.height = frame_height(output->frame); + frame_interior(output->frame, &options.area.x, &options.area.y, + &options.area.width, &options.area.height); + } else { + options.area.x = 0; + options.area.y = 0; + options.area.width = mode->width; + options.area.height = mode->height; + options.fb_size.width = mode->width; + options.fb_size.height = mode->height; + } + + if (renderer->vulkan->output_fbo_create(&output->base, &options) < 0) { + weston_log("failed to create vulkan renderer output state\n"); + if (output->frame) { + frame_destroy(output->frame); + output->frame = NULL; + } + return -1; + } + + output->renderbuffer = + renderer->create_renderbuffer(&output->base, b->formats[0], + NULL, 0, NULL, NULL); + if (!output->renderbuffer) + goto err_renderbuffer; + + return 0; + +err_renderbuffer: + renderer->vulkan->output_destroy(&output->base); + + return -1; +} + static int headless_output_enable_pixman(struct headless_output *output) { @@ -365,6 +444,9 @@ headless_output_enable(struct weston_output *base) case WESTON_RENDERER_GL: ret = headless_output_enable_gl(output); break; + case WESTON_RENDERER_VULKAN: + ret = headless_output_enable_vulkan(output); + break; case WESTON_RENDERER_PIXMAN: ret = headless_output_enable_pixman(output); break; @@ -592,6 +674,16 @@ headless_backend_create(struct weston_compositor *compositor, &options.base); break; } + case WESTON_RENDERER_VULKAN: { + const struct vulkan_renderer_display_options options = { + .formats = b->formats, + .formats_count = b->formats_count, + }; + ret = weston_compositor_init_renderer(compositor, + WESTON_RENDERER_VULKAN, + &options.base); + break; + } case WESTON_RENDERER_PIXMAN: if (config->decorate) { weston_log("Error: Pixman renderer does not support decorations.\n"); diff --git a/libweston/backend-wayland/meson.build b/libweston/backend-wayland/meson.build index d9c1ed297..84fcae28a 100644 --- a/libweston/backend-wayland/meson.build +++ b/libweston/backend-wayland/meson.build @@ -33,6 +33,10 @@ if get_option('renderer-gl') deps_wlwl += dep_egl # for gl-renderer.h endif +if get_option('renderer-vulkan') + deps_wlwl += dep_vulkan # for vulkan-renderer.h +endif + plugin_wlwl = shared_library( 'wayland-backend', srcs_wlwl, diff --git a/libweston/backend-wayland/wayland.c b/libweston/backend-wayland/wayland.c index 8cf17f692..7ee2e0d8a 100644 --- a/libweston/backend-wayland/wayland.c +++ b/libweston/backend-wayland/wayland.c @@ -48,6 +48,7 @@ #include #include #include "renderer-gl/gl-renderer.h" +#include "renderer-vulkan/vulkan-renderer.h" #include "renderer-borders.h" #include "shared/weston-drm-fourcc.h" #include "shared/weston-egl-ext.h" @@ -456,8 +457,12 @@ static const struct wl_callback_listener frame_listener = { static void draw_initial_frame(struct wayland_output *output) { + struct wayland_backend *b = output->backend; struct wayland_shm_buffer *sb; + assert(b->compositor); + assert(b->compositor->renderer); + sb = wayland_output_get_shm_buffer(output); /* If we are rendering with GL, then orphan it so that it gets @@ -465,12 +470,15 @@ draw_initial_frame(struct wayland_output *output) if (output->gl.egl_window) sb->output = NULL; + if (b->compositor->renderer->type == WESTON_RENDERER_VULKAN) + 
sb->output = NULL; + wl_surface_attach(output->parent.surface, sb->buffer, 0, 0); wl_surface_damage(output->parent.surface, 0, 0, sb->width, sb->height); } -#ifdef ENABLE_EGL +#if defined(ENABLE_EGL) || defined(ENABLE_VULKAN) static void wayland_output_update_renderer_border(struct wayland_output *output) { @@ -530,6 +538,35 @@ wayland_output_repaint_gl(struct weston_output *output_base) } #endif +#ifdef ENABLE_VULKAN +static int +wayland_output_repaint_vulkan(struct weston_output *output_base) +{ + struct wayland_output *output = to_wayland_output(output_base); + struct weston_compositor *ec; + pixman_region32_t damage; + + assert(output); + + ec = output->base.compositor; + + pixman_region32_init(&damage); + + weston_output_flush_damage_for_primary_plane(output_base, &damage); + + output->frame_cb = wl_surface_frame(output->parent.surface); + wl_callback_add_listener(output->frame_cb, &frame_listener, output); + + wayland_output_update_renderer_border(output); + + ec->renderer->repaint_output(&output->base, &damage, NULL); + + pixman_region32_fini(&damage); + + return 0; +} +#endif + static void wayland_output_update_shm_border(struct wayland_shm_buffer *buffer) { @@ -709,6 +746,13 @@ wayland_output_disable(struct weston_output *base) renderer->gl->output_destroy(&output->base); wl_egl_window_destroy(output->gl.egl_window); break; +#endif +#ifdef ENABLE_VULKAN + case WESTON_RENDERER_VULKAN: + weston_renderer_borders_fini(&output->borders, &output->base); + + renderer->vulkan->output_destroy(&output->base); + break; #endif default: unreachable("invalid renderer"); @@ -790,6 +834,47 @@ cleanup_window: } #endif +#ifdef ENABLE_VULKAN +static int +wayland_output_init_vulkan_renderer(struct wayland_output *output) +{ + const struct weston_mode *mode = output->base.current_mode; + struct wayland_backend *b = output->backend; + const struct weston_renderer *renderer; + struct vulkan_renderer_output_options options = { + .formats = b->formats, + .formats_count = b->formats_count, + }; + + if (output->frame) { + frame_interior(output->frame, &options.area.x, &options.area.y, + &options.area.width, &options.area.height); + options.fb_size.width = frame_width(output->frame); + options.fb_size.height = frame_height(output->frame); + } else { + options.area.x = 0; + options.area.y = 0; + options.area.width = mode->width; + options.area.height = mode->height; + options.fb_size.width = mode->width; + options.fb_size.height = mode->height; + } + + options.wayland_display = b->parent.wl_display; + options.wayland_surface = output->parent.surface; + + renderer = output->base.compositor->renderer; + + if (renderer->vulkan->output_window_create(&output->base, &options) < 0) + goto cleanup_window; + + return 0; + +cleanup_window: + return -1; +} +#endif + static int wayland_output_init_pixman_renderer(struct wayland_output *output) { @@ -824,6 +909,9 @@ wayland_output_resize_surface(struct wayland_output *output) struct weston_geometry opa = area; struct wl_region *region; + assert(b->compositor); + assert(b->compositor->renderer); + if (output->frame) { frame_resize_inside(output->frame, area.width, area.height); frame_interior(output->frame, &area.x, &area.y, NULL, NULL); @@ -862,6 +950,14 @@ wayland_output_resize_surface(struct wayland_output *output) weston_renderer_borders_fini(&output->borders, &output->base); } else #endif +#ifdef ENABLE_VULKAN + if (b->compositor->renderer->type == WESTON_RENDERER_VULKAN) { + weston_renderer_resize_output(&output->base, &fb_size, &area); + + /* These will need to be 
re-created due to the resize */ + weston_renderer_borders_fini(&output->borders, &output->base); + } else +#endif { /* * Pixman-renderer never knows about decorations, we blit them @@ -1043,6 +1139,13 @@ wayland_output_switch_mode_finish(struct wayland_output *output) if (wayland_output_init_gl_renderer(output) < 0) return -1; break; +#endif +#ifdef ENABLE_VULKAN + case WESTON_RENDERER_VULKAN: + renderer->vulkan->output_destroy(&output->base); + if (wayland_output_init_vulkan_renderer(output) < 0) + return -1; + break; #endif default: unreachable("invalid renderer"); @@ -1303,6 +1406,14 @@ wayland_output_enable(struct weston_output *base) output->base.repaint = wayland_output_repaint_gl; break; +#endif +#ifdef ENABLE_VULKAN + case WESTON_RENDERER_VULKAN: + if (wayland_output_init_vulkan_renderer(output) < 0) + goto err_output; + + output->base.repaint = wayland_output_repaint_vulkan; + break; #endif default: unreachable("invalid renderer"); @@ -2961,6 +3072,22 @@ wayland_backend_create(struct weston_compositor *compositor, goto err_display; } break; + case WESTON_RENDERER_VULKAN: { + const struct vulkan_renderer_display_options options = { + .formats = b->formats, + .formats_count = b->formats_count, + }; + + if (weston_compositor_init_renderer(compositor, + WESTON_RENDERER_VULKAN, + &options.base) < 0) { + weston_log("Failed to initialize the Vulkan renderer\n"); + goto err_display; + } + /* For now Vulkan does not fall back to anything automatically, + * like GL renderer does. */ + break; + } default: weston_log("Unsupported renderer requested\n"); goto err_display; diff --git a/libweston/backend-x11/x11.c b/libweston/backend-x11/x11.c index bf9e3b553..6987eece8 100644 --- a/libweston/backend-x11/x11.c +++ b/libweston/backend-x11/x11.c @@ -57,6 +57,7 @@ #include "shared/timespec-util.h" #include "shared/file-util.h" #include "renderer-gl/gl-renderer.h" +#include "renderer-vulkan/vulkan-renderer.h" #include "shared/weston-drm-fourcc.h" #include "shared/weston-egl-ext.h" #include "shared/xalloc.h" @@ -455,6 +456,29 @@ x11_output_repaint_gl(struct weston_output *output_base) return 0; } +static int +x11_output_repaint_vulkan(struct weston_output *output_base) +{ + struct x11_output *output = to_x11_output(output_base); + struct weston_compositor *ec; + pixman_region32_t damage; + + assert(output); + + ec = output->base.compositor; + + pixman_region32_init(&damage); + + weston_output_flush_damage_for_primary_plane(output_base, &damage); + + ec->renderer->repaint_output(output_base, &damage, NULL); + + pixman_region32_fini(&damage); + + weston_output_arm_frame_timer(output_base, output->finish_frame_timer); + return 0; +} + static void set_clip_for_output(struct weston_output *output_base, pixman_region32_t *region) { @@ -929,6 +953,9 @@ x11_output_disable(struct weston_output *base) case WESTON_RENDERER_GL: renderer->gl->output_destroy(&output->base); break; + case WESTON_RENDERER_VULKAN: + renderer->vulkan->output_destroy(&output->base); + break; default: unreachable("invalid renderer"); } @@ -1110,6 +1137,29 @@ x11_output_enable(struct weston_output *base) output->base.repaint = x11_output_repaint_gl; break; } + case WESTON_RENDERER_VULKAN: { + struct vulkan_renderer_output_options options = { + .formats = b->formats, + .formats_count = b->formats_count, + .area.x = 0, + .area.y = 0, + .area.width = mode->width, + .area.height = mode->height, + .fb_size.width = mode->width, + .fb_size.height = mode->height, + }; + + options.xcb_connection = b->conn; + options.xcb_visualid = 
screen->root_visual; + options.xcb_window = output->window; + + ret = renderer->vulkan->output_window_create(base, &options); + if (ret < 0) + goto err; + + output->base.repaint = x11_output_repaint_vulkan; + break; + } default: unreachable("invalid renderer"); } @@ -1960,6 +2010,17 @@ x11_backend_create(struct weston_compositor *compositor, goto err_xdisplay; break; } + case WESTON_RENDERER_VULKAN: { + const struct vulkan_renderer_display_options options = { + .formats = b->formats, + .formats_count = b->formats_count, + }; + if (weston_compositor_init_renderer(compositor, + WESTON_RENDERER_VULKAN, + &options.base) < 0) + goto err_xdisplay; + break; + } default: weston_log("Unsupported renderer requested\n"); goto err_xdisplay; diff --git a/libweston/compositor.c b/libweston/compositor.c index 55c930e0a..d8af711ce 100644 --- a/libweston/compositor.c +++ b/libweston/compositor.c @@ -88,6 +88,7 @@ #include "pixman-renderer.h" #include "renderer-gl/gl-renderer.h" #include "weston-trace.h" +#include "renderer-vulkan/vulkan-renderer.h" #include "weston-log-internal.h" @@ -10514,6 +10515,8 @@ weston_compositor_init_renderer(struct weston_compositor *compositor, { const struct gl_renderer_interface *gl_renderer; const struct gl_renderer_display_options *gl_options; + const struct vulkan_renderer_interface *vulkan_renderer; + const struct vulkan_renderer_display_options *vulkan_options; int ret; switch (renderer_type) { @@ -10534,6 +10537,28 @@ weston_compositor_init_renderer(struct weston_compositor *compositor, compositor->renderer->gl = gl_renderer; weston_log("Using GL renderer\n"); break; + case WESTON_RENDERER_VULKAN: + vulkan_renderer = weston_load_module("vulkan-renderer.so", + "vulkan_renderer_interface", + LIBWESTON_MODULEDIR); + if (!vulkan_renderer) + return -1; + + vulkan_options = container_of(options, + struct vulkan_renderer_display_options, + base); + ret = vulkan_renderer->display_create(compositor, vulkan_options); + if (ret < 0) + return ret; + + compositor->renderer->vulkan = vulkan_renderer; + weston_log("Using Vulkan renderer\n"); + weston_log_continue(STAMP_SPACE "Note: This version of Vulkan renderer " + "is still experimental and not expected to be ready " + "for production use\n"); + weston_log("Run with VK_INSTANCE_LAYERS=VK_LAYER_KHRONOS_validation " + "to enable the Vulkan validation layers\n"); + break; case WESTON_RENDERER_PIXMAN: ret = pixman_renderer_init(compositor); if (ret < 0) diff --git a/libweston/libweston-internal.h b/libweston/libweston-internal.h index 8bba553bc..fced33813 100644 --- a/libweston/libweston-internal.h +++ b/libweston/libweston-internal.h @@ -208,6 +208,7 @@ struct weston_renderer { enum weston_renderer_type type; const struct gl_renderer_interface *gl; + const struct vulkan_renderer_interface *vulkan; const struct pixman_renderer_interface *pixman; /* Sets the output border. 
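For reference, a minimal way to exercise the new renderer once built, assuming the frontend options wired up above (the validation-layer variable is the one suggested by the weston_log() hint in weston_compositor_init_renderer()):

    $ weston --renderer=vulkan
    $ VK_INSTANCE_LAYERS=VK_LAYER_KHRONOS_validation weston --renderer=vulkan

The same choice can be made persistent via the renderer name parsed by frontend/config-helpers.c, assuming the usual renderer key in the [core] section of weston.ini:

    [core]
    renderer=vulkan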
diff --git a/libweston/meson.build b/libweston/meson.build index e95150d96..a0816f0f0 100644 --- a/libweston/meson.build +++ b/libweston/meson.build @@ -7,6 +7,7 @@ deps_libweston = [ dep_xkbcommon, dep_matrix_c, dep_egl, + dep_vulkan, ] srcs_libweston = [ git_version_h, @@ -242,6 +243,7 @@ dep_lib_renderer_borders = declare_dependency( subdir('color-lcms') subdir('renderer-gl') +subdir('renderer-vulkan') subdir('backend-drm') subdir('backend-headless') subdir('backend-pipewire') diff --git a/libweston/pixel-formats.c b/libweston/pixel-formats.c index fc45e2672..3b2759a45 100644 --- a/libweston/pixel-formats.c +++ b/libweston/pixel-formats.c @@ -94,6 +94,13 @@ #define GL_TYPE(type) .gl_type = 0 #endif +#ifdef ENABLE_VULKAN +#include +#define VULKAN_FORMAT(fmt) .vulkan_format = (fmt) +#else +#define VULKAN_FORMAT(fmt) .vulkan_format = 0 +#endif + #define DRM_FORMAT(f) .format = DRM_FORMAT_ ## f, .drm_format_name = #f #define BITS_RGBA_FIXED(r_, g_, b_, a_) \ .bits.r = r_, \ @@ -128,6 +135,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_FORMAT_INFO(GL_R8, GL_RED, GL_UNSIGNED_BYTE, R001), GL_FORMAT(GL_R8_EXT), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_R8_UNORM), }, { DRM_FORMAT(R16), @@ -146,6 +154,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_FORMAT_INFO(GL_RG8, GL_RG, GL_UNSIGNED_BYTE, RG01), GL_FORMAT(GL_RG8_EXT), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_R8G8_UNORM), }, { DRM_FORMAT(RG88), @@ -366,6 +375,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_FORMAT_INFO(GL_RGB8, GL_RGB, GL_UNSIGNED_BYTE, BGR1), GL_FORMAT(GL_RGB), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_B8G8R8_UNORM), }, { DRM_FORMAT(BGR888), @@ -375,6 +385,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_FORMAT_INFO(GL_RGB8, GL_RGB, GL_UNSIGNED_BYTE, RGB1), GL_FORMAT(GL_RGB), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_R8G8B8_UNORM), }, { DRM_FORMAT(XRGB8888), @@ -386,6 +397,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_INTERNALFORMAT(GL_RGB8), GL_FORMAT(GL_BGRA_EXT), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_B8G8R8A8_UNORM), #if __BYTE_ORDER == __LITTLE_ENDIAN PIXMAN_FMT(x8r8g8b8), #else @@ -403,6 +415,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_INTERNALFORMAT(GL_RGBA8), GL_FORMAT(GL_BGRA_EXT), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_B8G8R8A8_UNORM), #if __BYTE_ORDER == __LITTLE_ENDIAN PIXMAN_FMT(a8r8g8b8), #else @@ -417,6 +430,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_FORMAT_INFO(GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, RGB1), GL_FORMAT(GL_RGBA), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_R8G8B8A8_UNORM), #if __BYTE_ORDER == __LITTLE_ENDIAN PIXMAN_FMT(x8b8g8r8), #else @@ -432,6 +446,7 @@ static const struct pixel_format_info pixel_format_table[] = { GL_FORMAT_INFO(GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, RGBA), GL_FORMAT(GL_RGBA), GL_TYPE(GL_UNSIGNED_BYTE), + VULKAN_FORMAT(VK_FORMAT_R8G8B8A8_UNORM), #if __BYTE_ORDER == __LITTLE_ENDIAN PIXMAN_FMT(a8b8g8r8), #else @@ -538,6 +553,7 @@ static const struct pixel_format_info pixel_format_table[] = { BITS_RGBA_FIXED(10, 10, 10, 2), .bpp = 32, .opaque_substitute = DRM_FORMAT_XBGR2101010, + VULKAN_FORMAT(VK_FORMAT_A2B10G10R10_UNORM_PACK32), #if __BYTE_ORDER == __LITTLE_ENDIAN GL_FORMAT_INFO(GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, RGBA), GL_FORMAT(GL_RGBA), @@ -580,6 +596,7 @@ static const struct pixel_format_info 
pixel_format_table[] = { #if __BYTE_ORDER__ == __LITTLE_ENDIAN GL_FORMAT(GL_RGBA16_EXT), GL_TYPE(GL_UNSIGNED_SHORT), + VULKAN_FORMAT(VK_FORMAT_R16G16B16A16_UNORM), #endif }, { @@ -592,6 +609,7 @@ static const struct pixel_format_info pixel_format_table[] = { #if __BYTE_ORDER__ == __LITTLE_ENDIAN GL_FORMAT(GL_RGBA16_EXT), GL_TYPE(GL_UNSIGNED_SHORT), + VULKAN_FORMAT(VK_FORMAT_R16G16B16A16_UNORM), #endif }, { @@ -618,6 +636,7 @@ static const struct pixel_format_info pixel_format_table[] = { #if __BYTE_ORDER__ == __LITTLE_ENDIAN GL_FORMAT(GL_RGBA16F), GL_TYPE(GL_HALF_FLOAT), + VULKAN_FORMAT(VK_FORMAT_R16G16B16A16_SFLOAT), #endif }, { @@ -630,6 +649,7 @@ static const struct pixel_format_info pixel_format_table[] = { #if __BYTE_ORDER__ == __LITTLE_ENDIAN GL_FORMAT(GL_RGBA16F), GL_TYPE(GL_HALF_FLOAT), + VULKAN_FORMAT(VK_FORMAT_R16G16B16A16_SFLOAT), #endif }, { diff --git a/libweston/pixel-formats.h b/libweston/pixel-formats.h index d6e028bfa..8ea2c5eb4 100644 --- a/libweston/pixel-formats.h +++ b/libweston/pixel-formats.h @@ -107,6 +107,9 @@ struct pixel_format_info { /** GL data type, if data can be natively/directly uploaded. */ int gl_type; + /** Vulkan format, if data can be natively/directly uploaded. */ + int vulkan_format; + /** Pixman data type, if it agrees exactly with the wl_shm format */ pixman_format_code_t pixman_format; diff --git a/libweston/renderer-vulkan/meson.build b/libweston/renderer-vulkan/meson.build new file mode 100644 index 000000000..faeee6249 --- /dev/null +++ b/libweston/renderer-vulkan/meson.build @@ -0,0 +1,59 @@ +if not get_option('renderer-vulkan') + subdir_done() +endif + +config_h.set('ENABLE_VULKAN', '1') + +srcs_shaders_renderer_vulkan = [ + 'vulkan_vertex_shader_surface.vert', + 'vulkan_vertex_shader_texcoord.vert', + 'vulkan_fragment_shader.frag', +] + +shaders_renderer_vulkan = [] +foreach s : srcs_shaders_renderer_vulkan + shaders_renderer_vulkan += custom_target(s + '.spv.h', + command: [ prog_glslang, '@INPUT@', '--quiet', '--variable-name', '@BASENAME@', '-V', '-x', '-o', '@OUTPUT@' ], + input: s, + output: '@BASENAME@.spv.h', + ) +endforeach + +srcs_renderer_vulkan = [ + 'vulkan-pipeline.c', + 'vulkan-pixel-format.c', + 'vulkan-renderer.c', + shaders_renderer_vulkan, + linux_dmabuf_unstable_v1_protocol_c, + linux_dmabuf_unstable_v1_server_protocol_h, +] + +deps_renderer_vulkan = [ + dep_libdrm, + + dep_gbm, + dep_libm, + dep_pixman, + dep_libweston_private, + dep_libdrm_headers, + dep_vertex_clipping +] + +foreach name : [ 'vulkan' ] + d = dependency(name, required: false) + if not d.found() + error('vulkan-renderer requires @0@ which was not found. Or, you can use \'-Drenderer-vulkan=false\'.'.format(name)) + endif + deps_renderer_vulkan += d +endforeach + +plugin_vulkan = shared_library( + 'vulkan-renderer', + srcs_renderer_vulkan, + include_directories: common_inc, + dependencies: deps_renderer_vulkan, + name_prefix: '', + install: true, + install_dir: dir_module_libweston +) +env_modmap += 'vulkan-renderer.so=@0@;'.format(plugin_vulkan.full_path()) diff --git a/libweston/renderer-vulkan/vulkan-pipeline.c b/libweston/renderer-vulkan/vulkan-pipeline.c new file mode 100644 index 000000000..1e869d5ee --- /dev/null +++ b/libweston/renderer-vulkan/vulkan-pipeline.c @@ -0,0 +1,399 @@ +/* + * Copyright © 2025 Erico Nunes + * + * based on gl-shaders.c: + * Copyright 2012 Intel Corporation + * Copyright 2015,2019,2021 Collabora, Ltd. 
+ * Copyright 2016 NVIDIA Corporation
+ * Copyright 2019 Harish Krupo
+ * Copyright 2019 Intel Corporation
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "config.h"
+
+#include
+
+#include
+#include
+
+#include "vulkan-renderer.h"
+#include "vulkan-renderer-internal.h"
+
+/* const uint32_t vulkan_vertex_shader_surface[]; vulkan_vertex_shader_surface.vert */
+#include "vulkan_vertex_shader_surface.spv.h"
+
+/* const uint32_t vulkan_vertex_shader_texcoord[]; vulkan_vertex_shader_texcoord.vert */
+#include "vulkan_vertex_shader_texcoord.spv.h"
+
+/* const uint32_t vulkan_fragment_shader[]; vulkan_fragment_shader.frag */
+#include "vulkan_fragment_shader.spv.h"
+
+struct vertex {
+	float pos[2];
+};
+
+struct vertex_tc {
+	float pos[2];
+	float texcoord[2];
+};
+
+struct fs_specialization_consts {
+	uint32_t c_variant;
+	uint32_t c_input_is_premult;
+};
+
+static void create_graphics_pipeline(struct vulkan_renderer *vr,
+				     const struct vulkan_pipeline_requirements *req,
+				     struct vulkan_pipeline *pipeline)
+{
+	VkResult result;
+
+	VkShaderModule vs_module;
+	VkShaderModuleCreateInfo vs_shader_module_create_info = {
+		.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+	};
+
+	switch (req->texcoord_input) {
+	case SHADER_TEXCOORD_INPUT_ATTRIB:
+		vs_shader_module_create_info.codeSize = sizeof(vulkan_vertex_shader_texcoord);
+		vs_shader_module_create_info.pCode = (uint32_t *)vulkan_vertex_shader_texcoord;
+		break;
+	case SHADER_TEXCOORD_INPUT_SURFACE:
+		vs_shader_module_create_info.codeSize = sizeof(vulkan_vertex_shader_surface);
+		vs_shader_module_create_info.pCode = (uint32_t *)vulkan_vertex_shader_surface;
+		break;
+	default:
+		weston_log("Invalid req->texcoord_input\n");
+		abort();
+	}
+
+	vkCreateShaderModule(vr->dev, &vs_shader_module_create_info, NULL, &vs_module);
+
+	const struct fs_specialization_consts fsc = {
+		req->variant,
+		req->input_is_premult
+	};
+	/* Each entry maps a constant_id to the byte offset of its own
+	 * member inside the specialization data blob. */
+	const VkSpecializationMapEntry fsc_entries[] = {
+		{ 0, offsetof(struct fs_specialization_consts, c_variant),
+		  sizeof(fsc.c_variant) },
+		{ 1, offsetof(struct fs_specialization_consts, c_input_is_premult),
+		  sizeof(fsc.c_input_is_premult) },
+	};
+	const VkSpecializationInfo fs_specialization = {
+		.mapEntryCount = ARRAY_LENGTH(fsc_entries),
+		.pMapEntries = fsc_entries,
+		.dataSize = sizeof(fsc),
+		.pData = &fsc,
+	};
+	VkShaderModule fs_module;
+	const VkShaderModuleCreateInfo fs_shader_module_create_info = {
+		.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+		.codeSize = sizeof(vulkan_fragment_shader),
+ .pCode = (uint32_t *)vulkan_fragment_shader, + }; + vkCreateShaderModule(vr->dev, &fs_shader_module_create_info, NULL, &fs_module); + + const VkPipelineShaderStageCreateInfo vert_shader_stage_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + .stage = VK_SHADER_STAGE_VERTEX_BIT, + .module = vs_module, + .pName = "main", + }; + + const VkPipelineShaderStageCreateInfo frag_shader_stage_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + .stage = VK_SHADER_STAGE_FRAGMENT_BIT, + .module = fs_module, + .pSpecializationInfo = &fs_specialization, + .pName = "main", + }; + + const VkPipelineShaderStageCreateInfo shader_stages[] = {vert_shader_stage_info, frag_shader_stage_info}; + + // SHADER_TEXCOORD_INPUT_ATTRIB + const VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_attrib = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + .vertexBindingDescriptionCount = 1, + .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) { + { + .binding = 0, + .stride = sizeof(struct vertex_tc), + .inputRate = VK_VERTEX_INPUT_RATE_VERTEX + }, + }, + .vertexAttributeDescriptionCount = 2, + .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) { + { + .binding = 0, + .location = 0, + .format = VK_FORMAT_R32G32_SFLOAT, + .offset = offsetof(struct vertex_tc, pos), + }, + { + .binding = 0, + .location = 1, + .format = VK_FORMAT_R32G32_SFLOAT, + .offset = offsetof(struct vertex_tc, texcoord), + }, + } + }; + + // SHADER_TEXCOORD_INPUT_SURFACE + const VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_surface = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, + .vertexBindingDescriptionCount = 1, + .pVertexBindingDescriptions = (VkVertexInputBindingDescription[]) { + { + .binding = 0, + .stride = sizeof(struct vertex), + .inputRate = VK_VERTEX_INPUT_RATE_VERTEX + }, + }, + .vertexAttributeDescriptionCount = 1, + .pVertexAttributeDescriptions = (VkVertexInputAttributeDescription[]) { + { + .binding = 0, + .location = 0, + .format = VK_FORMAT_R32G32_SFLOAT, + .offset = offsetof(struct vertex, pos), + }, + } + }; + + const VkPipelineVertexInputStateCreateInfo *pipeline_vertex_input_state_create_info; + + switch (req->texcoord_input) { + case SHADER_TEXCOORD_INPUT_ATTRIB: + pipeline_vertex_input_state_create_info = &pipeline_vertex_input_attrib; + break; + case SHADER_TEXCOORD_INPUT_SURFACE: + pipeline_vertex_input_state_create_info = &pipeline_vertex_input_surface; + break; + default: + weston_log("Invalid req->texcoord_input\n"); + abort(); + } + + + const VkPipelineInputAssemblyStateCreateInfo input_assembly = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, + .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN, + .primitiveRestartEnable = VK_FALSE, + }; + + const VkPipelineViewportStateCreateInfo viewport_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, + .viewportCount = 1, + .scissorCount = 1, + }; + + const VkPipelineRasterizationStateCreateInfo rasterizer = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, + .depthClampEnable = VK_FALSE, + .rasterizerDiscardEnable = VK_FALSE, + .polygonMode = VK_POLYGON_MODE_FILL, + .lineWidth = 1.0f, + .cullMode = VK_CULL_MODE_NONE, + .frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE, + .depthBiasEnable = VK_FALSE, + }; + + const VkPipelineMultisampleStateCreateInfo multisampling = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, + .sampleShadingEnable = 
VK_FALSE, + .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT, + }; + + VkPipelineColorBlendAttachmentState color_blend_attachment = { + .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT, + .blendEnable = VK_FALSE, + }; + + if (req->blend) { + color_blend_attachment.blendEnable = VK_TRUE; + color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + color_blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ONE; + color_blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + color_blend_attachment.colorBlendOp = VK_BLEND_OP_ADD; + color_blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE; + color_blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; + color_blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD; + } + + const VkPipelineColorBlendStateCreateInfo color_blending = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, + .logicOpEnable = VK_FALSE, + .logicOp = VK_LOGIC_OP_COPY, + .attachmentCount = 1, + .pAttachments = &color_blend_attachment, + }; + + const VkDynamicState dynamic_states[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR }; + const VkPipelineDynamicStateCreateInfo dynamic_state = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, + .dynamicStateCount = ARRAY_LENGTH(dynamic_states), + .pDynamicStates = dynamic_states, + }; + + const VkPipelineLayoutCreateInfo pipeline_layout_info = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, + .setLayoutCount = 1, + .pSetLayouts = &pipeline->descriptor_set_layout, + }; + + result = vkCreatePipelineLayout(vr->dev, &pipeline_layout_info, NULL, &pipeline->pipeline_layout); + check_vk_success(result, "vkCreatePipelineLayout"); + + const VkGraphicsPipelineCreateInfo pipeline_info = { + .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, + .stageCount = 2, + .pStages = shader_stages, + .pVertexInputState = pipeline_vertex_input_state_create_info, + .pInputAssemblyState = &input_assembly, + .pViewportState = &viewport_state, + .pRasterizationState = &rasterizer, + .pMultisampleState = &multisampling, + .pColorBlendState = &color_blending, + .pDynamicState = &dynamic_state, + .layout = pipeline->pipeline_layout, + .renderPass = req->renderpass, + .subpass = 0, + .basePipelineHandle = VK_NULL_HANDLE, + }; + + result = vkCreateGraphicsPipelines(vr->dev, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &pipeline->pipeline); + check_vk_success(result, "vkCreateGraphicsPipelines"); + + vkDestroyShaderModule(vr->dev, fs_module, NULL); + vkDestroyShaderModule(vr->dev, vs_module, NULL); +} + +static void +create_descriptor_set_layout(struct vulkan_renderer *vr, struct vulkan_pipeline *pipeline) +{ + VkResult result; + + const VkDescriptorSetLayoutBinding vs_ubo_layout_binding = { + .binding = 0, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .stageFlags = VK_SHADER_STAGE_VERTEX_BIT, + }; + + const VkDescriptorSetLayoutBinding fs_ubo_layout_binding = { + .binding = 1, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT, + }; + + const VkDescriptorSetLayoutBinding fs_sampler_layout_binding = { + .binding = 2, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT, + }; + + const VkDescriptorSetLayoutBinding 
bindings[] = { + vs_ubo_layout_binding, + fs_ubo_layout_binding, + fs_sampler_layout_binding, + }; + const VkDescriptorSetLayoutCreateInfo layout_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, + .bindingCount = ARRAY_LENGTH(bindings), + .pBindings = bindings, + }; + + result = vkCreateDescriptorSetLayout(vr->dev, &layout_info, NULL, &pipeline->descriptor_set_layout); + check_vk_success(result, "vkCreateDescriptorSetLayout"); +} + +static struct vulkan_pipeline * +vulkan_pipeline_create(struct vulkan_renderer *vr, + const struct vulkan_pipeline_requirements *reqs) +{ + struct vulkan_pipeline *pipeline = NULL; + + pipeline = zalloc(sizeof *pipeline); + if (!pipeline) { + weston_log("could not create pipeline\n"); + abort(); + } + + wl_list_init(&pipeline->link); + pipeline->key = *reqs; + + create_descriptor_set_layout(vr, pipeline); + + create_graphics_pipeline(vr, reqs, pipeline); + + wl_list_insert(&vr->pipeline_list, &pipeline->link); + + return pipeline; +} + +void +vulkan_pipeline_destroy(struct vulkan_renderer *vr, struct vulkan_pipeline *pipeline) +{ + vkDestroyPipelineLayout(vr->dev, pipeline->pipeline_layout, NULL); + vkDestroyPipeline(vr->dev, pipeline->pipeline, NULL); + vkDestroyDescriptorSetLayout(vr->dev, pipeline->descriptor_set_layout, NULL); + wl_list_remove(&pipeline->link); + free(pipeline); +} + +void +vulkan_renderer_pipeline_list_destroy(struct vulkan_renderer *vr) +{ + struct vulkan_pipeline *pipeline, *next_pipeline; + + wl_list_for_each_safe(pipeline, next_pipeline, &vr->pipeline_list, link) + vulkan_pipeline_destroy(vr, pipeline); +} + +static int +vulkan_pipeline_requirements_cmp(const struct vulkan_pipeline_requirements *a, + const struct vulkan_pipeline_requirements *b) +{ + return memcmp(a, b, sizeof(*a)); +} + +struct vulkan_pipeline * +vulkan_renderer_get_pipeline(struct vulkan_renderer *vr, + const struct vulkan_pipeline_requirements *reqs) +{ + struct vulkan_pipeline *pipeline; + + wl_list_for_each(pipeline, &vr->pipeline_list, link) { + if (vulkan_pipeline_requirements_cmp(reqs, &pipeline->key) == 0) + return pipeline; + } + + pipeline = vulkan_pipeline_create(vr, reqs); + if (pipeline) + return pipeline; + + return NULL; +} + diff --git a/libweston/renderer-vulkan/vulkan-pixel-format.c b/libweston/renderer-vulkan/vulkan-pixel-format.c new file mode 100644 index 000000000..1d63a4ee6 --- /dev/null +++ b/libweston/renderer-vulkan/vulkan-pixel-format.c @@ -0,0 +1,237 @@ +/* + * Copyright © 2025 Erico Nunes + * + * Based on wlroots' vulkan pixel_format.c + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "config.h"
+
+#include <inttypes.h>
+
+#include <libweston/libweston.h>
+#include "pixel-formats.h"
+#include "shared/xalloc.h"
+#include "vulkan-renderer-internal.h"
+
+#include <drm_fourcc.h>
+#include <xf86drm.h>
+
+static const VkImageUsageFlags image_tex_usage =
+	VK_IMAGE_USAGE_SAMPLED_BIT |
+	VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+
+static const VkFormatFeatureFlags format_tex_features =
+	VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
+	VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+
+static bool
+query_modifier_usage_support(struct vulkan_renderer *vr, VkFormat vk_format,
+			     VkImageUsageFlags usage, const VkDrmFormatModifierPropertiesEXT *m)
+{
+	VkResult result;
+
+	VkPhysicalDeviceImageFormatInfo2 pdev_image_format_info = {
+		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
+		.type = VK_IMAGE_TYPE_2D,
+		.format = vk_format,
+		.usage = usage,
+		.flags = 0,
+		.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,
+	};
+
+	VkPhysicalDeviceExternalImageFormatInfo pdev_ext_image_format_info = {
+		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
+		.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+	};
+	pnext(&pdev_image_format_info, &pdev_ext_image_format_info);
+
+	VkPhysicalDeviceImageDrmFormatModifierInfoEXT pdev_image_drm_format_mod_info = {
+		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT,
+		.drmFormatModifier = m->drmFormatModifier,
+		.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+	};
+	pnext(&pdev_image_format_info, &pdev_image_drm_format_mod_info);
+
+	VkImageFormatListCreateInfoKHR image_format_info = {
+		.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR,
+		.pViewFormats = &vk_format,
+		.viewFormatCount = 1,
+	};
+	pnext(&pdev_image_format_info, &image_format_info);
+
+	VkImageFormatProperties2 image_format_props = {
+		.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
+	};
+
+	VkExternalImageFormatProperties ext_image_format_props = {
+		.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
+	};
+	pnext(&image_format_props, &ext_image_format_props);
+
+	const VkExternalMemoryProperties *ext_mem_props = &ext_image_format_props.externalMemoryProperties;
+
+	result = vkGetPhysicalDeviceImageFormatProperties2(vr->phys_dev, &pdev_image_format_info, &image_format_props);
+	if (result != VK_SUCCESS)
+		return false;
+
+	if (!(ext_mem_props->externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))
+		return false;
+
+	return true;
+}
+
+static bool
+query_dmabuf_support(struct vulkan_renderer *vr, VkFormat vk_format,
+		     VkImageFormatProperties *out)
+{
+	VkResult result;
+
+	VkPhysicalDeviceImageFormatInfo2 pdev_image_format_info = {
+		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
+		.type = VK_IMAGE_TYPE_2D,
+		.format = vk_format,
+		.tiling = VK_IMAGE_TILING_OPTIMAL,
+		.usage = image_tex_usage,
+		.flags = 0,
+	};
+
+	VkImageFormatListCreateInfoKHR image_format_info = {
+		.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR,
+		.pViewFormats = &vk_format,
+		.viewFormatCount = 1,
+	};
+	pnext(&pdev_image_format_info, &image_format_info);
+
+	VkImageFormatProperties2 image_format_props = {
+		.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
+	};
+
+	result = vkGetPhysicalDeviceImageFormatProperties2(vr->phys_dev, &pdev_image_format_info, &image_format_props);
+	if (result != VK_SUCCESS) {
+		if (result == VK_ERROR_FORMAT_NOT_SUPPORTED) {
+			weston_log("unsupported format\n");
+		} else {
+			weston_log("failed to get format properties\n");
+		}
+		return false;
+	}
+
+	*out = image_format_props.imageFormatProperties;
+	return true;
+}
+
+static void
+query_dmabuf_modifier_support(struct vulkan_renderer *vr, const struct pixel_format_info *format,
+			      struct weston_drm_format *fmt)
+{
+	if (!vr->has_image_drm_format_modifier) {
+		uint64_t modifier = DRM_FORMAT_MOD_INVALID;
+
+		int ret = weston_drm_format_add_modifier(fmt, modifier);
+		assert(ret == 0);
+
+		char *modifier_name = drmGetFormatModifierName(modifier);
+		weston_log("DRM dmabuf format %s (0x%08x) modifier %s (0x%016" PRIx64 ")\n",
+			   format->drm_format_name ? format->drm_format_name : "",
+			   format->format,
+			   modifier_name ? modifier_name : "",
+			   modifier);
+		free(modifier_name);
+		return;
+	}
+
+	VkDrmFormatModifierPropertiesListEXT drm_format_mod_props = {
+		.sType = VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
+	};
+	VkFormatProperties2 format_props = {
+		.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
+	};
+	pnext(&format_props, &drm_format_mod_props);
+	vkGetPhysicalDeviceFormatProperties2(vr->phys_dev, format->vulkan_format, &format_props);
+
+	/* First query retrieves the modifier count, second one fills the
+	 * array allocated for that count. */
+	size_t modifier_count = drm_format_mod_props.drmFormatModifierCount;
+	drm_format_mod_props.pDrmFormatModifierProperties =
+		xzalloc(modifier_count * sizeof(*drm_format_mod_props.pDrmFormatModifierProperties));
+
+	vkGetPhysicalDeviceFormatProperties2(vr->phys_dev, format->vulkan_format, &format_props);
+
+	for (uint32_t i = 0; i < drm_format_mod_props.drmFormatModifierCount; ++i) {
+		VkDrmFormatModifierPropertiesEXT m = drm_format_mod_props.pDrmFormatModifierProperties[i];
+
+		// check this specific modifier for texture usage
+		if ((m.drmFormatModifierTilingFeatures & format_tex_features) != format_tex_features)
+			continue;
+
+		if (!query_modifier_usage_support(vr, format->vulkan_format, image_tex_usage, &m))
+			continue;
+
+		int ret = weston_drm_format_add_modifier(fmt, m.drmFormatModifier);
+		assert(ret == 0);
+
+		char *modifier_name = drmGetFormatModifierName(m.drmFormatModifier);
+		weston_log("DRM dmabuf format %s (0x%08x) modifier %s (0x%016" PRIx64 ") %u planes\n",
+			   format->drm_format_name ? format->drm_format_name : "",
+			   format->format,
+			   modifier_name ? modifier_name : "",
+			   m.drmFormatModifier,
+			   m.drmFormatModifierPlaneCount);
+		free(modifier_name);
+	}
+
+	free(drm_format_mod_props.pDrmFormatModifierProperties);
+}
+
+bool
+vulkan_renderer_query_dmabuf_format(struct vulkan_renderer *vr, const struct pixel_format_info *format)
+{
+	VkFormatProperties2 format_props = {
+		.sType = VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2,
+	};
+
+	vkGetPhysicalDeviceFormatProperties2(vr->phys_dev, format->vulkan_format, &format_props);
+
+	struct weston_drm_format *fmt = NULL;
+
+	// dmabuf texture properties
+	if ((format_props.formatProperties.optimalTilingFeatures & format_tex_features) != format_tex_features)
+		return false;
+
+	VkImageFormatProperties iformat_props;
+	if (!query_dmabuf_support(vr, format->vulkan_format, &iformat_props))
+		return false;
+
+	fmt = weston_drm_format_array_add_format(&vr->supported_formats, format->format);
+	assert(fmt);
+
+	weston_log("DRM dmabuf format %s (0x%08x)\n",
+		   format->drm_format_name ?
format->drm_format_name : "", + format->format); + + query_dmabuf_modifier_support(vr, format, fmt); + + return true; +} diff --git a/libweston/renderer-vulkan/vulkan-renderer-internal.h b/libweston/renderer-vulkan/vulkan-renderer-internal.h new file mode 100644 index 000000000..fefc1c1e9 --- /dev/null +++ b/libweston/renderer-vulkan/vulkan-renderer-internal.h @@ -0,0 +1,203 @@ +/* + * Copyright © 2025 Erico Nunes + * + * based on gl-renderer-internal.h: + * Copyright © 2019 Collabora, Ltd. + * Copyright © 2019 Harish Krupo + * Copyright © 2019 Intel Corporation + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VULKAN_RENDERER_INTERNAL_H +#define VULKAN_RENDERER_INTERNAL_H + +#include +#include + +#include +#include +#include "shared/helpers.h" +#include "libweston/libweston.h" +#include "libweston/libweston-internal.h" +#include + +#define VK_USE_PLATFORM_XCB_KHR +#define VK_USE_PLATFORM_WAYLAND_KHR +#include +#include + +#define MAX_NUM_IMAGES 5 +#define MAX_CONCURRENT_FRAMES 2 + +enum vulkan_pipeline_texture_variant { + PIPELINE_VARIANT_NONE = 0, +/* Keep the following in sync with Vulkan shader.frag. */ + PIPELINE_VARIANT_RGBA = 1, + PIPELINE_VARIANT_RGBX = 2, + PIPELINE_VARIANT_SOLID = 3, + PIPELINE_VARIANT_EXTERNAL = 4, +}; + +struct vulkan_pipeline_requirements +{ + unsigned texcoord_input:1; /* enum vulkan_shader_texcoord_input */ + unsigned variant:4; /* enum vulkan_pipeline_texture_variant */ + bool input_is_premult:1; + bool blend:1; + VkRenderPass renderpass; +}; + +struct vulkan_pipeline_config { + struct vulkan_pipeline_requirements req; + + struct weston_matrix projection; + struct weston_matrix surface_to_buffer; + float view_alpha; + float unicolor[4]; +}; + + +/* Keep the following in sync with vertex.glsl. 
*/ +enum vulkan_shader_texcoord_input { + SHADER_TEXCOORD_INPUT_ATTRIB = 0, + SHADER_TEXCOORD_INPUT_SURFACE, +}; + +struct vulkan_pipeline { + struct vulkan_pipeline_requirements key; + + struct wl_list link; /* vulkan_renderer::pipeline_list */ + struct timespec last_used; + + VkDescriptorSetLayout descriptor_set_layout; + + VkPipeline pipeline; + VkPipelineLayout pipeline_layout; +}; + +struct vulkan_renderer_texture_image { + VkImage image; + VkDeviceMemory memory; + VkImageView image_view; + + VkBuffer staging_buffer; + VkDeviceMemory staging_memory; + void *staging_map; + + VkCommandBuffer upload_cmd; + VkFence upload_fence; +}; + +struct vulkan_renderer { + struct weston_renderer base; + struct weston_compositor *compositor; + + bool has_wayland_surface; + bool has_xcb_surface; + VkInstance inst; + + VkPhysicalDevice phys_dev; + VkQueue queue; + uint32_t queue_family; + + bool has_incremental_present; + bool has_image_drm_format_modifier; + bool has_external_semaphore_fd; + bool has_physical_device_drm; + bool has_external_memory_dma_buf; + bool has_queue_family_foreign; + bool semaphore_import_export; + VkDevice dev; + + VkCommandPool cmd_pool; + + int drm_fd; /* drm device fd */ + struct weston_drm_format_array supported_formats; + struct wl_list dmabuf_images; + struct wl_list dmabuf_formats; + + struct wl_signal destroy_signal; + struct wl_list pipeline_list; + struct dmabuf_allocator *allocator; + + PFN_vkCreateWaylandSurfaceKHR create_wayland_surface; + PFN_vkCreateXcbSurfaceKHR create_xcb_surface; + PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR get_wayland_presentation_support; + PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR get_xcb_presentation_support; + + PFN_vkGetImageMemoryRequirements2KHR get_image_memory_requirements2; + PFN_vkGetMemoryFdPropertiesKHR get_memory_fd_properties; + PFN_vkGetSemaphoreFdKHR get_semaphore_fd; + PFN_vkImportSemaphoreFdKHR import_semaphore_fd; + + /* This can be removed if a different shader is defined + * to avoid requiring a valid sampler descriptor to run + * for solids */ + struct { + struct vulkan_renderer_texture_image image; + VkSampler sampler; + } dummy; +}; + +static inline struct vulkan_renderer * +get_renderer(struct weston_compositor *ec) +{ + return (struct vulkan_renderer *)ec->renderer; +} + +static inline void pnext(void *base, void *next) +{ + VkBaseOutStructure *b = base; + VkBaseOutStructure *n = next; + n->pNext = b->pNext; + b->pNext = n; +} + +static inline void _check_vk_success(const char *file, int line, const char *func, + VkResult result, const char *vk_func) +{ + if (result == VK_SUCCESS) + return; + + weston_log("%s %d %s Error: %s failed with VkResult %d\n", file, line, func, vk_func, result); + abort(); +} +#define check_vk_success(result, vk_func) \ + _check_vk_success(__FILE__, __LINE__, __func__, (result), (vk_func)) + +void +vulkan_pipeline_destroy(struct vulkan_renderer *vr, struct vulkan_pipeline *pipeline); + +void +vulkan_renderer_pipeline_list_destroy(struct vulkan_renderer *vr); + +struct vulkan_pipeline * +vulkan_renderer_get_pipeline(struct vulkan_renderer *vr, + const struct vulkan_pipeline_requirements *reqs); + +bool +vulkan_renderer_query_dmabuf_format(struct vulkan_renderer *vr, + const struct pixel_format_info *format); + +#endif /* VULKAN_RENDERER_INTERNAL_H */ diff --git a/libweston/renderer-vulkan/vulkan-renderer.c b/libweston/renderer-vulkan/vulkan-renderer.c new file mode 100644 index 000000000..c20920b23 --- /dev/null +++ b/libweston/renderer-vulkan/vulkan-renderer.c @@ -0,0 
+1,4195 @@ +/* + * Copyright © 2025 Erico Nunes + * + * based on gl-renderer: + * Copyright © 2012 Intel Corporation + * Copyright © 2015,2019,2021 Collabora, Ltd. + * Copyright © 2016 NVIDIA Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "linux-sync-file.h" + +#include +#include "vulkan-renderer.h" +#include "vulkan-renderer-internal.h" +#include "vertex-clipping.h" +#include "linux-dmabuf.h" +#include "linux-dmabuf-unstable-v1-server-protocol.h" +#include "linux-explicit-synchronization.h" +#include "output-capture.h" +#include "pixel-formats.h" + +#include "shared/fd-util.h" +#include "shared/helpers.h" +#include "shared/platform.h" +#include "shared/string-helpers.h" +#include "shared/weston-drm-fourcc.h" +#include "shared/xalloc.h" +#include "libweston/weston-log.h" + +#include /* Physical device drm */ + +enum vulkan_border_status { + BORDER_STATUS_CLEAN = 0, + BORDER_TOP_DIRTY = 1 << WESTON_RENDERER_BORDER_TOP, + BORDER_LEFT_DIRTY = 1 << WESTON_RENDERER_BORDER_LEFT, + BORDER_RIGHT_DIRTY = 1 << WESTON_RENDERER_BORDER_RIGHT, + BORDER_BOTTOM_DIRTY = 1 << WESTON_RENDERER_BORDER_BOTTOM, + BORDER_ALL_DIRTY = 0xf, +}; + +struct vulkan_border_image { + int32_t width, height; + int32_t tex_width; + void *data; + + struct vulkan_renderer_texture_image texture; + VkSampler sampler; + + VkDescriptorSet descriptor_set; + + VkBuffer vs_ubo_buffer; + VkDeviceMemory vs_ubo_memory; + void *vs_ubo_map; + + /* these are not really used as of now */ + VkBuffer fs_ubo_buffer; + VkDeviceMemory fs_ubo_memory; + void *fs_ubo_map; +}; + +struct vulkan_renderbuffer { + struct weston_output *output; + pixman_region32_t damage; + enum vulkan_border_status border_status; + bool stale; + + void *buffer; + int stride; + weston_renderbuffer_discarded_func discarded_cb; + void *user_data; + + /* Unused by drm and swapchain outputs */ + struct vulkan_renderer_image *image; + + struct wl_list link; +}; + +struct vulkan_renderer_image { + VkImage image; + VkDeviceMemory memory; + VkImageView image_view; + VkFramebuffer framebuffer; + + struct vulkan_renderbuffer *renderbuffer; + struct gbm_bo *bo; + +}; + +struct vulkan_renderer_frame_acquire_fence { + VkSemaphore semaphore; + + struct wl_list link; +}; + +struct vulkan_renderer_frame_vbuf { + VkBuffer buffer; + VkDeviceMemory memory; + void *map; + uint64_t offset; 
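+	/* suballocation state: offset is the number of bytes already handed
+	 * out from this buffer during the current frame, size is the total
+	 * capacity; see get_vertex_buffer() and reset_vertex_buffers() */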
+ uint64_t size; + + struct wl_list link; +}; + +struct vulkan_renderer_frame_dspool { + VkDescriptorPool pool; + uint32_t count; + uint32_t maxsets; + + struct wl_list link; +}; + +struct vulkan_renderer_frame { + VkCommandBuffer cmd_buffer; + + VkSemaphore render_done; + VkSemaphore image_acquired; + VkFence fence; + + int render_fence_fd; + + struct wl_list acquire_fence_list; + + struct wl_list vbuf_list; + struct wl_list dspool_list; +}; + +enum vulkan_output_type { + VULKAN_OUTPUT_HEADLESS, + VULKAN_OUTPUT_DRM, + VULKAN_OUTPUT_SWAPCHAIN, +}; + +struct vulkan_output_state { + struct weston_size fb_size; /**< in pixels, including borders */ + struct weston_geometry area; /**< composited area in pixels inside fb */ + + struct vulkan_border_image borders[4]; + enum vulkan_border_status border_status; + + struct weston_matrix output_matrix; + + /* struct vulkan_renderbuffer::link */ + struct wl_list renderbuffer_list; + + const struct pixel_format_info *pixel_format; + VkRenderPass renderpass; + enum vulkan_output_type output_type; + struct { + VkSwapchainKHR swapchain; + VkPresentModeKHR present_mode; + VkSurfaceKHR surface; + } swapchain; + struct { + uint32_t image_index; + } drm; + + /* For drm and swapchain outputs only */ + uint32_t image_count; + struct vulkan_renderer_image images[MAX_NUM_IMAGES]; + + uint32_t last_frame; + uint32_t frame_index; + uint32_t num_frames; + struct vulkan_renderer_frame frames[MAX_CONCURRENT_FRAMES]; +}; + +struct vulkan_buffer_state { + struct vulkan_renderer *vr; + + float color[4]; + + bool needs_full_upload; + pixman_region32_t texture_damage; + + /* Only needed between attach() and flush_damage() */ + uint32_t vulkan_format[3]; + uint32_t pitch; /* plane 0 pitch in pixels */ + uint32_t offset[3]; /* per-plane pitch in bytes */ + + enum vulkan_pipeline_texture_variant pipeline_variant; + + unsigned int textures[3]; + int num_textures; + + struct wl_listener destroy_listener; + + struct vulkan_renderer_texture_image texture; + VkSampler sampler_linear; + VkSampler sampler_nearest; + + VkDescriptorSet descriptor_set; + + VkBuffer vs_ubo_buffer; + VkDeviceMemory vs_ubo_memory; + void *vs_ubo_map; + + VkBuffer fs_ubo_buffer; + VkDeviceMemory fs_ubo_memory; + void *fs_ubo_map; +}; + +struct vulkan_surface_state { + struct weston_surface *surface; + + struct vulkan_buffer_state *buffer; + + /* These buffer references should really be attached to paint nodes + * rather than either buffer or surface state */ + struct weston_buffer_reference buffer_ref; + struct weston_buffer_release_reference buffer_release_ref; + + /* Whether this surface was used in the current output repaint. + Used only in the context of a vulkan_renderer_repaint_output call. 
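+	   Set by draw_paint_node() when the surface contributes pixels to
+	   the frame, and consumed by update_buffer_release_fences().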
*/ + bool used_in_output_repaint; + + struct wl_listener surface_destroy_listener; + struct wl_listener renderer_destroy_listener; +}; + +struct vs_ubo { + float proj[16]; + float surface_to_buffer[16]; +}; + +struct fs_ubo { + float unicolor[4]; + float view_alpha; +}; + +static void +transfer_image_queue_family(VkCommandBuffer cmd_buffer, VkImage image, + uint32_t src_index, uint32_t dst_index) +{ + const VkImageMemoryBarrier barrier = { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + .newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + .srcAccessMask = 0, + .dstAccessMask = 0, + .image = image, + .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .subresourceRange.layerCount = 1, + .subresourceRange.levelCount = 1, + .srcQueueFamilyIndex = src_index, + .dstQueueFamilyIndex = dst_index, + }; + + vkCmdPipelineBarrier(cmd_buffer, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, + 0, 0, NULL, 0, NULL, 1, &barrier); +} + +static void +transition_image_layout(VkCommandBuffer cmd_buffer, VkImage image, + VkImageLayout old_layout, VkImageLayout new_layout, + VkPipelineStageFlags srcs, VkPipelineStageFlags dsts, + VkAccessFlags src_access, VkAccessFlags dst_access) +{ + const VkImageMemoryBarrier barrier = { + .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, + .oldLayout = old_layout, + .newLayout = new_layout, + .image = image, + .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .subresourceRange.layerCount = 1, + .subresourceRange.levelCount = 1, + .srcAccessMask = src_access, + .dstAccessMask = dst_access, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + }; + + vkCmdPipelineBarrier(cmd_buffer, srcs, dsts, 0, 0, NULL, 0, NULL, 1, &barrier); +} + +static void +destroy_buffer(VkDevice device, VkBuffer buffer, VkDeviceMemory memory) +{ + if (memory) + vkUnmapMemory(device, memory); + vkDestroyBuffer(device, buffer, NULL); + vkFreeMemory(device, memory, NULL); +} + +static void +destroy_sampler(VkDevice device, VkSampler sampler) +{ + vkDestroySampler(device, sampler, NULL); +} + +static void +destroy_image(VkDevice device, VkImage image, VkImageView image_view, VkDeviceMemory memory) +{ + if (image_view) + vkDestroyImageView(device, image_view, NULL); + vkDestroyImage(device, image, NULL); + vkFreeMemory(device, memory, NULL); +} + +static void +destroy_texture_image(struct vulkan_renderer *vr, struct vulkan_renderer_texture_image *texture) +{ + vkDestroyFence(vr->dev, texture->upload_fence, NULL); + vkFreeCommandBuffers(vr->dev, vr->cmd_pool, 1, &texture->upload_cmd); + + destroy_buffer(vr->dev, texture->staging_buffer, texture->staging_memory); + + destroy_image(vr->dev, texture->image, texture->image_view, texture->memory); +} + +static void +destroy_buffer_state(struct vulkan_buffer_state *vb) +{ + struct vulkan_renderer *vr = vb->vr; + + // TODO: how to refcount this buffer properly so that it is not + // destroyed in the middle of a frame? 
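+	// For now the vkQueueWaitIdle() below guarantees that no in-flight
+	// command buffer still references the texture, samplers or UBOs
+	// that are destroyed here, at the cost of stalling the queue.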
+ VkResult result; + result = vkQueueWaitIdle(vr->queue); + check_vk_success(result, "vkQueueWaitIdle"); + + destroy_sampler(vr->dev, vb->sampler_linear); + destroy_sampler(vr->dev, vb->sampler_nearest); + destroy_texture_image(vr, &vb->texture); + + destroy_buffer(vr->dev, vb->fs_ubo_buffer, vb->fs_ubo_memory); + destroy_buffer(vr->dev, vb->vs_ubo_buffer, vb->vs_ubo_memory); + + pixman_region32_fini(&vb->texture_damage); + + wl_list_remove(&vb->destroy_listener.link); + + free(vb); +} + +static void +surface_state_destroy(struct vulkan_surface_state *vs, struct vulkan_renderer *vr) +{ + wl_list_remove(&vs->surface_destroy_listener.link); + wl_list_remove(&vs->renderer_destroy_listener.link); + + vs->surface->renderer_state = NULL; + + if (vs->buffer && vs->buffer_ref.buffer->type == WESTON_BUFFER_SHM) + destroy_buffer_state(vs->buffer); + vs->buffer = NULL; + + weston_buffer_reference(&vs->buffer_ref, NULL, + BUFFER_WILL_NOT_BE_ACCESSED); + weston_buffer_release_reference(&vs->buffer_release_ref, NULL); + + free(vs); +} + +static void +surface_state_handle_surface_destroy(struct wl_listener *listener, void *data) +{ + struct vulkan_surface_state *vs; + struct vulkan_renderer *vr; + + vs = container_of(listener, struct vulkan_surface_state, + surface_destroy_listener); + + vr = get_renderer(vs->surface->compositor); + + surface_state_destroy(vs, vr); +} + +static void +surface_state_handle_renderer_destroy(struct wl_listener *listener, void *data) +{ + struct vulkan_surface_state *vs; + struct vulkan_renderer *vr; + + vr = data; + + vs = container_of(listener, struct vulkan_surface_state, + renderer_destroy_listener); + + surface_state_destroy(vs, vr); +} + +static inline struct vulkan_output_state * +get_output_state(struct weston_output *output) +{ + return (struct vulkan_output_state *)output->renderer_state; +} + +static void +vulkan_renderbuffer_fini(struct vulkan_renderbuffer *renderbuffer) +{ + assert(!renderbuffer->stale); + + pixman_region32_fini(&renderbuffer->damage); + + renderbuffer->stale = true; +} + +static void +vulkan_renderer_destroy_image(struct vulkan_renderer *vr, + struct vulkan_renderer_image *image) +{ + vkDestroyFramebuffer(vr->dev, image->framebuffer, NULL); + vkDestroyImageView(vr->dev, image->image_view, NULL); + vkDestroyImage(vr->dev, image->image, NULL); + vkFreeMemory(vr->dev, image->memory, NULL); +} + +static void +vulkan_renderer_destroy_renderbuffer(weston_renderbuffer_t weston_renderbuffer) +{ + struct vulkan_renderbuffer *rb = + (struct vulkan_renderbuffer *) weston_renderbuffer; + struct vulkan_renderer *vr = get_renderer(rb->output->compositor); + + wl_list_remove(&rb->link); + + if (!rb->stale) + vulkan_renderbuffer_fini(rb); + + if (rb->image) { + vulkan_renderer_destroy_image(vr, rb->image); + free(rb->image); + } + + free(rb); +} + +static bool +vulkan_renderer_discard_renderbuffers(struct vulkan_output_state *vo, + bool destroy) +{ + struct vulkan_renderbuffer *rb, *tmp; + bool success = true; + + /* A renderbuffer goes stale after being discarded. Most resources are + * released. It's kept in the output states' renderbuffer list waiting + * for the backend to destroy it. 
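+	 * Note that vulkan_renderbuffer_fini() only releases the damage
+	 * region; the backing vulkan_renderer_image, if any, stays around
+	 * until vulkan_renderer_destroy_renderbuffer() runs.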
 */
+	wl_list_for_each_safe(rb, tmp, &vo->renderbuffer_list, link) {
+		if (destroy) {
+			vulkan_renderer_destroy_renderbuffer((weston_renderbuffer_t) rb);
+		} else if (!rb->stale) {
+			vulkan_renderbuffer_fini(rb);
+			if (rb->discarded_cb)
+				success = rb->discarded_cb((weston_renderbuffer_t) rb,
+							   rb->user_data);
+		}
+	}
+
+	return success;
+}
+
+static void
+vulkan_renderer_output_destroy_images(struct weston_output *output)
+{
+	struct vulkan_output_state *vo = get_output_state(output);
+	struct weston_compositor *ec = output->compositor;
+	struct vulkan_renderer *vr = get_renderer(ec);
+
+	// Blocking on vkQueueWaitIdle() is acceptable here: this only runs
+	// on output destroy.
+	VkResult result;
+	result = vkQueueWaitIdle(vr->queue);
+	check_vk_success(result, "vkQueueWaitIdle");
+
+	for (uint32_t i = 0; i < vo->image_count; i++) {
+		struct vulkan_renderer_image *im = &vo->images[i];
+		vulkan_renderer_destroy_image(vr, im);
+	}
+}
+
+static void
+vulkan_renderer_destroy_swapchain(struct weston_output *output)
+{
+	struct weston_compositor *ec = output->compositor;
+	struct vulkan_renderer *vr = get_renderer(ec);
+	struct vulkan_output_state *vo = get_output_state(output);
+
+	// A full wait-idle is expensive, but this path only runs on
+	// swapchain recreation and is never hit by the drm-backend.
+	VkResult result;
+	result = vkQueueWaitIdle(vr->queue);
+	check_vk_success(result, "vkQueueWaitIdle");
+
+	for (uint32_t i = 0; i < vo->image_count; i++) {
+		struct vulkan_renderer_image *im = &vo->images[i];
+
+		vkDestroyFramebuffer(vr->dev, im->framebuffer, NULL);
+		vkDestroyImageView(vr->dev, im->image_view, NULL);
+	}
+
+	vkDestroySwapchainKHR(vr->dev, vo->swapchain.swapchain, NULL);
+}
+
+static void
+vulkan_renderer_output_destroy(struct weston_output *output)
+{
+	struct vulkan_output_state *vo = get_output_state(output);
+	struct weston_compositor *ec = output->compositor;
+	struct vulkan_renderer *vr = get_renderer(ec);
+
+	// Blocking on vkQueueWaitIdle() is acceptable here: this only runs
+	// on output destroy.
+	VkResult result;
+	result = vkQueueWaitIdle(vr->queue);
+	check_vk_success(result, "vkQueueWaitIdle");
+
+	vkDestroyRenderPass(vr->dev, vo->renderpass, NULL);
+
+	for (unsigned int i = 0; i < vo->num_frames; ++i) {
+		struct vulkan_renderer_frame *fr = &vo->frames[i];
+
+		vkDestroyFence(vr->dev, fr->fence, NULL);
+		vkDestroySemaphore(vr->dev, fr->render_done, NULL);
+		vkDestroySemaphore(vr->dev, fr->image_acquired, NULL);
+		vkFreeCommandBuffers(vr->dev, vr->cmd_pool, 1, &fr->cmd_buffer);
+
+		struct vulkan_renderer_frame_acquire_fence *acquire_fence, *ftmp;
+		wl_list_for_each_safe(acquire_fence, ftmp, &fr->acquire_fence_list, link) {
+			vkDestroySemaphore(vr->dev, acquire_fence->semaphore, NULL);
+			wl_list_remove(&acquire_fence->link);
+			free(acquire_fence);
+		}
+
+		struct vulkan_renderer_frame_vbuf *vbuf, *vtmp;
+		wl_list_for_each_safe(vbuf, vtmp, &fr->vbuf_list, link) {
+			destroy_buffer(vr->dev, vbuf->buffer, vbuf->memory);
+			wl_list_remove(&vbuf->link);
+			free(vbuf);
+		}
+
+		struct vulkan_renderer_frame_dspool *dspool, *dtmp;
+		wl_list_for_each_safe(dspool, dtmp, &fr->dspool_list, link) {
+			vkDestroyDescriptorPool(vr->dev, dspool->pool, NULL);
+			wl_list_remove(&dspool->link);
+			free(dspool);
+		}
+	}
+
+	if (vo->output_type == VULKAN_OUTPUT_SWAPCHAIN) {
+		vulkan_renderer_destroy_swapchain(output);
+		vkDestroySurfaceKHR(vr->inst, vo->swapchain.surface, NULL);
+	} else {
+		vulkan_renderer_output_destroy_images(output);
+	}
+
+	vulkan_renderer_discard_renderbuffers(vo, true);
+
+	free(vo);
+}
+
+static void
+create_descriptor_pool(struct vulkan_renderer *vr, VkDescriptorPool *descriptor_pool,
+			
uint32_t base_count, uint32_t maxsets) +{ + VkResult result; + + const VkDescriptorPoolSize pool_sizes[] = { + { + .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 2 * base_count, + }, + { + .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + .descriptorCount = 1 * base_count, + }, + }; + + const VkDescriptorPoolCreateInfo pool_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, + .poolSizeCount = ARRAY_LENGTH(pool_sizes), + .pPoolSizes = pool_sizes, + .maxSets = maxsets, + }; + + result = vkCreateDescriptorPool(vr->dev, &pool_info, NULL, descriptor_pool); + check_vk_success(result, "vkCreateDescriptorPool"); +} + +static bool +try_allocate_descriptor_set(struct vulkan_renderer *vr, + VkDescriptorPool descriptor_pool, + VkDescriptorSetLayout *descriptor_set_layout, + VkDescriptorSet *descriptor_set) +{ + VkResult result; + + const VkDescriptorSetAllocateInfo alloc_info = { + .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + .descriptorPool = descriptor_pool, + .descriptorSetCount = 1, + .pSetLayouts = descriptor_set_layout, + }; + + result = vkAllocateDescriptorSets(vr->dev, &alloc_info, descriptor_set); + return (result == VK_SUCCESS); +} + +static void +get_descriptor_set(struct vulkan_renderer *vr, + struct vulkan_renderer_frame *fr, + VkDescriptorSetLayout *descriptor_set_layout, + VkDescriptorSet *descriptor_set) +{ + const uint32_t base_count = 1024; + const uint32_t maxsets = 4096; + + struct vulkan_renderer_frame_dspool *dspool; + + wl_list_for_each(dspool, &fr->dspool_list, link) { + VkDescriptorPool pool = dspool->pool; + bool success = try_allocate_descriptor_set(vr, pool, descriptor_set_layout, + descriptor_set); + if (success) + return; + } + + struct vulkan_renderer_frame_dspool *new_dspool = xzalloc(sizeof(*new_dspool)); + new_dspool->count = base_count; + new_dspool->maxsets = maxsets; + create_descriptor_pool(vr, &new_dspool->pool, base_count, maxsets); + wl_list_insert(&fr->dspool_list, &new_dspool->link); + + bool success = try_allocate_descriptor_set(vr, new_dspool->pool, descriptor_set_layout, + descriptor_set); + assert(success); +} + +static void +create_descriptor_set(struct vulkan_renderer *vr, + struct vulkan_renderer_frame *fr, + VkDescriptorSetLayout *descriptor_set_layout, + VkBuffer vs_ubo_buffer, + VkBuffer fs_ubo_buffer, + VkImageView image_view, + VkSampler sampler, + VkDescriptorSet *descriptor_set) +{ + const VkDescriptorBufferInfo vs_ubo_info = { + .buffer = vs_ubo_buffer, + .offset = 0, + .range = sizeof(struct vs_ubo), + }; + + const VkDescriptorBufferInfo fs_ubo_info = { + .buffer = fs_ubo_buffer, + .offset = 0, + .range = sizeof(struct fs_ubo), + }; + + const VkDescriptorImageInfo image_info = { + .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + .imageView = image_view, + .sampler = sampler, + }; + + get_descriptor_set(vr, fr, descriptor_set_layout, descriptor_set); + assert(descriptor_set); + + const VkWriteDescriptorSet descriptor_writes[] = { + { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = *descriptor_set, + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 1, + .pBufferInfo = &vs_ubo_info, + }, + { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = *descriptor_set, + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + .descriptorCount = 1, + .pBufferInfo = &fs_ubo_info, + }, + { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstSet = 
*descriptor_set,
+			.dstBinding = 2,
+			.dstArrayElement = 0,
+			.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+			.descriptorCount = 1,
+			.pImageInfo = &image_info,
+		},
+	};
+
+	vkUpdateDescriptorSets(vr->dev, ARRAY_LENGTH(descriptor_writes), descriptor_writes, 0, NULL);
+}
+
+static void
+reset_descriptor_pool(struct vulkan_renderer *vr, struct vulkan_renderer_frame *fr)
+{
+	if (wl_list_empty(&fr->dspool_list))
+		return;
+
+	if (wl_list_length(&fr->dspool_list) == 1) {
+		struct vulkan_renderer_frame_dspool *first = wl_container_of(fr->dspool_list.next, first, link);
+		vkResetDescriptorPool(vr->dev, first->pool, 0);
+		return;
+	}
+
+	struct vulkan_renderer_frame_dspool *dspool, *tmp;
+	uint32_t total_count = 0;
+	uint32_t total_maxsets = 0;
+	wl_list_for_each_safe(dspool, tmp, &fr->dspool_list, link) {
+		total_count += dspool->count;
+		total_maxsets += dspool->maxsets;
+		wl_list_remove(&dspool->link);
+		vkDestroyDescriptorPool(vr->dev, dspool->pool, NULL);
+		free(dspool);
+	}
+
+	total_count = round_up_pow2_32(total_count);
+	total_maxsets = round_up_pow2_32(total_maxsets);
+
+	struct vulkan_renderer_frame_dspool *new_dspool = xzalloc(sizeof(*new_dspool));
+	new_dspool->count = total_count;
+	new_dspool->maxsets = total_maxsets;
+	create_descriptor_pool(vr, &new_dspool->pool, total_count, total_maxsets);
+	wl_list_insert(&fr->dspool_list, &new_dspool->link);
+}
+
+static int
+find_memory_type(struct vulkan_renderer *vr, uint32_t allowed, VkMemoryPropertyFlags properties)
+{
+	VkPhysicalDeviceMemoryProperties mem_properties;
+	vkGetPhysicalDeviceMemoryProperties(vr->phys_dev, &mem_properties);
+
+	for (uint32_t i = 0; i < mem_properties.memoryTypeCount; ++i) {
+		if ((allowed & (1u << i)) &&
+		    (mem_properties.memoryTypes[i].propertyFlags & properties) == properties)
+			return i;
+	}
+	return -1;
+}
+
+static void
+create_buffer(struct vulkan_renderer *vr, VkDeviceSize size,
+	      VkBufferUsageFlags usage, VkMemoryPropertyFlags properties,
+	      VkBuffer *buffer, VkDeviceMemory *memory)
+{
+	VkResult result;
+
+	const VkBufferCreateInfo buffer_info = {
+		.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+		.size = size,
+		.usage = usage,
+		.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+	};
+	result = vkCreateBuffer(vr->dev, &buffer_info, NULL, buffer);
+	check_vk_success(result, "vkCreateBuffer");
+
+	VkMemoryRequirements mem_requirements;
+	vkGetBufferMemoryRequirements(vr->dev, *buffer, &mem_requirements);
+
+	int memory_type = find_memory_type(vr, mem_requirements.memoryTypeBits, properties);
+	assert(memory_type >= 0);
+
+	const VkMemoryAllocateInfo alloc_info = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+		.allocationSize = mem_requirements.size,
+		.memoryTypeIndex = memory_type,
+	};
+
+	result = vkAllocateMemory(vr->dev, &alloc_info, NULL, memory);
+	check_vk_success(result, "vkAllocateMemory");
+
+	result = vkBindBufferMemory(vr->dev, *buffer, *memory, 0);
+	check_vk_success(result, "vkBindBufferMemory");
+}
+
+static void
+create_vs_ubo_buffer(struct vulkan_renderer *vr, VkBuffer *vs_ubo_buffer,
+		     VkDeviceMemory *vs_ubo_memory, void **vs_ubo_map)
+{
+	VkResult result;
+	VkDeviceSize buffer_size = sizeof(struct vs_ubo);
+
+	create_buffer(vr, buffer_size,
+		      VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
+		      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
+		      vs_ubo_buffer, vs_ubo_memory);
+
+	result = vkMapMemory(vr->dev, *vs_ubo_memory, 0, VK_WHOLE_SIZE, 0, vs_ubo_map);
+	check_vk_success(result, "vkMapMemory");
+}
+
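+/*
+ * The vs_ubo and fs_ubo structs defined above are memcpy()ed into these
+ * host-visible, host-coherent buffers by repaint_region(), so their C
+ * layout must match the shaders' std140 uniform blocks at bindings 0
+ * and 1 of the descriptor set layout. As an illustrative sketch (not
+ * the literal shader source), the shader side is expected to declare:
+ *
+ *	layout(set = 0, binding = 0) uniform vs_ubo {
+ *		mat4 proj;
+ *		mat4 surface_to_buffer;
+ *	};
+ *	layout(set = 0, binding = 1) uniform fs_ubo {
+ *		vec4 unicolor;
+ *		float view_alpha;
+ *	};
+ *
+ * mat4 and vec4 require no extra padding under std140, which is why the
+ * plain float arrays in struct vs_ubo and struct fs_ubo line up without
+ * explicit alignment attributes.
+ */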
+static void +create_fs_ubo_buffer(struct vulkan_renderer *vr, VkBuffer *fs_ubo_buffer, + VkDeviceMemory *fs_ubo_memory, void **fs_ubo_map) +{ + VkResult result; + VkDeviceSize buffer_size = sizeof(struct fs_ubo); + + create_buffer(vr, buffer_size, + VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + fs_ubo_buffer, fs_ubo_memory); + + result = vkMapMemory(vr->dev, *fs_ubo_memory, 0, VK_WHOLE_SIZE, 0, fs_ubo_map); + check_vk_success(result, "vkMapMemory"); +} + +/* + * Allocates new vertex buffers on demand or reuse current buffers if there + * is still space available + */ +static struct vulkan_renderer_frame_vbuf * +get_vertex_buffer(struct vulkan_renderer *vr, struct vulkan_renderer_frame *fr, uint64_t size) +{ + const uint32_t base_size = 4096; + VkResult result; + + if (!wl_list_empty(&fr->vbuf_list)) { + struct vulkan_renderer_frame_vbuf *first = wl_container_of(fr->vbuf_list.next, first, link); + if (first->size >= first->offset + size) + return first; + } + + struct vulkan_renderer_frame_vbuf *new_vbuf = xzalloc(sizeof(*new_vbuf)); + + VkDeviceSize buffer_size = MAX(base_size, round_up_pow2_32(size)); + new_vbuf->size = buffer_size; + + create_buffer(vr, new_vbuf->size, + VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + &new_vbuf->buffer, &new_vbuf->memory); + + result = vkMapMemory(vr->dev, new_vbuf->memory, 0, VK_WHOLE_SIZE, 0, &new_vbuf->map); + check_vk_success(result, "vkMapMemory"); + + wl_list_insert(&fr->vbuf_list, &new_vbuf->link); + + return new_vbuf; +} + +/* + * Resets vertex buffer offset so it can be reused; or coalesces multiple + * vertex buffers into a single larger new one if multiple were dynamically + * allocated in the previous use of this frame + */ +static void +reset_vertex_buffers(struct vulkan_renderer *vr, struct vulkan_renderer_frame *fr) +{ + if (wl_list_empty(&fr->vbuf_list)) + return; + + if (wl_list_length(&fr->vbuf_list) == 1) { + struct vulkan_renderer_frame_vbuf *first = wl_container_of(fr->vbuf_list.next, first, link); + first->offset = 0; + return; + } + + struct vulkan_renderer_frame_vbuf *vbuf, *tmp; + uint64_t total_size = 0; + wl_list_for_each_safe(vbuf, tmp, &fr->vbuf_list, link) { + total_size += vbuf->size; + wl_list_remove(&vbuf->link); + destroy_buffer(vr->dev, vbuf->buffer, vbuf->memory); + free(vbuf); + } + + total_size = round_up_pow2_32(total_size); + + get_vertex_buffer(vr, fr, total_size); +} + +static int +vulkan_renderer_create_surface(struct weston_surface *surface) +{ + struct vulkan_surface_state *vs; + struct vulkan_renderer *vr = get_renderer(surface->compositor); + + vs = xzalloc(sizeof(*vs)); + + vs->surface = surface; + + surface->renderer_state = vs; + + vs->surface_destroy_listener.notify = + surface_state_handle_surface_destroy; + wl_signal_add(&surface->destroy_signal, + &vs->surface_destroy_listener); + + vs->renderer_destroy_listener.notify = + surface_state_handle_renderer_destroy; + wl_signal_add(&vr->destroy_signal, + &vs->renderer_destroy_listener); + + return 0; +} + +static inline struct vulkan_surface_state * +get_surface_state(struct weston_surface *surface) +{ + if (!surface->renderer_state) + vulkan_renderer_create_surface(surface); + + return (struct vulkan_surface_state *)surface->renderer_state; +} + +static void +create_image(struct vulkan_renderer *vr, + uint32_t width, uint32_t height, + VkFormat 
format, VkImageTiling tiling, + VkImageUsageFlags usage, VkMemoryPropertyFlags properties, + VkImage *image, VkDeviceMemory *memory) +{ + VkResult result; + + const VkImageCreateInfo image_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .imageType = VK_IMAGE_TYPE_2D, + .format = format, + .extent.width = width, + .extent.height = height, + .extent.depth = 1, + .mipLevels = 1, + .arrayLayers = 1, + .samples = VK_SAMPLE_COUNT_1_BIT, + .tiling = tiling, + .usage = usage, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }; + result = vkCreateImage(vr->dev, &image_info, NULL, image); + check_vk_success(result, "vkCreateImage"); + + VkMemoryRequirements mem_requirements; + vkGetImageMemoryRequirements(vr->dev, *image, &mem_requirements); + + int memory_type = find_memory_type(vr, mem_requirements.memoryTypeBits, properties); + assert(memory_type >= 0); + + const VkMemoryAllocateInfo alloc_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + .allocationSize = mem_requirements.size, + .memoryTypeIndex = memory_type, + }; + + result = vkAllocateMemory(vr->dev, &alloc_info, NULL, memory); + check_vk_success(result, "vkAllocateMemory"); + + result = vkBindImageMemory(vr->dev, *image, *memory, 0); + check_vk_success(result, "vkBindImageMemory"); +} + +static void +create_framebuffer(VkDevice device, VkRenderPass renderpass, VkImageView image_view, + uint32_t width, uint32_t height, VkFramebuffer *framebuffer) +{ + VkResult result; + + const VkFramebufferCreateInfo framebuffer_create_info = { + .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + .renderPass = renderpass, + .attachmentCount = 1, + .pAttachments = &image_view, + .width = width, + .height = height, + .layers = 1, + }; + + result = vkCreateFramebuffer(device, &framebuffer_create_info, NULL, framebuffer); + check_vk_success(result, "vkCreateFramebuffer"); +} + +static void +create_image_view(VkDevice device, VkImage image, VkFormat format, VkImageView *image_view) +{ + VkResult result; + + const VkImageViewCreateInfo view_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = image, + .viewType = VK_IMAGE_VIEW_TYPE_2D, + .format = format, + .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .subresourceRange.baseMipLevel = 0, + .subresourceRange.levelCount = 1, + .subresourceRange.baseArrayLayer = 0, + .subresourceRange.layerCount = 1, + }; + + result = vkCreateImageView(device, &view_info, NULL, image_view); + check_vk_success(result, "vkCreateImageView"); +} + +static void +copy_image_to_buffer(VkCommandBuffer cmd_buffer, + VkImage image, VkBuffer buffer, + uint32_t image_width, uint32_t image_height, + uint32_t buffer_pitch) +{ + const VkExtent3D image_extent = { image_width, image_height, 1 }; + const VkBufferImageCopy region = { + .imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .imageSubresource.mipLevel = 0, + .imageSubresource.baseArrayLayer = 0, + .imageSubresource.layerCount = 1, + .imageExtent = image_extent, + .bufferOffset = 0, + .bufferRowLength = buffer_pitch, + .bufferImageHeight = image_height, + }; + + vkCmdCopyImageToBuffer(cmd_buffer, + image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + buffer, 1, ®ion); +} + +static void +vulkan_renderer_cmd_begin(struct vulkan_renderer *vr, + VkCommandBuffer *cmd_buffer) +{ + VkResult result; + + const VkCommandBufferAllocateInfo cmd_alloc_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = vr->cmd_pool, + 
.commandBufferCount = 1, + }; + + result = vkAllocateCommandBuffers(vr->dev, &cmd_alloc_info, cmd_buffer); + check_vk_success(result, "vkAllocateCommandBuffers"); + + const VkCommandBufferBeginInfo begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + + result = vkBeginCommandBuffer(*cmd_buffer, &begin_info); + check_vk_success(result, "vkBeginCommandBuffer"); +} + +static void +vulkan_renderer_cmd_end_wait(struct vulkan_renderer *vr, + VkCommandBuffer *cmd_buffer) +{ + VkResult result; + + result = vkEndCommandBuffer(*cmd_buffer); + check_vk_success(result, "vkEndCommandBuffer"); + + const VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = cmd_buffer, + }; + + result = vkQueueSubmit(vr->queue, 1, &submit_info, VK_NULL_HANDLE); + check_vk_success(result, "vkQueueSubmit"); + + result = vkQueueWaitIdle(vr->queue); + check_vk_success(result, "vkQueueWaitIdle"); + + vkFreeCommandBuffers(vr->dev, vr->cmd_pool, 1, cmd_buffer); +} + +static bool +vulkan_renderer_do_read_pixels(struct vulkan_renderer *vr, + VkImage color_attachment, + struct vulkan_output_state *vo, + const struct pixel_format_info *pixel_format, + void *pixels, int stride, + const struct weston_geometry *rect) +{ + VkBuffer dst_buffer; + VkDeviceMemory dst_memory; + VkDeviceSize buffer_size = stride * rect->height; + VkResult result; + + create_buffer(vr, buffer_size, + VK_BUFFER_USAGE_TRANSFER_DST_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + &dst_buffer, &dst_memory); + + // TODO: async implementation of this + VkCommandBuffer cmd_buffer; + vulkan_renderer_cmd_begin(vr, &cmd_buffer); + + transition_image_layout(cmd_buffer, color_attachment, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, VK_ACCESS_TRANSFER_WRITE_BIT); + + copy_image_to_buffer(cmd_buffer, + color_attachment, dst_buffer, + rect->width, rect->height, + stride / (pixel_format->bpp/8)); + + transition_image_layout(cmd_buffer, color_attachment, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, VK_ACCESS_TRANSFER_WRITE_BIT); + + // TODO: async implementation of this, remove wait + vulkan_renderer_cmd_end_wait(vr, &cmd_buffer); + + /* Map image memory so we can start copying from it */ + void* buffer_map; + result = vkMapMemory(vr->dev, dst_memory, 0, VK_WHOLE_SIZE, 0, &buffer_map); + check_vk_success(result, "vkMapMemory"); + + memcpy(pixels, buffer_map, buffer_size); + + destroy_buffer(vr->dev, dst_buffer, dst_memory); + + return true; +} + +static bool +vulkan_renderer_do_capture(struct vulkan_renderer *vr, + VkImage color_attachment, + struct vulkan_output_state *vo, + struct weston_buffer *into, + const struct weston_geometry *rect) +{ + struct wl_shm_buffer *shm = into->shm_buffer; + const struct pixel_format_info *pixel_format = into->pixel_format; + bool ret; + + assert(into->type == WESTON_BUFFER_SHM); + assert(shm); + + wl_shm_buffer_begin_access(shm); + + ret = vulkan_renderer_do_read_pixels(vr, color_attachment, vo, pixel_format, + wl_shm_buffer_get_data(shm), into->stride, rect); + + wl_shm_buffer_end_access(shm); + + return ret; +} + +static void +vulkan_renderer_do_capture_tasks(struct vulkan_renderer *vr, + VkImage color_attachment, + struct weston_output *output, + 
enum weston_output_capture_source source) +{ + struct vulkan_output_state *vo = get_output_state(output); + const struct pixel_format_info *pixel_format; + struct weston_capture_task *ct; + struct weston_geometry rect; + + switch (source) { + case WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER: + pixel_format = output->compositor->read_format; + rect = vo->area; + break; + case WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER: + pixel_format = output->compositor->read_format; + rect.x = 0; + rect.y = 0; + rect.width = vo->fb_size.width; + rect.height = vo->fb_size.height; + break; + default: + assert(0); + return; + } + + while ((ct = weston_output_pull_capture_task(output, source, rect.width, + rect.height, pixel_format))) { + struct weston_buffer *buffer = weston_capture_task_get_buffer(ct); + + assert(buffer->width == rect.width); + assert(buffer->height == rect.height); + assert(buffer->pixel_format->format == pixel_format->format); + + if (buffer->type != WESTON_BUFFER_SHM || + buffer->buffer_origin != ORIGIN_TOP_LEFT) { + weston_capture_task_retire_failed(ct, "Vulkan: unsupported buffer"); + continue; + } + + if (buffer->stride % 4 != 0) { + weston_capture_task_retire_failed(ct, "Vulkan: buffer stride not multiple of 4"); + continue; + } + + if (vulkan_renderer_do_capture(vr, color_attachment, vo, buffer, &rect)) + weston_capture_task_retire_complete(ct); + else + weston_capture_task_retire_failed(ct, "Vulkan: capture failed"); + } +} + +static bool +vulkan_pipeline_texture_variant_can_be_premult(enum vulkan_pipeline_texture_variant v) +{ + switch (v) { + case PIPELINE_VARIANT_SOLID: + case PIPELINE_VARIANT_RGBA: + case PIPELINE_VARIANT_EXTERNAL: + return true; + case PIPELINE_VARIANT_RGBX: + return false; + case PIPELINE_VARIANT_NONE: + default: + abort(); + } + return true; +} + +static bool +vulkan_pipeline_config_init_for_paint_node(struct vulkan_pipeline_config *pconf, + struct weston_paint_node *pnode) +{ + struct vulkan_output_state *vo = get_output_state(pnode->output); + struct vulkan_surface_state *vs = get_surface_state(pnode->surface); + struct vulkan_buffer_state *vb = vs->buffer; + struct weston_buffer *buffer = vs->buffer_ref.buffer; + + if (!pnode->surf_xform_valid) + return false; + + *pconf = (struct vulkan_pipeline_config) { + .req = { + .texcoord_input = SHADER_TEXCOORD_INPUT_SURFACE, + .renderpass = vo->renderpass, + }, + .projection = pnode->view->transform.matrix, + .surface_to_buffer = + pnode->view->surface->surface_to_buffer_matrix, + .view_alpha = pnode->view->alpha, + }; + + weston_matrix_multiply(&pconf->projection, &vo->output_matrix); + + if (buffer->buffer_origin == ORIGIN_TOP_LEFT) { + weston_matrix_scale(&pconf->surface_to_buffer, + 1.0f / buffer->width, + 1.0f / buffer->height, 1); + } else { + weston_matrix_scale(&pconf->surface_to_buffer, + 1.0f / buffer->width, + -1.0f / buffer->height, 1); + weston_matrix_translate(&pconf->surface_to_buffer, 0, 1, 0); + } + + pconf->req.variant = vb->pipeline_variant; + pconf->req.input_is_premult = + vulkan_pipeline_texture_variant_can_be_premult(vb->pipeline_variant); + + for (int i = 0; i < 4; i++) + pconf->unicolor[i] = vb->color[i]; + + return true; +} + +static void +rect_to_quad(pixman_box32_t *rect, + struct weston_view *ev, + struct clipper_quad *quad) +{ + struct weston_coord_global rect_g[4] = { + { .c = weston_coord(rect->x1, rect->y1) }, + { .c = weston_coord(rect->x2, rect->y1) }, + { .c = weston_coord(rect->x2, rect->y2) }, + { .c = weston_coord(rect->x1, rect->y2) }, + }; + struct weston_coord rect_s; + + 
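+	/* The corners are listed in clockwise order starting at (x1, y1);
+	 * an affine transform keeps the quad convex, which the clipper
+	 * relies on below. */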
/* Transform rect to surface space. */ + for (int i = 0; i < 4; i++) { + rect_s = weston_coord_global_to_surface(ev, rect_g[i]).c; + quad->polygon[i].x = (float)rect_s.x; + quad->polygon[i].y = (float)rect_s.y; + } + + quad->axis_aligned = !ev->transform.enabled || + (ev->transform.matrix.type < WESTON_MATRIX_TRANSFORM_ROTATE); + + // TODO handle !axis_aligned ? + assert(quad->axis_aligned); +} + +static uint32_t +generate_fans(struct weston_paint_node *pnode, + pixman_region32_t *region, + pixman_region32_t *surf_region, + struct wl_array *vertices, + struct wl_array *vtxcnt) +{ + struct weston_view *ev = pnode->view; + struct clipper_vertex *v; + uint32_t *cnt; + uint32_t nvtx = 0; + pixman_box32_t *rects; + pixman_box32_t *surf_rects; + int nrects; + int nsurf; + struct clipper_quad quad; + + rects = pixman_region32_rectangles(region, &nrects); + surf_rects = pixman_region32_rectangles(surf_region, &nsurf); + + /* worst case we can have 8 vertices per rect (ie. clipped into + * an octagon) */ + v = wl_array_add(vertices, nrects * nsurf * 8 * sizeof(struct clipper_vertex)); + cnt = wl_array_add(vtxcnt, nrects * nsurf * sizeof(uint32_t)); + + for (int i = 0; i < nrects; i++) { + rect_to_quad(&rects[i], ev, &quad); + for (int j = 0; j < nsurf; j++) { + uint32_t n; + + /* The transformed quad, after clipping to the surface rect, can + * have as many as eight sides, emitted as a triangle-fan. The + * first vertex in the triangle fan can be chosen arbitrarily, + * since the area is guaranteed to be convex. + * + * If a corner of the transformed quad falls outside of the + * surface rect, instead of emitting one vertex, up to two are + * emitted for two corresponding intersection point(s) between the + * edges. + * + * To do this, we first calculate the (up to eight) points at the + * intersection of the edges of the quad and the surface rect. + */ + n = clipper_quad_clip_box32(&quad, &surf_rects[j], v); + if (n >= 3) { + v += n; + cnt[nvtx++] = n; + } + } + } + + return nvtx; +} + +static void +repaint_region(struct vulkan_renderer *vr, + struct weston_paint_node *pnode, + pixman_region32_t *region, + pixman_region32_t *surf_region, + const struct vulkan_pipeline_config *pconf, + struct vulkan_renderer_frame *fr) +{ + struct vulkan_surface_state *vs = get_surface_state(pnode->surface); + struct vulkan_buffer_state *vb = vs->buffer; + struct vulkan_pipeline *pipeline; + VkCommandBuffer cmd_buffer = fr->cmd_buffer; + uint32_t nfans; + + struct wl_array vertices; + struct wl_array vtxcnt; + wl_array_init(&vertices); + wl_array_init(&vtxcnt); + + /* The final region to be painted is the intersection of 'region' and + * 'surf_region'. However, 'region' is in the global coordinates, and + * 'surf_region' is in the surface-local coordinates. + * generate_fans() will iterate over all pairs of rectangles from both + * regions, compute the intersection polygon for each pair, and store + * it as a triangle fan if it has a non-zero area (at least 3 vertices, + * actually). 
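	 *
	 * For example, a damage rect that lies entirely inside a surf_rect
	 * clips to itself: clipper_quad_clip_box32() returns its 4 corners
	 * unchanged and a single 4-vertex fan (two triangles) is emitted
	 * for that pair.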
+ */ + nfans = generate_fans(pnode, region, surf_region, &vertices, &vtxcnt); + + struct vulkan_renderer_frame_vbuf *vbuf = get_vertex_buffer(vr, fr, vertices.size); + + pipeline = vulkan_renderer_get_pipeline(vr, &pconf->req); + assert(pipeline); + + vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->pipeline); + memcpy(vbuf->map + vbuf->offset, vertices.data, vertices.size); + + vkCmdBindVertexBuffers(cmd_buffer, 0, 1, &vbuf->buffer, &vbuf->offset); + + memcpy(vb->vs_ubo_map + offsetof(struct vs_ubo, proj), + pconf->projection.M.colmaj, sizeof(pconf->projection.M.colmaj)); + memcpy(vb->vs_ubo_map + offsetof(struct vs_ubo, surface_to_buffer), + pconf->surface_to_buffer.M.colmaj, sizeof(pconf->surface_to_buffer.M.colmaj)); + memcpy(vb->fs_ubo_map + offsetof(struct fs_ubo, unicolor), + pconf->unicolor, sizeof(pconf->unicolor)); + memcpy(vb->fs_ubo_map + offsetof(struct fs_ubo, view_alpha), + &pconf->view_alpha, sizeof(pconf->view_alpha)); + + vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, + pipeline->pipeline_layout, 0, 1, &vb->descriptor_set, 0, NULL); + + for (uint32_t i = 0, first = 0; i < nfans; i++) { + const uint32_t *vtxcntp = vtxcnt.data; + vkCmdDraw(cmd_buffer, vtxcntp[i], 1, first, 0); + first += vtxcntp[i]; + } + + vbuf->offset += vertices.size; + + wl_array_release(&vertices); + wl_array_release(&vtxcnt); +} + +static int +ensure_surface_buffer_is_ready(struct vulkan_renderer *vr, + struct vulkan_surface_state *vs, + struct vulkan_renderer_frame *fr) +{ + struct weston_surface *surface = vs->surface; + struct weston_buffer *buffer = vs->buffer_ref.buffer; + int acquire_fence_fd; + VkResult result; + + if (!buffer) + return 0; + + if (surface->acquire_fence_fd < 0) + return 0; + + /* We should only get a fence for non-SHM buffers, since surface + * commit would have failed otherwise. 
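+	 * The dup()ed sync fd is imported below as a temporarily-imported
+	 * semaphore and stored on fr->acquire_fence_list, so that it can
+	 * be waited on when this frame's command buffer is submitted.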
 */
+	assert(buffer->type != WESTON_BUFFER_SHM);
+
+	acquire_fence_fd = dup(surface->acquire_fence_fd);
+	if (acquire_fence_fd == -1) {
+		linux_explicit_synchronization_send_server_error(
+			vs->surface->synchronization_resource,
+			"Failed to dup acquire fence");
+		return -1;
+	}
+
+	struct vulkan_renderer_frame_acquire_fence *acquire_fence;
+	acquire_fence = xzalloc(sizeof(*acquire_fence));
+
+	const VkSemaphoreCreateInfo semaphore_info = {
+		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+	};
+	result = vkCreateSemaphore(vr->dev, &semaphore_info, NULL,
+				   &acquire_fence->semaphore);
+	if (result != VK_SUCCESS) {
+		/* Do not abort via check_vk_success() here: failing to
+		 * import a client fence should disconnect the offending
+		 * client, not take down the compositor. */
+		linux_explicit_synchronization_send_server_error(
+			vs->surface->synchronization_resource,
+			"vkCreateSemaphore");
+		free(acquire_fence);
+		close(acquire_fence_fd);
+		return -1;
+	}
+
+	const VkImportSemaphoreFdInfoKHR import_info = {
+		.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
+		.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+		.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
+		.semaphore = acquire_fence->semaphore,
+		.fd = acquire_fence_fd,
+	};
+	result = vr->import_semaphore_fd(vr->dev, &import_info);
+	if (result != VK_SUCCESS) {
+		linux_explicit_synchronization_send_server_error(
+			vs->surface->synchronization_resource,
+			"vkImportSemaphoreFdKHR");
+		vkDestroySemaphore(vr->dev, acquire_fence->semaphore, NULL);
+		free(acquire_fence);
+		close(acquire_fence_fd);
+		return -1;
+	}
+
+	wl_list_insert(&fr->acquire_fence_list, &acquire_fence->link);
+
+	return 0;
+}
+
+static void
+draw_paint_node(struct weston_paint_node *pnode,
+		pixman_region32_t *damage, /* in global coordinates */
+		struct vulkan_renderer_frame *fr)
+{
+	struct vulkan_renderer *vr = get_renderer(pnode->surface->compositor);
+	struct vulkan_surface_state *vs = get_surface_state(pnode->surface);
+	struct vulkan_buffer_state *vb = vs->buffer;
+	struct weston_buffer *buffer = vs->buffer_ref.buffer;
+	/* repaint bounding region in global coordinates: */
+	pixman_region32_t repaint;
+	/* opaque region in surface coordinates: */
+	pixman_region32_t surface_opaque;
+	/* non-opaque region in surface coordinates: */
+	pixman_region32_t surface_blend;
+	struct vulkan_pipeline_config pconf;
+	struct vulkan_pipeline *pipeline;
+
+	if (vb->pipeline_variant == PIPELINE_VARIANT_NONE &&
+	    !buffer->direct_display)
+		return;
+
+	pixman_region32_init(&repaint);
+	pixman_region32_intersect(&repaint, &pnode->visible, damage);
+
+	if (!pixman_region32_not_empty(&repaint))
+		goto out;
+
+	if (!pnode->draw_solid && ensure_surface_buffer_is_ready(vr, vs, fr) < 0)
+		goto out;
+
+	if (!vulkan_pipeline_config_init_for_paint_node(&pconf, pnode))
+		goto out;
+
+	pipeline = vulkan_renderer_get_pipeline(vr, &pconf.req);
+	assert(pipeline);
+
+	VkSampler sampler;
+	VkImageView image_view;
+	if (vb->texture.image_view) {
+		image_view = vb->texture.image_view;
+		sampler = pnode->needs_filtering ? vb->sampler_linear : vb->sampler_nearest;
+	} else {
+		image_view = vr->dummy.image.image_view;
+		sampler = vr->dummy.sampler;
+	}
+	create_descriptor_set(vr, fr, &pipeline->descriptor_set_layout,
+			      vb->vs_ubo_buffer, vb->fs_ubo_buffer,
+			      image_view, sampler,
+			      &vb->descriptor_set);
+
+	/* XXX: Should we be using ev->transform.opaque here?
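+	 *
+	 * Either way, regions known to be opaque are drawn with the RGBX
+	 * variant below, with blending enabled only when the view itself
+	 * is translucent, saving blend read-modify-write on that path.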
*/ + if (pnode->is_fully_opaque) { + pixman_region32_init_rect(&surface_opaque, 0, 0, + pnode->surface->width, + pnode->surface->height); + } else { + pixman_region32_init(&surface_opaque); + pixman_region32_copy(&surface_opaque, &pnode->surface->opaque); + } + + if (pnode->view->geometry.scissor_enabled) + pixman_region32_intersect(&surface_opaque, + &surface_opaque, + &pnode->view->geometry.scissor); + + /* blended region is whole surface minus opaque region: */ + pixman_region32_init_rect(&surface_blend, 0, 0, + pnode->surface->width, pnode->surface->height); + if (pnode->view->geometry.scissor_enabled) + pixman_region32_intersect(&surface_blend, &surface_blend, + &pnode->view->geometry.scissor); + pixman_region32_subtract(&surface_blend, &surface_blend, + &surface_opaque); + + if (pixman_region32_not_empty(&surface_opaque)) { + struct vulkan_pipeline_config alt = pconf; + + if (alt.req.variant == PIPELINE_VARIANT_RGBA) + alt.req.variant = PIPELINE_VARIANT_RGBX; + + alt.req.blend = (pnode->view->alpha < 1.0); + + repaint_region(vr, pnode, &repaint, &surface_opaque, &alt, fr); + vs->used_in_output_repaint = true; + } + + pconf.req.blend = true; + if (pixman_region32_not_empty(&surface_blend)) { + repaint_region(vr, pnode, &repaint, &surface_blend, &pconf, fr); + vs->used_in_output_repaint = true; + } + + pixman_region32_fini(&surface_blend); + pixman_region32_fini(&surface_opaque); + +out: + pixman_region32_fini(&repaint); +} + +static void +repaint_views(struct weston_output *output, pixman_region32_t *damage, + struct vulkan_renderer_frame *fr) +{ + struct weston_paint_node *pnode; + + wl_list_for_each_reverse(pnode, &output->paint_node_z_order_list, + z_order_link) { + if (pnode->plane == &output->primary_plane) + draw_paint_node(pnode, damage, fr); + } +} + +static void +vulkan_renderbuffer_init(struct vulkan_renderbuffer *renderbuffer, + struct vulkan_renderer_image *image, + weston_renderbuffer_discarded_func discarded_cb, + void *user_data, + struct weston_output *output) +{ + struct vulkan_output_state *vo = get_output_state(output); + + renderbuffer->output = output; + pixman_region32_init(&renderbuffer->damage); + pixman_region32_copy(&renderbuffer->damage, &output->region); + renderbuffer->border_status = BORDER_ALL_DIRTY; + renderbuffer->discarded_cb = discarded_cb; + renderbuffer->user_data = user_data; + renderbuffer->image = image; + + wl_list_insert(&vo->renderbuffer_list, &renderbuffer->link); +} + +static void +vulkan_renderer_update_renderbuffers(struct weston_output *output, + pixman_region32_t *damage) +{ + struct vulkan_output_state *vo = get_output_state(output); + struct vulkan_renderbuffer *rb; + + /* Accumulate changes in non-stale renderbuffers. 
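+	 *
+	 * The damage accumulated here is consumed and cleared again when
+	 * the corresponding buffer is repainted.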
 */
+	wl_list_for_each(rb, &vo->renderbuffer_list, link) {
+		if (rb->stale)
+			continue;
+
+		pixman_region32_union(&rb->damage, &rb->damage, damage);
+		rb->border_status |= vo->border_status;
+	}
+}
+
+static struct weston_geometry
+output_get_border_area(const struct vulkan_output_state *vo,
+		       enum weston_renderer_border_side side)
+{
+	const struct weston_size *fb_size = &vo->fb_size;
+	const struct weston_geometry *area = &vo->area;
+
+	switch (side) {
+	case WESTON_RENDERER_BORDER_TOP:
+		return (struct weston_geometry){
+			.x = 0,
+			.y = 0,
+			.width = fb_size->width,
+			.height = area->y
+		};
+	case WESTON_RENDERER_BORDER_LEFT:
+		return (struct weston_geometry){
+			.x = 0,
+			.y = area->y,
+			.width = area->x,
+			.height = area->height
+		};
+	case WESTON_RENDERER_BORDER_RIGHT:
+		return (struct weston_geometry){
+			.x = area->x + area->width,
+			.y = area->y,
+			.width = fb_size->width - area->x - area->width,
+			.height = area->height
+		};
+	case WESTON_RENDERER_BORDER_BOTTOM:
+		return (struct weston_geometry){
+			.x = 0,
+			.y = area->y + area->height,
+			.width = fb_size->width,
+			.height = fb_size->height - area->y - area->height
+		};
+	}
+
+	abort();
+	return (struct weston_geometry){};
+}
+
+static int
+vulkan_renderer_create_fence_fd(struct weston_output *output)
+{
+	struct vulkan_output_state *vo = get_output_state(output);
+
+	struct vulkan_renderer_frame *fr = &vo->frames[vo->last_frame];
+
+	return dup(fr->render_fence_fd);
+}
+
+/* Updates the release fences of surfaces that were used in the current output
+ * repaint. Should only be used from vulkan_renderer_repaint_output, so that the
+ * information in vulkan_surface_state.used_in_output_repaint is accurate.
+ */
+static void
+update_buffer_release_fences(struct weston_compositor *compositor,
+			     struct weston_output *output)
+{
+	struct weston_paint_node *pnode;
+
+	wl_list_for_each_reverse(pnode, &output->paint_node_z_order_list,
+				 z_order_link) {
+		struct vulkan_surface_state *vs;
+		struct weston_buffer_release *buffer_release;
+		int fence_fd;
+
+		if (pnode->plane != &output->primary_plane)
+			continue;
+
+		if (pnode->draw_solid)
+			continue;
+
+		vs = get_surface_state(pnode->surface);
+		buffer_release = vs->buffer_release_ref.buffer_release;
+
+		if (!vs->used_in_output_repaint || !buffer_release)
+			continue;
+
+		fence_fd = vulkan_renderer_create_fence_fd(output);
+
+		/* If we have a buffer_release then it means we support fences,
+		 * and we should be able to create the release fence. If we
+		 * can't, something has gone horribly wrong, so disconnect the
+		 * client.
+		 */
+		if (fence_fd == -1) {
+			linux_explicit_synchronization_send_server_error(
+				buffer_release->resource,
+				"Failed to create release fence");
+			fd_clear(&buffer_release->fence_fd);
+			continue;
+		}
+
+		/* At the moment it is safe to just replace the fence_fd,
+		 * discarding the previous one:
+		 *
+		 * 1. If the previous fence fd represents a sync fence from
+		 *    a previous repaint cycle, that fence fd is now not
+		 *    sufficient to provide the release guarantee and should
+		 *    be replaced.
+		 *
+		 * 2. If the fence fd represents a sync fence from another
+		 *    output in the same repaint cycle, it's fine to replace
+		 *    it since we are rendering to all outputs using the same
+		 *    Vulkan queue, so a fence issued for a later output
+		 *    rendering is guaranteed to signal after fences for
+		 *    previous output renderings.
+		 *
+		 * Note that the above is only valid if the buffer_release
+		 * fences only originate from the Vulkan renderer, which
+		 * guarantees a total order of operations and fences.
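+		 * (All rendering here is submitted to the single vr->queue,
+		 * and fence signal operations on one queue respect
+		 * submission order.)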
If we introduce + * fences from other sources (e.g., plane out-fences), we will + * need to merge fences instead. + */ + fd_update(&buffer_release->fence_fd, fence_fd); + } +} + +static void +draw_output_border_texture(struct vulkan_renderer *vr, + struct vulkan_output_state *vo, + struct vulkan_pipeline_config *pconf, + enum weston_renderer_border_side side, + int32_t x, int32_t y, + int32_t width, int32_t height, + VkCommandBuffer cmd_buffer, + struct vulkan_renderer_frame *fr) +{ + struct vulkan_border_image *border = &vo->borders[side]; + struct vulkan_pipeline *pipeline; + + if (!border->data) + return; + + float position[] = { + x, y, 0.0f, 0.0f, + x + width, y, (float)border->width / (float)border->tex_width, 0.0f, + x + width, y + height, (float)border->width / (float)border->tex_width, 1.0f, + x, y + height, 0.0f, 1.0f, + }; + + struct vulkan_renderer_frame_vbuf *vbuf = get_vertex_buffer(vr, fr, sizeof(position)); + + pipeline = vulkan_renderer_get_pipeline(vr, &pconf->req); + assert(pipeline); + + create_descriptor_set(vr, fr, &pipeline->descriptor_set_layout, + border->vs_ubo_buffer, border->fs_ubo_buffer, + border->texture.image_view, border->sampler, + &border->descriptor_set); + + vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline->pipeline); + memcpy(vbuf->map + vbuf->offset, position, sizeof(position)); + + vkCmdBindVertexBuffers(cmd_buffer, 0, 1, &vbuf->buffer, &vbuf->offset); + + memcpy(border->vs_ubo_map + offsetof(struct vs_ubo, proj), + pconf->projection.M.colmaj, sizeof(pconf->projection.M.colmaj)); + memset(border->vs_ubo_map + offsetof(struct vs_ubo, surface_to_buffer), + 0, sizeof(pconf->surface_to_buffer.M.colmaj)); + memcpy(border->fs_ubo_map + offsetof(struct fs_ubo, unicolor), + pconf->unicolor, sizeof(pconf->unicolor)); + memcpy(border->fs_ubo_map + offsetof(struct fs_ubo, view_alpha), + &pconf->view_alpha, sizeof(pconf->view_alpha)); + + vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, + pipeline->pipeline_layout, 0, 1, &border->descriptor_set, 0, NULL); + + vkCmdDraw(cmd_buffer, 4, 1, 0, 0); + + vbuf->offset += sizeof(position); +} + +static void +draw_output_borders(struct weston_output *output, + enum vulkan_border_status border_status, + VkCommandBuffer cmd_buffer, + struct vulkan_renderer_frame *fr) +{ + struct vulkan_output_state *vo = get_output_state(output); + struct vulkan_renderer *vr = get_renderer(output->compositor); + const struct weston_size *fb_size = &vo->fb_size; + enum vulkan_pipeline_texture_variant pipeline_variant; + + if (pixel_format_is_opaque(vo->pixel_format)) + pipeline_variant = PIPELINE_VARIANT_RGBX; + else + pipeline_variant = PIPELINE_VARIANT_RGBA; + + struct vulkan_pipeline_config pconf = { + .req = { + .texcoord_input = SHADER_TEXCOORD_INPUT_ATTRIB, + .renderpass = vo->renderpass, + .variant = pipeline_variant, + .input_is_premult = true, + }, + .view_alpha = 1.0f, + }; + + if (border_status == BORDER_STATUS_CLEAN) + return; /* Clean. Nothing to do. 
*/ + + weston_matrix_init(&pconf.projection); + + weston_matrix_translate(&pconf.projection, + -fb_size->width / 2.0, -fb_size->height / 2.0, 0); + weston_matrix_scale(&pconf.projection, + 2.0 / (float)fb_size->width, 2.0 / (float)fb_size->height, 1); + + const VkViewport viewport = { + .x = 0, + .y = 0, + .width = fb_size->width, + .height = fb_size->height, + .minDepth = 0.0f, + .maxDepth = 1.0f, + }; + vkCmdSetViewport(cmd_buffer, 0, 1, &viewport); + + const VkRect2D scissor = { + .offset = { 0, 0 }, + .extent = { fb_size->width, fb_size->height }, + }; + vkCmdSetScissor(cmd_buffer, 0, 1, &scissor); + + for (unsigned side = 0; side < 4; side++) { + struct weston_geometry g; + + if (!(border_status & (1 << side))) + continue; + + g = output_get_border_area(vo, side); + draw_output_border_texture(vr, vo, &pconf, side, + g.x, g.y, g.width, g.height, cmd_buffer, fr); + } +} + +static void +output_get_border_damage(struct weston_output *output, + enum vulkan_border_status border_status, + pixman_region32_t *damage) +{ + struct vulkan_output_state *vo = get_output_state(output); + unsigned side; + + for (side = 0; side < 4; side++) { + struct weston_geometry g; + + if (!(border_status & (1 << side))) + continue; + + g = output_get_border_area(vo, side); + pixman_region32_union_rect(damage, damage, + g.x, g.y, g.width, g.height); + } +} + +static int +output_has_borders(struct weston_output *output) +{ + struct vulkan_output_state *vo = get_output_state(output); + + return vo->borders[WESTON_RENDERER_BORDER_TOP].data || + vo->borders[WESTON_RENDERER_BORDER_RIGHT].data || + vo->borders[WESTON_RENDERER_BORDER_BOTTOM].data || + vo->borders[WESTON_RENDERER_BORDER_LEFT].data; +} + +static void +pixman_region_to_scissor(struct weston_output *output, + struct pixman_region32 *global_region, + enum vulkan_border_status border_status, + VkRect2D *scissor) +{ + struct vulkan_output_state *vo = get_output_state(output); + pixman_region32_t transformed; + struct pixman_box32 *box; + + /* Translate from global to output coordinate space. */ + pixman_region32_init(&transformed); + weston_region_global_to_output(&transformed, + output, + global_region); + + /* If we have borders drawn around the output, shift our output damage + * to account for borders being drawn around the outside, adding any + * damage resulting from borders being redrawn. */ + if (output_has_borders(output)) { + pixman_region32_translate(&transformed, + vo->area.x, vo->area.y); + output_get_border_damage(output, border_status, &transformed); + } + + /* Convert from a Pixman region into a VkRect2D */ + box = pixman_region32_extents(&transformed); + + const VkRect2D s = { + .offset = { box->x1, box->y1 }, + .extent = { box->x2 - box->x1, box->y2 - box->y1 }, + }; + + *scissor = s; + pixman_region32_fini(&transformed); +} + +static void +pixman_region_to_present_region(struct weston_output *output, + struct pixman_region32 *global_region, + enum vulkan_border_status border_status, + uint32_t *nrects, + VkRectLayerKHR **rects) +{ + struct vulkan_output_state *vo = get_output_state(output); + pixman_region32_t transformed; + + /* Translate from global to output coordinate space. */ + pixman_region32_init(&transformed); + weston_region_global_to_output(&transformed, + output, + global_region); + + /* If we have borders drawn around the output, shift our output damage + * to account for borders being drawn around the outside, adding any + * damage resulting from borders being redrawn. 
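+	 *
+	 * Unlike the scissor path above, which only keeps the bounding
+	 * box, every rectangle of the damage region is forwarded here.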
*/ + if (output_has_borders(output)) { + pixman_region32_translate(&transformed, + vo->area.x, vo->area.y); + output_get_border_damage(output, border_status, &transformed); + } + + int n; + pixman_box32_t *r; + r = pixman_region32_rectangles(&transformed, &n); + VkRectLayerKHR *rect_layers = xmalloc(n * sizeof(*rect_layers)); + + for (int i = 0; i < n; i++) { + const pixman_box32_t *b = &r[i]; + const VkRectLayerKHR l = { + .offset = { b->x1, b->y1 }, + .extent = { b->x2 - b->x1, b->y2 - b->y1 }, + }; + + rect_layers[i] = l; + } + + *nrects = (uint32_t)n; + *rects = rect_layers; + + pixman_region32_fini(&transformed); +} + +static void +vulkan_renderer_create_swapchain(struct weston_output *output, + struct weston_size fb_size) +{ + struct weston_compositor *ec = output->compositor; + struct vulkan_renderer *vr = get_renderer(ec); + struct vulkan_output_state *vo = get_output_state(output); + const struct pixel_format_info *pixel_format = vo->pixel_format; + const VkFormat format = pixel_format->vulkan_format; + + VkSurfaceCapabilitiesKHR surface_caps; + vkGetPhysicalDeviceSurfaceCapabilitiesKHR(vr->phys_dev, vo->swapchain.surface, &surface_caps); + + uint32_t min_image_count = 2; + if (min_image_count < surface_caps.minImageCount) + min_image_count = surface_caps.minImageCount; + + if (surface_caps.maxImageCount > 0 && min_image_count > surface_caps.maxImageCount) + min_image_count = surface_caps.maxImageCount; + + const VkExtent2D swapchain_extent = { fb_size.width, fb_size.height }; + VkSwapchainCreateInfoKHR swapchain_create_info = { + .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, + .flags = 0, + .surface = vo->swapchain.surface, + .minImageCount = min_image_count, + .imageFormat = format, + .imageColorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + .imageExtent = swapchain_extent, + .imageArrayLayers = 1, + .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, + .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE, + .queueFamilyIndexCount = 1, + .pQueueFamilyIndices = &vr->queue_family, + .preTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR, + .presentMode = vo->swapchain.present_mode, + }; + if (surface_caps.supportedCompositeAlpha & VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR) + swapchain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR; + else + swapchain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; + + vkCreateSwapchainKHR(vr->dev, &swapchain_create_info, NULL, &vo->swapchain.swapchain); + + vkGetSwapchainImagesKHR(vr->dev, vo->swapchain.swapchain, &vo->image_count, NULL); + assert(vo->image_count > 0); + VkImage swapchain_images[vo->image_count]; + vkGetSwapchainImagesKHR(vr->dev, vo->swapchain.swapchain, &vo->image_count, swapchain_images); + + // Command here only for the layout transitions + VkCommandBuffer cmd_buffer; + vulkan_renderer_cmd_begin(vr, &cmd_buffer); + + for (uint32_t i = 0; i < vo->image_count; i++) { + struct vulkan_renderer_image *im = &vo->images[i]; + + create_image_view(vr->dev, swapchain_images[i], format, &im->image_view); + create_framebuffer(vr->dev, vo->renderpass, im->image_view, + fb_size.width, fb_size.height, &im->framebuffer); + + transition_image_layout(cmd_buffer, swapchain_images[i], + VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, + 0, VK_ACCESS_TRANSFER_WRITE_BIT); + + im->renderbuffer = xzalloc(sizeof(*im->renderbuffer)); + vulkan_renderbuffer_init(im->renderbuffer, NULL, NULL, NULL, output); 
+ } + + // Wait here is bad, but this is only on swapchain recreation + vulkan_renderer_cmd_end_wait(vr, &cmd_buffer); +} + +static void +vulkan_renderer_recreate_swapchain(struct weston_output *output, + struct weston_size fb_size) +{ + vulkan_renderer_destroy_swapchain(output); + vulkan_renderer_create_swapchain(output, fb_size); +} + +static void +vulkan_renderer_repaint_output(struct weston_output *output, + pixman_region32_t *output_damage, + weston_renderbuffer_t renderbuffer) +{ + struct weston_compositor *compositor = output->compositor; + struct vulkan_output_state *vo = get_output_state(output); + struct vulkan_renderer *vr = get_renderer(compositor); + struct weston_paint_node *pnode; + VkResult result; + uint32_t swapchain_index; + + assert(vo); + assert(!renderbuffer || + ((struct vulkan_renderbuffer *) renderbuffer)->output == output); + + struct vulkan_renderer_frame *fr = &vo->frames[vo->frame_index]; + + assert(vo->frame_index < vo->num_frames); + vkWaitForFences(vr->dev, 1, &vo->frames[vo->frame_index].fence, VK_TRUE, UINT64_MAX); + vkResetFences(vr->dev, 1, &vo->frames[vo->frame_index].fence); + + struct vulkan_renderer_frame_acquire_fence *acquire_fence, *ftmp; + wl_list_for_each_safe(acquire_fence, ftmp, &fr->acquire_fence_list, link) { + vkDestroySemaphore(vr->dev, acquire_fence->semaphore, NULL); + wl_list_remove(&acquire_fence->link); + free(acquire_fence); + } + + reset_vertex_buffers(vr, fr); + + reset_descriptor_pool(vr, fr); + + /* Clear the used_in_output_repaint flag, so that we can properly track + * which surfaces were used in this output repaint. */ + wl_list_for_each_reverse(pnode, &output->paint_node_z_order_list, + z_order_link) { + if (pnode->plane == &output->primary_plane) { + struct vulkan_surface_state *vs = + get_surface_state(pnode->view->surface); + vs->used_in_output_repaint = false; + } + } + + /* Calculate the global matrix */ + vo->output_matrix = output->matrix; + weston_matrix_translate(&vo->output_matrix, + -(vo->area.width / 2.0), + -(vo->area.height / 2.0), 0); + weston_matrix_scale(&vo->output_matrix, + 2.0 / vo->area.width, + 2.0 / vo->area.height, 1); + + struct vulkan_renderer_image *im; + struct vulkan_renderbuffer *rb; + switch(vo->output_type) { + case VULKAN_OUTPUT_SWAPCHAIN: + result = vkAcquireNextImageKHR(vr->dev, vo->swapchain.swapchain, UINT64_MAX, + fr->image_acquired, VK_NULL_HANDLE, &swapchain_index); + if (result == VK_SUBOPTIMAL_KHR) { + vulkan_renderer_recreate_swapchain(output, vo->fb_size); + } else if (result != VK_SUCCESS) { + abort(); + } + + im = &vo->images[swapchain_index]; + rb = im->renderbuffer; + break; + case VULKAN_OUTPUT_HEADLESS: + assert(renderbuffer); + rb = renderbuffer; + im = rb->image; + break; + case VULKAN_OUTPUT_DRM: + im = &vo->images[vo->drm.image_index]; + rb = im->renderbuffer; + break; + default: + abort(); + } + assert(rb && im); + + vulkan_renderer_update_renderbuffers(output, output_damage); + + VkCommandBuffer cmd_buffer = fr->cmd_buffer; + VkFramebuffer framebuffer = im->framebuffer; + + VkCommandBufferBeginInfo begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + + result = vkBeginCommandBuffer(cmd_buffer, &begin_info); + check_vk_success(result, "vkBeginCommandBuffer"); + + if (vo->output_type == VULKAN_OUTPUT_DRM) { + // Transfer ownership of the dmabuf to Vulkan + if (!vr->has_queue_family_foreign) + abort(); + transfer_image_queue_family(cmd_buffer, im->image, + VK_QUEUE_FAMILY_FOREIGN_EXT, + 
vr->queue_family); + } + + const struct weston_size *fb = &vo->fb_size; + const VkRect2D render_area = { + .offset = { 0, 0 }, + .extent = { fb->width, fb->height }, + }; + const VkRenderPassBeginInfo renderpass_begin_info = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, + .renderPass = vo->renderpass, + .framebuffer = framebuffer, + .renderArea = render_area, + }; + vkCmdBeginRenderPass(cmd_buffer, &renderpass_begin_info, VK_SUBPASS_CONTENTS_INLINE); + + const VkViewport viewport = { + .x = vo->area.x, + .y = vo->area.y, + .width = vo->area.width, + .height = vo->area.height, + .minDepth = 0.0f, + .maxDepth = 1.0f, + }; + vkCmdSetViewport(cmd_buffer, 0, 1, &viewport); + + VkRect2D scissor; + pixman_region_to_scissor(output, &rb->damage, rb->border_status, &scissor); + vkCmdSetScissor(cmd_buffer, 0, 1, &scissor); + + repaint_views(output, &rb->damage, fr); + + draw_output_borders(output, rb->border_status, cmd_buffer, fr); + + wl_signal_emit(&output->frame_signal, output_damage); + + vkCmdEndRenderPass(cmd_buffer); + + if (vo->output_type == VULKAN_OUTPUT_DRM) { + // Transfer ownership of the dmabuf to DRM + if (!vr->has_queue_family_foreign) + abort(); + transfer_image_queue_family(cmd_buffer, im->image, + vr->queue_family, + VK_QUEUE_FAMILY_FOREIGN_EXT); + } + + result = vkEndCommandBuffer(cmd_buffer); + check_vk_success(result, "vkEndCommandBuffer"); + + uint32_t semaphore_count = wl_list_length(&fr->acquire_fence_list); + VkPipelineStageFlags wait_stages[1+semaphore_count]; + VkSemaphore wait_semaphores[1+semaphore_count]; + + uint32_t s = 0; + if (vo->output_type == VULKAN_OUTPUT_SWAPCHAIN) { + wait_semaphores[s] = fr->image_acquired; + wait_stages[s] = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + s++; + } + wl_list_for_each(acquire_fence, &fr->acquire_fence_list, link) { + wait_semaphores[s] = acquire_fence->semaphore; + wait_stages[s] = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + s++; + } + VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .waitSemaphoreCount = s, + .pWaitSemaphores = wait_semaphores, + .pWaitDstStageMask = wait_stages, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buffer, + }; + if ((vo->output_type == VULKAN_OUTPUT_SWAPCHAIN) || + (vo->output_type == VULKAN_OUTPUT_DRM && vr->semaphore_import_export)) { + submit_info.signalSemaphoreCount = 1; + submit_info.pSignalSemaphores = &fr->render_done; + } + + result = vkQueueSubmit(vr->queue, 1, &submit_info, fr->fence); + check_vk_success(result, "vkQueueSubmit"); + + if (vo->output_type == VULKAN_OUTPUT_SWAPCHAIN) { + VkPresentInfoKHR present_info = { + .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + .waitSemaphoreCount = 1, + .pWaitSemaphores = &fr->render_done, + .swapchainCount = 1, + .pSwapchains = &vo->swapchain.swapchain, + .pImageIndices = &swapchain_index, + .pResults = NULL, + }; + + if (vr->has_incremental_present) { + uint32_t nrects; + VkRectLayerKHR *rects; + pixman_region_to_present_region(output, output_damage, + rb->border_status, &nrects, &rects); + + const VkPresentRegionKHR region = { + .rectangleCount = nrects, + .pRectangles = rects, + }; + VkPresentRegionsKHR present_regions = { + .sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR, + .swapchainCount = 1, + .pRegions = ®ion, + }; + pnext(&present_info, &present_regions); + + result = vkQueuePresentKHR(vr->queue, &present_info); + free(rects); + } else { + result = vkQueuePresentKHR(vr->queue, &present_info); + } + + if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) { + abort(); + } else 
if (result != VK_SUCCESS) { + abort(); + } + } else if (vo->output_type == VULKAN_OUTPUT_DRM && vr->semaphore_import_export) { + int fd; + const VkSemaphoreGetFdInfoKHR semaphore_fd_info = { + .sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, + .semaphore = fr->render_done, + .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + }; + result = vr->get_semaphore_fd(vr->dev, &semaphore_fd_info, &fd); + check_vk_success(result, "vkGetSemaphoreFdKHR"); + + fd_update(&fr->render_fence_fd, fd); + } + + vulkan_renderer_do_capture_tasks(vr, im->image, output, + WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER); + vulkan_renderer_do_capture_tasks(vr, im->image, output, + WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER); + + rb->border_status = BORDER_STATUS_CLEAN; + vo->border_status = BORDER_STATUS_CLEAN; + + pixman_region32_clear(&rb->damage); + + update_buffer_release_fences(compositor, output); + + vo->last_frame = vo->frame_index; + vo->frame_index = (vo->frame_index + 1) % vo->num_frames; + + if (vo->output_type == VULKAN_OUTPUT_DRM) + vo->drm.image_index = (vo->drm.image_index + 1) % vo->image_count; +} + +static void +create_texture_sampler(struct vulkan_renderer *vr, VkSampler *texture_sampler, VkFilter filter) +{ + VkResult result; + + const VkSamplerCreateInfo sampler_info = { + .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, + .magFilter = filter, + .minFilter = filter, + .addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + .addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + .addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, + .anisotropyEnable = VK_FALSE, + .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK, + .unnormalizedCoordinates = VK_FALSE, + .compareEnable = VK_FALSE, + .compareOp = VK_COMPARE_OP_ALWAYS, + }; + + result = vkCreateSampler(vr->dev, &sampler_info, NULL, texture_sampler); + check_vk_success(result, "vkCreateSampler"); +} + +static void +copy_buffer_to_sub_image(VkCommandBuffer cmd_buffer, + VkBuffer buffer, VkImage image, + uint32_t buffer_width, uint32_t buffer_height, + uint32_t pitch, + uint32_t bpp, + uint32_t xoff, uint32_t yoff, + uint32_t xcopy, uint32_t ycopy) +{ + const VkOffset3D image_offset = { xoff, yoff }; + const VkExtent3D image_extent = { xcopy, ycopy, 1 }; + + const VkBufferImageCopy region = { + .bufferOffset = ((buffer_width * yoff) + xoff) * (bpp/8), + .bufferRowLength = pitch, + .bufferImageHeight = buffer_height, + .imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .imageSubresource.mipLevel = 0, + .imageSubresource.baseArrayLayer = 0, + .imageSubresource.layerCount = 1, + .imageOffset = image_offset, + .imageExtent = image_extent, + }; + + vkCmdCopyBufferToImage(cmd_buffer, buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion); +} + +static void +update_texture_image(struct vulkan_renderer *vr, + struct vulkan_renderer_texture_image *texture, + VkImageLayout expected_layout, + const struct pixel_format_info *pixel_format, + uint32_t buffer_width, uint32_t buffer_height, + uint32_t pitch, const void * const pixels, + uint32_t xoff, uint32_t yoff, + uint32_t xcopy, uint32_t ycopy) +{ + VkDeviceSize image_size = pitch * buffer_height * (pixel_format->bpp/8); + VkResult result; + + assert(pixels); + + memcpy(texture->staging_map, pixels, (size_t)image_size); + + vkWaitForFences(vr->dev, 1, &texture->upload_fence, VK_TRUE, UINT64_MAX); + vkResetFences(vr->dev, 1, &texture->upload_fence); + + const VkCommandBufferBeginInfo begin_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + .flags = 
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + }; + + VkCommandBuffer cmd_buffer = texture->upload_cmd; + + result = vkBeginCommandBuffer(cmd_buffer, &begin_info); + check_vk_success(result, "vkBeginCommandBuffer"); + + transition_image_layout(cmd_buffer, texture->image, + expected_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, + VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT); + + copy_buffer_to_sub_image(cmd_buffer, texture->staging_buffer, texture->image, + buffer_width, buffer_height, pitch, pixel_format->bpp, + xoff, yoff, xcopy, ycopy); + + transition_image_layout(cmd_buffer, texture->image, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, + VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT); + + result = vkEndCommandBuffer(cmd_buffer); + check_vk_success(result, "vkEndCommandBuffer"); + + const VkSubmitInfo submit_info = { + .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, + .commandBufferCount = 1, + .pCommandBuffers = &cmd_buffer, + }; + + result = vkQueueSubmit(vr->queue, 1, &submit_info, texture->upload_fence); + check_vk_success(result, "vkQueueSubmit"); +} + +static void +update_texture_image_all(struct vulkan_renderer *vr, + struct vulkan_renderer_texture_image *texture, + VkImageLayout expected_layout, + const struct pixel_format_info *pixel_format, + uint32_t buffer_width, uint32_t buffer_height, + uint32_t pitch, const void * const pixels) +{ + update_texture_image(vr, texture, expected_layout, pixel_format, + buffer_width, buffer_height, pitch, pixels, + 0, 0, buffer_width, buffer_height); +} + +static void +create_texture_image(struct vulkan_renderer *vr, + struct vulkan_renderer_texture_image *texture, + const struct pixel_format_info *pixel_format, + uint32_t buffer_width, uint32_t buffer_height, uint32_t pitch) +{ + VkDeviceSize image_size = pitch * buffer_height * (pixel_format->bpp/8); + VkResult result; + + const VkFenceCreateInfo fence_info = { + .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = VK_FENCE_CREATE_SIGNALED_BIT, + }; + result = vkCreateFence(vr->dev, &fence_info, NULL, &texture->upload_fence); + check_vk_success(result, "vkCreateFence"); + + const VkCommandBufferAllocateInfo cmd_alloc_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandPool = vr->cmd_pool, + .commandBufferCount = 1, + }; + result = vkAllocateCommandBuffers(vr->dev, &cmd_alloc_info, &texture->upload_cmd); + check_vk_success(result, "vkAllocateCommandBuffers"); + + create_buffer(vr, image_size, + VK_BUFFER_USAGE_TRANSFER_SRC_BIT, + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, + &texture->staging_buffer, &texture->staging_memory); + + result = vkMapMemory(vr->dev, texture->staging_memory, 0, image_size, 0, &texture->staging_map); + check_vk_success(result, "vkMapMemory"); + + create_image(vr, buffer_width, buffer_height, pixel_format->vulkan_format, VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, + VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &texture->image, &texture->memory); + + create_image_view(vr->dev, texture->image, pixel_format->vulkan_format, &texture->image_view); +} + +static void +vulkan_renderer_flush_damage(struct weston_paint_node *pnode) +{ + struct weston_surface *es = pnode->surface; + struct weston_compositor *ec = es->compositor; + struct 
vulkan_renderer *vr = get_renderer(ec); + + struct weston_surface *surface = pnode->surface; + const struct weston_testsuite_quirks *quirks = + &surface->compositor->test_data.test_quirks; + struct weston_buffer *buffer = surface->buffer_ref.buffer; + struct vulkan_surface_state *vs = get_surface_state(surface); + struct vulkan_buffer_state *vb = vs->buffer; + pixman_box32_t *rectangles; + uint8_t *data; + int n; + + assert(buffer && vb); + + pixman_region32_union(&vb->texture_damage, + &vb->texture_damage, &surface->damage); + + if (pnode->plane != &pnode->output->primary_plane) { + return; + } + + /* This can happen if a SHM wl_buffer gets destroyed before we flush + * damage, because wayland-server just nukes the wl_shm_buffer from + * underneath us */ + if (!buffer->shm_buffer) { + return; + } + + if (!pixman_region32_not_empty(&vb->texture_damage) && + !vb->needs_full_upload) { + return; + } + + data = wl_shm_buffer_get_data(buffer->shm_buffer); + + if (vb->needs_full_upload || quirks->force_full_upload) { + wl_shm_buffer_begin_access(buffer->shm_buffer); + + for (int j = 0; j < vb->num_textures; j++) { + int hsub = pixel_format_hsub(buffer->pixel_format, j); + int vsub = pixel_format_vsub(buffer->pixel_format, j); + void *pixels = data + vb->offset[j]; + uint32_t buffer_width = buffer->width / hsub; + uint32_t buffer_height = buffer->height / vsub; + + update_texture_image_all(vr, &vb->texture, VK_IMAGE_LAYOUT_UNDEFINED, + buffer->pixel_format, buffer_width, buffer_height, + vb->pitch, pixels); + } + wl_shm_buffer_end_access(buffer->shm_buffer); + goto done; + } + + rectangles = pixman_region32_rectangles(&vb->texture_damage, &n); + wl_shm_buffer_begin_access(buffer->shm_buffer); + for (int i = 0; i < n; i++) { + pixman_box32_t r; + + r = weston_surface_to_buffer_rect(surface, rectangles[i]); + + for (int j = 0; j < vb->num_textures; j++) { + int hsub = pixel_format_hsub(buffer->pixel_format, j); + int vsub = pixel_format_vsub(buffer->pixel_format, j); + uint32_t xoff = r.x1 / hsub; + uint32_t yoff = r.y1 / vsub; + uint32_t xcopy = (r.x2 - r.x1) / hsub; + uint32_t ycopy = (r.y2 - r.y1) / vsub; + void *pixels = data + vb->offset[j]; + + update_texture_image(vr, &vb->texture, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + buffer->pixel_format, buffer->width / hsub, buffer->height / vsub, + vb->pitch, pixels, xoff, yoff, xcopy, ycopy); + } + } + wl_shm_buffer_end_access(buffer->shm_buffer); + +done: + pixman_region32_fini(&vb->texture_damage); + pixman_region32_init(&vb->texture_damage); + vb->needs_full_upload = false; + + weston_buffer_reference(&vs->buffer_ref, buffer, + BUFFER_WILL_NOT_BE_ACCESSED); + weston_buffer_release_reference(&vs->buffer_release_ref, NULL); +} + +static void +handle_buffer_destroy(struct wl_listener *listener, void *data) +{ + struct weston_buffer *buffer = data; + struct vulkan_buffer_state *vb = + container_of(listener, struct vulkan_buffer_state, destroy_listener); + + assert(vb == buffer->renderer_private); + buffer->renderer_private = NULL; + + destroy_buffer_state(vb); +} + +static void +vulkan_renderer_attach_shm(struct weston_surface *surface, struct weston_buffer *buffer) +{ + struct weston_compositor *ec = surface->compositor; + struct vulkan_renderer *vr = get_renderer(ec); + struct vulkan_surface_state *vs = get_surface_state(surface); + struct vulkan_buffer_state *vb; + struct weston_buffer *old_buffer = vs->buffer_ref.buffer; + unsigned int vulkan_format[3] = { 0, 0, 0 }; + enum vulkan_pipeline_texture_variant pipeline_variant; + uint32_t pitch; + 
int offset[3] = { 0, 0, 0 }; + unsigned int num_planes; + + int bpp = buffer->pixel_format->bpp; + + assert(pixel_format_get_plane_count(buffer->pixel_format) == 1); + num_planes = 1; + + if (pixel_format_is_opaque(buffer->pixel_format)) + pipeline_variant = PIPELINE_VARIANT_RGBX; + else + pipeline_variant = PIPELINE_VARIANT_RGBA; + + assert(bpp > 0 && !(bpp & 7)); + pitch = buffer->stride / (bpp / 8); + + vulkan_format[0] = buffer->pixel_format->vulkan_format; + vulkan_format[1] = buffer->pixel_format->vulkan_format; + vulkan_format[2] = buffer->pixel_format->vulkan_format; + /* If this surface previously had a SHM buffer, its vulkan_buffer_state will + * be speculatively retained. Check to see if we can reuse it rather + * than allocating a new one. */ + assert(!vs->buffer || + (old_buffer && old_buffer->type == WESTON_BUFFER_SHM)); + if (vs->buffer && + buffer->width == old_buffer->width && + buffer->height == old_buffer->height && + buffer->pixel_format == old_buffer->pixel_format) { + vs->buffer->pitch = pitch; + memcpy(vs->buffer->offset, offset, sizeof(offset)); + return; + } + + if (vs->buffer) + destroy_buffer_state(vs->buffer); + vs->buffer = NULL; + + vb = xzalloc(sizeof(*vb)); + vb->vr = vr; + + wl_list_init(&vb->destroy_listener.link); + pixman_region32_init(&vb->texture_damage); + + vb->pitch = pitch; + vb->pipeline_variant = pipeline_variant; + ARRAY_COPY(vb->offset, offset); + ARRAY_COPY(vb->vulkan_format, vulkan_format); + vb->needs_full_upload = true; + vb->num_textures = num_planes; + + vs->buffer = vb; + vs->surface = surface; + + for (uint32_t i = 0; i < num_planes; i++) { + int hsub = pixel_format_hsub(buffer->pixel_format, i); + int vsub = pixel_format_vsub(buffer->pixel_format, i); + uint32_t buffer_width = buffer->width / hsub; + uint32_t buffer_height = buffer->height / vsub; + + create_texture_image(vr, &vb->texture, buffer->pixel_format, buffer_width, buffer_height, pitch); + create_texture_sampler(vr, &vb->sampler_nearest, VK_FILTER_NEAREST); + create_texture_sampler(vr, &vb->sampler_linear, VK_FILTER_LINEAR); + } + create_vs_ubo_buffer(vr, &vb->vs_ubo_buffer, &vb->vs_ubo_memory, &vb->vs_ubo_map); + create_fs_ubo_buffer(vr, &vb->fs_ubo_buffer, &vb->fs_ubo_memory, &vb->fs_ubo_map); +} + +static void +create_texture_image_dummy(struct vulkan_renderer *vr) +{ + const struct pixel_format_info *dummy_pixel_format = pixel_format_get_info(DRM_FORMAT_ARGB8888); + const uint32_t dummy_pixels[1] = { 0 }; + create_texture_image(vr, &vr->dummy.image, dummy_pixel_format, 1, 1, 1); + create_texture_sampler(vr, &vr->dummy.sampler, VK_FILTER_NEAREST); + update_texture_image_all(vr, &vr->dummy.image, VK_IMAGE_LAYOUT_UNDEFINED, + dummy_pixel_format, 1, 1, 1, dummy_pixels); +} + +static struct vulkan_buffer_state * +ensure_renderer_vulkan_buffer_state(struct weston_surface *surface, + struct weston_buffer *buffer) +{ + struct vulkan_renderer *vr = get_renderer(surface->compositor); + struct vulkan_surface_state *vs = get_surface_state(surface); + struct vulkan_buffer_state *vb = buffer->renderer_private; + + if (vb) { + vs->buffer = vb; + return vb; + } + + vb = xzalloc(sizeof(*vb)); + vb->vr = vr; + pixman_region32_init(&vb->texture_damage); + buffer->renderer_private = vb; + vb->destroy_listener.notify = handle_buffer_destroy; + wl_signal_add(&buffer->destroy_signal, &vb->destroy_listener); + + vs->buffer = vb; + + create_vs_ubo_buffer(vr, &vb->vs_ubo_buffer, &vb->vs_ubo_memory, &vb->vs_ubo_map); + create_fs_ubo_buffer(vr, &vb->fs_ubo_buffer, &vb->fs_ubo_memory, 
&vb->fs_ubo_map);
+
+	return vb;
+}
+
+static void
+attach_direct_display_placeholder(struct weston_paint_node *pnode)
+{
+	struct weston_surface *surface = pnode->surface;
+	struct weston_buffer *buffer = surface->buffer_ref.buffer;
+	struct vulkan_buffer_state *vb;
+
+	vb = ensure_renderer_vulkan_buffer_state(surface, buffer);
+
+	/* uses the same color as the content-protection placeholder */
+	vb->color[0] = pnode->solid.r;
+	vb->color[1] = pnode->solid.g;
+	vb->color[2] = pnode->solid.b;
+	vb->color[3] = pnode->solid.a;
+
+	vb->pipeline_variant = PIPELINE_VARIANT_SOLID;
+}
+
+static void
+vulkan_renderer_attach_buffer(struct weston_surface *surface,
+			      struct weston_buffer *buffer)
+{
+	struct vulkan_surface_state *vs = get_surface_state(surface);
+	struct vulkan_buffer_state *vb;
+
+	assert(buffer->renderer_private);
+	vb = buffer->renderer_private;
+
+	if (pixel_format_is_opaque(buffer->pixel_format))
+		vb->pipeline_variant = PIPELINE_VARIANT_RGBX;
+	else
+		vb->pipeline_variant = PIPELINE_VARIANT_RGBA;
+
+	vs->buffer = vb;
+}
+
+static void
+vulkan_renderer_attach_solid(struct weston_surface *surface,
+			     struct weston_buffer *buffer)
+{
+	struct vulkan_buffer_state *vb;
+
+	vb = ensure_renderer_vulkan_buffer_state(surface, buffer);
+
+	vb->color[0] = buffer->solid.r;
+	vb->color[1] = buffer->solid.g;
+	vb->color[2] = buffer->solid.b;
+	vb->color[3] = buffer->solid.a;
+
+	vb->pipeline_variant = PIPELINE_VARIANT_SOLID;
+}
+
+static void
+vulkan_renderer_attach(struct weston_paint_node *pnode)
+{
+	struct weston_surface *es = pnode->surface;
+	struct weston_buffer *buffer = es->buffer_ref.buffer;
+	struct vulkan_surface_state *vs = get_surface_state(es);
+
+	if (vs->buffer_ref.buffer == buffer)
+		return;
+
+	/* SHM buffers are a little special in that they are allocated
+	 * per-surface rather than per-buffer, because we keep a shadow
+	 * copy of the SHM data in a Vulkan texture; for these we need to
+	 * destroy the buffer state when we're switching to another
+	 * buffer type. For all the others, the vulkan_buffer_state comes
+	 * from the weston_buffer itself, and will only be destroyed
+	 * along with it.
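+	 * (That teardown happens in handle_buffer_destroy(), hooked up
+	 * to the buffer's destroy signal.)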
 */
+	if (vs->buffer && vs->buffer_ref.buffer->type == WESTON_BUFFER_SHM) {
+		if (!buffer || buffer->type != WESTON_BUFFER_SHM) {
+			destroy_buffer_state(vs->buffer);
+			vs->buffer = NULL;
+		}
+	} else {
+		vs->buffer = NULL;
+	}
+
+	if (!buffer)
+		goto out;
+
+	if (pnode->is_direct) {
+		attach_direct_display_placeholder(pnode);
+		goto success;
+	}
+
+	switch (buffer->type) {
+	case WESTON_BUFFER_SHM:
+		vulkan_renderer_attach_shm(es, buffer);
+		break;
+	case WESTON_BUFFER_DMABUF:
+	case WESTON_BUFFER_RENDERER_OPAQUE:
+		vulkan_renderer_attach_buffer(es, buffer);
+		break;
+	case WESTON_BUFFER_SOLID:
+		vulkan_renderer_attach_solid(es, buffer);
+		break;
+	default:
+		weston_log("unhandled buffer type!\n");
+		weston_buffer_send_server_error(buffer,
+			"disconnecting due to unhandled buffer type");
+		goto out;
+	}
+
+success:
+	weston_buffer_reference(&vs->buffer_ref, buffer,
+				BUFFER_MAY_BE_ACCESSED);
+	weston_buffer_release_reference(&vs->buffer_release_ref,
+					es->buffer_release_ref.buffer_release);
+	return;
+
+out:
+	assert(!vs->buffer);
+	weston_buffer_reference(&vs->buffer_ref, NULL,
+				BUFFER_WILL_NOT_BE_ACCESSED);
+	weston_buffer_release_reference(&vs->buffer_release_ref, NULL);
+}
+
+static void
+vulkan_renderer_buffer_init(struct weston_compositor *ec,
+			    struct weston_buffer *buffer)
+{
+	struct vulkan_buffer_state *vb;
+
+	if (buffer->type != WESTON_BUFFER_DMABUF)
+		return;
+
+	/* Thanks to linux-dmabuf being totally independent of libweston,
+	 * the vulkan_buffer_state will only be set as userdata on the dmabuf,
+	 * not on the weston_buffer. Steal it away into the weston_buffer. */
+	assert(!buffer->renderer_private);
+	vb = linux_dmabuf_buffer_get_user_data(buffer->dmabuf);
+	assert(vb);
+	linux_dmabuf_buffer_set_user_data(buffer->dmabuf, NULL, NULL);
+	buffer->renderer_private = vb;
+	vb->destroy_listener.notify = handle_buffer_destroy;
+	wl_signal_add(&buffer->destroy_signal, &vb->destroy_listener);
+}
+
+static void
+vulkan_renderer_output_destroy_border(struct weston_output *output,
+				      enum weston_renderer_border_side side)
+{
+	struct vulkan_output_state *vo = get_output_state(output);
+	struct vulkan_renderer *vr = get_renderer(output->compositor);
+
+	// Waiting for the queue to go idle here is bad, but this only
+	// happens on resize/refocus and not on the drm-backend
+	VkResult result;
+	result = vkQueueWaitIdle(vr->queue);
+	check_vk_success(result, "vkQueueWaitIdle");
+
+	struct vulkan_border_image *border = &vo->borders[side];
+
+	destroy_buffer(vr->dev, border->fs_ubo_buffer, border->fs_ubo_memory);
+	destroy_buffer(vr->dev, border->vs_ubo_buffer, border->vs_ubo_memory);
+
+	destroy_sampler(vr->dev, border->sampler);
+	destroy_texture_image(vr, &border->texture);
+}
+
+static void
+vulkan_renderer_output_set_border(struct weston_output *output,
+				  enum weston_renderer_border_side side,
+				  int32_t width, int32_t height,
+				  int32_t tex_width, unsigned char *data)
+{
+	struct vulkan_output_state *vo = get_output_state(output);
+	struct vulkan_renderer *vr = get_renderer(output->compositor);
+
+	if (vo->borders[side].width != width ||
+	    vo->borders[side].height != height)
+		/* In this case, we have to blow everything and do a full
+		 * repaint.
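+		 * A size change moves the neighbouring border areas as
+		 * well, hence all four sides are flagged dirty.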
 */
+		vo->border_status |= BORDER_ALL_DIRTY;
+
+	struct vulkan_border_image *border = &vo->borders[side];
+
+	if (border->data != NULL)
+		vulkan_renderer_output_destroy_border(output, side);
+
+	if (data == NULL) {
+		width = 0;
+		height = 0;
+	}
+
+	border->width = width;
+	border->height = height;
+	border->tex_width = tex_width;
+	border->data = data;
+	vo->border_status |= 1 << side;
+
+	if (data == NULL)
+		return;
+
+	const uint32_t drm_format = DRM_FORMAT_ARGB8888;
+	const struct pixel_format_info *pixel_format = pixel_format_get_info(drm_format);
+	uint32_t pitch = tex_width;
+
+	create_texture_image(vr, &border->texture, pixel_format, tex_width, height, pitch);
+	create_texture_sampler(vr, &border->sampler, VK_FILTER_NEAREST);
+	update_texture_image_all(vr, &border->texture, VK_IMAGE_LAYOUT_UNDEFINED,
+				 pixel_format, tex_width, height, pitch, data);
+
+	create_vs_ubo_buffer(vr, &border->vs_ubo_buffer, &border->vs_ubo_memory, &border->vs_ubo_map);
+	create_fs_ubo_buffer(vr, &border->fs_ubo_buffer, &border->fs_ubo_memory, &border->fs_ubo_map);
+}
+
+static bool
+vulkan_renderer_resize_output(struct weston_output *output,
+			      const struct weston_size *fb_size,
+			      const struct weston_geometry *area)
+{
+	struct vulkan_output_state *vo = get_output_state(output);
+	bool ret = true;
+
+	assert(vo->output_type == VULKAN_OUTPUT_SWAPCHAIN ||
+	       vo->output_type == VULKAN_OUTPUT_HEADLESS);
+
+	check_compositing_area(fb_size, area);
+
+	vo->fb_size = *fb_size;
+	vo->area = *area;
+
+	weston_output_update_capture_info(output,
+					  WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER,
+					  area->width, area->height,
+					  output->compositor->read_format);
+
+	weston_output_update_capture_info(output,
+					  WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER,
+					  fb_size->width, fb_size->height,
+					  output->compositor->read_format);
+
+	if (!vulkan_renderer_discard_renderbuffers(vo, false))
+		return false;
+
+	if (vo->output_type == VULKAN_OUTPUT_SWAPCHAIN)
+		vulkan_renderer_recreate_swapchain(output, *fb_size);
+
+	return ret;
+}
+
+static bool
+bind_image_gbm_bo(struct vulkan_renderer *vr, VkImage image,
+		  struct gbm_bo *bo, VkDeviceMemory *memory)
+{
+	VkResult result;
+
+	int gbm_bo_fd = gbm_bo_get_fd(bo);
+	if (gbm_bo_fd < 0) {
+		weston_log("Failed to get fd for gbm_bo\n");
+		return false;
+	}
+
+	if (!vr->has_external_memory_dma_buf)
+		abort();
+
+	VkMemoryFdPropertiesKHR fd_props = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR,
+	};
+	result = vr->get_memory_fd_properties(vr->dev,
+					      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+					      gbm_bo_fd, &fd_props);
+	check_vk_success(result, "vkGetMemoryFdPropertiesKHR");
+
+	VkImageMemoryRequirementsInfo2 mem_reqs_info = {
+		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
+		.image = image,
+	};
+	VkMemoryRequirements2 mem_reqs = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+	};
+	vr->get_image_memory_requirements2(vr->dev, &mem_reqs_info, &mem_reqs);
+
+	const uint32_t memory_type_bits = fd_props.memoryTypeBits &
+		mem_reqs.memoryRequirements.memoryTypeBits;
+	if (!memory_type_bits) {
+		weston_log("No valid memory type\n");
+		close(gbm_bo_fd);
+		return false;
+	}
+
+	VkMemoryAllocateInfo memory_allocate_info = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+		.allocationSize = mem_reqs.memoryRequirements.size,
+		.memoryTypeIndex = ffs(memory_type_bits) - 1,
+	};
+
+	VkImportMemoryFdInfoKHR memory_fd_info = {
+		.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+		.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+		.fd = gbm_bo_fd,
}; + pnext(&memory_allocate_info, &memory_fd_info); + + VkMemoryDedicatedAllocateInfo memory_dedicated_info = { + .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, + .image = image, + }; + pnext(&memory_allocate_info, &memory_dedicated_info); + + result = vkAllocateMemory(vr->dev, &memory_allocate_info, NULL, memory); + check_vk_success(result, "vkAllocateMemory"); + + result = vkBindImageMemory(vr->dev, image, *memory, 0); + check_vk_success(result, "vkBindImageMemory"); + + return true; +} + +static void +create_dmabuf_image(struct vulkan_renderer *vr, + const struct dmabuf_attributes *attributes, + VkFormat format, VkImageUsageFlags usage, VkImage *image) +{ + VkResult result; + int width = attributes->width; + int height = attributes->height; + uint64_t modifier = attributes->modifier; + int n_planes = attributes->n_planes; + + VkImageCreateInfo image_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, + .imageType = VK_IMAGE_TYPE_2D, + .format = format, + .extent.width = width, + .extent.height = height, + .extent.depth = 1, + .mipLevels = 1, + .arrayLayers = 1, + .samples = VK_SAMPLE_COUNT_1_BIT, + .usage = usage, + .sharingMode = VK_SHARING_MODE_EXCLUSIVE, + .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, + }; + + VkImageDrmFormatModifierExplicitCreateInfoEXT mod_create_info = { + .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT, + }; + VkSubresourceLayout plane_layouts[n_planes]; + if (vr->has_image_drm_format_modifier) { + image_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT; + + memset(plane_layouts, 0, sizeof(plane_layouts)); + for (int i = 0; i < n_planes; i++) { + plane_layouts[i].offset = attributes->offset[i]; + plane_layouts[i].size = 0; + plane_layouts[i].rowPitch = attributes->stride[i]; + } + + mod_create_info.drmFormatModifier = modifier; + mod_create_info.drmFormatModifierPlaneCount = n_planes; + mod_create_info.pPlaneLayouts = plane_layouts; + pnext(&image_info, &mod_create_info); + } else { + image_info.tiling = VK_IMAGE_TILING_LINEAR; + } + + VkExternalMemoryImageCreateInfo external_create_info = { + .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + .handleTypes = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT, + }; + pnext(&image_info, &external_create_info); + + result = vkCreateImage(vr->dev, &image_info, NULL, image); + check_vk_success(result, "vkCreateImage"); +} + +static void +create_image_from_gbm_bo(struct vulkan_renderer *vr, struct gbm_bo *bo, VkFormat format, VkImage *image) +{ + struct dmabuf_attributes attributes; + attributes.width = gbm_bo_get_width(bo); + attributes.height = gbm_bo_get_height(bo); + attributes.modifier = gbm_bo_get_modifier(bo); + attributes.n_planes = gbm_bo_get_plane_count(bo); + + for (int i = 0; i < attributes.n_planes; i++) { + attributes.offset[i] = gbm_bo_get_offset(bo, i); + attributes.stride[i] = gbm_bo_get_stride_for_plane(bo, i); + } + + create_dmabuf_image(vr, &attributes, format, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, image); +} + +static int +vulkan_renderer_output_window_create_gbm(struct weston_output *output, + const struct vulkan_renderer_output_options *options) +{ + struct weston_compositor *ec = output->compositor; + struct vulkan_output_state *vo = get_output_state(output); + struct vulkan_renderer *vr = get_renderer(ec); + const struct pixel_format_info *pixel_format = vo->pixel_format; + const VkFormat format = pixel_format->vulkan_format; + + vo->image_count = options->num_gbm_bos; + + for (uint32_t i = 0; i < 
vo->image_count; i++) { + struct vulkan_renderer_image *im = &vo->images[i]; + int ret; + + im->bo = options->gbm_bos[i]; + + create_image_from_gbm_bo(vr, im->bo, format, &im->image); + + ret = bind_image_gbm_bo(vr, im->image, im->bo, &im->memory); + assert(ret); + + create_image_view(vr->dev, im->image, format, &im->image_view); + create_framebuffer(vr->dev, vo->renderpass, im->image_view, + options->fb_size.width, options->fb_size.height, &im->framebuffer); + + im->renderbuffer = xzalloc(sizeof(*im->renderbuffer)); + vulkan_renderbuffer_init(im->renderbuffer, NULL, NULL, NULL, output); + } + + return 0; +} + +static int +vulkan_renderer_output_window_create_swapchain(struct weston_output *output, + const struct vulkan_renderer_output_options *options) +{ + struct weston_compositor *ec = output->compositor; + struct vulkan_renderer *vr = get_renderer(ec); + struct vulkan_output_state *vo = get_output_state(output); + VkResult result; + VkBool32 supported; + + if (options->wayland_display && options->wayland_surface) { + assert(vr->has_wayland_surface); + + supported = vr->get_wayland_presentation_support(vr->phys_dev, 0, options->wayland_display); + assert(supported); + + const VkWaylandSurfaceCreateInfoKHR wayland_surface_create_info = { + .sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR, + .display = options->wayland_display, + .surface = options->wayland_surface, + }; + result = vr->create_wayland_surface(vr->inst, &wayland_surface_create_info, NULL, + &vo->swapchain.surface); + check_vk_success(result, "vkCreateWaylandSurfaceKHR"); + } else if (options->xcb_connection && options->xcb_window) { + assert(vr->has_xcb_surface); + + supported = vr->get_xcb_presentation_support(vr->phys_dev, 0, options->xcb_connection, options->xcb_visualid); + assert(supported); + + const VkXcbSurfaceCreateInfoKHR xcb_surface_create_info = { + .sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR, + .connection = options->xcb_connection, + .window = options->xcb_window, + }; + result = vr->create_xcb_surface(vr->inst, &xcb_surface_create_info, NULL, + &vo->swapchain.surface); + check_vk_success(result, "vkCreateXcbSurfaceKHR"); + } else { + assert(0); + } + + vkGetPhysicalDeviceSurfaceSupportKHR(vr->phys_dev, 0, vo->swapchain.surface, &supported); + assert(supported); + + uint32_t present_mode_count; + vkGetPhysicalDeviceSurfacePresentModesKHR(vr->phys_dev, vo->swapchain.surface, + &present_mode_count, NULL); + VkPresentModeKHR present_modes[present_mode_count]; + vkGetPhysicalDeviceSurfacePresentModesKHR(vr->phys_dev, vo->swapchain.surface, + &present_mode_count, present_modes); + + vo->swapchain.present_mode = VK_PRESENT_MODE_FIFO_KHR; + assert(vo->swapchain.present_mode >= 0 && vo->swapchain.present_mode < 4); + supported = false; + for (size_t i = 0; i < present_mode_count; ++i) { + if (present_modes[i] == vo->swapchain.present_mode) { + supported = true; + break; + } + } + + if (!supported) { + weston_log("Present mode %d unsupported\n", vo->swapchain.present_mode); + abort(); + } + + vulkan_renderer_create_swapchain(output, options->fb_size); + + return 0; +} + +static int +vulkan_renderer_create_output_state(struct weston_output *output, + const struct weston_size *fb_size, + const struct weston_geometry *area) +{ + struct vulkan_output_state *vo; + + vo = xzalloc(sizeof(*vo)); + + wl_list_init(&vo->renderbuffer_list); + + output->renderer_state = vo; + + check_compositing_area(fb_size, area); + + vo->fb_size = *fb_size; + vo->area = *area; + + return 0; +} + +static int 
+vulkan_renderer_create_output_frames(struct weston_output *output, + const struct weston_size *fb_size, + const struct weston_geometry *area, + uint32_t num_frames) +{ + struct weston_compositor *ec = output->compositor; + struct vulkan_renderer *vr = get_renderer(ec); + struct vulkan_output_state *vo = get_output_state(output); + + vo->num_frames = num_frames; + + for (unsigned int i = 0; i < vo->num_frames; ++i) { + struct vulkan_renderer_frame *fr = &vo->frames[i]; + VkResult result; + + const VkCommandBufferAllocateInfo cmd_alloc_info = { + .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + .commandPool = vr->cmd_pool, + .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY, + .commandBufferCount = 1, + }; + result = vkAllocateCommandBuffers(vr->dev, &cmd_alloc_info, &fr->cmd_buffer); + check_vk_success(result, "vkAllocateCommandBuffers"); + + VkSemaphoreCreateInfo semaphore_info = { + .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, + }; + + result = vkCreateSemaphore(vr->dev, &semaphore_info, NULL, &fr->image_acquired); + check_vk_success(result, "vkCreateSemaphore image_acquired"); + + VkExportSemaphoreCreateInfo export_info = { + .sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + .handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + }; + if (vr->semaphore_import_export && vo->output_type != VULKAN_OUTPUT_SWAPCHAIN) + pnext(&semaphore_info, &export_info); + + result = vkCreateSemaphore(vr->dev, &semaphore_info, NULL, &fr->render_done); + check_vk_success(result, "vkCreateSemaphore render_done"); + + const VkFenceCreateInfo fence_info = { + .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + .flags = VK_FENCE_CREATE_SIGNALED_BIT, + }; + result = vkCreateFence(vr->dev, &fence_info, NULL, &fr->fence); + check_vk_success(result, "vkCreateFence"); + + wl_list_init(&fr->dspool_list); + wl_list_init(&fr->vbuf_list); + wl_list_init(&fr->acquire_fence_list); + } + + return 0; +} + +static int +create_renderpass(struct weston_output *output, VkFormat format, VkImageLayout attachment_layout) +{ + struct weston_compositor *ec = output->compositor; + struct vulkan_renderer *vr = get_renderer(ec); + struct vulkan_output_state *vo = get_output_state(output); + VkResult result; + + const VkAttachmentDescription attachment_description = { + .format = format, + .samples = VK_SAMPLE_COUNT_1_BIT, + .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD, + .storeOp = VK_ATTACHMENT_STORE_OP_STORE, + .initialLayout = attachment_layout, + .finalLayout = attachment_layout, + }; + const VkAttachmentReference attachment_reference = { + .attachment = 0, + .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + }; + const VkSubpassDescription subpass_description = { + .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS, + .colorAttachmentCount = 1, + .pColorAttachments = &attachment_reference, + }; + const VkRenderPassCreateInfo renderpass_create_info = { + .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, + .attachmentCount = 1, + .pAttachments = &attachment_description, + .subpassCount = 1, + .pSubpasses = &subpass_description, + }; + + result = vkCreateRenderPass(vr->dev, &renderpass_create_info, NULL, &vo->renderpass); + check_vk_success(result, "vkCreateRenderPass"); + + return 0; +} + +static int +vulkan_renderer_output_window_create(struct weston_output *output, + const struct vulkan_renderer_output_options *options) +{ + int ret; + const struct weston_size *fb_size = &options->fb_size; + const struct weston_geometry *area = &options->area; + const struct pixel_format_info *pixel_format = options->formats[0]; 
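+	/* Note: only the first entry of options->formats is considered
+	 * here. */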
+
+	ret = vulkan_renderer_create_output_state(output, fb_size, area);
+	assert(ret == 0);
+
+	struct vulkan_output_state *vo = get_output_state(output);
+	if ((options->wayland_display && options->wayland_surface) ||
+	    (options->xcb_connection && options->xcb_window)) {
+		vo->output_type = VULKAN_OUTPUT_SWAPCHAIN;
+	} else {
+		vo->output_type = VULKAN_OUTPUT_DRM;
+	}
+	vo->pixel_format = pixel_format;
+
+	if (vo->output_type == VULKAN_OUTPUT_SWAPCHAIN) {
+		create_renderpass(output, pixel_format->vulkan_format, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
+		vulkan_renderer_output_window_create_swapchain(output, options);
+	} else {
+		create_renderpass(output, pixel_format->vulkan_format, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+		vulkan_renderer_output_window_create_gbm(output, options);
+	}
+
+	weston_output_update_capture_info(output,
+					  WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER,
+					  area->width, area->height,
+					  output->compositor->read_format);
+
+	weston_output_update_capture_info(output,
+					  WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER,
+					  fb_size->width, fb_size->height,
+					  output->compositor->read_format);
+
+	vulkan_renderer_create_output_frames(output, fb_size, area, MAX_CONCURRENT_FRAMES);
+
+	return 0;
+}
+
+static int
+vulkan_renderer_output_fbo_create(struct weston_output *output,
+				  const struct vulkan_renderer_fbo_options *options)
+{
+	/* TODO: a format is needed here because a renderpass object is
+	 * currently created per output. Renderpass creation should probably
+	 * be independent of the output (at least for renderbuffers) and be
+	 * moved to a renderpass allocator, to avoid creating a large number
+	 * of renderpass objects and exploding the number of pipelines. */
+	const struct pixel_format_info *pixel_format = pixel_format_get_info(DRM_FORMAT_XRGB8888);
+	const VkFormat format = pixel_format->vulkan_format;
+	int ret;
+	const struct weston_size *fb_size = &options->fb_size;
+	const struct weston_geometry *area = &options->area;
+
+	ret = vulkan_renderer_create_output_state(output, &options->fb_size, &options->area);
+	assert(ret == 0);
+
+	struct vulkan_output_state *vo = get_output_state(output);
+	vo->output_type = VULKAN_OUTPUT_HEADLESS;
+	vo->pixel_format = pixel_format;
+
+	create_renderpass(output, format, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+
+	weston_output_update_capture_info(output,
+					  WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER,
+					  area->width, area->height,
+					  output->compositor->read_format);
+
+	weston_output_update_capture_info(output,
+					  WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER,
+					  fb_size->width, fb_size->height,
+					  output->compositor->read_format);
+
+	vulkan_renderer_create_output_frames(output, &options->fb_size, &options->area, 1);
+
+	return 0;
+}
+
+static void
+vulkan_renderer_destroy(struct weston_compositor *ec)
+{
+	struct vulkan_renderer *vr = get_renderer(ec);
+
+	wl_signal_emit(&vr->destroy_signal, vr);
+
+	VkResult result;
+	result = vkDeviceWaitIdle(vr->dev);
+	check_vk_success(result, "vkDeviceWaitIdle");
+
+	vulkan_renderer_pipeline_list_destroy(vr);
+
+	destroy_sampler(vr->dev, vr->dummy.sampler);
+	destroy_texture_image(vr, &vr->dummy.image);
+
+	vkDestroyCommandPool(vr->dev, vr->cmd_pool, NULL);
+
+	vkDestroyDevice(vr->dev, NULL);
+
+	vkDestroyInstance(vr->inst, NULL);
+
+	if (vr->drm_fd > 0)
+		close(vr->drm_fd);
+
+	weston_drm_format_array_fini(&vr->supported_formats);
+
+	free(vr);
+	ec->renderer = NULL;
+}
+
+static void
+log_vulkan_phys_dev(VkPhysicalDevice phys_dev)
+{
+	VkPhysicalDeviceProperties props;
+
+	
vkGetPhysicalDeviceProperties(phys_dev, &props); + + uint32_t api_major = VK_VERSION_MAJOR(props.apiVersion); + uint32_t api_minor = VK_VERSION_MINOR(props.apiVersion); + uint32_t api_patch = VK_VERSION_PATCH(props.apiVersion); + + uint32_t driver_major = VK_VERSION_MAJOR(props.driverVersion); + uint32_t driver_minor = VK_VERSION_MINOR(props.driverVersion); + uint32_t driver_patch = VK_VERSION_PATCH(props.driverVersion); + + const char *dev_type = "unknown"; + switch (props.deviceType) { + case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: + dev_type = "integrated"; + break; + case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: + dev_type = "discrete"; + break; + case VK_PHYSICAL_DEVICE_TYPE_CPU: + dev_type = "cpu"; + break; + case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: + dev_type = "vgpu"; + break; + default: + break; + } + + weston_log("Vulkan device: '%s'\n", props.deviceName); + weston_log(" Device type: '%s'\n", dev_type); + weston_log(" Supported API version: %u.%u.%u\n", api_major, api_minor, api_patch); + weston_log(" Driver version: %u.%u.%u\n", driver_major, driver_minor, driver_patch); +} + +static void +vulkan_renderer_choose_physical_device(struct vulkan_renderer *vr) +{ + uint32_t n_phys_devs; + VkPhysicalDevice *phys_devs = NULL; + VkResult result; + + result = vkEnumeratePhysicalDevices(vr->inst, &n_phys_devs, NULL); + check_vk_success(result, "vkEnumeratePhysicalDevices"); + assert(n_phys_devs != 0); + phys_devs = xmalloc(n_phys_devs * sizeof(*phys_devs)); + result = vkEnumeratePhysicalDevices(vr->inst, &n_phys_devs, phys_devs); + check_vk_success(result, "vkEnumeratePhysicalDevices"); + + VkPhysicalDevice physical_device = VK_NULL_HANDLE; + /* Pick the first one */ + for (uint32_t i = 0; i < n_phys_devs; ++i) { + VkPhysicalDeviceProperties props; + + vkGetPhysicalDeviceProperties(phys_devs[i], &props); + + if (physical_device == VK_NULL_HANDLE) { + physical_device = phys_devs[i]; + break; + } + } + + if (physical_device == VK_NULL_HANDLE) { + weston_log("Unable to find a suitable physical device\n"); + abort(); + } + + vr->phys_dev = physical_device; + + free(phys_devs); + + log_vulkan_phys_dev(physical_device); +} + +static void +vulkan_renderer_choose_queue_family(struct vulkan_renderer *vr) +{ + uint32_t n_props = 0; + VkQueueFamilyProperties *props = NULL; + + vkGetPhysicalDeviceQueueFamilyProperties(vr->phys_dev, &n_props, NULL); + props = xmalloc(n_props * sizeof(*props)); + vkGetPhysicalDeviceQueueFamilyProperties(vr->phys_dev, &n_props, props); + + uint32_t family_idx = UINT32_MAX; + /* Pick the first graphics queue */ + for (uint32_t i = 0; i < n_props; ++i) { + if ((props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) && props[i].queueCount > 0) { + family_idx = i; + break; + } + } + + if (family_idx == UINT32_MAX) { + weston_log("Unable to find graphics queue\n"); + abort(); + } + + vr->queue_family = family_idx; + + free(props); +} + +static weston_renderbuffer_t +vulkan_renderer_create_renderbuffer(struct weston_output *output, + const struct pixel_format_info *pixel_format, + void *buffer, int stride, + weston_renderbuffer_discarded_func discarded_cb, + void *user_data) +{ + struct weston_compositor *ec = output->compositor; + struct vulkan_output_state *vo = get_output_state(output); + struct vulkan_renderer *vr = get_renderer(ec); + + struct vulkan_renderbuffer *renderbuffer; + + const struct weston_size *fb_size = &vo->fb_size; + VkFormat format = pixel_format->vulkan_format; + + renderbuffer = xzalloc(sizeof(*renderbuffer)); + renderbuffer->buffer = buffer; + 
renderbuffer->stride = stride;
+
+	struct vulkan_renderer_image *im = xzalloc(sizeof(*im));
+
+	/* The command buffer here is needed only for the image layout
+	 * transition. */
+	VkCommandBuffer cmd_buffer;
+	vulkan_renderer_cmd_begin(vr, &cmd_buffer);
+
+	create_image(vr, fb_size->width, fb_size->height, format, VK_IMAGE_TILING_OPTIMAL,
+		     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
+		     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &im->image, &im->memory);
+
+	create_image_view(vr->dev, im->image, format, &im->image_view);
+
+	create_framebuffer(vr->dev, vo->renderpass, im->image_view,
+			   fb_size->width, fb_size->height, &im->framebuffer);
+
+	transition_image_layout(cmd_buffer, im->image,
+				VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+				VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+				0, VK_ACCESS_TRANSFER_WRITE_BIT);
+
+	/* Blocking on the queue here is wasteful, but it only happens at
+	 * renderbuffer creation. */
+	vulkan_renderer_cmd_end_wait(vr, &cmd_buffer);
+
+	vulkan_renderbuffer_init(renderbuffer, im, discarded_cb, user_data, output);
+
+	return (weston_renderbuffer_t) renderbuffer;
+}
+
+static int
+import_dmabuf(struct vulkan_renderer *vr,
+	      struct vulkan_buffer_state *vb,
+	      const struct dmabuf_attributes *attributes)
+{
+	VkResult result;
+	int fd0 = attributes->fd[0];
+
+	const struct pixel_format_info *pixel_format = pixel_format_get_info(attributes->format);
+	assert(pixel_format);
+
+	VkFormat format = pixel_format->vulkan_format;
+
+	create_dmabuf_image(vr, attributes, format, VK_IMAGE_USAGE_SAMPLED_BIT, &vb->texture.image);
+
+	VkMemoryFdPropertiesKHR fd_props = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR,
+	};
+	result = vr->get_memory_fd_properties(vr->dev,
+					      VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+					      fd0, &fd_props);
+	check_vk_success(result, "vkGetMemoryFdPropertiesKHR");
+
+	VkImageMemoryRequirementsInfo2 mem_reqs_info = {
+		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
+		.image = vb->texture.image,
+	};
+	VkMemoryRequirements2 mem_reqs = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+	};
+	vr->get_image_memory_requirements2(vr->dev, &mem_reqs_info, &mem_reqs);
+
+	const uint32_t memory_type_bits = fd_props.memoryTypeBits &
+		mem_reqs.memoryRequirements.memoryTypeBits;
+	if (!memory_type_bits) {
+		weston_log("No valid memory type\n");
+		return -1;
+	}
+
+	int dfd = fcntl(fd0, F_DUPFD_CLOEXEC, 0);
+	if (dfd < 0) {
+		weston_log("fcntl(F_DUPFD_CLOEXEC) failed\n");
+		abort();
+	}
+
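+	/* Pick the lowest compatible memory type: memory_type_bits is the
+	 * intersection of the dmabuf fd's and the image's supported types,
+	 * and ffs() returns the 1-based index of the first set bit. */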
+	VkMemoryAllocateInfo memory_allocate_info = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+		.allocationSize = mem_reqs.memoryRequirements.size,
+		.memoryTypeIndex = ffs(memory_type_bits) - 1,
+	};
+
+	VkImportMemoryFdInfoKHR memory_fd_info = {
+		.sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR,
+		.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT,
+		.fd = dfd,
+	};
+	pnext(&memory_allocate_info, &memory_fd_info);
+
+	VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
+		.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+		.image = vb->texture.image,
+	};
+	pnext(&memory_allocate_info, &memory_dedicated_info);
+
+	result = vkAllocateMemory(vr->dev, &memory_allocate_info, NULL, &vb->texture.memory);
+	check_vk_success(result, "vkAllocateMemory");
+
+	result = vkBindImageMemory(vr->dev, vb->texture.image, vb->texture.memory, 0);
+	check_vk_success(result, "vkBindImageMemory");
+
+	create_texture_sampler(vr, &vb->sampler_linear, VK_FILTER_LINEAR);
+	create_texture_sampler(vr, &vb->sampler_nearest, VK_FILTER_NEAREST);
+	create_image_view(vr->dev, vb->texture.image, format, &vb->texture.image_view);
+
+	return 0;
+}
+
+static void
+vulkan_renderer_destroy_dmabuf(struct linux_dmabuf_buffer *dmabuf)
+{
+	struct vulkan_buffer_state *vb =
+		linux_dmabuf_buffer_get_user_data(dmabuf);
+
+	linux_dmabuf_buffer_set_user_data(dmabuf, NULL, NULL);
+	destroy_buffer_state(vb);
+}
+
+static bool
+vulkan_renderer_import_dmabuf(struct weston_compositor *ec,
+			      struct linux_dmabuf_buffer *dmabuf)
+{
+	struct vulkan_renderer *vr = get_renderer(ec);
+	struct vulkan_buffer_state *vb;
+
+	/* reject all flags we do not recognize or handle */
+	if (dmabuf->attributes.flags & ~ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT)
+		return false;
+
+	if (!pixel_format_get_info(dmabuf->attributes.format))
+		return false;
+
+	vb = xzalloc(sizeof(*vb));
+
+	vb->vr = vr;
+	pixman_region32_init(&vb->texture_damage);
+	wl_list_init(&vb->destroy_listener.link);
+
+	if (import_dmabuf(vr, vb, &dmabuf->attributes) < 0) {
+		destroy_buffer_state(vb);
+		return false;
+	}
+
+	assert(vb->num_textures == 0);
+	vb->num_textures = 1;
+
+	create_vs_ubo_buffer(vr, &vb->vs_ubo_buffer, &vb->vs_ubo_memory, &vb->vs_ubo_map);
+	create_fs_ubo_buffer(vr, &vb->fs_ubo_buffer, &vb->fs_ubo_memory, &vb->fs_ubo_map);
+
+	linux_dmabuf_buffer_set_user_data(dmabuf, vb,
+					  vulkan_renderer_destroy_dmabuf);
+
+	return true;
+}
+
+static const struct weston_drm_format_array *
+vulkan_renderer_get_supported_formats(struct weston_compositor *ec)
+{
+	struct vulkan_renderer *vr = get_renderer(ec);
+
+	return &vr->supported_formats;
+}
+
+static int
+populate_supported_formats(struct weston_compositor *ec,
+			   struct weston_drm_format_array *supported_formats)
+{
+	struct vulkan_renderer *vr = get_renderer(ec);
+
+	for (unsigned int i = 0; i < pixel_format_get_info_count(); i++) {
+		const struct pixel_format_info *format = pixel_format_get_info_by_index(i);
+
+		if (format->vulkan_format == VK_FORMAT_UNDEFINED)
+			continue;
+
+		vulkan_renderer_query_dmabuf_format(vr, format);
+	}
+
+	return 0;
+}
+
+static int
+create_default_dmabuf_feedback(struct weston_compositor *ec,
+			       struct vulkan_renderer *vr)
+{
+	struct stat dev_stat;
+	struct weston_dmabuf_feedback_tranche *tranche;
+	uint32_t flags = 0;
+
+	if (fstat(vr->drm_fd, &dev_stat) != 0) {
+		weston_log("%s: device disappeared, so we can't recover\n", __func__);
+		abort();
+	}
+
+	ec->default_dmabuf_feedback =
+		weston_dmabuf_feedback_create(dev_stat.st_rdev);
+	if (!ec->default_dmabuf_feedback)
+		return -1;
+
+	tranche =
+		weston_dmabuf_feedback_tranche_create(ec->default_dmabuf_feedback,
+						      ec->dmabuf_feedback_format_table,
+						      dev_stat.st_rdev, flags,
+						      RENDERER_PREF);
+	if (!tranche) {
+		weston_dmabuf_feedback_destroy(ec->default_dmabuf_feedback);
+		ec->default_dmabuf_feedback = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+open_drm_device_node(struct vulkan_renderer *vr)
+{
+	assert(vr->has_physical_device_drm);
+
+	VkPhysicalDeviceProperties2 phys_dev_props = {
+		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
+	};
+
+	VkPhysicalDeviceDrmPropertiesEXT drm_props = {
+		.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT,
+	};
+	pnext(&phys_dev_props, &drm_props);
+
+	vkGetPhysicalDeviceProperties2(vr->phys_dev, &phys_dev_props);
+
+	dev_t devid;
+	if (drm_props.hasRender) {
+		devid = makedev(drm_props.renderMajor, drm_props.renderMinor);
+	} else if (drm_props.hasPrimary) {
+		devid = makedev(drm_props.primaryMajor, drm_props.primaryMinor);
+	} else {
+		weston_log("Physical device is missing both render and primary nodes\n");
+		return -1;
+	}
+
+	drmDevice *device = NULL;
+	if (drmGetDeviceFromDevId(devid, 0, &device) != 0) {
+		weston_log("drmGetDeviceFromDevId failed\n");
+		return -1;
+	}
+
+	const char *name = NULL;
+	if (device->available_nodes & (1 << DRM_NODE_RENDER)) {
+		name = device->nodes[DRM_NODE_RENDER];
+	} else {
+		assert(device->available_nodes & (1 << DRM_NODE_PRIMARY));
+		name = device->nodes[DRM_NODE_PRIMARY];
+		weston_log("DRM device %s has no render node, falling back to primary node\n", name);
+	}
+
+	int drm_fd = open(name, O_RDWR | O_NONBLOCK | O_CLOEXEC);
+	if (drm_fd < 0) {
+		weston_log("Failed to open DRM node %s\n", name);
+	}
+	drmFreeDevice(&device);
+	return drm_fd;
+}
+
+static bool
+check_extension(const VkExtensionProperties *avail, uint32_t avail_len, const char *name)
+{
+	for (size_t i = 0; i < avail_len; i++) {
+		if (strcmp(avail[i].extensionName, name) == 0) {
+			return true;
+		}
+	}
+	return false;
+}
+
+static void
+load_instance_proc(struct vulkan_renderer *vr, const char *func, void *proc_ptr)
+{
+	void *proc = (void *)vkGetInstanceProcAddr(vr->inst, func);
+	if (proc == NULL) {
+		weston_log("Failed to vkGetInstanceProcAddr %s\n", func);
+		abort();
+	}
+
+	*(void **)proc_ptr = proc;
+}
+
+static void
+vulkan_renderer_setup_instance_extensions(struct vulkan_renderer *vr)
+{
+	if (vr->has_wayland_surface) {
+		load_instance_proc(vr, "vkCreateWaylandSurfaceKHR", &vr->create_wayland_surface);
+		load_instance_proc(vr, "vkGetPhysicalDeviceWaylandPresentationSupportKHR", &vr->get_wayland_presentation_support);
+	}
+
+	if (vr->has_xcb_surface) {
+		load_instance_proc(vr, "vkCreateXcbSurfaceKHR", &vr->create_xcb_surface);
+		load_instance_proc(vr, "vkGetPhysicalDeviceXcbPresentationSupportKHR", &vr->get_xcb_presentation_support);
+	}
+}
+
+static void
+vulkan_renderer_create_instance(struct vulkan_renderer *vr)
+{
+	uint32_t num_avail_inst_extns;
+	uint32_t num_inst_extns = 0;
+	VkResult result;
+
+	result = vkEnumerateInstanceExtensionProperties(NULL, &num_avail_inst_extns, NULL);
+	check_vk_success(result, "vkEnumerateInstanceExtensionProperties");
+	assert(num_avail_inst_extns > 0);
+	VkExtensionProperties *avail_inst_extns = xmalloc(num_avail_inst_extns * sizeof(VkExtensionProperties));
+	result = vkEnumerateInstanceExtensionProperties(NULL, &num_avail_inst_extns, avail_inst_extns);
+	check_vk_success(result, "vkEnumerateInstanceExtensionProperties");
+
+	const char **inst_extns = xmalloc(num_avail_inst_extns * sizeof(*inst_extns));
+	inst_extns[num_inst_extns++] = VK_EXT_DEBUG_UTILS_EXTENSION_NAME;
+	inst_extns[num_inst_extns++] = VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME;
+	inst_extns[num_inst_extns++] = VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME;
+	inst_extns[num_inst_extns++] = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME;
+
+	if (check_extension(avail_inst_extns, num_avail_inst_extns, VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME)) {
+		inst_extns[num_inst_extns++] = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME;
+		vr->has_wayland_surface = true;
+	}
+
+	if (check_extension(avail_inst_extns, num_avail_inst_extns, VK_KHR_XCB_SURFACE_EXTENSION_NAME)) {
+		inst_extns[num_inst_extns++] = VK_KHR_XCB_SURFACE_EXTENSION_NAME;
+		vr->has_xcb_surface = true;
+	}
+
+	if (vr->has_wayland_surface || vr->has_xcb_surface)
+		inst_extns[num_inst_extns++] = VK_KHR_SURFACE_EXTENSION_NAME;
+
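+	/* Sanity check: every extension requested above must be present in
+	 * the driver's list; abort on the first missing one. */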
+	for (uint32_t i = 0; i < num_inst_extns; i++) {
+		uint32_t j;
+		for (j = 0; j < num_avail_inst_extns; j++) {
+			if (strcmp(inst_extns[i], avail_inst_extns[j].extensionName) == 0) {
+				break;
+			}
+		}
+		if (j == num_avail_inst_extns) {
+			weston_log("Unsupported instance extension: %s\n", inst_extns[i]);
+			abort();
+		}
+	}
+
+	const VkApplicationInfo app_info = {
+		.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+		.pApplicationName = "weston",
+		.apiVersion = VK_MAKE_VERSION(1, 0, 0),
+	};
+
+	const VkInstanceCreateInfo inst_create_info = {
+		.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+		.pApplicationInfo = &app_info,
+		.ppEnabledExtensionNames = inst_extns,
+		.enabledExtensionCount = num_inst_extns,
+	};
+
+	result = vkCreateInstance(&inst_create_info, NULL, &vr->inst);
+	check_vk_success(result, "vkCreateInstance");
+
+	vulkan_renderer_setup_instance_extensions(vr);
+
+	free(avail_inst_extns);
+	free(inst_extns);
+}
+
+static void
+load_device_proc(struct vulkan_renderer *vr, const char *func, void *proc_ptr)
+{
+	void *proc = (void *)vkGetDeviceProcAddr(vr->dev, func);
+	if (proc == NULL) {
+		weston_log("Failed to vkGetDeviceProcAddr %s\n", func);
+		abort();
+	}
+
+	*(void **)proc_ptr = proc;
+}
+
+static void
+vulkan_renderer_setup_device_extensions(struct vulkan_renderer *vr)
+{
+	/* VK_KHR_get_memory_requirements2 */
+	load_device_proc(vr, "vkGetImageMemoryRequirements2KHR", &vr->get_image_memory_requirements2);
+
+	/* VK_KHR_external_memory_fd */
+	load_device_proc(vr, "vkGetMemoryFdPropertiesKHR", &vr->get_memory_fd_properties);
+
+	/* VK_KHR_external_semaphore_fd */
+	if (vr->has_external_semaphore_fd) {
+		load_device_proc(vr, "vkGetSemaphoreFdKHR", &vr->get_semaphore_fd);
+		load_device_proc(vr, "vkImportSemaphoreFdKHR", &vr->import_semaphore_fd);
+	}
+}
+
+static void
+vulkan_renderer_create_device(struct vulkan_renderer *vr)
+{
+	uint32_t num_avail_device_extns;
+	uint32_t num_device_extns = 0;
+	VkResult result;
+
+	result = vkEnumerateDeviceExtensionProperties(vr->phys_dev, NULL, &num_avail_device_extns, NULL);
+	check_vk_success(result, "vkEnumerateDeviceExtensionProperties");
+	VkExtensionProperties *avail_device_extns = xmalloc(num_avail_device_extns * sizeof(VkExtensionProperties));
+	result = vkEnumerateDeviceExtensionProperties(vr->phys_dev, NULL, &num_avail_device_extns, avail_device_extns);
+	check_vk_success(result, "vkEnumerateDeviceExtensionProperties");
+
+	const char **device_extns = xmalloc(num_avail_device_extns * sizeof(*device_extns));
+	device_extns[num_device_extns++] = VK_KHR_BIND_MEMORY_2_EXTENSION_NAME;
+	device_extns[num_device_extns++] = VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME;
+	device_extns[num_device_extns++] = VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME;
+	device_extns[num_device_extns++] = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
+	device_extns[num_device_extns++] = VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME;
+	device_extns[num_device_extns++] = VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME;
+	device_extns[num_device_extns++] = VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
+	device_extns[num_device_extns++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+
+	if (check_extension(avail_device_extns, num_avail_device_extns, VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME)) {
+		device_extns[num_device_extns++] = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
+		vr->has_incremental_present = true;
+	}
+
+	if (check_extension(avail_device_extns, num_avail_device_extns, VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME)) {
+		device_extns[num_device_extns++] = VK_EXT_PHYSICAL_DEVICE_DRM_EXTENSION_NAME;
+		vr->has_physical_device_drm
= true; + } + + if (check_extension(avail_device_extns, num_avail_device_extns, VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME) && + check_extension(avail_device_extns, num_avail_device_extns, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME)) { + device_extns[num_device_extns++] = VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME; + /* Extension dependencies */ + device_extns[num_device_extns++] = VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME; + device_extns[num_device_extns++] = VK_KHR_MAINTENANCE_1_EXTENSION_NAME; + vr->has_image_drm_format_modifier = true; + } + + if (check_extension(avail_device_extns, num_avail_device_extns, VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME)) { + device_extns[num_device_extns++] = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME; + vr->has_external_semaphore_fd = true; + } + + /* These are really not optional for DRM backend, but are not used by + * e.g. headless, software renderer, so make them optional for tests */ + if (check_extension(avail_device_extns, num_avail_device_extns, VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME)) { + device_extns[num_device_extns++] = VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME; + vr->has_external_memory_dma_buf = true; + } + if (check_extension(avail_device_extns, num_avail_device_extns, VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME)) { + device_extns[num_device_extns++] = VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME; + vr->has_queue_family_foreign = true; + } + + for (uint32_t i = 0; i < num_device_extns; i++) { + uint32_t j; + for (j = 0; j < num_avail_device_extns; j++) { + if (strcmp(device_extns[i], avail_device_extns[j].extensionName) == 0) { + break; + } + } + if (j == num_avail_device_extns) { + weston_log("Unsupported device extension: %s\n", device_extns[i]); + abort(); + } + } + + const VkDeviceQueueCreateInfo device_queue_info = { + .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, + .queueFamilyIndex = vr->queue_family, + .queueCount = 1, + .pQueuePriorities = (float[]){ 1.0f }, + }; + + const VkDeviceCreateInfo device_create_info = { + .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, + .queueCreateInfoCount = 1, + .pQueueCreateInfos = &device_queue_info, + .enabledExtensionCount = num_device_extns, + .ppEnabledExtensionNames = device_extns, + }; + + result = vkCreateDevice(vr->phys_dev, &device_create_info, NULL, &vr->dev); + check_vk_success(result, "vkCreateDevice"); + + bool exportable_semaphore = false, importable_semaphore = false; + if (vr->has_external_semaphore_fd) { + const VkPhysicalDeviceExternalSemaphoreInfo ext_semaphore_info = { + .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + .handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT, + }; + VkExternalSemaphoreProperties ext_semaphore_props = { + .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + }; + vkGetPhysicalDeviceExternalSemaphoreProperties(vr->phys_dev, &ext_semaphore_info, &ext_semaphore_props); + + exportable_semaphore = ext_semaphore_props.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT; + importable_semaphore = ext_semaphore_props.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT; + } + if (!vr->has_image_drm_format_modifier) + weston_log("DRM format modifiers not supported\n"); + if (!exportable_semaphore) + weston_log("VkSemaphore is not exportable\n"); + if (!importable_semaphore) + weston_log("VkSemaphore is not importable\n"); + + vr->semaphore_import_export = exportable_semaphore && importable_semaphore; + + vulkan_renderer_setup_device_extensions(vr); + + 
free(avail_device_extns);
+	free(device_extns);
+}
+
+static int
+vulkan_renderer_display_create(struct weston_compositor *ec,
+			       const struct vulkan_renderer_display_options *options)
+{
+	struct vulkan_renderer *vr;
+	VkResult result;
+
+	vr = xzalloc(sizeof(*vr));
+
+	vr->compositor = ec;
+	wl_list_init(&vr->pipeline_list);
+	vr->base.repaint_output = vulkan_renderer_repaint_output;
+	vr->base.resize_output = vulkan_renderer_resize_output;
+	vr->base.create_renderbuffer = vulkan_renderer_create_renderbuffer;
+	vr->base.destroy_renderbuffer = vulkan_renderer_destroy_renderbuffer;
+	vr->base.flush_damage = vulkan_renderer_flush_damage;
+	vr->base.attach = vulkan_renderer_attach;
+	vr->base.destroy = vulkan_renderer_destroy;
+	vr->base.buffer_init = vulkan_renderer_buffer_init;
+	vr->base.output_set_border = vulkan_renderer_output_set_border;
+	vr->base.type = WESTON_RENDERER_VULKAN;
+
+	weston_drm_format_array_init(&vr->supported_formats);
+
+	ec->renderer = &vr->base;
+
+	wl_list_init(&vr->dmabuf_formats);
+	wl_signal_init(&vr->destroy_signal);
+
+	/* TODO: probe and register the remaining shm formats */
+	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_XRGB8888);
+	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_ARGB8888);
+	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_ABGR2101010);
+
+	vulkan_renderer_create_instance(vr);
+
+	vulkan_renderer_choose_physical_device(vr);
+
+	vulkan_renderer_choose_queue_family(vr);
+
+	vulkan_renderer_create_device(vr);
+
+	vr->drm_fd = -1;
+	if (vr->has_physical_device_drm)
+		vr->drm_fd = open_drm_device_node(vr);
+
+	ec->capabilities |= WESTON_CAP_ROTATION_ANY;
+	ec->capabilities |= WESTON_CAP_CAPTURE_YFLIP;
+	ec->capabilities |= WESTON_CAP_VIEW_CLIP_MASK;
+
+	if (vr->semaphore_import_export)
+		ec->capabilities |= WESTON_CAP_EXPLICIT_SYNC;
+
+	if (vr->has_external_memory_dma_buf) {
+		int ret;
+		vr->base.import_dmabuf = vulkan_renderer_import_dmabuf;
+		vr->base.get_supported_formats = vulkan_renderer_get_supported_formats;
+
+		ret = populate_supported_formats(ec, &vr->supported_formats);
+		if (ret < 0)
+			abort();
+
+		if (vr->drm_fd > 0) {
+			/* We support dmabuf feedback only when the renderer
+			 * exposes a DRM device */
+			ec->dmabuf_feedback_format_table =
+				weston_dmabuf_feedback_format_table_create(&vr->supported_formats);
+			assert(ec->dmabuf_feedback_format_table);
+			ret = create_default_dmabuf_feedback(ec, vr);
+			if (ret < 0)
+				abort();
+		}
+	}
+
+	vkGetDeviceQueue(vr->dev, vr->queue_family, 0, &vr->queue);
+
+	const VkCommandPoolCreateInfo cmd_pool_create_info = {
+		.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+		.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+		.queueFamilyIndex = vr->queue_family,
+	};
+	result = vkCreateCommandPool(vr->dev, &cmd_pool_create_info, NULL, &vr->cmd_pool);
+	check_vk_success(result, "vkCreateCommandPool");
+
+	ec->read_format = pixel_format_get_info(DRM_FORMAT_ARGB8888);
+
+	create_texture_image_dummy(vr); /* Workaround for solid-color surfaces */
+
+	weston_log("Vulkan features:\n");
+	weston_log_continue(STAMP_SPACE "wayland_surface: %s\n", yesno(vr->has_wayland_surface));
+	weston_log_continue(STAMP_SPACE "xcb_surface: %s\n", yesno(vr->has_xcb_surface));
+	weston_log_continue(STAMP_SPACE "incremental_present: %s\n", yesno(vr->has_incremental_present));
+	weston_log_continue(STAMP_SPACE "image_drm_format_modifier: %s\n", yesno(vr->has_image_drm_format_modifier));
+	weston_log_continue(STAMP_SPACE "external_semaphore_fd: %s\n", yesno(vr->has_external_semaphore_fd));
+	weston_log_continue(STAMP_SPACE "physical_device_drm: %s\n", yesno(vr->has_physical_device_drm));
+	weston_log_continue(STAMP_SPACE "external_memory_dma_buf: %s\n", yesno(vr->has_external_memory_dma_buf));
+	weston_log_continue(STAMP_SPACE "queue_family_foreign: %s\n", yesno(vr->has_queue_family_foreign));
+	weston_log_continue(STAMP_SPACE "semaphore_import_export: %s\n", yesno(vr->semaphore_import_export));
+
+	return 0;
+}
+
+WL_EXPORT struct vulkan_renderer_interface vulkan_renderer_interface = {
+	.display_create = vulkan_renderer_display_create,
+	.output_window_create = vulkan_renderer_output_window_create,
+	.output_fbo_create = vulkan_renderer_output_fbo_create,
+	.output_destroy = vulkan_renderer_output_destroy,
+	.create_fence_fd = vulkan_renderer_create_fence_fd,
+};
diff --git a/libweston/renderer-vulkan/vulkan-renderer.h b/libweston/renderer-vulkan/vulkan-renderer.h
new file mode 100644
index 000000000..63eb66e35
--- /dev/null
+++ b/libweston/renderer-vulkan/vulkan-renderer.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright © 2025 Erico Nunes
+ *
+ * based on gl-renderer:
+ * Copyright © 2012 John Kåre Alsaker
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+
+#include "config.h"
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <libweston/libweston.h>
+
+#include <vulkan/vulkan.h>
+#include "backend.h"
+#include "libweston-internal.h"
+
+#include <xcb/xcb.h>
+
+/**
+ * Options passed to the \c display_create method of the vulkan renderer interface.
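+ * Note: as of this version, \c display_create creates its own VkInstance
+ * and chooses its own physical device; the \c instance member below is
+ * not yet consumed.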
+ * + * \see struct vulkan_renderer_interface + */ +struct vulkan_renderer_display_options { + struct weston_renderer_options base; + VkInstance instance; + void *gbm_device; + const struct pixel_format_info **formats; + unsigned formats_count; +}; + +#define NUM_GBM_BOS 2 + +struct vulkan_renderer_output_options { + struct gbm_bo *gbm_bos[NUM_GBM_BOS]; + unsigned int num_gbm_bos; + struct weston_size fb_size; + struct weston_geometry area; + const struct pixel_format_info **formats; + unsigned formats_count; + + // xcb backend options + void *xcb_connection; + xcb_visualid_t xcb_visualid; + xcb_window_t xcb_window; + + // wayland backend options + void *wayland_display; + void *wayland_surface; +}; + +struct vulkan_renderer_fbo_options { + /** Size of the framebuffer in pixels, including borders */ + struct weston_size fb_size; + /** Area inside the framebuffer in pixels for composited content */ + struct weston_geometry area; +}; + +struct vulkan_renderer_interface { + int (*display_create)(struct weston_compositor *ec, + const struct vulkan_renderer_display_options *options); + + int (*output_window_create)(struct weston_output *output, + const struct vulkan_renderer_output_options *options); + + int (*output_fbo_create)(struct weston_output *output, + const struct vulkan_renderer_fbo_options *options); + + void (*output_destroy)(struct weston_output *output); + + int (*create_fence_fd)(struct weston_output *output); +}; diff --git a/libweston/renderer-vulkan/vulkan_fragment_shader.frag b/libweston/renderer-vulkan/vulkan_fragment_shader.frag new file mode 100644 index 000000000..e50638adb --- /dev/null +++ b/libweston/renderer-vulkan/vulkan_fragment_shader.frag @@ -0,0 +1,57 @@ +#version 450 + +layout(binding = 1) uniform _ubo { + uniform vec4 unicolor; + uniform float view_alpha; +} ubo; +layout(binding = 2) uniform sampler2D tex; + +layout(location = 1) in vec2 v_texcoord; + +layout(location = 0) out vec4 fragcolor; + +layout(constant_id = 0) const int c_variant = 0; +layout(constant_id = 1) const bool c_input_is_premult = false; + +#define PIPELINE_VARIANT_RGBA 1 +#define PIPELINE_VARIANT_RGBX 2 +#define PIPELINE_VARIANT_SOLID 3 +#define PIPELINE_VARIANT_EXTERNAL 4 + +vec4 +sample_input_texture() +{ + if (c_variant == PIPELINE_VARIANT_SOLID) + return ubo.unicolor; + + if (c_variant == PIPELINE_VARIANT_EXTERNAL || + c_variant == PIPELINE_VARIANT_RGBA || + c_variant == PIPELINE_VARIANT_RGBX) { + vec4 color; + + color = texture(tex, v_texcoord); + + if (c_variant == PIPELINE_VARIANT_RGBX) + color.a = 1.0; + + return color; + } + + /* Never reached, bad variant value. 
*/ + return vec4(1.0, 0.3, 1.0, 1.0); +} + +void main() { + vec4 color; + + /* Electrical (non-linear) RGBA values, may be premult or not */ + color = sample_input_texture(); + + /* Ensure pre-multiplied for blending */ + if (!c_input_is_premult) + color.rgb *= color.a; + + color *= ubo.view_alpha; + + fragcolor = color; +} diff --git a/libweston/renderer-vulkan/vulkan_vertex_shader_surface.vert b/libweston/renderer-vulkan/vulkan_vertex_shader_surface.vert new file mode 100644 index 000000000..6d7dc6c38 --- /dev/null +++ b/libweston/renderer-vulkan/vulkan_vertex_shader_surface.vert @@ -0,0 +1,16 @@ +#version 450 + +layout(binding = 0) uniform _ubo { + uniform mat4 proj; + uniform mat4 surface_to_buffer; +} ubo; + +layout(location = 0) in vec2 position; +/* layout(location = 1) in vec2 texcoord; // unused here */ + +layout(location = 1) out vec2 v_texcoord; + +void main() { + gl_Position = ubo.proj * vec4(position, 0.0, 1.0); + v_texcoord = vec2(ubo.surface_to_buffer * vec4(position, 0.0, 1.0)); +} diff --git a/libweston/renderer-vulkan/vulkan_vertex_shader_texcoord.vert b/libweston/renderer-vulkan/vulkan_vertex_shader_texcoord.vert new file mode 100644 index 000000000..d0684b58b --- /dev/null +++ b/libweston/renderer-vulkan/vulkan_vertex_shader_texcoord.vert @@ -0,0 +1,16 @@ +#version 450 + +layout(binding = 0) uniform _ubo { + uniform mat4 proj; + uniform mat4 surface_to_buffer; +} ubo; + +layout(location = 0) in vec2 position; +layout(location = 1) in vec2 texcoord; + +layout(location = 1) out vec2 v_texcoord; + +void main() { + gl_Position = ubo.proj * vec4(position, 0.0, 1.0); + v_texcoord = texcoord; +} diff --git a/man/weston-drm.man b/man/weston-drm.man index bfa91a7bb..3fd524fed 100644 --- a/man/weston-drm.man +++ b/man/weston-drm.man @@ -18,11 +18,11 @@ runs without any underlying windowing system. The backend uses the Linux KMS API to detect connected monitors. Monitor hot-plugging is supported. Input devices are found automatically by .BR udev (7). -Compositing happens mainly in GL\ ES\ 2, initialized through EGL. It +Compositing happens mainly in GL\ ES\ 2, initialized through EGL, or Vulkan. It is also possible to take advantage of hardware cursors and overlays, when they exist and are functional. Full-screen surfaces will be scanned out directly without compositing, when possible. -Hardware accelerated clients are supported via EGL. +Hardware accelerated clients are supported via EGL or Vulkan. The backend chooses the DRM graphics device first based on seat id. If seat identifiers are not set, it looks for the graphics device @@ -57,7 +57,7 @@ The actually supported pixel formats depend on the DRM driver and hardware, and the renderer used. Using Pixman-renderer, DRM-backend supports .BR xrgb8888 ", " xrgb2101010 ", " rgb565 and some of their permutations. -The formats supported with GL-renderer depend on the EGL and OpenGL ES 2 or 3 +The formats supported with GL-renderer or vulkan-renderer depend on the driver implementations. The names are case-insensitive. This setting applies only to .RB "outputs in SDR mode, see " eotf-mode " in " weston.ini (5). 
If the hardware platform supports hardware planes placed under the DRM primary plane diff --git a/meson.build b/meson.build index 045a48d36..4a7655f59 100644 --- a/meson.build +++ b/meson.build @@ -123,6 +123,10 @@ config_h.set('EGL_NO_X11', '1') config_h.set('MESA_EGL_NO_X11_HEADERS', '1') config_h.set('EGL_NO_PLATFORM_SPECIFIC_TYPES', '1') +config_h.set('VULKAN_NO_X11', '1') +config_h.set('MESA_VULKAN_NO_X11_HEADERS', '1') +config_h.set('VULKAN_NO_PLATFORM_SPECIFIC_TYPES', '1') + config_h.set_quoted('PACKAGE_STRING', 'weston @0@'.format(version_weston)) config_h.set_quoted('PACKAGE_VERSION', version_weston) config_h.set_quoted('VERSION', version_weston) @@ -201,6 +205,21 @@ else dep_egl = dependency('', required: false) endif +if get_option('renderer-vulkan') + dep_vulkan = dependency('vulkan', required: false) + if not dep_vulkan.found() + error('libweston + vulkan-renderer requires vulkan which was not found. Or, you can use \'-Drenderer-vulkan=false\'.') + endif + + prog_glslang = find_program('glslangValidator', required : false) + if not prog_glslang.found() + error('libweston + vulkan-renderer requires glslangValidator which was not found. Or, you can use \'-Drenderer-vulkan=false\'.') + endif +else + dep_vulkan = dependency('', required: false) + prog_glslang = find_program('', required : false) +endif + subdir('include') subdir('protocol') diff --git a/meson_options.txt b/meson_options.txt index 20701abd6..66993cbea 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -71,6 +71,13 @@ option( description: 'Weston renderer: EGL / OpenGL ES 2.x' ) +option( + 'renderer-vulkan', + type: 'boolean', + value: true, + description: 'Weston renderer: Vulkan' +) + option( 'xwayland', type: 'boolean', diff --git a/shared/meson.build b/shared/meson.build index daff4246f..78c038cfc 100644 --- a/shared/meson.build +++ b/shared/meson.build @@ -6,7 +6,7 @@ srcs_libshared = [ 'process-util.c', 'hash.c', ] -deps_libshared = [dep_wayland_client, dep_pixman, deps_for_libweston_users, dep_egl] +deps_libshared = [dep_wayland_client, dep_pixman, deps_for_libweston_users, dep_egl, dep_vulkan] lib_libshared = static_library( 'shared',