backend-drm: use dmabuf renderbuffer for Vulkan

The initial drm backend implementation for Vulkan passes display-device-allocated
gbm bos directly to the renderer. This is awkward: it requires the renderer to
maintain a custom output creation interface and a separate code path specifically
for importing gbm bos.
Since then, the vulkan-renderer has gained support for dmabuf renderbuffers
(to support e.g. PipeWire dmabuf screen capture), through an interface that is
also shared with gl-renderer.
The dmabuf renderbuffer code path is similar to what the drm backend
implementation intended to do, so we can unify it to a single interface.
This has the advantages of requiring one less custom output creation
interface, as well as the renderer being able to handle the drm backend
through a single shared dmabuf renderbuffer path.

Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
This commit is contained in:
Erico Nunes 2025-07-25 16:01:54 +02:00 committed by Daniel Stone
parent aff309fa9f
commit 1a68480421
6 changed files with 131 additions and 179 deletions

View file

@ -45,6 +45,7 @@
#include "shared/weston-egl-ext.h"
#include "linux-dmabuf.h"
#include "linux-explicit-synchronization.h"
#include "shared/xalloc.h"
/* When initializing EGL, if the preferred buffer format isn't available
* we may be able to substitute an ARGB format for an XRGB one.
@ -92,7 +93,6 @@ drm_backend_create_vulkan_renderer(struct drm_backend *b)
fallback_format_for(b->format),
};
struct vulkan_renderer_display_options options = {
.gbm_device = b->gbm,
.formats = format,
.formats_count = 1,
};
@ -544,14 +544,24 @@ drm_output_init_egl(struct drm_output *output, struct drm_backend *b)
return 0;
}
static void
create_gbm_bos(struct gbm_device *gbm, struct drm_output *output, unsigned int n)
static struct gbm_bo *
drm_gbm_create_bo(struct gbm_device *gbm, struct drm_output *output)
{
struct weston_mode *mode = output->base.current_mode;
struct drm_plane *plane = output->scanout_plane;
struct weston_drm_format *fmt;
const uint64_t *modifiers;
unsigned int num_modifiers;
struct gbm_bo *bo = NULL;
/*
* TODO: Currently, this method allocates a buffer based on the list
* of acceptable modifiers received from the DRM backend but does not
* check it against formats renderable by the renderer.
* To support cases where the renderer may not support the same
* modifiers (e.g. Vulkan software renderer) it should match against
* renderer modifiers.
*/
fmt = weston_drm_format_array_find_format(&plane->formats,
output->format->format);
@ -559,18 +569,14 @@ create_gbm_bos(struct gbm_device *gbm, struct drm_output *output, unsigned int n
weston_log("format %s not supported by output %s\n",
output->format->drm_format_name,
output->base.name);
return;
return NULL;
}
if (!weston_drm_format_has_modifier(fmt, DRM_FORMAT_MOD_INVALID)) {
modifiers = weston_drm_format_get_modifiers(fmt, &num_modifiers);
for (unsigned int i = 0; i < n; i++) {
output->gbm_bos[i] =
gbm_bo_create_with_modifiers(gbm,
mode->width, mode->height,
output->format->format,
modifiers, num_modifiers);
}
bo = gbm_bo_create_with_modifiers(gbm, mode->width, mode->height,
output->format->format,
modifiers, num_modifiers);
}
/*
@ -582,38 +588,96 @@ create_gbm_bos(struct gbm_device *gbm, struct drm_output *output, unsigned int n
if (gbm_device_get_fd(gbm) != output->device->drm.fd)
output->gbm_bo_flags |= GBM_BO_USE_LINEAR;
if (!output->gbm_bos[0]) {
for (unsigned int i = 0; i < n; i++) {
output->gbm_bos[i] = gbm_bo_create(gbm,
mode->width, mode->height,
output->format->format,
output->gbm_bo_flags);
}
if (!bo) {
bo = gbm_bo_create(gbm, mode->width, mode->height,
output->format->format, output->gbm_bo_flags);
}
return bo;
}
/* Destroy callback for dmabufs created by drm_gbm_bo_get_dmabuf():
 * close every exported plane fd, then release the attribute struct
 * and the wrapper itself. */
static void
drm_gbm_dmabuf_destroy(struct linux_dmabuf_memory *dmabuf)
{
	struct dmabuf_attributes *attr = dmabuf->attributes;
	int plane;

	for (plane = 0; plane < attr->n_planes; plane++)
		close(attr->fd[plane]);

	free(attr);
	free(dmabuf);
}
/*
 * Wrap a gbm_bo in a struct linux_dmabuf_memory so it can be handed to
 * the renderer's dmabuf renderbuffer interface.
 *
 * Each plane entry records an exported fd plus that plane's stride and
 * offset; every exported fd is a fresh dup, so drm_gbm_dmabuf_destroy()
 * closes them all. NOTE(review): gbm_bo_get_fd() always exports the
 * bo's primary fd — presumably all planes of these bos share a single
 * underlying buffer; confirm, or switch to gbm_bo_get_fd_for_plane().
 *
 * The gbm and output parameters are currently unused.
 */
static struct linux_dmabuf_memory *
drm_gbm_bo_get_dmabuf(struct gbm_device *gbm, struct drm_output *output, struct gbm_bo *bo)
{
	struct linux_dmabuf_memory *dmabuf;
	struct dmabuf_attributes *attr;
	int plane;

	attr = xzalloc(sizeof(*attr));
	attr->width = gbm_bo_get_width(bo);
	attr->height = gbm_bo_get_height(bo);
	attr->format = gbm_bo_get_format(bo);
	attr->modifier = gbm_bo_get_modifier(bo);
	attr->n_planes = gbm_bo_get_plane_count(bo);
	for (plane = 0; plane < attr->n_planes; plane++) {
		attr->fd[plane] = gbm_bo_get_fd(bo);
		attr->stride[plane] = gbm_bo_get_stride_for_plane(bo, plane);
		attr->offset[plane] = gbm_bo_get_offset(bo, plane);
	}

	dmabuf = xzalloc(sizeof(*dmabuf));
	dmabuf->attributes = attr;
	dmabuf->destroy = drm_gbm_dmabuf_destroy;

	return dmabuf;
}
static void
create_renderbuffers(struct gbm_device *gbm, struct drm_output *output, unsigned int n)
{
struct weston_renderer *renderer = output->base.compositor->renderer;
struct drm_device *device = output->device;
for (unsigned int i = 0; i < n; i++) {
assert(output->gbm_bos[i]);
drm_fb_get_from_bo(output->gbm_bos[i], device, !output->format->opaque_substitute,
BUFFER_GBM_BO);
}
struct linux_dmabuf_memory *dmabuf;
struct gbm_bo *bo;
assert(output->gbm_surface == NULL);
bo = drm_gbm_create_bo(gbm, output);
if (!bo) {
weston_log("failed to allocate bo\n");
return;
}
dmabuf = drm_gbm_bo_get_dmabuf(gbm, output, bo);
if (!dmabuf) {
weston_log("failed to allocate dmabuf\n");
return;
}
output->renderbuffer[i] =
renderer->create_renderbuffer_dmabuf(&output->base,
dmabuf,
NULL, NULL);
if (!output->renderbuffer[i]) {
weston_log("failed to allocate renderbuffer\n");
return;
}
drm_fb_get_from_dmabuf_attributes(dmabuf->attributes, device, true, false, NULL);
output->linux_dmabuf_memory[i] = dmabuf;
}
}
/* Init output state that depends on vulkan or gbm */
/* Init output state that depends on vulkan */
int
drm_output_init_vulkan(struct drm_output *output, struct drm_backend *b)
{
const struct weston_renderer *renderer = b->compositor->renderer;
const struct weston_mode *mode = output->base.current_mode;
const struct pixel_format_info *format[2] = {
output->format,
fallback_format_for(output->format),
};
struct vulkan_renderer_surface_options options = {
.formats = format,
.formats_count = 1,
struct weston_renderer *renderer = b->compositor->renderer;
const struct vulkan_renderer_surfaceless_options options = {
.area.x = 0,
.area.y = 0,
.area.width = mode->width,
@ -622,37 +686,14 @@ drm_output_init_vulkan(struct drm_output *output, struct drm_backend *b)
.fb_size.height = mode->height,
};
assert(output->gbm_surface == NULL);
/*
* TODO: This method for BO allocation needs to be reworked.
* Currently, it allocates a buffer based on the list of acceptable
* modifiers received from the DRM backend but does not check it
* against formats renderable by the renderer (and there is no
* straightforward way to do so yet).
* Most likely this should be replaced by sending the acceptable
* modifiers list from the DRM backend to the renderer and doing the
* optimal dmabuf allocation in the renderer. But as of this writing,
* this API for dmabuf allocation is not yet implemented in the
* Vulkan renderer.
*/
create_gbm_bos(b->gbm, output, NUM_GBM_BOS);
if (!output->gbm_bos[0]) {
weston_log("failed to create gbm bos\n");
if (renderer->vulkan->output_surfaceless_create(&output->base, &options) < 0) {
weston_log("failed to create vulkan renderer output state\n");
return -1;
}
options.num_gbm_bos = NUM_GBM_BOS;
if (options.formats[1])
options.formats_count = 2;
for (unsigned int i = 0; i < options.num_gbm_bos; i++)
options.gbm_bos[i] = output->gbm_bos[i];
if (renderer->vulkan->output_surface_create(&output->base, &options) < 0) {
weston_log("failed to create vulkan renderer output state\n");
gbm_surface_destroy(output->gbm_surface);
output->gbm_surface = NULL;
create_renderbuffers(b->gbm, output, ARRAY_LENGTH(output->renderbuffer));
if (!output->linux_dmabuf_memory[0]) {
weston_log("failed to create dmabufs\n");
return -1;
}
@ -687,18 +728,17 @@ drm_output_fini_vulkan(struct drm_output *output)
struct drm_backend *b = output->backend;
const struct weston_renderer *renderer = b->compositor->renderer;
/* Destroying the GBM surface will destroy all our GBM buffers,
* regardless of refcount. Ensure we destroy them here. */
if (!b->compositor->shutting_down &&
output->scanout_plane->state_cur->fb &&
output->scanout_plane->state_cur->fb->type == BUFFER_GBM_BO) {
output->scanout_plane->state_cur->fb->type == BUFFER_DMABUF) {
drm_plane_reset_state(output->scanout_plane);
}
for (unsigned int i = 0; i < ARRAY_LENGTH(output->renderbuffer); i++)
renderer->destroy_renderbuffer(output->renderbuffer[i]);
renderer->vulkan->output_destroy(&output->base);
for (unsigned int i = 0; i < NUM_GBM_BOS; i++)
gbm_bo_destroy(output->gbm_bos[i]);
output->gbm_surface = NULL;
drm_output_fini_cursor_vulkan(output);
}
@ -739,30 +779,29 @@ drm_output_render_vulkan(struct drm_output_state *state, pixman_region32_t *dama
{
struct drm_output *output = state->output;
struct drm_device *device = output->device;
struct gbm_bo *bo;
struct linux_dmabuf_memory *dmabuf;
struct drm_fb *ret;
output->base.compositor->renderer->repaint_output(&output->base,
damage, NULL);
damage,
output->renderbuffer[output->current_image]);
bo = output->gbm_bos[output->current_bo];
if (!bo) {
weston_log("failed to get gbm_bo\n");
dmabuf = output->linux_dmabuf_memory[output->current_image];
if (!dmabuf) {
weston_log("failed to get dmabuf\n");
return NULL;
}
/* Output transparent/opaque image according to the format required by
* the client. */
ret = drm_fb_get_from_bo(bo, device, !output->format->opaque_substitute,
BUFFER_GBM_BO);
ret = drm_fb_get_from_dmabuf_attributes(dmabuf->attributes, device,
!output->format->opaque_substitute, false, NULL);
if (!ret) {
weston_log("failed to get drm_fb for bo\n");
weston_log("failed to get drm_fb for dmabuf\n");
return NULL;
}
ret->bo = bo;
ret->gbm_surface = NULL;
output->current_bo = (output->current_bo + 1) % NUM_GBM_BOS;
output->current_image = (output->current_image + 1) % ARRAY_LENGTH(output->renderbuffer);
return ret;
}

View file

@ -276,7 +276,6 @@ enum drm_fb_type {
BUFFER_PIXMAN_DUMB, /**< internal Pixman rendering */
BUFFER_GBM_SURFACE, /**< internal EGL rendering */
BUFFER_CURSOR, /**< internal cursor buffer */
BUFFER_GBM_BO, /**< internal Vulkan rendering */
};
struct drm_fb {
@ -574,8 +573,7 @@ struct drm_output {
int current_cursor;
struct gbm_surface *gbm_surface;
struct gbm_bo *gbm_bos[2];
int current_bo;
struct linux_dmabuf_memory *linux_dmabuf_memory[2];
const struct pixel_format_info *format;
uint32_t gbm_bo_flags;

View file

@ -526,7 +526,6 @@ drm_output_render(struct drm_output_state *state)
!weston_output_has_renderer_capture_tasks(&output->base) &&
scanout_plane->state_cur->fb &&
(scanout_plane->state_cur->fb->type == BUFFER_GBM_SURFACE ||
scanout_plane->state_cur->fb->type == BUFFER_GBM_BO ||
scanout_plane->state_cur->fb->type == BUFFER_PIXMAN_DUMB)) {
fb = drm_fb_ref(scanout_plane->state_cur->fb);
} else if (c->renderer->type == WESTON_RENDERER_PIXMAN) {

View file

@ -366,7 +366,7 @@ drm_fb_destroy_gbm(struct gbm_bo *bo, void *data)
struct drm_fb *fb = data;
assert(fb->type == BUFFER_GBM_SURFACE || fb->type == BUFFER_CLIENT ||
fb->type == BUFFER_CURSOR || fb->type == BUFFER_GBM_BO);
fb->type == BUFFER_CURSOR);
drm_fb_destroy(fb);
}
@ -634,7 +634,6 @@ drm_fb_unref(struct drm_fb *fb)
#ifdef BUILD_DRM_GBM
case BUFFER_CURSOR:
case BUFFER_CLIENT:
case BUFFER_GBM_BO:
gbm_bo_destroy(fb->bo);
break;
case BUFFER_GBM_SURFACE:

View file

@ -91,25 +91,21 @@ struct vulkan_border_image {
void *fs_ubo_map;
};
struct vulkan_renderbuffer_dmabuf {
struct vulkan_renderer *vr;
struct linux_dmabuf_memory *memory;
};
struct vulkan_renderbuffer {
struct weston_output *output;
pixman_region32_t damage;
enum vulkan_border_status border_status;
bool stale;
struct vulkan_renderbuffer_dmabuf dmabuf;
/* Used by dmabuf renderbuffers */
struct linux_dmabuf_memory *dmabuf;
void *buffer;
int stride;
weston_renderbuffer_discarded_func discarded_cb;
void *user_data;
/* Unused by drm and swapchain outputs */
/* Unused by swapchain outputs */
struct vulkan_renderer_image *image;
struct wl_list link;
@ -123,8 +119,6 @@ struct vulkan_renderer_image {
VkSemaphore render_done;
struct vulkan_renderbuffer *renderbuffer;
struct gbm_bo *bo;
};
struct vulkan_renderer_frame_acquire_fence {
@ -165,7 +159,6 @@ struct vulkan_renderer_frame {
enum vulkan_output_type {
VULKAN_OUTPUT_HEADLESS,
VULKAN_OUTPUT_DRM,
VULKAN_OUTPUT_SWAPCHAIN,
};
@ -189,11 +182,8 @@ struct vulkan_output_state {
VkPresentModeKHR present_mode;
VkSurfaceKHR surface;
} swapchain;
struct {
uint32_t image_index;
} drm;
/* For drm and swapchain outputs only */
/* For swapchain outputs only */
uint32_t image_count;
struct vulkan_renderer_image images[MAX_NUM_IMAGES];
@ -607,8 +597,8 @@ vulkan_renderer_destroy_renderbuffer(weston_renderbuffer_t weston_renderbuffer)
free(rb->image);
}
if (rb->dmabuf.memory)
rb->dmabuf.memory->destroy(rb->dmabuf.memory);
if (rb->dmabuf)
rb->dmabuf->destroy(rb->dmabuf);
free(rb);
}
@ -2473,10 +2463,6 @@ vulkan_renderer_repaint_output(struct weston_output *output,
rb = renderbuffer;
im = rb->image;
break;
case VULKAN_OUTPUT_DRM:
im = &vo->images[vo->drm.image_index];
rb = im->renderbuffer;
break;
default:
abort();
}
@ -2495,7 +2481,7 @@ vulkan_renderer_repaint_output(struct weston_output *output,
result = vkBeginCommandBuffer(cmd_buffer, &begin_info);
check_vk_success(result, "vkBeginCommandBuffer");
if (vo->output_type == VULKAN_OUTPUT_DRM) {
if (rb->dmabuf) {
// Transfer ownership of the dmabuf to Vulkan
assert(vulkan_device_has(vr, EXTENSION_EXT_QUEUE_FAMILY_FOREIGN));
transfer_image_queue_family(cmd_buffer, im->image,
@ -2538,7 +2524,7 @@ vulkan_renderer_repaint_output(struct weston_output *output,
vkCmdEndRenderPass(cmd_buffer);
if (vo->output_type == VULKAN_OUTPUT_DRM) {
if (rb->dmabuf) {
// Transfer ownership of the dmabuf to DRM
assert(vulkan_device_has(vr, EXTENSION_EXT_QUEUE_FAMILY_FOREIGN));
transfer_image_queue_family(cmd_buffer, im->image,
@ -2674,9 +2660,6 @@ vulkan_renderer_repaint_output(struct weston_output *output,
pixman_region32_clear(&rb->damage);
vo->frame_index = (vo->frame_index + 1) % vo->num_frames;
if (vo->output_type == VULKAN_OUTPUT_DRM)
vo->drm.image_index = (vo->drm.image_index + 1) % vo->image_count;
}
static void
@ -3443,57 +3426,6 @@ create_dmabuf_image(struct vulkan_renderer *vr,
check_vk_success(result, "vkCreateImage");
}
static int
vulkan_renderer_output_window_create_gbm(struct weston_output *output,
const struct vulkan_renderer_surface_options *options)
{
struct weston_compositor *ec = output->compositor;
struct vulkan_output_state *vo = get_output_state(output);
struct vulkan_renderer *vr = get_renderer(ec);
const struct pixel_format_info *pixel_format = vo->pixel_format;
const VkFormat format = pixel_format->vulkan_format;
vo->image_count = options->num_gbm_bos;
for (uint32_t i = 0; i < vo->image_count; i++) {
struct vulkan_renderer_image *im = &vo->images[i];
struct gbm_bo *bo = options->gbm_bos[i];
im->bo = bo;
struct dmabuf_attributes attributes;
attributes.fd[0] = gbm_bo_get_fd(bo);
attributes.width = gbm_bo_get_width(bo);
attributes.height = gbm_bo_get_height(bo);
attributes.modifier = gbm_bo_get_modifier(bo);
attributes.n_planes = gbm_bo_get_plane_count(bo);
attributes.format = pixel_format->format;
for (int i = 0; i < attributes.n_planes; i++) {
attributes.offset[i] = gbm_bo_get_offset(bo, i);
attributes.stride[i] = gbm_bo_get_stride_for_plane(bo, i);
}
create_dmabuf_image(vr, &attributes,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
&im->image);
import_dmabuf(vr, im->image, &im->memory, &attributes);
close(attributes.fd[0]); /* fd is duped */
create_image_view(vr->dev, im->image, format, &im->image_view);
create_framebuffer(vr->dev, vo->renderpass, im->image_view,
options->fb_size.width, options->fb_size.height, &im->framebuffer);
create_image_semaphores(vr, vo, im);
im->renderbuffer = xzalloc(sizeof(*im->renderbuffer));
vulkan_renderbuffer_init(im->renderbuffer, NULL, NULL, NULL, output);
}
return 0;
}
static int
vulkan_renderer_output_window_create_swapchain(struct weston_output *output,
const struct vulkan_renderer_surface_options *options)
@ -3689,21 +3621,11 @@ vulkan_renderer_output_surface_create(struct weston_output *output,
assert(ret == 0);
struct vulkan_output_state *vo = get_output_state(output);
if ((options->wayland_display && options->wayland_surface) ||
(options->xcb_connection && options->xcb_window)) {
vo->output_type = VULKAN_OUTPUT_SWAPCHAIN;
} else {
vo->output_type = VULKAN_OUTPUT_DRM;
}
vo->output_type = VULKAN_OUTPUT_SWAPCHAIN;
vo->pixel_format = pixel_format;
if (vo->output_type == VULKAN_OUTPUT_SWAPCHAIN) {
create_renderpass(output, pixel_format->vulkan_format, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
vulkan_renderer_output_window_create_swapchain(output, options);
} else {
create_renderpass(output, pixel_format->vulkan_format, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
vulkan_renderer_output_window_create_gbm(output, options);
}
create_renderpass(output, pixel_format->vulkan_format, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
vulkan_renderer_output_window_create_swapchain(output, options);
weston_output_update_capture_info(output,
WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER,
@ -3755,7 +3677,7 @@ vulkan_renderer_output_surfaceless_create(struct weston_output *output,
fb_size->width, fb_size->height,
output->compositor->read_format);
vulkan_renderer_create_output_frames(output, &options->fb_size, &options->area, 1);
vulkan_renderer_create_output_frames(output, fb_size, area, MAX_CONCURRENT_FRAMES);
return 0;
}
@ -3993,8 +3915,7 @@ vulkan_renderer_create_renderbuffer_dmabuf(struct weston_output *output,
vulkan_renderbuffer_init(renderbuffer, im, discarded_cb, user_data, output);
renderbuffer->dmabuf.vr = vr;
renderbuffer->dmabuf.memory = dmabuf;
renderbuffer->dmabuf = dmabuf;
return (weston_renderbuffer_t) renderbuffer;
}

View file

@ -50,15 +50,11 @@ typedef uint32_t xcb_window_t;
*/
struct vulkan_renderer_display_options {
struct weston_renderer_options base;
void *gbm_device;
const struct pixel_format_info **formats;
unsigned formats_count;
};
#define NUM_GBM_BOS 2
struct vulkan_renderer_surface_options {
struct gbm_bo *gbm_bos[NUM_GBM_BOS];
unsigned int num_gbm_bos;
struct weston_size fb_size;
struct weston_geometry area;