From 1668434d812f847211368fb5056cd641085c2880 Mon Sep 17 00:00:00 2001 From: Trigger Huang Date: Fri, 24 Apr 2026 15:24:32 +0800 Subject: [PATCH] renderer-gl: Restore dma buffers in GPU recovery Restore all the dma buffers in GPU recovery, then the applications which created dma buffers will not be terminated. Co-authored-by: Wei Zhao Signed-off-by: Trigger Huang --- libweston/renderer-gl/gl-renderer.c | 124 ++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/libweston/renderer-gl/gl-renderer.c b/libweston/renderer-gl/gl-renderer.c index 61c32a69d..a78dd24f0 100644 --- a/libweston/renderer-gl/gl-renderer.c +++ b/libweston/renderer-gl/gl-renderer.c @@ -305,6 +305,8 @@ struct gl_buffer_state { struct wl_list link; /* link to shm_bufs of gl renderer */ void *saved_gs; /* gl_surface_state, saved for gb recreation */ + struct linux_dmabuf_buffer *saved_dmabuf; + struct weston_buffer *saved_buffer; }; struct gl_surface_state { @@ -4305,6 +4307,7 @@ gl_renderer_import_dmabuf(struct weston_compositor *ec, return false; wl_list_insert(&gr->dma_bufs, &gb->link); + gb->saved_dmabuf = dmabuf; linux_dmabuf_buffer_set_user_data(dmabuf, gb, gl_renderer_destroy_dmabuf); @@ -4321,6 +4324,8 @@ gl_renderer_attach_buffer(struct weston_surface *surface, assert(buffer->renderer_private); gb = buffer->renderer_private; gs->buffer = gb; + gb->saved_gs = gs; + gb->saved_buffer = buffer; } static const struct weston_drm_format_array * @@ -4414,6 +4419,13 @@ gl_renderer_attach(struct weston_paint_node *pnode) gs->buffer = NULL; } } else { + if (gs->buffer) { + /* + * Non-SHM buffer state is buffer-owned. Clear the saved + * surface link before dropping gs->buffer. 
+ */ + gs->buffer->saved_gs = NULL; + } gs->buffer = NULL; } @@ -4472,6 +4484,9 @@ gl_renderer_buffer_init(struct weston_compositor *etc, assert(gb); linux_dmabuf_buffer_set_user_data(buffer->dmabuf, NULL, NULL); buffer->renderer_private = gb; + gb->saved_gs = NULL; + gb->saved_dmabuf = NULL; + gb->saved_buffer = buffer; gb->destroy_listener.notify = handle_buffer_destroy; wl_signal_add(&buffer->destroy_signal, &gb->destroy_listener); } @@ -4599,6 +4614,9 @@ surface_state_destroy(struct gl_surface_state *gs, struct gl_renderer *gr) if (gs->buffer && gs->buffer_ref.buffer->type == WESTON_BUFFER_SHM) destroy_buffer_state(gs->buffer); + else if (gs->buffer) + /* Clear the saved surface link for buffer-owned state */ + gs->buffer->saved_gs = NULL; gs->buffer = NULL; weston_buffer_reference(&gs->buffer_ref, NULL, @@ -5215,6 +5233,27 @@ gl_renderer_destroy_context(struct weston_compositor *ec) glDeleteTextures(gb->num_textures, gb->textures); gb->num_textures = 0; } + + /* Destroy all the GL/EGL resources for dma-bufs. */ + wl_list_for_each(gb, &gr->dma_bufs, link) { + struct gl_color_egl_image *color_egl_image; + int i; + + glDeleteTextures(gb->num_textures, gb->textures); + gb->num_textures = 0; + + for (i = 0; i < gb->num_images; i++) { + gr->destroy_image(gr->egl_display, gb->images[i]); + gb->images[i] = NULL; + } + gb->num_images = 0; + + wl_array_for_each(color_egl_image, &gb->reimported_egl_images) + gr->destroy_image(gr->egl_display, color_egl_image->image); + wl_array_release(&gb->reimported_egl_images); + wl_array_init(&gb->reimported_egl_images); + gb->active_reimported_egl_image = NULL; + } } wl_list_for_each_safe(format, next_format, &gr->dmabuf_formats, link) @@ -5351,6 +5390,91 @@ gl_renderer_recover_resources(struct weston_compositor *ec) /* Manually add the new gbs to shm_bufs. */ wl_list_insert_list(&gr->shm_bufs, &tmp_gb_list); + /* Restore dma_bufs. 
*/ + wl_list_init(&tmp_gb_list); + wl_list_for_each_safe(gb, tmp, &gr->dma_bufs, link) { +#define BUFFER_RECREATE() do { \ + destroy_buffer_state(gb); \ + new_gb = import_dmabuf(gr, dmabuf); \ + if (!new_gb) \ + return -EINVAL; \ + } while (0) +#define BUFFER_REATTACH() do { \ + linux_dmabuf_buffer_set_user_data(dmabuf, NULL, NULL); \ + buffer->renderer_private = new_gb; \ + new_gb->destroy_listener.notify = handle_buffer_destroy; \ + wl_signal_add(&buffer->destroy_signal, &new_gb->destroy_listener); \ + } while (0) + struct weston_buffer *buffer = gb->saved_buffer; + struct linux_dmabuf_buffer *dmabuf = gb->saved_dmabuf; + struct gl_surface_state *gs = gb->saved_gs; + struct gl_buffer_state *new_gb; + bool surface_bound = false; + + /* + * dma-buf ownership can live in three places: + * 1, Imported-only: linux_dmabuf_buffer userdata still owns gb. + * 2, Buffer-owned: weston_buffer::renderer_private owns gb. + * 3, Surface-bound: a live gl_surface_state still points at gb. + */ + if (dmabuf) { + /* 1, Imported-only state */ + void *user_data = + linux_dmabuf_buffer_get_user_data(dmabuf); + + /* Stale imported state, no longer owned by the dmabuf. */ + if (user_data != gb) { + destroy_buffer_state(gb); + continue; + } + + linux_dmabuf_buffer_set_user_data(dmabuf, NULL, NULL); + BUFFER_RECREATE(); + linux_dmabuf_buffer_set_user_data(dmabuf, new_gb, + gl_renderer_destroy_dmabuf); + new_gb->saved_dmabuf = dmabuf; + } else { + /* 2, Buffer-owned state */ + + /* Stale buffer-owned state. */ + if (!buffer || buffer->renderer_private != gb) { + destroy_buffer_state(gb); + continue; + } + + dmabuf = buffer->dmabuf; + if (!dmabuf) { + /* The buffer no longer has a live dma-buf backing. 
*/ + destroy_buffer_state(gb); + continue; + } + + if (gs && + gs->surface && + gs->surface->renderer_state == gs && + gs->buffer == gb && + gs->buffer_ref.buffer == buffer) + surface_bound = true; + + BUFFER_RECREATE(); + BUFFER_REATTACH(); + new_gb->saved_gs = NULL; + new_gb->saved_dmabuf = NULL; + new_gb->saved_buffer = buffer; + + /* 3, Surface-bound state */ + if (surface_bound) + gl_renderer_attach_buffer(gs->surface, buffer); + } + + wl_list_insert(&tmp_gb_list, &new_gb->link); +#undef BUFFER_RECREATE +#undef BUFFER_REATTACH + } + + /* Manually add the new gbs to dma_bufs. */ + wl_list_insert_list(&gr->dma_bufs, &tmp_gb_list); + /* * Since all the buffer states have been recreated, mark them all as * dirty to avoid garbage caused by original damage information.