Merge branch 'gpu_recovery' into 'main'

Support GPU recovery in gl renderer of drm backend

See merge request wayland/weston!1662
This commit is contained in:
Trigger Huang 2026-05-05 22:09:45 +08:00
commit 633013984c
11 changed files with 714 additions and 126 deletions

View file

@ -39,6 +39,8 @@
#include <wayland-cursor.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <GLES3/gl32.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
@ -86,6 +88,7 @@ struct display {
PFNEGLSWAPBUFFERSWITHDAMAGEEXTPROC swap_buffers_with_damage;
PFNEGLQUERYSUPPORTEDCOMPRESSIONRATESEXTPROC query_compression_rates;
PFNGLGETGRAPHICSRESETSTATUSEXTPROC get_graphics_reset_status;
};
struct geometry {
@ -106,6 +109,9 @@ struct window {
GLuint rotation_uniform;
GLuint pos;
GLuint col;
GLuint frag;
GLuint vert;
GLuint program;
} gl;
uint32_t frames;
@ -200,7 +206,9 @@ init_egl(struct display *display, struct window *window)
};
static const EGLint context_attribs[] = {
EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_CONTEXT_CLIENT_VERSION, 3,
EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR,
EGL_LOSE_CONTEXT_ON_RESET_KHR,
EGL_NONE
};
const char *extensions;
@ -310,6 +318,18 @@ init_egl(struct display *display, struct window *window)
eglGetProcAddress("eglQuerySupportedCompressionRatesEXT");
printf("has EGL_EXT_surface_compression\n");
}
if (extensions &&
weston_check_egl_extension(extensions,
"EGL_EXT_create_context_robustness")) {
display->get_graphics_reset_status =
(PFNGLGETGRAPHICSRESETSTATUSEXTPROC)
eglGetProcAddress("glGetGraphicsResetStatusEXT");
if (display->get_graphics_reset_status)
printf("glGetGraphicsResetStatusEXT is valid!\n");
else
printf("glGetGraphicsResetStatusEXT is invalid!\n");
}
}
static void
@ -319,6 +339,21 @@ fini_egl(struct display *display)
eglReleaseThread();
}
/*
 * Tear down the per-window GL and EGL objects created during init so they
 * can be recreated after a GPU reset (counterpart of init_gl()/init_egl()
 * recovery path in main()).
 */
static void
fini_gl(struct window *window)
{
/* Delete GL objects first, while the context is still current. */
glDeleteShader(window->gl.frag);
glDeleteShader(window->gl.vert);
glDeleteProgram(window->gl.program);
/* Unbind the context/surface before destroying the EGL surface. */
eglMakeCurrent(window->display->egl.dpy, EGL_NO_SURFACE, EGL_NO_SURFACE,
EGL_NO_CONTEXT);
weston_platform_destroy_egl_surface(window->display->egl.dpy,
window->egl_surface);
wl_egl_window_destroy(window->native);
}
static GLuint
create_shader(struct window *window, const char *source, GLenum shader_type)
{
@ -477,6 +512,7 @@ init_gl(struct window *window)
EGLBoolean ret;
EGLint attribs[5] = { EGL_NONE };
uint32_t num_attribs = 0;
GLint strategy = 0;
if (window->needs_buffer_geometry_update)
update_buffer_geometry(window);
@ -561,12 +597,21 @@ init_gl(struct window *window)
window->gl.pos = 0;
window->gl.col = 1;
window->gl.frag = frag;
window->gl.vert = vert;
window->gl.program = program;
glBindAttribLocation(program, window->gl.pos, "pos");
glBindAttribLocation(program, window->gl.col, "color");
window->gl.rotation_uniform =
glGetUniformLocation(program, "rotation");
glGetIntegerv(GL_RESET_NOTIFICATION_STRATEGY_EXT, &strategy);
if (strategy == GL_LOSE_CONTEXT_ON_RESET_EXT)
printf("Create GL robust context successfully!\n");
else
printf("Failed to create GL robust context successfully!\n");
}
static void
@ -902,7 +947,37 @@ destroy_surface(struct window *window)
}
static void
/*
 * Poll the GL reset-notification status after a redraw.
 *
 * Returns 0 when robustness is unavailable or no reset was observed, and
 * -EAGAIN when a GPU reset occurred and has completed, in which case the
 * caller must tear down and recreate the GL/EGL state before drawing again.
 */
static int
check_gpu_reset_status(struct display *display)
{
bool has_reset = false;
int i;
/* Robust context entry point was not resolved; nothing to check. */
if (!display->get_graphics_reset_status)
return 0;
/* Assume GPU reset should be finished within 5s. */
/* 100000 iterations x usleep(50us) ~= 5s of polling (scheduling
 * overhead makes the real bound somewhat longer). A non-GL_NO_ERROR
 * status means a reset is in progress; keep polling until it clears. */
for (i = 0; i < 100000; i++) {
unsigned status;
status = display->get_graphics_reset_status();
if (status == GL_NO_ERROR)
break;
has_reset = true;
usleep(50);
}
if (!has_reset)
return 0;
/* If GPU reset has not completed, nothing we can do. */
/* i == 100000 means the status never returned to GL_NO_ERROR within
 * the budget; deliberately abort rather than loop forever. */
assert(i < 100000);
return -EAGAIN;
}
static int
redraw(struct window *window)
{
struct display *display = window->display;
@ -995,6 +1070,8 @@ redraw(struct window *window)
draw_triangle(window, buffer_age);
window->frames++;
return check_gpu_reset_status(display);
}
static void
@ -1469,15 +1546,12 @@ main(int argc, char **argv)
goto out_no_xdg_shell;
}
init_egl(&display, &window);
create_surface(&window);
/* we already have wait_for_configure set after create_surface() */
while (running && ret != -1 && window.wait_for_configure)
ret = wl_display_dispatch(display.display);
init_gl(&window);
display.cursor_surface =
wl_compositor_create_surface(display.compositor);
@ -1486,13 +1560,25 @@ main(int argc, char **argv)
sigint.sa_flags = SA_RESETHAND;
sigaction(SIGINT, &sigint, NULL);
init_gl_egl:
init_egl(&display, &window);
init_gl(&window);
while (running && ret != -1) {
int draw;
ret = wl_display_dispatch_pending(display.display);
redraw(&window);
draw = redraw(&window);
if (draw) {
fini_gl(&window);
fini_egl(&display);
goto init_gl_egl;
}
}
fprintf(stderr, "simple-egl exiting\n");
fini_gl(&window);
destroy_surface(&window);
fini_egl(&display);

View file

@ -80,9 +80,13 @@ drm_backend_create_gl_renderer(struct drm_backend *b)
if (format[1])
options.formats_count = 2;
return weston_compositor_init_renderer(b->compositor,
WESTON_RENDERER_GL,
&options.base);
if (!b->gl_recovering)
return weston_compositor_init_renderer(b->compositor,
WESTON_RENDERER_GL,
&options.base);
else
return b->compositor->renderer->gl->display_create(b->compositor,
&options);
}
static int
@ -525,24 +529,29 @@ drm_output_init_egl(struct drm_output *output, struct drm_backend *b)
options.fb_size.width = mode->width;
options.fb_size.height = mode->height;
assert(output->gbm_surface == NULL);
create_gbm_surface(b->gbm, output);
if (!output->gbm_surface) {
weston_log("failed to create gbm surface\n");
return -1;
/* GBM surface and cursor do not need to be recreated when recovering */
if (!b->gl_recovering) {
assert(output->gbm_surface == NULL);
create_gbm_surface(b->gbm, output);
if (!output->gbm_surface) {
weston_log("failed to create gbm surface\n");
return -1;
}
drm_output_init_cursor_egl(output, b);
}
options.window_for_legacy = (EGLNativeWindowType) output->gbm_surface;
options.window_for_platform = output->gbm_surface;
if (renderer->gl->output_window_create(&output->base, &options) < 0) {
weston_log("failed to create gl renderer output state\n");
gbm_surface_destroy(output->gbm_surface);
output->gbm_surface = NULL;
if (!b->gl_recovering) {
gbm_surface_destroy(output->gbm_surface);
output->gbm_surface = NULL;
}
return -1;
}
drm_output_init_cursor_egl(output, b);
return 0;
}
@ -780,6 +789,82 @@ drm_output_fini_vulkan(struct drm_output *output)
drm_output_fini_cursor_vulkan(output);
}
/*
 * Recover the GL renderer after it reported an error from repaint.
 *
 * For WESTON_RENDERER_ERROR_LOST (GPU reset / lost context) this destroys
 * every renderer output and the renderer itself, then recreates both while
 * b->gl_recovering is set so the re-init paths skip GBM surface/cursor
 * recreation (see drm_output_init_egl()).
 *
 * Returns:
 *   -EAGAIN  recovery succeeded; the caller should retry the repaint.
 *   -EIO     recovery was attempted but failed.
 *   -EINVAL  the error code is not one we know how to handle.
 */
static int
drm_handle_gl_renderer_error(struct weston_compositor *compositor, int err)
{
const struct weston_renderer *renderer = compositor->renderer;
struct drm_backend *b = to_drm_backend(compositor);
int ret;
switch (err) {
case WESTON_RENDERER_ERROR_LOST: {
struct weston_output *output;
weston_log("Initiating GL renderer recovery...\n");
/* Flag recovery for both the backend and the renderer so the
 * destroy/create paths below take their recovery branches. */
b->gl_recovering = true;
renderer->gl->set_recovering(compositor, true);
/* 1, Destroy the renderer outputs */
wl_list_for_each(output, &compositor->output_list, link) {
struct drm_output *drm = to_drm_output(output);
struct drm_plane *scanout = drm->scanout_handle->plane;
/*
* When the output is destroyed, its associated
* EGLSurface will also be destroyed. Consequently, the
* GBM BOs backing this surface will be freed. Mesa will
* destroy the GBM BO user_data (fb) regardless of
* refcount, so ensure we destroy them here before
* invoking output_destroy.
*/
if (scanout && scanout->state_cur &&
scanout->state_cur->fb &&
scanout->state_cur->fb->type == BUFFER_GBM_SURFACE)
drm_plane_reset_state(scanout);
renderer->gl->output_destroy(output);
}
/* 2, Destroy the renderer */
renderer->destroy(compositor);
/* 3, Create the renderer again */
ret = drm_backend_create_gl_renderer(b);
if (ret < 0) {
weston_log("Failed to recreate gl renderer!\n");
goto fail;
}
/* 4, Create the renderer outputs again */
wl_list_for_each(output, &compositor->output_list, link) {
ret = drm_output_init_egl(to_drm_output(output), b);
if (ret < 0) {
weston_log("Failed to recreate gl output!\n");
goto fail;
}
}
b->gl_recovering = false;
drm_device_recovery_required(b->drm);
renderer->gl->set_recovering(compositor, false);
/* -EAGAIN tells the caller the renderer is usable again and the
 * failed repaint should be retried. */
ret = -EAGAIN;
weston_log("GL renderer recovery completed successfully\n");
break;
}
default:
weston_log("Unsupported gl renderer err: %d\n", err);
ret = -EINVAL;
}
return ret;
fail:
/* Clear the recovery flags even on failure so later attempts (or
 * shutdown paths) do not run with stale state. */
b->gl_recovering = false;
renderer->gl->set_recovering(compositor, false);
return -EIO;
}
struct drm_fb *
drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage)
{
@ -787,9 +872,19 @@ drm_output_render_gl(struct drm_output_state *state, pixman_region32_t *damage)
struct drm_device *device = output->device;
struct gbm_bo *bo;
struct drm_fb *ret;
int err;
output->base.compositor->renderer->repaint_output(&output->base,
damage, NULL);
repaint:
err = output->base.compositor->renderer->repaint_output(&output->base,
damage, NULL);
if (err != WESTON_RENDERER_ERROR_NONE) {
err = drm_handle_gl_renderer_error(output->base.compositor, err);
if (err == -EAGAIN)
goto repaint;
return NULL;
}
bo = gbm_surface_lock_front_buffer(output->gbm_surface);
if (!bo) {

View file

@ -285,6 +285,9 @@ struct drm_backend {
bool timer_armed;
} perf_page_flips_stats;
/* True, if GL renderer is recovering from GPU reset */
bool gl_recovering;
/* True if we need a workaround for some very old kernels */
bool stale_timestamp_workaround;
};

View file

@ -82,10 +82,15 @@ enum weston_renderer_border_side {
WESTON_RENDERER_BORDER_BOTTOM = 3,
};
enum weston_renderer_error {
WESTON_RENDERER_ERROR_NONE = 0,
WESTON_RENDERER_ERROR_LOST = 1,
};
struct weston_renderer {
void (*repaint_output)(struct weston_output *output,
pixman_region32_t *output_damage,
weston_renderbuffer_t renderbuffer);
enum weston_renderer_error (*repaint_output)(struct weston_output *output,
pixman_region32_t *output_damage,
weston_renderbuffer_t renderbuffer);
/** See weston_renderer_resize_output()
*

View file

@ -36,11 +36,12 @@ struct noop_renderer {
unsigned char seed; /* see comment in attach() */
};
static void
static enum weston_renderer_error
noop_renderer_repaint_output(struct weston_output *output,
pixman_region32_t *output_damage,
weston_renderbuffer_t renderbuffer)
{
return WESTON_RENDERER_ERROR_NONE;
}
static bool

View file

@ -598,7 +598,7 @@ static void
pixman_renderer_output_set_buffer(struct weston_output *output,
pixman_image_t *buffer);
static void
static enum weston_renderer_error
pixman_renderer_repaint_output(struct weston_output *output,
pixman_region32_t *output_damage,
weston_renderbuffer_t renderbuffer)
@ -622,7 +622,7 @@ pixman_renderer_repaint_output(struct weston_output *output,
output->color_outcome->from_blend_to_output == NULL);
if (!po->hw_buffer)
return;
goto out;
if (po->shadow_image) {
repaint_surfaces(output, output_damage);
@ -641,6 +641,9 @@ pixman_renderer_repaint_output(struct weston_output *output,
wl_signal_emit(&output->frame_signal, output_damage);
/* Actual flip should be done by caller */
out:
return WESTON_RENDERER_ERROR_NONE;
}
static void

View file

@ -73,6 +73,7 @@ static const struct gl_extension_table device_table[] = {
static const struct gl_extension_table display_table[] = {
EXT("EGL_ANDROID_native_fence_sync", EXTENSION_ANDROID_NATIVE_FENCE_SYNC),
EXT("EGL_EXT_buffer_age", EXTENSION_EXT_BUFFER_AGE),
EXT("EGL_EXT_create_context_robustness", EXTENSION_EXT_CREATE_CONTEXT_ROBUSTNESS),
EXT("EGL_EXT_image_dma_buf_import", EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT),
EXT("EGL_EXT_image_dma_buf_import_modifiers", EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS),
EXT("EGL_EXT_pixel_format_float", EXTENSION_EXT_PIXEL_FORMAT_FLOAT),

View file

@ -106,21 +106,22 @@ enum egl_device_extension_flag {
enum egl_display_extension_flag {
EXTENSION_ANDROID_NATIVE_FENCE_SYNC = 1ull << 0,
EXTENSION_EXT_BUFFER_AGE = 1ull << 1,
EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT = 1ull << 2,
EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS = 1ull << 3,
EXTENSION_EXT_PIXEL_FORMAT_FLOAT = 1ull << 4,
EXTENSION_EXT_SWAP_BUFFERS_WITH_DAMAGE = 1ull << 5,
EXTENSION_IMG_CONTEXT_PRIORITY = 1ull << 6,
EXTENSION_KHR_FENCE_SYNC = 1ull << 7,
EXTENSION_KHR_GET_ALL_PROC_ADDRESSES = 1ull << 8,
EXTENSION_KHR_IMAGE_BASE = 1ull << 9,
EXTENSION_KHR_NO_CONFIG_CONTEXT = 1ull << 10,
EXTENSION_KHR_PARTIAL_UPDATE = 1ull << 11,
EXTENSION_KHR_SURFACELESS_CONTEXT = 1ull << 12,
EXTENSION_KHR_SWAP_BUFFERS_WITH_DAMAGE = 1ull << 13,
EXTENSION_KHR_WAIT_SYNC = 1ull << 14,
EXTENSION_MESA_CONFIGLESS_CONTEXT = 1ull << 15,
EXTENSION_WL_BIND_WAYLAND_DISPLAY = 1ull << 16,
EXTENSION_EXT_CREATE_CONTEXT_ROBUSTNESS = 1ull << 2,
EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT = 1ull << 3,
EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS = 1ull << 4,
EXTENSION_EXT_PIXEL_FORMAT_FLOAT = 1ull << 5,
EXTENSION_EXT_SWAP_BUFFERS_WITH_DAMAGE = 1ull << 6,
EXTENSION_IMG_CONTEXT_PRIORITY = 1ull << 7,
EXTENSION_KHR_FENCE_SYNC = 1ull << 8,
EXTENSION_KHR_GET_ALL_PROC_ADDRESSES = 1ull << 9,
EXTENSION_KHR_IMAGE_BASE = 1ull << 10,
EXTENSION_KHR_NO_CONFIG_CONTEXT = 1ull << 11,
EXTENSION_KHR_PARTIAL_UPDATE = 1ull << 12,
EXTENSION_KHR_SURFACELESS_CONTEXT = 1ull << 13,
EXTENSION_KHR_SWAP_BUFFERS_WITH_DAMAGE = 1ull << 14,
EXTENSION_KHR_WAIT_SYNC = 1ull << 15,
EXTENSION_MESA_CONFIGLESS_CONTEXT = 1ull << 16,
EXTENSION_WL_BIND_WAYLAND_DISPLAY = 1ull << 17,
};
/* Keep in sync with gl-renderer.c. */
@ -134,27 +135,28 @@ enum gl_extension_flag {
EXTENSION_EXT_EGL_IMAGE_STORAGE = 1ull << 8,
EXTENSION_EXT_MAP_BUFFER_RANGE = 1ull << 9,
EXTENSION_EXT_READ_FORMAT_BGRA = 1ull << 10,
EXTENSION_EXT_SHADER_FB_FETCH_NC = 1ull << 11,
EXTENSION_EXT_TEXTURE_FORMAT_BGRA8888 = 1ull << 12,
EXTENSION_EXT_TEXTURE_NORM16 = 1ull << 13,
EXTENSION_EXT_TEXTURE_RG = 1ull << 14,
EXTENSION_EXT_TEXTURE_SRGB_R8 = 1ull << 15,
EXTENSION_EXT_TEXTURE_SRGB_RG8 = 1ull << 16,
EXTENSION_EXT_TEXTURE_STORAGE = 1ull << 17,
EXTENSION_EXT_TEXTURE_TYPE_2_10_10_10_REV = 1ull << 18,
EXTENSION_EXT_UNPACK_SUBIMAGE = 1ull << 19,
EXTENSION_NV_PACKED_FLOAT = 1ull << 20,
EXTENSION_NV_PIXEL_BUFFER_OBJECT = 1ull << 21,
EXTENSION_OES_EGL_IMAGE = 1ull << 22,
EXTENSION_OES_EGL_IMAGE_EXTERNAL = 1ull << 23,
EXTENSION_OES_MAPBUFFER = 1ull << 24,
EXTENSION_OES_REQUIRED_INTERNALFORMAT = 1ull << 25,
EXTENSION_OES_RGB8_RGBA8 = 1ull << 26,
EXTENSION_OES_TEXTURE_3D = 1ull << 27,
EXTENSION_OES_TEXTURE_FLOAT = 1ull << 28,
EXTENSION_OES_TEXTURE_FLOAT_LINEAR = 1ull << 29,
EXTENSION_OES_TEXTURE_HALF_FLOAT = 1ull << 30,
EXTENSION_QCOM_RENDER_SRGB_R8_RG8 = 1ull << 31,
EXTENSION_EXT_ROBUSTNESS = 1ull << 11,
EXTENSION_EXT_SHADER_FB_FETCH_NC = 1ull << 12,
EXTENSION_EXT_TEXTURE_FORMAT_BGRA8888 = 1ull << 13,
EXTENSION_EXT_TEXTURE_NORM16 = 1ull << 14,
EXTENSION_EXT_TEXTURE_RG = 1ull << 15,
EXTENSION_EXT_TEXTURE_SRGB_R8 = 1ull << 16,
EXTENSION_EXT_TEXTURE_SRGB_RG8 = 1ull << 17,
EXTENSION_EXT_TEXTURE_STORAGE = 1ull << 18,
EXTENSION_EXT_TEXTURE_TYPE_2_10_10_10_REV = 1ull << 19,
EXTENSION_EXT_UNPACK_SUBIMAGE = 1ull << 20,
EXTENSION_NV_PACKED_FLOAT = 1ull << 21,
EXTENSION_NV_PIXEL_BUFFER_OBJECT = 1ull << 22,
EXTENSION_OES_EGL_IMAGE = 1ull << 23,
EXTENSION_OES_EGL_IMAGE_EXTERNAL = 1ull << 24,
EXTENSION_OES_MAPBUFFER = 1ull << 25,
EXTENSION_OES_REQUIRED_INTERNALFORMAT = 1ull << 26,
EXTENSION_OES_RGB8_RGBA8 = 1ull << 27,
EXTENSION_OES_TEXTURE_3D = 1ull << 28,
EXTENSION_OES_TEXTURE_FLOAT = 1ull << 29,
EXTENSION_OES_TEXTURE_FLOAT_LINEAR = 1ull << 30,
EXTENSION_OES_TEXTURE_HALF_FLOAT = 1ull << 31,
EXTENSION_QCOM_RENDER_SRGB_R8_RG8 = 1ull << 32,
};
enum gl_feature_flag {
@ -208,6 +210,9 @@ enum gl_feature_flag {
/* GL renderer can do blending explicitly in the fragment shader,
* using framebuffer fetch and store curves. */
FEATURE_SHADER_BLENDING = 1ull << 10,
/* The GL renderer can recover from a GPU reset. */
FEATURE_GRAPHICS_RESET_RECOVERY = 1ull << 11,
};
/* Keep the following in sync with vertex.glsl. */
@ -521,6 +526,9 @@ struct gl_renderer {
PFNGLTEXSTORAGE2DEXTPROC tex_storage_2d;
PFNGLTEXSTORAGE3DEXTPROC tex_storage_3d;
/* GL_EXT_robustness */
PFNGLGETGRAPHICSRESETSTATUSEXTPROC get_graphics_reset_status;
/* GL_EXT_shader_framebuffer_fetch_non_coherent */
PFNGLFRAMEBUFFERFETCHBARRIEREXTPROC framebuffer_fetch_barrier;
@ -532,7 +540,10 @@ struct gl_renderer {
int max_combined_texture_image_units;
bool blend_state;
bool recovering;
struct wl_list dma_bufs;
struct wl_list shm_bufs;
struct wl_list dmabuf_images;
struct wl_list dmabuf_formats;
struct wl_list pending_capture_list;

View file

@ -303,6 +303,11 @@ struct gl_buffer_state {
int num_textures;
struct wl_listener destroy_listener;
struct wl_list link; /* link to shm_bufs of gl renderer */
void *saved_gs; /* gl_surface_state, saved for gb recreation */
struct linux_dmabuf_buffer *saved_dmabuf;
struct weston_buffer *saved_buffer;
};
struct gl_surface_state {
@ -343,6 +348,7 @@ static const struct gl_extension_table extension_table[] = {
EXT("GL_EXT_EGL_image_storage", EXTENSION_EXT_EGL_IMAGE_STORAGE),
EXT("GL_EXT_map_buffer_range", EXTENSION_EXT_MAP_BUFFER_RANGE),
EXT("GL_EXT_read_format_bgra", EXTENSION_EXT_READ_FORMAT_BGRA),
EXT("GL_EXT_robustness", EXTENSION_EXT_ROBUSTNESS),
EXT("GL_EXT_shader_framebuffer_fetch_non_coherent", EXTENSION_EXT_SHADER_FB_FETCH_NC),
EXT("GL_EXT_texture_format_BGRA8888", EXTENSION_EXT_TEXTURE_FORMAT_BGRA8888),
EXT("GL_EXT_texture_norm16", EXTENSION_EXT_TEXTURE_NORM16),
@ -3130,6 +3136,37 @@ blit_shadow_to_output(struct weston_output *output,
pixman_region32_fini(&translated_damage);
}
/*
 * Check whether the GL context suffered a GPU reset, polling until the
 * reset completes.
 *
 * Returns 0 when reset recovery is unsupported or no reset was observed,
 * and -EAGAIN once a reset has occurred and finished — the repaint caller
 * then reports WESTON_RENDERER_ERROR_LOST so the backend can rebuild the
 * renderer.
 */
static int
gl_renderer_check_reset(struct gl_renderer *gr)
{
bool has_reset = false;
int i;
/* Both the feature flag and the GL_EXT_robustness entry point must be
 * present for reset detection to work. */
if (!gl_features_has(gr, FEATURE_GRAPHICS_RESET_RECOVERY) ||
!gr->get_graphics_reset_status)
return 0;
/* Assume GPU reset should be finished within 5s. */
/* 100000 iterations x usleep(50us) ~= 5s of polling budget. */
for (i = 0; i < 100000; i++) {
GLenum status;
status = gr->get_graphics_reset_status();
if (status == GL_NO_ERROR)
break;
has_reset = true;
usleep(50);
}
if (!has_reset)
return 0;
/* If GPU reset has not completed, nothing we can do. */
/* Reaching i == 100000 means the reset never cleared; abort rather
 * than repaint with a dead context. */
assert(i < 100000);
return -EAGAIN;
}
/* NOTE: We now allow falling back to ARGB gl visuals when XRGB is
* unavailable, so we're assuming the background has no transparency
* and that everything with a blend, like drop shadows, will have something
@ -3138,7 +3175,7 @@ blit_shadow_to_output(struct weston_output *output,
* Depending on the underlying hardware, violating that assumption could
* result in seeing through to another display plane.
*/
static void
static enum weston_renderer_error
gl_renderer_repaint_output(struct weston_output *output,
pixman_region32_t *output_damage,
weston_renderbuffer_t renderbuffer)
@ -3164,7 +3201,7 @@ gl_renderer_repaint_output(struct weston_output *output,
go->fb_size.height - go->area.height - go->area.y : go->area.y;
if (use_output(output) < 0)
return;
goto out;
rb = gl_renderer_update_renderbuffers(output, output_damage,
renderbuffer);
@ -3365,6 +3402,12 @@ gl_renderer_repaint_output(struct weston_output *output,
gr->wireframe_dirty = false;
gl_renderer_garbage_collect_programs(gr);
if (gl_renderer_check_reset(gr))
return WESTON_RENDERER_ERROR_LOST;
out:
return WESTON_RENDERER_ERROR_NONE;
}
static void
@ -3481,6 +3524,7 @@ destroy_buffer_state(struct gl_buffer_state *gb)
pixman_region32_fini(&gb->texture_damage);
wl_list_remove(&gb->destroy_listener.link);
wl_list_remove(&gb->link);
free(gb);
}
@ -3609,7 +3653,7 @@ gl_renderer_attach_shm(struct weston_surface *es, struct weston_buffer *buffer)
* than allocating a new one. */
assert(!gs->buffer ||
(old_buffer && old_buffer->type == WESTON_BUFFER_SHM));
if (gs->buffer &&
if (gs->buffer && !gr->recovering &&
buffer->width == old_buffer->width &&
buffer->height == old_buffer->height &&
buffer->pixel_format == old_buffer->pixel_format) {
@ -3624,8 +3668,10 @@ gl_renderer_attach_shm(struct weston_surface *es, struct weston_buffer *buffer)
gb = xzalloc(sizeof(*gb));
gb->gr = gr;
gb->saved_gs = gs;
wl_list_init(&gb->destroy_listener.link);
wl_list_init(&gb->link);
pixman_region32_init(&gb->texture_damage);
gb->pitch = pitch;
@ -3649,6 +3695,10 @@ gl_renderer_attach_shm(struct weston_surface *es, struct weston_buffer *buffer)
texture_format[i].swizzles.array,
false);
}
/* For recovery, we will manually add the new gb to shm_bufs. */
if (!gr->recovering)
wl_list_insert(&gr->shm_bufs, &gb->link);
}
static bool
@ -3674,6 +3724,7 @@ gl_renderer_fill_buffer_info(struct weston_compositor *ec,
gb->gr = gr;
pixman_region32_init(&gb->texture_damage);
wl_list_init(&gb->link);
buffer->legacy_buffer = (struct wl_buffer *)buffer->resource;
ret &= gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
@ -4135,6 +4186,7 @@ import_dmabuf(struct gl_renderer *gr,
gb->gr = gr;
pixman_region32_init(&gb->texture_damage);
wl_list_init(&gb->destroy_listener.link);
wl_list_init(&gb->link);
quirks = &gr->compositor->test_data.test_quirks;
if (quirks->gl_force_import_yuv_fallback &&
@ -4305,6 +4357,8 @@ gl_renderer_import_dmabuf(struct weston_compositor *ec,
if (!gb)
return false;
wl_list_insert(&gr->dma_bufs, &gb->link);
gb->saved_dmabuf = dmabuf;
linux_dmabuf_buffer_set_user_data(dmabuf, gb,
gl_renderer_destroy_dmabuf);
@ -4321,6 +4375,8 @@ gl_renderer_attach_buffer(struct weston_surface *surface,
assert(buffer->renderer_private);
gb = buffer->renderer_private;
gs->buffer = gb;
gb->saved_gs = gs;
gb->saved_buffer = buffer;
}
static const struct weston_drm_format_array *
@ -4414,6 +4470,13 @@ gl_renderer_attach(struct weston_paint_node *pnode)
gs->buffer = NULL;
}
} else {
if (gs->buffer) {
/*
* Non-SHM buffer state is buffer-owned. Clear the saved
* surface link before dropping gs->buffer.
*/
gs->buffer->saved_gs = NULL;
}
gs->buffer = NULL;
}
@ -4472,6 +4535,9 @@ gl_renderer_buffer_init(struct weston_compositor *etc,
assert(gb);
linux_dmabuf_buffer_set_user_data(buffer->dmabuf, NULL, NULL);
buffer->renderer_private = gb;
gb->saved_gs = NULL;
gb->saved_dmabuf = NULL;
gb->saved_buffer = buffer;
gb->destroy_listener.notify = handle_buffer_destroy;
wl_signal_add(&buffer->destroy_signal, &gb->destroy_listener);
}
@ -4599,6 +4665,9 @@ surface_state_destroy(struct gl_surface_state *gs, struct gl_renderer *gr)
if (gs->buffer && gs->buffer_ref.buffer->type == WESTON_BUFFER_SHM)
destroy_buffer_state(gs->buffer);
else if (gs->buffer)
/* Clear the saved surface link for buffer-owned state */
gs->buffer->saved_gs = NULL;
gs->buffer = NULL;
weston_buffer_reference(&gs->buffer_ref, NULL,
@ -5163,23 +5232,27 @@ gl_renderer_allocator_create(struct gl_renderer *gr,
}
static void
gl_renderer_destroy(struct weston_compositor *ec)
gl_renderer_destroy_context(struct weston_compositor *ec)
{
struct gl_renderer *gr = get_renderer(ec);
struct dmabuf_format *format, *next_format;
struct gl_capture_task *gl_task, *tmp;
struct gl_buffer_state *gb;
wl_signal_emit(&gr->destroy_signal, gr);
if (gr->display_bound)
if (gr->display_bound) {
gr->unbind_display(gr->egl_display, ec->wl_display);
gr->display_bound = false;
}
wl_list_for_each_safe(gl_task, tmp, &gr->pending_capture_list, link)
destroy_capture_task(gl_task);
gl_renderer_shader_list_destroy(gr);
if (gr->fallback_shader)
if (gr->fallback_shader) {
gl_shader_destroy(gr, gr->fallback_shader);
gr->fallback_shader = NULL;
}
gr->current_shader = NULL;
if (gr->wireframe_tex)
gl_texture_fini(&gr->wireframe_tex);
@ -5189,16 +5262,79 @@ gl_renderer_destroy(struct weston_compositor *ec)
EGL_NO_SURFACE, EGL_NO_SURFACE,
EGL_NO_CONTEXT);
if (gr->recovering && gr->drm_device) {
weston_dmabuf_feedback_destroy(ec->default_dmabuf_feedback);
ec->default_dmabuf_feedback = NULL;
weston_dmabuf_feedback_format_table_destroy(ec->dmabuf_feedback_format_table);
ec->dmabuf_feedback_format_table = NULL;
}
/*
* Destroy the GL textures of the SHM buffer as they are invalid in the
* new context. gl_renderer_attach_shm() will be called after recovery
* to recreate all the SHM buffer states.
*/
if (gr->recovering) {
wl_list_for_each(gb, &gr->shm_bufs, link) {
glDeleteTextures(gb->num_textures, gb->textures);
gb->num_textures = 0;
}
/* Destroy all the GL/EGL resources for dma-bufs. */
wl_list_for_each(gb, &gr->dma_bufs, link) {
struct gl_color_egl_image *color_egl_image;
int i;
glDeleteTextures(gb->num_textures, gb->textures);
gb->num_textures = 0;
for (i = 0; i < gb->num_images; i++) {
gr->destroy_image(gr->egl_display, gb->images[i]);
gb->images[i] = NULL;
}
gb->num_images = 0;
wl_array_for_each(color_egl_image, &gb->reimported_egl_images)
gr->destroy_image(gr->egl_display, color_egl_image->image);
wl_array_release(&gb->reimported_egl_images);
wl_array_init(&gb->reimported_egl_images);
gb->active_reimported_egl_image = NULL;
}
}
wl_list_for_each_safe(format, next_format, &gr->dmabuf_formats, link)
dmabuf_format_destroy(format);
weston_drm_format_array_fini(&gr->supported_dmabuf_formats);
free(gr->supported_rendering_formats);
gr->supported_rendering_formats = NULL;
gr->supported_rendering_formats_count = 0;
gl_renderer_allocator_destroy(gr->allocator);
gr->base.dmabuf_alloc = NULL;
gr->base.import_dmabuf = NULL;
gr->base.get_supported_dmabuf_formats = NULL;
gr->base.create_renderbuffer_dmabuf = NULL;
eglTerminate(gr->egl_display);
eglReleaseThread();
}
static void
gl_renderer_destroy(struct weston_compositor *ec)
{
struct gl_renderer *gr = get_renderer(ec);
if (gr->recovering) {
gl_renderer_destroy_context(ec);
return;
}
wl_signal_emit(&gr->destroy_signal, gr);
gl_renderer_destroy_context(ec);
gl_renderer_allocator_destroy(gr->allocator);
gr->allocator = NULL;
wl_array_release(&gr->position_stream);
wl_array_release(&gr->barycentric_stream);
@ -5247,57 +5383,200 @@ create_default_dmabuf_feedback(struct weston_compositor *ec,
}
static int
gl_renderer_display_create(struct weston_compositor *ec,
const struct gl_renderer_display_options *options)
gl_renderer_recover_resources(struct weston_compositor *ec)
{
struct gl_renderer *gr;
const struct pixel_format_info *info;
int ret, nformats, i, j;
bool supported;
struct gl_renderer *gr = get_renderer(ec);
struct gl_buffer_state *gb, *tmp;
struct weston_output *output;
struct wl_list tmp_gb_list;
gr = zalloc(sizeof *gr);
if (gr == NULL)
return -1;
wl_list_init(&tmp_gb_list);
wl_list_for_each_safe(gb, tmp, &gr->shm_bufs, link) {
struct gl_surface_state *gs = gb->saved_gs;
struct weston_surface *es = gs->surface;
struct gl_buffer_state *new_gb;
struct weston_buffer *buffer;
assert(es);
/*
* This gb is no longer the surface's current SHM buffer state.
* It has lost its owner link through gs->buffer, so the normal
* surface lifecycle will not destroy it for us anymore. Treat it
* as an orphaned stale entry and clean it up here.
*/
if (gs->buffer != gb) {
destroy_buffer_state(gb);
continue;
}
/* Destroy the original gb and recreate it. */
buffer = es->buffer_ref.buffer;
if (!buffer || buffer->type != WESTON_BUFFER_SHM) {
/*
* The surface no longer points at a live SHM buffer, so
* this gb should not participate in GPU recovery.
* However, gs->buffer still owns it, and the normal
* attach/surface-destroy paths will destroy it later.
* Remove it from shm_bufs so recovery ignores it, but do
* not change its original lifetime.
*/
wl_list_remove(&gb->link);
wl_list_init(&gb->link);
continue;
}
gl_renderer_attach_shm(es, buffer);
new_gb = gs->buffer;
assert(new_gb);
wl_list_insert(&tmp_gb_list, &new_gb->link);
}
assert(wl_list_empty(&gr->shm_bufs));
/* Manually add the new gbs to shm_bufs. */
wl_list_insert_list(&gr->shm_bufs, &tmp_gb_list);
/* Restore dma_bufs. */
wl_list_init(&tmp_gb_list);
wl_list_for_each_safe(gb, tmp, &gr->dma_bufs, link) {
#define BUFFER_RECREATE() do { \
destroy_buffer_state(gb); \
new_gb = import_dmabuf(gr, dmabuf); \
if (!new_gb) \
return -EINVAL; \
} while (0)
#define BUFFER_REATTACH() do { \
linux_dmabuf_buffer_set_user_data(dmabuf, NULL, NULL); \
buffer->renderer_private = new_gb; \
new_gb->destroy_listener.notify = handle_buffer_destroy; \
wl_signal_add(&buffer->destroy_signal, &new_gb->destroy_listener); \
} while (0)
struct weston_buffer *buffer = gb->saved_buffer;
struct linux_dmabuf_buffer *dmabuf = gb->saved_dmabuf;
struct gl_surface_state *gs = gb->saved_gs;
struct gl_buffer_state *new_gb;
bool surface_bound = false;
/*
* dma-buf ownership can live in three places:
* 1, Imported-only: linux_dmabuf_buffer userdata still owns gb.
* 2, Buffer-owned: weston_buffer::renderer_private owns gb.
* 3, Surface-bound: a live gl_surface_state still points at gb.
*/
if (dmabuf) {
/* 1, Imported-only state */
void *user_data =
linux_dmabuf_buffer_get_user_data(dmabuf);
/* Stale imported state, no longer owned by the dmabuf. */
if (user_data != gb) {
destroy_buffer_state(gb);
continue;
}
linux_dmabuf_buffer_set_user_data(dmabuf, NULL, NULL);
BUFFER_RECREATE();
linux_dmabuf_buffer_set_user_data(dmabuf, new_gb,
gl_renderer_destroy_dmabuf);
new_gb->saved_dmabuf = dmabuf;
} else {
/* 2, Buffer-owned state */
/* Stale buffer-owned state. */
if (!buffer || buffer->renderer_private != gb) {
destroy_buffer_state(gb);
continue;
}
dmabuf = buffer->dmabuf;
if (!dmabuf) {
/* The buffer no longer has a live dma-buf backing. */
destroy_buffer_state(gb);
continue;
}
if (gs &&
gs->surface &&
gs->surface->renderer_state == gs &&
gs->buffer == gb &&
gs->buffer_ref.buffer == buffer)
surface_bound = true;
BUFFER_RECREATE();
BUFFER_REATTACH();
new_gb->saved_gs = NULL;
new_gb->saved_dmabuf = NULL;
new_gb->saved_buffer = buffer;
/* 3, Surface-bound state */
if (surface_bound)
gl_renderer_attach_buffer(gs->surface, buffer);
}
wl_list_insert(&tmp_gb_list, &new_gb->link);
#undef BUFFER_RECREATE
#undef BUFFER_REATTACH
}
/* Manually add the new gbs to dma_bufs. */
wl_list_insert_list(&gr->dma_bufs, &tmp_gb_list);
/*
* Since all the buffer states have been recreated, mark them all as
* dirty to avoid garbage caused by original damage information.
*/
wl_list_for_each(output, &ec->output_list, link) {
struct weston_paint_node *pnode;
wl_list_for_each(pnode, &output->paint_node_z_order_list,
z_order_link) {
struct weston_surface *surface = pnode->surface;
struct weston_buffer *buffer = surface->buffer_ref.buffer;
if (!buffer || buffer->type != WESTON_BUFFER_SHM)
continue;
pixman_region32_t region;
pixman_region32_init_rect(&region, 0, 0,
buffer->width,
buffer->height);
pixman_region32_copy(&surface->damage, &region);
ec->renderer->flush_damage(pnode);
pixman_region32_fini(&region);
}
}
return 0;
}
static int
gl_renderer_init_context(struct weston_compositor *ec,
const struct gl_renderer_display_options *options)
{
struct gl_renderer *gr = get_renderer(ec);
int ret;
gr->compositor = ec;
wl_list_init(&gr->shader_list);
gr->platform = options->egl_platform;
gr->extensions_scope = weston_compositor_add_log_scope(ec, "gl-renderer-ext",
"Print GL-renderer extensions\n", NULL, NULL, gr);
gr->paint_node_scope = weston_compositor_add_log_scope(ec, "gl-renderer-paint-nodes",
"Print GL-renderer debug information about paint nodes\n", NULL, NULL, gr);
gr->shader_scope = gl_shader_scope_create(gr);
if (gl_renderer_setup_egl_client_extensions(gr) < 0)
goto fail;
gr->base.repaint_output = gl_renderer_repaint_output;
gr->base.resize_output = gl_renderer_resize_output;
gr->base.create_renderbuffer = gl_renderer_create_renderbuffer;
gr->base.destroy_renderbuffer = gl_renderer_destroy_renderbuffer;
gr->base.flush_damage = gl_renderer_flush_damage;
gr->base.attach = gl_renderer_attach;
gr->base.destroy = gl_renderer_destroy;
gr->base.surface_copy_content = gl_renderer_surface_copy_content;
gr->base.fill_buffer_info = gl_renderer_fill_buffer_info;
gr->base.buffer_init = gl_renderer_buffer_init;
gr->base.output_set_border = gl_renderer_output_set_border;
gr->base.type = WESTON_RENDERER_GL;
gr->egl_device_extensions = 0;
gr->egl_display_extensions = 0;
gr->gl_extensions = 0;
gr->features = 0;
gr->get_graphics_reset_status = NULL;
gr->supported_rendering_formats = NULL;
gr->supported_rendering_formats_count = 0;
gr->base.dmabuf_alloc = NULL;
gr->base.import_dmabuf = NULL;
gr->base.get_supported_dmabuf_formats = NULL;
gr->base.create_renderbuffer_dmabuf = NULL;
if (gl_renderer_setup_egl_display(gr, options->egl_native_display) < 0)
goto fail;
gr->allocator = gl_renderer_allocator_create(gr, options);
if (!gr->allocator)
weston_log("failed to initialize allocator\n");
weston_drm_format_array_init(&gr->supported_dmabuf_formats);
log_egl_info(gr, gr->egl_display);
ec->renderer = &gr->base;
if (gl_renderer_setup_egl_extensions(ec) < 0)
goto fail_with_error;
@ -5362,7 +5641,92 @@ gl_renderer_display_create(struct weston_compositor *ec,
}
wl_list_init(&gr->dmabuf_formats);
if (gr->recovering && gl_renderer_recover_resources(ec))
goto fail_with_error;
return 0;
fail_with_error:
gl_renderer_print_egl_error_state();
if (gr->drm_device) {
weston_dmabuf_feedback_destroy(ec->default_dmabuf_feedback);
ec->default_dmabuf_feedback = NULL;
}
fail_feedback:
if (gr->drm_device) {
weston_dmabuf_feedback_format_table_destroy(ec->dmabuf_feedback_format_table);
ec->dmabuf_feedback_format_table = NULL;
}
fail_terminate:
free(gr->supported_rendering_formats);
gr->supported_rendering_formats = NULL;
gr->supported_rendering_formats_count = 0;
weston_drm_format_array_fini(&gr->supported_dmabuf_formats);
gr->base.dmabuf_alloc = NULL;
gr->base.import_dmabuf = NULL;
gr->base.get_supported_dmabuf_formats = NULL;
gr->base.create_renderbuffer_dmabuf = NULL;
eglTerminate(gr->egl_display);
fail:
return -1;
}
static int
gl_renderer_display_create(struct weston_compositor *ec,
const struct gl_renderer_display_options *options)
{
struct gl_renderer *gr = get_renderer(ec);
const struct pixel_format_info *info;
int nformats, i, j;
bool supported;
if (gr && gr->recovering)
return gl_renderer_init_context(ec, options);
gr = zalloc(sizeof *gr);
if (gr == NULL)
return -1;
gr->compositor = ec;
wl_list_init(&gr->shader_list);
gr->platform = options->egl_platform;
gr->extensions_scope = weston_compositor_add_log_scope(ec, "gl-renderer-ext",
"Print GL-renderer extensions\n", NULL, NULL, gr);
gr->paint_node_scope = weston_compositor_add_log_scope(ec, "gl-renderer-paint-nodes",
"Print GL-renderer debug information about paint nodes\n", NULL, NULL, gr);
gr->shader_scope = gl_shader_scope_create(gr);
if (gl_renderer_setup_egl_client_extensions(gr) < 0)
goto fail;
gr->base.repaint_output = gl_renderer_repaint_output;
gr->base.resize_output = gl_renderer_resize_output;
gr->base.create_renderbuffer = gl_renderer_create_renderbuffer;
gr->base.destroy_renderbuffer = gl_renderer_destroy_renderbuffer;
gr->base.flush_damage = gl_renderer_flush_damage;
gr->base.attach = gl_renderer_attach;
gr->base.destroy = gl_renderer_destroy;
gr->base.surface_copy_content = gl_renderer_surface_copy_content;
gr->base.fill_buffer_info = gl_renderer_fill_buffer_info;
gr->base.buffer_init = gl_renderer_buffer_init;
gr->base.output_set_border = gl_renderer_output_set_border;
gr->base.type = WESTON_RENDERER_GL;
ec->renderer = &gr->base;
wl_signal_init(&gr->destroy_signal);
wl_list_init(&gr->dma_bufs);
wl_list_init(&gr->shm_bufs);
if (gl_renderer_init_context(ec, options) < 0)
goto fail_context;
gr->allocator = gl_renderer_allocator_create(gr, options);
if (!gr->allocator)
weston_log("failed to initialize allocator\n");
else
gr->base.dmabuf_alloc = gl_renderer_dmabuf_alloc;
/* Register supported wl_shm RGB formats. */
nformats = pixel_format_get_info_count();
@ -5416,21 +5780,7 @@ gl_renderer_display_create(struct weston_compositor *ec,
return 0;
fail_with_error:
gl_renderer_print_egl_error_state();
if (gr->drm_device) {
weston_dmabuf_feedback_destroy(ec->default_dmabuf_feedback);
ec->default_dmabuf_feedback = NULL;
}
fail_feedback:
if (gr->drm_device) {
weston_dmabuf_feedback_format_table_destroy(ec->dmabuf_feedback_format_table);
ec->dmabuf_feedback_format_table = NULL;
}
fail_terminate:
free(gr->supported_rendering_formats);
weston_drm_format_array_fini(&gr->supported_dmabuf_formats);
eglTerminate(gr->egl_display);
fail_context:
fail:
weston_log_scope_destroy(gr->shader_scope);
weston_log_scope_destroy(gr->extensions_scope);
@ -5510,6 +5860,12 @@ gl_renderer_setup(struct weston_compositor *ec)
context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_HIGH_IMG;
}
if (egl_display_has(gr, EXTENSION_EXT_CREATE_CONTEXT_ROBUSTNESS)) {
context_attribs[nattr++] =
EGL_CONTEXT_OPENGL_RESET_NOTIFICATION_STRATEGY_KHR;
context_attribs[nattr++] = EGL_LOSE_CONTEXT_ON_RESET_KHR;
}
assert(nattr < ARRAY_LENGTH(context_attribs));
context_attribs[nattr] = EGL_NONE;
@ -5571,6 +5927,18 @@ gl_renderer_setup(struct weston_compositor *ec)
return -1;
}
if (gl_extensions_has(gr, EXTENSION_EXT_ROBUSTNESS)) {
GET_PROC_ADDRESS(gr->get_graphics_reset_status,
"glGetGraphicsResetStatusEXT");
if (egl_display_has(gr, EXTENSION_EXT_CREATE_CONTEXT_ROBUSTNESS)) {
GLint strategy = 0;
glGetIntegerv(GL_RESET_NOTIFICATION_STRATEGY_EXT, &strategy);
if (strategy == GL_LOSE_CONTEXT_ON_RESET_EXT)
gr->features |= FEATURE_GRAPHICS_RESET_RECOVERY;
}
}
if (gl_extensions_has(gr, EXTENSION_OES_EGL_IMAGE)) {
GET_PROC_ADDRESS(gr->image_target_texture_2d,
"glEGLImageTargetTexture2DOES");
@ -5751,15 +6119,26 @@ gl_renderer_setup(struct weston_compositor *ec)
gl_extensions_has(gr, EXTENSION_OES_REQUIRED_INTERNALFORMAT)));
weston_log_continue(STAMP_SPACE "In-shader blending: %s\n",
yesno(gl_features_has(gr, FEATURE_SHADER_BLENDING)));
weston_log_continue(STAMP_SPACE "Graphics reset recovery: %s\n",
yesno(gl_features_has(gr, FEATURE_GRAPHICS_RESET_RECOVERY)));
return 0;
}
/* Mark the renderer as entering (or leaving) GPU-reset recovery.
 *
 * While the flag is set, a subsequent display_create call takes the
 * recovery path (re-creating only the EGL context) instead of building
 * a brand-new renderer instance. */
static void
gl_renderer_set_recovering(struct weston_compositor *ec, bool recovering)
{
	get_renderer(ec)->recovering = recovering;
}
/* Entry points exported to the backends (DRM, wayland, x11, headless).
 * Field order follows a typical display/output lifecycle; designated
 * initializers keep the table robust against interface reordering. */
WL_EXPORT struct gl_renderer_interface gl_renderer_interface = {
	.display_create = gl_renderer_display_create,
	.output_window_create = gl_renderer_output_window_create,
	.output_fbo_create = gl_renderer_output_fbo_create,
	.output_destroy = gl_renderer_output_destroy,
	.get_supported_rendering_formats = gl_renderer_get_supported_rendering_formats,
	/* GPU-reset recovery: lets a backend flag the renderer before
	 * re-running display_create after a graphics reset. */
	.set_recovering = gl_renderer_set_recovering,
	.create_fence_fd = gl_renderer_create_fence_fd,
};

View file

@ -184,6 +184,8 @@ struct gl_renderer_interface {
void (*output_destroy)(struct weston_output *output);
void (*set_recovering)(struct weston_compositor *ec, bool recovering);
/* Create fence sync FD to wait for GPU rendering.
*
* Return FD on success, -1 on failure or unsupported

View file

@ -2395,7 +2395,7 @@ vulkan_renderer_recreate_swapchain(struct weston_output *output,
vulkan_renderer_create_swapchain(output, fb_size);
}
static void
static enum weston_renderer_error
vulkan_renderer_repaint_output(struct weston_output *output,
pixman_region32_t *output_damage,
weston_renderbuffer_t renderbuffer)
@ -2665,6 +2665,8 @@ vulkan_renderer_repaint_output(struct weston_output *output,
pixman_region32_clear(&rb->damage);
vo->frame_index = (vo->frame_index + 1) % vo->num_frames;
return WESTON_RENDERER_ERROR_NONE;
}
static void