/*
 * Copyright © 2012 Intel Corporation
 * Copyright © 2015,2019,2021 Collabora, Ltd.
 * Copyright © 2016 NVIDIA Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "config.h"

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <GLES3/gl3.h>

#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <ctype.h>
#include <float.h>
#include <assert.h>
#include <linux/input.h>
#include <unistd.h>

#ifdef HAVE_GBM
#include <gbm.h>
#endif

#include "linux-sync-file.h"
#include "timeline.h"
#include "color.h"
|
2012-11-13 19:10:28 +01:00
|
|
|
#include "gl-renderer.h"
|
2019-09-23 14:54:49 +03:00
|
|
|
#include "gl-renderer-internal.h"
|
2013-09-13 10:01:21 +08:00
|
|
|
#include "vertex-clipping.h"
|
2014-06-12 16:49:29 +03:00
|
|
|
#include "linux-dmabuf.h"
|
2015-11-17 16:00:28 +08:00
|
|
|
#include "linux-dmabuf-unstable-v1-server-protocol.h"
|
#include "linux-explicit-synchronization.h"
|
2022-08-02 16:20:06 +03:00
|
|
|
#include "output-capture.h"
|
2018-09-03 19:44:59 +02:00
|
|
|
#include "pixel-formats.h"
|
2012-11-13 19:10:28 +01:00
|
|
|
|
2018-10-19 12:14:11 +03:00
|
|
|
#include "shared/fd-util.h"
|
2024-12-16 13:35:02 +02:00
|
|
|
#include "shared/helpers.h"
|
2016-07-04 15:34:18 +01:00
|
|
|
#include "shared/platform.h"
|
2022-05-17 15:46:03 +03:00
|
|
|
#include "shared/string-helpers.h"
|
2017-09-27 15:09:16 +03:00
|
|
|
#include "shared/timespec-util.h"
|
2021-02-04 17:39:45 +02:00
|
|
|
#include "shared/weston-drm-fourcc.h"
|
2019-04-05 16:09:45 +03:00
|
|
|
#include "shared/weston-egl-ext.h"
|
2022-08-02 16:20:06 +03:00
|
|
|
#include "shared/xalloc.h"
|
2012-09-05 21:54:15 -04:00
|
|
|
|
2013-03-05 17:30:30 +02:00
|
|
|
#define BUFFER_DAMAGE_COUNT 2
|
|
|
|
|
|
2024-05-06 19:17:24 +02:00
|
|
|
enum gl_debug_mode {
|
|
|
|
|
DEBUG_MODE_NONE = 0,
|
2024-05-07 15:14:28 +02:00
|
|
|
DEBUG_MODE_WIREFRAME,
|
2024-05-07 16:22:40 +02:00
|
|
|
DEBUG_MODE_BATCHES,
|
2024-05-07 16:45:48 +02:00
|
|
|
DEBUG_MODE_DAMAGE,
|
2024-05-18 17:02:16 +02:00
|
|
|
DEBUG_MODE_OPAQUE,
|
2024-05-06 19:17:24 +02:00
|
|
|
DEBUG_MODE_LAST,
|
|
|
|
|
};

enum gl_border_status {
	BORDER_STATUS_CLEAN = 0,
	BORDER_TOP_DIRTY = 1 << GL_RENDERER_BORDER_TOP,
	BORDER_LEFT_DIRTY = 1 << GL_RENDERER_BORDER_LEFT,
	BORDER_RIGHT_DIRTY = 1 << GL_RENDERER_BORDER_RIGHT,
	BORDER_BOTTOM_DIRTY = 1 << GL_RENDERER_BORDER_BOTTOM,
	BORDER_ALL_DIRTY = 0xf,
	BORDER_SIZE_CHANGED = 0x10
};

enum gl_renderbuffer_type {
	RENDERBUFFER_WINDOW = 0,
	RENDERBUFFER_BUFFER,
	RENDERBUFFER_DMABUF,
};

struct gl_border_image {
	GLuint tex;
	int32_t width, height;
	int32_t tex_width;
	void *data;
};

/* Track buffers allocated by the window system for window-based outputs. */
struct gl_renderbuffer_window {
	int age;
};

struct gl_renderbuffer_buffer {
	GLuint rb;
	void *data;
	int stride;
};

struct gl_renderbuffer_dmabuf {
	GLuint rb;
	struct gl_renderer *gr;
	struct linux_dmabuf_memory *memory;
	EGLImageKHR image;
};

struct gl_renderbuffer {
	enum gl_renderbuffer_type type;
	pixman_region32_t damage;
	enum gl_border_status border_damage;
	bool stale;

	GLuint fb;
	union {
		struct gl_renderbuffer_window window;
		struct gl_renderbuffer_buffer buffer;
		struct gl_renderbuffer_dmabuf dmabuf;
	};

	weston_renderbuffer_discarded_func discarded_cb;
	void *user_data;
	struct wl_list link;
};

struct gl_output_state {
	struct weston_size fb_size; /**< in pixels, including borders */
	struct weston_geometry area; /**< composited area in pixels inside fb */

	float y_flip;

	EGLSurface egl_surface;
	struct gl_border_image borders[4];
	enum gl_border_status border_status;

	struct weston_matrix output_matrix;

	EGLSyncKHR render_sync;
	GLuint render_query;

	/* struct timeline_render_point::link */
	struct wl_list timeline_render_point_list;

	const struct pixel_format_info *shadow_format;
	GLuint shadow_tex;
	GLuint shadow_fb;

	/* struct gl_renderbuffer::link */
	struct wl_list renderbuffer_list;
};

struct gl_renderer;

struct gl_capture_task {
	struct weston_capture_task *task;
	struct wl_event_source *source;
	struct gl_renderer *gr;
	struct wl_list link;
	GLuint pbo;
	int stride;
	int height;
	bool reverse;
	EGLSyncKHR sync;
	int fd;
};

#ifndef HAVE_GBM
struct gbm_device;
struct gbm_bo;
#endif

struct dmabuf_allocator {
	struct gbm_device *gbm_device;
	bool has_own_device;
};

struct gl_renderer_dmabuf_memory {
	struct linux_dmabuf_memory base;
	struct dmabuf_allocator *allocator;
	struct gbm_bo *bo;
};

struct dmabuf_format {
	uint32_t format;
	struct wl_list link;

	uint64_t *modifiers;
	unsigned *external_only;
	int num_modifiers;
};

/*
 * yuv_format_descriptor and yuv_plane_descriptor describe the translation
 * between YUV and RGB formats. When native YUV sampling is not available, we
 * bind each YUV plane as one or more RGB planes and convert in the shader.
 * This structure describes the mapping: output_planes is the number of
 * RGB images we need to bind, each of which has a yuv_plane_descriptor
 * describing the GL format and the input (YUV) plane index to bind.
 *
 * The specified shader_variant is then used to sample.
 */
struct yuv_plane_descriptor {
	uint32_t format;
	int plane_index;
};

struct yuv_format_descriptor {
	uint32_t format;
	int output_planes;
	enum gl_shader_texture_variant shader_variant;
	struct yuv_plane_descriptor plane[3];
};

struct gl_buffer_state {
	struct gl_renderer *gr;

	GLfloat color[4];

	bool needs_full_upload;
	pixman_region32_t texture_damage;

	/* Only needed between attach() and flush_damage() */
	int pitch; /* plane 0 pitch in pixels */
	GLenum gl_pixel_type;
	GLenum gl_format[3];
	enum gl_channel_order gl_channel_order;
	int offset[3]; /* per-plane pitch in bytes */

	EGLImageKHR images[3];
	int num_images;
	enum gl_shader_texture_variant shader_variant;

	GLuint textures[3];
	int num_textures;

	struct wl_listener destroy_listener;
};

struct gl_surface_state {
	struct weston_surface *surface;

	struct gl_buffer_state *buffer;

	/* These buffer references should really be attached to paint nodes
	 * rather than either buffer or surface state */
	struct weston_buffer_reference buffer_ref;
	struct weston_buffer_release_reference buffer_release_ref;

	/* Whether this surface was used in the current output repaint.
	   Used only in the context of a gl_renderer_repaint_output call. */
	bool used_in_output_repaint;

	struct wl_listener surface_destroy_listener;
	struct wl_listener renderer_destroy_listener;
};

struct timeline_render_point {
	struct wl_list link; /* gl_output_state::timeline_render_point_list */

	int fd;
	GLuint query;
	struct weston_output *output;
	struct wl_event_source *event_source;
};

/* Keep in sync with gl-renderer-internal.h. */
static const struct gl_extension_table extension_table[] = {
	EXT("GL_ANGLE_pack_reverse_row_order", EXTENSION_ANGLE_PACK_REVERSE_ROW_ORDER),
	EXT("GL_EXT_color_buffer_half_float", EXTENSION_EXT_COLOR_BUFFER_HALF_FLOAT),
	EXT("GL_EXT_disjoint_timer_query", EXTENSION_EXT_DISJOINT_TIMER_QUERY),
	EXT("GL_EXT_map_buffer_range", EXTENSION_EXT_MAP_BUFFER_RANGE),
	EXT("GL_EXT_read_format_bgra", EXTENSION_EXT_READ_FORMAT_BGRA),
	EXT("GL_EXT_texture_format_BGRA8888", EXTENSION_EXT_TEXTURE_FORMAT_BGRA8888),
	EXT("GL_EXT_texture_norm16", EXTENSION_EXT_TEXTURE_NORM16),
	EXT("GL_EXT_texture_rg", EXTENSION_EXT_TEXTURE_RG),
	EXT("GL_EXT_texture_storage", EXTENSION_EXT_TEXTURE_STORAGE),
	EXT("GL_EXT_texture_type_2_10_10_10_REV", EXTENSION_EXT_TEXTURE_TYPE_2_10_10_10_REV),
	EXT("GL_EXT_unpack_subimage", EXTENSION_EXT_UNPACK_SUBIMAGE),
	EXT("GL_NV_pixel_buffer_object", EXTENSION_NV_PIXEL_BUFFER_OBJECT),
	EXT("GL_OES_EGL_image", EXTENSION_OES_EGL_IMAGE),
	EXT("GL_OES_EGL_image_external", EXTENSION_OES_EGL_IMAGE_EXTERNAL),
	EXT("GL_OES_mapbuffer", EXTENSION_OES_MAPBUFFER),
	EXT("GL_OES_rgb8_rgba8", EXTENSION_OES_RGB8_RGBA8),
	EXT("GL_OES_texture_float_linear", EXTENSION_OES_TEXTURE_FLOAT_LINEAR),
	{ NULL, 0, 0 }
};

static inline const char *
dump_format(uint32_t format, char out[4])
{
#if BYTE_ORDER == BIG_ENDIAN
	format = bswap32(format);
#endif
	memcpy(out, &format, 4);
	return out;
}

static inline void
copy_uniform4f(float dst[4], const float src[4])
{
	memcpy(dst, src, 4 * sizeof(float));
}

static inline struct gl_output_state *
get_output_state(struct weston_output *output)
{
	return (struct gl_output_state *)output->renderer_state;
}

static int
gl_renderer_create_surface(struct weston_surface *surface);

static inline struct gl_surface_state *
get_surface_state(struct weston_surface *surface)
{
	if (!surface->renderer_state)
		gl_renderer_create_surface(surface);

	return (struct gl_surface_state *)surface->renderer_state;
}

static bool
shadow_exists(const struct gl_output_state *go)
{
	return go->shadow_fb != 0;
}

static bool
is_y_flipped(const struct gl_output_state *go)
{
	return go->y_flip < 0.0f;
}

struct yuv_format_descriptor yuv_formats[] = {
	{
		.format = DRM_FORMAT_YUYV,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_XUXV,
		{{
			.format = DRM_FORMAT_GR88,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_ARGB8888,
			.plane_index = 0
		}}
	}, {
		.format = DRM_FORMAT_NV12,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_UV,
		{{
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_GR88,
			.plane_index = 1
		}}
	}, {
		.format = DRM_FORMAT_NV16,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_UV,
		{{
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_GR88,
			.plane_index = 1
		}}
	}, {
		.format = DRM_FORMAT_NV24,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_UV,
		{{
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_GR88,
			.plane_index = 1
		}}
	}, {
		.format = DRM_FORMAT_P010,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_UV,
		{{
			.format = DRM_FORMAT_R16,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_GR1616,
			.plane_index = 1
		}}
	}, {
		.format = DRM_FORMAT_P012,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_UV,
		{{
			.format = DRM_FORMAT_R16,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_GR1616,
			.plane_index = 1
		}}
	}, {
		.format = DRM_FORMAT_P016,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_UV,
		{{
			.format = DRM_FORMAT_R16,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_GR1616,
			.plane_index = 1
		}}
	}, {
		.format = DRM_FORMAT_YUV420,
		.output_planes = 3,
		.shader_variant = SHADER_VARIANT_Y_U_V,
		{{
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_R8,
			.plane_index = 1
		}, {
			.format = DRM_FORMAT_R8,
			.plane_index = 2
		}}
	}, {
		.format = DRM_FORMAT_YUV444,
		.output_planes = 3,
		.shader_variant = SHADER_VARIANT_Y_U_V,
		{{
			.format = DRM_FORMAT_R8,
			.plane_index = 0
		}, {
			.format = DRM_FORMAT_R8,
			.plane_index = 1
		}, {
			.format = DRM_FORMAT_R8,
			.plane_index = 2
		}}
	}, {
		.format = DRM_FORMAT_XYUV8888,
		.output_planes = 1,
		.shader_variant = SHADER_VARIANT_XYUV,
		{{
			.format = DRM_FORMAT_XBGR8888,
			.plane_index = 0
		}}
	}
};

/* Add extension flags to the bitfield that 'flags_out' points to. 'table'
 * stores extension names and flags to check for, and 'extensions' is the list
 * usually returned by the EGL or GL implementation. New flags are stored using
 * a bitwise OR in order to keep flags set from a previous call. Caller must
 * ensure the bitfield is set to 0 at first call.
 */
void
gl_extensions_add(const struct gl_extension_table *table,
		  const char *extensions,
		  uint64_t *flags_out)
{
	struct { const char *str; size_t len; } *map;
	size_t i = 0, n = 0;
	uint64_t flags = 0;
	char prev_char = ' ';

	/* Get number of extensions. */
	while (extensions[i]) {
		if (prev_char == ' ' && extensions[i] != ' ')
			n++;
		prev_char = extensions[i++];
	}

	if (n == 0)
		return;

	/* Allocate data structure mapping each extension to its length. */
	map = xmalloc(n * sizeof *map);
	prev_char = ' ';
	i = n = 0;
	while (prev_char) {
		if (extensions[i] != ' ' && extensions[i] != '\0') {
			if (prev_char == ' ')
				map[n].str = &extensions[i];
		} else if (prev_char != ' ') {
			map[n].len = &extensions[i] - map[n].str;
			n++;
		}
		prev_char = extensions[i++];
	}

	/* Match extensions with table. */
	for (; table->str; table++) {
		for (i = 0; i < n; i++) {
			if (table->len == map[i].len &&
			    !strncmp(table->str, map[i].str, table->len)) {
				flags |= table->flag;
				break;
			}
		}
	}

	*flags_out |= flags;
	free(map);
}
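
/* For illustration only: a hypothetical caller of gl_extensions_add() would
 * zero the bitfield once and then accumulate flags from the extension string
 * reported by the implementation, e.g.:
 *
 *	uint64_t flags = 0;
 *
 *	gl_extensions_add(extension_table,
 *			  (const char *) glGetString(GL_EXTENSIONS),
 *			  &flags);
 *	if (flags & EXTENSION_EXT_UNPACK_SUBIMAGE)
 *		use_unpack_subimage();
 *
 * The call site and use_unpack_subimage() above are illustrative sketches,
 * not quoted from the renderer. */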

static void
timeline_begin_render_query(struct gl_renderer *gr, GLuint query)
{
	if (gl_features_has(gr, FEATURE_GPU_TIMELINE) &&
	    weston_log_scope_is_enabled(gr->compositor->timeline))
		gr->begin_query(GL_TIME_ELAPSED_EXT, query);
}

static void
timeline_end_render_query(struct gl_renderer *gr)
{
	if (gl_features_has(gr, FEATURE_GPU_TIMELINE) &&
	    weston_log_scope_is_enabled(gr->compositor->timeline))
		gr->end_query(GL_TIME_ELAPSED_EXT);
}

static void
timeline_render_point_destroy(struct timeline_render_point *trp)
{
	wl_list_remove(&trp->link);
	wl_event_source_remove(trp->event_source);
	close(trp->fd);
	free(trp);
}

static int
timeline_render_point_handler(int fd, uint32_t mask, void *data)
{
	struct timeline_render_point *trp = data;
	struct timespec end;

	if ((mask & WL_EVENT_READABLE) &&
	    (weston_linux_sync_file_read_timestamp(trp->fd, &end) == 0)) {
		struct gl_renderer *gr = get_renderer(trp->output->compositor);
		struct timespec begin;
		GLuint64 elapsed;
#if !defined(NDEBUG)
		GLint result_available;

		/* The elapsed time result must now be available since the
		 * begin/end queries are meant to be queued prior to fence sync
		 * creation. */
		gr->get_query_object_iv(trp->query,
					GL_QUERY_RESULT_AVAILABLE_EXT,
					&result_available);
		assert(result_available == GL_TRUE);
#endif

		gr->get_query_object_ui64v(trp->query, GL_QUERY_RESULT_EXT,
					   &elapsed);
		timespec_add_nsec(&begin, &end, -elapsed);

		TL_POINT(trp->output->compositor, "renderer_gpu_begin",
			 TLP_GPU(&begin), TLP_OUTPUT(trp->output), TLP_END);
		TL_POINT(trp->output->compositor, "renderer_gpu_end",
			 TLP_GPU(&end), TLP_OUTPUT(trp->output), TLP_END);
	}

	timeline_render_point_destroy(trp);

	return 0;
}

static EGLSyncKHR
create_render_sync(struct gl_renderer *gr)
{
	static const EGLint attribs[] = { EGL_NONE };

	if (!egl_display_has(gr, EXTENSION_ANDROID_NATIVE_FENCE_SYNC))
		return EGL_NO_SYNC_KHR;

	return gr->create_sync(gr->egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID,
			       attribs);
}

static void
timeline_submit_render_sync(struct gl_renderer *gr,
			    struct weston_output *output,
			    EGLSyncKHR sync,
			    GLuint query)
{
	struct gl_output_state *go;
	struct wl_event_loop *loop;
	int fd;
	struct timeline_render_point *trp;

	if (!gl_features_has(gr, FEATURE_GPU_TIMELINE) ||
	    !weston_log_scope_is_enabled(gr->compositor->timeline) ||
	    sync == EGL_NO_SYNC_KHR)
		return;

	go = get_output_state(output);
	loop = wl_display_get_event_loop(gr->compositor->wl_display);

	fd = gr->dup_native_fence_fd(gr->egl_display, sync);
	if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID)
		return;

	trp = zalloc(sizeof *trp);
	if (trp == NULL) {
		close(fd);
		return;
	}

	trp->fd = fd;
	trp->query = query;
	trp->output = output;
	trp->event_source = wl_event_loop_add_fd(loop, fd,
						 WL_EVENT_READABLE,
						 timeline_render_point_handler,
						 trp);

	wl_list_insert(&go->timeline_render_point_list, &trp->link);
}
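
/* A rough sketch of how the helpers above chain together during a repaint;
 * the sequence is inferred from these functions, not copied from the actual
 * repaint path:
 *
 *	timeline_begin_render_query(gr, go->render_query);
 *	... emit GL drawing commands ...
 *	timeline_end_render_query(gr);
 *	go->render_sync = create_render_sync(gr);
 *	... submit the frame ...
 *	timeline_submit_render_sync(gr, output, go->render_sync,
 *				    go->render_query);
 *
 * When the native fence fd signals, timeline_render_point_handler() takes the
 * fence timestamp as the GPU end time and subtracts the elapsed-time query
 * result to recover the begin time. */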

/* Initialise a pair of framebuffer and renderbuffer objects. The framebuffer
 * object is left bound on success. Use gl_fbo_fini() to finalise.
 */
static bool
gl_fbo_init(GLenum internal_format,
	    int width,
	    int height,
	    GLuint *fb_out,
	    GLuint *rb_out)
{
	GLuint fb, rb;
	GLenum status;

	glGenFramebuffers(1, &fb);
	glBindFramebuffer(GL_FRAMEBUFFER, fb);
	glGenRenderbuffers(1, &rb);
	glBindRenderbuffer(GL_RENDERBUFFER, rb);
	glRenderbufferStorage(GL_RENDERBUFFER, internal_format, width, height);
	glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
				  GL_RENDERBUFFER, rb);
	status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
	glBindRenderbuffer(GL_RENDERBUFFER, 0);
	if (status != GL_FRAMEBUFFER_COMPLETE) {
		weston_log("Error: FBO incomplete.\n");
		goto error;
	}

	*fb_out = fb;
	*rb_out = rb;
	return true;

error:
	glDeleteFramebuffers(1, &fb);
	glDeleteRenderbuffers(1, &rb);
	return false;
}

/* Finalise a pair of framebuffer and renderbuffer objects.
 */
static void
gl_fbo_fini(GLuint *fb,
	    GLuint *rb)
{
	glDeleteFramebuffers(1, fb);
	glDeleteRenderbuffers(1, rb);
	*fb = 0;
	*rb = 0;
}
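
/* An illustrative (hypothetical) pairing of the two helpers above:
 *
 *	GLuint fb, rb;
 *
 *	if (gl_fbo_init(GL_RGBA8, width, height, &fb, &rb)) {
 *		... draw into the framebuffer left bound by gl_fbo_init() ...
 *		gl_fbo_fini(&fb, &rb);
 *	}
 */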

/* Initialise a pair of framebuffer and renderbuffer objects to render into an
 * EGL image. The framebuffer object is left bound on success. Use gl_fbo_fini()
 * to finalise.
 */
static bool
gl_fbo_image_init(struct gl_renderer *gr,
		  EGLImageKHR image,
		  GLuint *fb_out,
		  GLuint *rb_out)
{
	GLuint fb, rb;
	GLenum status;

	glGenFramebuffers(1, &fb);
	glBindFramebuffer(GL_FRAMEBUFFER, fb);
	glGenRenderbuffers(1, &rb);
	glBindRenderbuffer(GL_RENDERBUFFER, rb);
	gr->image_target_renderbuffer_storage(GL_RENDERBUFFER, image);
	glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
				  GL_RENDERBUFFER, rb);
	status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
	glBindRenderbuffer(GL_RENDERBUFFER, 0);
	if (status != GL_FRAMEBUFFER_COMPLETE) {
		weston_log("Error: FBO incomplete.\n");
		goto error;
	}

	*fb_out = fb;
	*rb_out = rb;
	return true;

error:
	glDeleteFramebuffers(1, &fb);
	glDeleteRenderbuffers(1, &rb);
	return false;
}

/* Initialise a pair of framebuffer and texture objects to render into a
 * texture. The framebuffer object is left bound on success. Use
 * gl_fbo_texture_fini() to finalise.
 */
static bool
gl_fbo_texture_init(GLenum internal_format,
		    int width,
		    int height,
		    GLenum format,
		    GLenum type,
		    GLuint *fb_out,
		    GLuint *tex_out)
{
	GLenum status;
	GLuint fb, tex;

	glGenTextures(1, &tex);
	glBindTexture(GL_TEXTURE_2D, tex);
	glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0,
		     format, type, NULL);
	glBindTexture(GL_TEXTURE_2D, 0);
	glGenFramebuffers(1, &fb);
	glBindFramebuffer(GL_FRAMEBUFFER, fb);
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
			       GL_TEXTURE_2D, tex, 0);
	status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
	if (status != GL_FRAMEBUFFER_COMPLETE) {
		weston_log("Error: FBO incomplete.\n");
		goto error;
	}

	*fb_out = fb;
	*tex_out = tex;
	return true;

error:
	glDeleteFramebuffers(1, &fb);
	glDeleteTextures(1, &tex);
	return false;
}

/* Finalise a pair of framebuffer and texture objects.
 */
static void
gl_fbo_texture_fini(GLuint *fb,
		    GLuint *tex)
{
	glDeleteFramebuffers(1, fb);
	glDeleteTextures(1, tex);
	*fb = 0;
	*tex = 0;
}

static void
gl_renderbuffer_init(struct gl_renderbuffer *renderbuffer,
		     enum gl_renderbuffer_type type,
		     enum gl_border_status border_damage,
		     GLuint framebuffer,
		     weston_renderbuffer_discarded_func discarded_cb,
		     void *user_data,
		     struct weston_output *output)
{
	struct gl_output_state *go = get_output_state(output);

	renderbuffer->type = type;
	pixman_region32_init(&renderbuffer->damage);
	pixman_region32_copy(&renderbuffer->damage, &output->region);
	renderbuffer->border_damage = border_damage;
	renderbuffer->fb = framebuffer;
	renderbuffer->discarded_cb = discarded_cb;
	renderbuffer->user_data = user_data;

	wl_list_insert(&go->renderbuffer_list, &renderbuffer->link);
}

static void
gl_renderbuffer_fini(struct gl_renderbuffer *renderbuffer)
{
	assert(!renderbuffer->stale);

	pixman_region32_fini(&renderbuffer->damage);

	if (renderbuffer->type == RENDERBUFFER_BUFFER) {
		gl_fbo_fini(&renderbuffer->fb, &renderbuffer->buffer.rb);
	} else if (renderbuffer->type == RENDERBUFFER_DMABUF) {
		gl_fbo_fini(&renderbuffer->fb, &renderbuffer->dmabuf.rb);
		renderbuffer->dmabuf.gr->destroy_image(renderbuffer->dmabuf.gr->egl_display,
						       renderbuffer->dmabuf.image);
	}

	renderbuffer->stale = true;
}

static void
gl_renderer_destroy_renderbuffer(weston_renderbuffer_t weston_renderbuffer)
{
	struct gl_renderbuffer *rb =
		(struct gl_renderbuffer *) weston_renderbuffer;

	wl_list_remove(&rb->link);

	if (!rb->stale)
		gl_renderbuffer_fini(rb);

	if (rb->type == RENDERBUFFER_DMABUF)
		rb->dmabuf.memory->destroy(rb->dmabuf.memory);

	free(rb);
}

static bool
gl_renderer_discard_renderbuffers(struct gl_output_state *go,
				  bool destroy)
{
	struct gl_renderbuffer *rb, *tmp;
	bool success = true;

	/* A renderbuffer goes stale after being discarded. Most resources are
	 * released. It's kept in the output state's renderbuffer list waiting
	 * for the backend to destroy it. */
	wl_list_for_each_safe(rb, tmp, &go->renderbuffer_list, link) {
		if ((rb->type == RENDERBUFFER_WINDOW) || destroy) {
			gl_renderer_destroy_renderbuffer((weston_renderbuffer_t) rb);
		} else if (!rb->stale) {
			gl_renderbuffer_fini(rb);
			if (success && rb->discarded_cb)
				success = rb->discarded_cb((weston_renderbuffer_t) rb,
							   rb->user_data);
		}
	}

	return success;
}

/* Get the age of the current back-buffer as the number of frames elapsed since
 * it was most recently defined. */
static int
get_renderbuffer_window_age(struct weston_output *output)
{
	struct gl_output_state *go = get_output_state(output);
	struct gl_renderer *gr = get_renderer(output->compositor);
	EGLint buffer_age = 0;
	EGLBoolean ret;

	if ((egl_display_has(gr, EXTENSION_EXT_BUFFER_AGE) ||
	     egl_display_has(gr, EXTENSION_KHR_PARTIAL_UPDATE)) &&
	    go->egl_surface != EGL_NO_SURFACE) {
		ret = eglQuerySurface(gr->egl_display, go->egl_surface,
				      EGL_BUFFER_AGE_EXT, &buffer_age);
		if (ret == EGL_FALSE) {
			weston_log("buffer age query failed.\n");
			gl_renderer_print_egl_error_state();
		}
	}

	return buffer_age;
}
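
/* As a reading aid for the EGL_EXT_buffer_age semantics relied on above: an
 * age of 0 means the buffer content is undefined, and an age of N > 0 means
 * the buffer holds the frame from N swaps ago. For example, with three
 * buffers in flight, a buffer drawn on frame 1 typically comes back on frame
 * 4 with age 3, so the damage accumulated over frames 2 and 3 must be
 * repainted into it. */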

static struct gl_renderbuffer *
gl_renderer_get_renderbuffer_window(struct weston_output *output)
{
	struct gl_output_state *go = get_output_state(output);
	struct gl_renderer *gr = get_renderer(output->compositor);
	int current_age = get_renderbuffer_window_age(output);
	int count = 0;
	struct gl_renderbuffer *rb;
	struct gl_renderbuffer *ret = NULL;
	struct gl_renderbuffer *oldest_rb = NULL;
	int max_buffers;

	wl_list_for_each(rb, &go->renderbuffer_list, link) {
		if (rb->type == RENDERBUFFER_WINDOW) {
			/* Count window renderbuffers, age them, */
			count++;
			rb->window.age++;
			/* find the one with current_age to return, */
			if (rb->window.age == current_age)
				ret = rb;
			/* and the oldest one in case we decide to reuse it. */
			if (!oldest_rb ||
			    rb->window.age > oldest_rb->window.age)
				oldest_rb = rb;
		}
	}

	/* If a renderbuffer of correct age was found, return it, */
	if (ret) {
		ret->window.age = 0;
		return ret;
	}

	/* otherwise decide whether to refurbish and return the oldest, */
	max_buffers = (egl_display_has(gr, EXTENSION_EXT_BUFFER_AGE) ||
		       egl_display_has(gr, EXTENSION_KHR_PARTIAL_UPDATE)) ?
		BUFFER_DAMAGE_COUNT : 1;
	if ((current_age == 0 || current_age - 1 > BUFFER_DAMAGE_COUNT) &&
	    count >= max_buffers) {
		pixman_region32_copy(&oldest_rb->damage, &output->region);
		oldest_rb->border_damage = BORDER_ALL_DIRTY;
		oldest_rb->window.age = 0;
		return oldest_rb;
	}

	/* or create a new window renderbuffer (window renderbuffers use the
	 * default surface framebuffer 0). */
	rb = xzalloc(sizeof(*rb));
	gl_renderbuffer_init(rb, RENDERBUFFER_WINDOW, BORDER_ALL_DIRTY, 0, NULL,
			     NULL, output);

	return rb;
}

static weston_renderbuffer_t
gl_renderer_create_renderbuffer(struct weston_output *output,
				const struct pixel_format_info *format,
				void *buffer, int stride,
				weston_renderbuffer_discarded_func discarded_cb,
				void *user_data)
{
	struct gl_output_state *go = get_output_state(output);
	struct gl_renderer *gr = get_renderer(output->compositor);
	struct gl_renderbuffer *renderbuffer;
	GLuint fb, rb;

	switch (format->gl_internalformat) {
	case GL_RGB8:
	case GL_RGBA8:
		if (gr->gl_version < gl_version(3, 0) &&
		    !gl_extensions_has(gr, EXTENSION_OES_RGB8_RGBA8))
			return NULL;
		break;
	case GL_RGB10_A2:
		if (gr->gl_version < gl_version(3, 0) &&
		    (!gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_TYPE_2_10_10_10_REV) ||
		     !gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_STORAGE)))
			return NULL;
		break;
	default:
		return NULL;
	}

	if (!gl_fbo_init(format->gl_internalformat, go->fb_size.width,
			 go->fb_size.height, &fb, &rb)) {
		weston_log("Failed to init renderbuffer%s\n",
			   buffer ? " from buffer" : "");
		return NULL;
	}

	renderbuffer = xzalloc(sizeof(*renderbuffer));

	renderbuffer->buffer.rb = rb;
	renderbuffer->buffer.data = buffer;
	renderbuffer->buffer.stride = stride;
	gl_renderbuffer_init(renderbuffer, RENDERBUFFER_BUFFER,
			     BORDER_STATUS_CLEAN, fb, discarded_cb, user_data,
			     output);

	return (weston_renderbuffer_t) renderbuffer;
}

static EGLImageKHR
import_simple_dmabuf(struct gl_renderer *, const struct dmabuf_attributes *);

static weston_renderbuffer_t
gl_renderer_create_renderbuffer_dmabuf(struct weston_output *output,
				       struct linux_dmabuf_memory *dmabuf,
				       weston_renderbuffer_discarded_func discarded_cb,
				       void *user_data)
{
	struct gl_renderer *gr = get_renderer(output->compositor);
	struct dmabuf_attributes *attributes = dmabuf->attributes;
	struct gl_renderbuffer *renderbuffer;
	EGLImageKHR image;
	GLuint fb, rb;

	image = import_simple_dmabuf(gr, attributes);
	if (image == EGL_NO_IMAGE_KHR) {
		weston_log("Failed to import dmabuf\n");
		return NULL;
	}
	if (!gl_fbo_image_init(gr, image, &fb, &rb)) {
		weston_log("Failed to init renderbuffer from dmabuf\n");
		gr->destroy_image(gr->egl_display, image);
		return NULL;
	}

	renderbuffer = xzalloc(sizeof(*renderbuffer));

	renderbuffer->dmabuf.gr = gr;
	renderbuffer->dmabuf.memory = dmabuf;
	renderbuffer->dmabuf.image = image;
	gl_renderbuffer_init(renderbuffer, RENDERBUFFER_DMABUF,
			     BORDER_STATUS_CLEAN, fb, discarded_cb, user_data,
			     output);

	return (weston_renderbuffer_t) renderbuffer;
}

static struct gl_renderbuffer *
gl_renderer_update_renderbuffers(struct weston_output *output,
				 pixman_region32_t *damage,
				 weston_renderbuffer_t renderbuffer)
{
	struct gl_output_state *go = get_output_state(output);
	struct gl_renderbuffer *rb;

	/* Accumulate damage in non-stale renderbuffers. */
	wl_list_for_each(rb, &go->renderbuffer_list, link) {
		if (!rb->stale) {
			pixman_region32_union(&rb->damage, &rb->damage, damage);
			rb->border_damage |= go->border_status;
		}
	}

	if (renderbuffer)
		return (struct gl_renderbuffer *) renderbuffer;

	/* A NULL renderbuffer parameter is a special value to request
	 * renderbuffers for window outputs. */
	return gl_renderer_get_renderbuffer_window(output);
}

static bool
gl_renderer_do_read_pixels(struct gl_renderer *gr,
			   struct gl_output_state *go,
			   const struct pixel_format_info *fmt,
			   void *pixels, int stride,
			   const struct weston_geometry *rect)
{
	pixman_image_t *tmp = NULL;
	void *tmp_data = NULL;
	pixman_image_t *image;
	pixman_transform_t flip;

	assert(fmt->gl_type != 0);
	assert(fmt->gl_format != 0);

	if (!is_y_flipped(go)) {
		glReadPixels(rect->x, rect->y, rect->width, rect->height,
			     fmt->gl_format, fmt->gl_type, pixels);
		return true;
	}

	if (gl_extensions_has(gr, EXTENSION_ANGLE_PACK_REVERSE_ROW_ORDER)) {
		/* Make glReadPixels() return top row first. */
		glPixelStorei(GL_PACK_REVERSE_ROW_ORDER_ANGLE, GL_TRUE);
		glReadPixels(rect->x, rect->y, rect->width, rect->height,
			     fmt->gl_format, fmt->gl_type, pixels);
		glPixelStorei(GL_PACK_REVERSE_ROW_ORDER_ANGLE, GL_FALSE);
		return true;
	}

	/*
	 * glReadPixels() returns bottom row first. We need to read into a
	 * temporary buffer and y-flip it.
	 */

	tmp_data = malloc(stride * rect->height);
	if (!tmp_data)
		return false;

	tmp = pixman_image_create_bits(fmt->pixman_format, rect->width,
				       rect->height, tmp_data, stride);
	if (!tmp) {
		free(tmp_data);
		return false;
	}

	glReadPixels(rect->x, rect->y, rect->width, rect->height,
		     fmt->gl_format, fmt->gl_type, pixman_image_get_data(tmp));

	image = pixman_image_create_bits_no_clear(fmt->pixman_format,
						  rect->width, rect->height,
						  pixels, stride);
	abort_oom_if_null(image);

	pixman_transform_init_scale(&flip, pixman_fixed_1,
				    pixman_fixed_minus_1);
	pixman_transform_translate(&flip, NULL, 0,
				   pixman_int_to_fixed(rect->height));
	pixman_image_set_transform(tmp, &flip);

	pixman_image_composite32(PIXMAN_OP_SRC,
				 tmp,   /* src */
				 NULL,  /* mask */
				 image, /* dest */
				 0, 0,  /* src x,y */
				 0, 0,  /* mask x,y */
				 0, 0,  /* dest x,y */
				 rect->width, rect->height);

	pixman_image_unref(image);
	pixman_image_unref(tmp);
	free(tmp_data);

	return true;
}

static bool
gl_renderer_do_capture(struct gl_renderer *gr, struct gl_output_state *go,
		       struct weston_buffer *into,
		       const struct weston_geometry *rect)
{
	struct wl_shm_buffer *shm = into->shm_buffer;
	const struct pixel_format_info *fmt = into->pixel_format;
	bool ret;

	assert(into->type == WESTON_BUFFER_SHM);
	assert(shm);

	wl_shm_buffer_begin_access(shm);

	ret = gl_renderer_do_read_pixels(gr, go, fmt, wl_shm_buffer_get_data(shm),
					 into->stride, rect);

	wl_shm_buffer_end_access(shm);

	return ret;
}

static struct gl_capture_task*
create_capture_task(struct weston_capture_task *task,
		    struct gl_renderer *gr,
		    const struct weston_geometry *rect)
{
	struct gl_capture_task *gl_task = xzalloc(sizeof *gl_task);

	gl_task->task = task;
	gl_task->gr = gr;
	glGenBuffers(1, &gl_task->pbo);
	gl_task->stride = (gr->compositor->read_format->bpp / 8) * rect->width;
	gl_task->height = rect->height;
	gl_task->reverse =
		!gl_extensions_has(gr, EXTENSION_ANGLE_PACK_REVERSE_ROW_ORDER);
	gl_task->sync = EGL_NO_SYNC_KHR;
	gl_task->fd = EGL_NO_NATIVE_FENCE_FD_ANDROID;

	return gl_task;
}

static void
destroy_capture_task(struct gl_capture_task *gl_task)
{
	assert(gl_task);

	wl_event_source_remove(gl_task->source);
	wl_list_remove(&gl_task->link);
	glDeleteBuffers(1, &gl_task->pbo);

	if (gl_task->sync != EGL_NO_SYNC_KHR)
		gl_task->gr->destroy_sync(gl_task->gr->egl_display,
					  gl_task->sync);
	if (gl_task->fd != EGL_NO_NATIVE_FENCE_FD_ANDROID)
		close(gl_task->fd);

	free(gl_task);
}

static void
copy_capture(struct gl_capture_task *gl_task)
{
	struct weston_buffer *buffer =
		weston_capture_task_get_buffer(gl_task->task);
	struct wl_shm_buffer *shm = buffer->shm_buffer;
	struct gl_renderer *gr = gl_task->gr;
	uint8_t *src, *dst;
	int i;

	assert(shm);

	glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_task->pbo);
	src = gr->map_buffer_range(GL_PIXEL_PACK_BUFFER, 0,
				   gl_task->stride * gl_task->height,
				   GL_MAP_READ_BIT);
	dst = wl_shm_buffer_get_data(shm);
	wl_shm_buffer_begin_access(shm);

	if (!gl_task->reverse) {
		memcpy(dst, src, gl_task->stride * gl_task->height);
	} else {
		src += (gl_task->height - 1) * gl_task->stride;
		for (i = 0; i < gl_task->height; i++) {
			memcpy(dst, src, gl_task->stride);
			dst += gl_task->stride;
			src -= gl_task->stride;
		}
	}

	wl_shm_buffer_end_access(shm);
	gr->unmap_buffer(GL_PIXEL_PACK_BUFFER);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
}
|
2023-10-05 09:23:47 +02:00
|
|
|
|
2023-10-30 11:20:17 +01:00
|
|
|
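/* Timer fallback used when no native fence fd could be created: by the time
 * the timer fires, the read back is assumed to have completed. */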
static int
async_capture_handler(void *data)
{
	struct gl_capture_task *gl_task = (struct gl_capture_task *) data;

	assert(gl_task);

	copy_capture(gl_task);
	weston_capture_task_retire_complete(gl_task->task);
	destroy_capture_task(gl_task);

	return 0;
}

static int
async_capture_handler_fd(int fd, uint32_t mask, void *data)
{
	struct gl_capture_task *gl_task = (struct gl_capture_task *) data;

	assert(gl_task);
	assert(fd == gl_task->fd);

	if (mask & WL_EVENT_READABLE) {
		copy_capture(gl_task);
		weston_capture_task_retire_complete(gl_task->task);
	} else {
		weston_capture_task_retire_failed(gl_task->task,
						  "GL: capture failed");
	}
	destroy_capture_task(gl_task);

	return 0;
}

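/* Asynchronous capture path: issue glReadPixels() into a PBO and retire the
 * capture task later, either when the EGL native fence fd signals or,
 * failing that, after a timer based on the output's refresh rate. */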
static void
gl_renderer_do_read_pixels_async(struct gl_renderer *gr,
				 struct gl_output_state *go,
				 struct weston_output *output,
				 struct weston_capture_task *task,
				 const struct weston_geometry *rect)
{
	struct weston_buffer *buffer = weston_capture_task_get_buffer(task);
	const struct pixel_format_info *fmt = buffer->pixel_format;
	struct gl_capture_task *gl_task;
	struct wl_event_loop *loop;
	int refresh_mhz, refresh_msec;

	assert(gl_features_has(gr, FEATURE_ASYNC_READBACK));
	assert(output->current_mode->refresh > 0);
	assert(buffer->type == WESTON_BUFFER_SHM);
	assert(fmt->gl_type != 0);
	assert(fmt->gl_format != 0);

	if (gl_extensions_has(gr, EXTENSION_ANGLE_PACK_REVERSE_ROW_ORDER) &&
	    is_y_flipped(go))
		glPixelStorei(GL_PACK_REVERSE_ROW_ORDER_ANGLE, GL_TRUE);

	gl_task = create_capture_task(task, gr, rect);

	glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_task->pbo);
	glBufferData(GL_PIXEL_PACK_BUFFER, gl_task->stride * gl_task->height,
		     NULL, gr->pbo_usage);
	glReadPixels(rect->x, rect->y, rect->width, rect->height,
		     fmt->gl_format, fmt->gl_type, 0);
	glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

	loop = wl_display_get_event_loop(gr->compositor->wl_display);
	gl_task->sync = create_render_sync(gr);

	/* Make sure the read back request is flushed. Doing so right between
	 * fence sync object creation and native fence fd duplication ensures
	 * the fd is created as stated by EGL_ANDROID_native_fence_sync: "the
	 * next Flush() operation performed by the current client API causes a
	 * new native fence object to be created". */
	glFlush();

	if (gl_task->sync != EGL_NO_SYNC_KHR)
		gl_task->fd = gr->dup_native_fence_fd(gr->egl_display,
						      gl_task->sync);

	if (gl_task->fd != EGL_NO_NATIVE_FENCE_FD_ANDROID) {
		gl_task->source = wl_event_loop_add_fd(loop, gl_task->fd,
						       WL_EVENT_READABLE,
						       async_capture_handler_fd,
						       gl_task);
	} else {
		/* We guess here an async read back doesn't take more than 5
		 * frames on most platforms. */
		gl_task->source = wl_event_loop_add_timer(loop,
							  async_capture_handler,
							  gl_task);
		refresh_mhz = output->current_mode->refresh;
		refresh_msec = millihz_to_nsec(refresh_mhz) / 1000000;
		wl_event_source_timer_update(gl_task->source, 5 * refresh_msec);
	}

	wl_list_insert(&gr->pending_capture_list, &gl_task->link);

	if (gl_extensions_has(gr, EXTENSION_ANGLE_PACK_REVERSE_ROW_ORDER) &&
	    is_y_flipped(go))
		glPixelStorei(GL_PACK_REVERSE_ROW_ORDER_ANGLE, GL_FALSE);
}

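/* Pull all pending capture tasks for 'source' on this output and service
 * them, preferring the asynchronous read back path when available. */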
static void
gl_renderer_do_capture_tasks(struct gl_renderer *gr,
			     struct weston_output *output,
			     enum weston_output_capture_source source)
{
	struct gl_output_state *go = get_output_state(output);
	const struct pixel_format_info *format;
	struct weston_capture_task *ct;
	struct weston_geometry rect;

	switch (source) {
	case WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER:
		format = output->compositor->read_format;
		rect = go->area;
		/* Because glReadPixels has bottom-left origin */
		if (is_y_flipped(go))
			rect.y = go->fb_size.height - go->area.y - go->area.height;
		break;
	case WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER:
		format = output->compositor->read_format;
		rect.x = 0;
		rect.y = 0;
		rect.width = go->fb_size.width;
		rect.height = go->fb_size.height;
		break;
	default:
		assert(0);
		return;
	}

	while ((ct = weston_output_pull_capture_task(output, source, rect.width,
						     rect.height, format))) {
		struct weston_buffer *buffer = weston_capture_task_get_buffer(ct);

		assert(buffer->width == rect.width);
		assert(buffer->height == rect.height);
		assert(buffer->pixel_format->format == format->format);

		if (buffer->type != WESTON_BUFFER_SHM ||
		    buffer->buffer_origin != ORIGIN_TOP_LEFT) {
			weston_capture_task_retire_failed(ct, "GL: unsupported buffer");
			continue;
		}

		if (buffer->stride % 4 != 0) {
			weston_capture_task_retire_failed(ct, "GL: buffer stride not multiple of 4");
			continue;
		}

		if (gl_features_has(gr, FEATURE_ASYNC_READBACK)) {
			gl_renderer_do_read_pixels_async(gr, go, output, ct, &rect);
			continue;
		}

		if (gl_renderer_do_capture(gr, go, buffer, &rect))
			weston_capture_task_retire_complete(ct);
		else
			weston_capture_task_retire_failed(ct, "GL: capture failed");
	}
}

static void
gl_renderer_send_shader_error(struct weston_paint_node *pnode)
{
	struct wl_resource *resource = pnode->surface->resource;

	if (!resource)
		return;

	wl_client_post_implementation_error(wl_resource_get_client(resource),
		"Weston GL-renderer shader failed for wl_surface@%u",
		wl_resource_get_id(resource));
}

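/* Make the output's EGL surface and context current, logging the EGL error
 * state only once to avoid flooding the logs. */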
static int
use_output(struct weston_output *output)
{
	static int errored;
	struct gl_output_state *go = get_output_state(output);
	struct gl_renderer *gr = get_renderer(output->compositor);
	EGLBoolean ret;

	ret = eglMakeCurrent(gr->egl_display, go->egl_surface,
			     go->egl_surface, gr->egl_context);

	if (ret == EGL_FALSE) {
		if (errored)
			return -1;
		errored = 1;
		weston_log("Failed to make EGL context current.\n");
		gl_renderer_print_egl_error_state();
		return -1;
	}

	return 0;
}

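/* If the surface carries an explicit-sync acquire fence, turn it into an
 * EGLSyncKHR and issue a server-side wait so that GL reads the attached
 * buffer only once the client's rendering has landed. */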
static int
ensure_surface_buffer_is_ready(struct gl_renderer *gr,
			       struct gl_surface_state *gs)
{
	EGLint attribs[] = {
		EGL_SYNC_NATIVE_FENCE_FD_ANDROID,
		-1,
		EGL_NONE
	};
	struct weston_surface *surface = gs->surface;
	struct weston_buffer *buffer = gs->buffer_ref.buffer;
	EGLSyncKHR sync;
	EGLint wait_ret;
	EGLint destroy_ret;

	if (!buffer)
		return 0;

	if (surface->acquire_fence_fd < 0)
		return 0;

	/* We should only get a fence if we support EGLSyncKHR, since
	 * we don't advertise the explicit sync protocol otherwise. */
	assert(gl_features_has(gr, FEATURE_EXPLICIT_SYNC));

	/* We should only get a fence for non-SHM buffers, since surface
	 * commit would have failed otherwise. */
	assert(buffer->type != WESTON_BUFFER_SHM);

	attribs[1] = dup(surface->acquire_fence_fd);
	if (attribs[1] == -1) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to dup acquire fence");
		return -1;
	}

	sync = gr->create_sync(gr->egl_display,
			       EGL_SYNC_NATIVE_FENCE_ANDROID,
			       attribs);
	if (sync == EGL_NO_SYNC_KHR) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to create EGLSyncKHR object");
		close(attribs[1]);
		return -1;
	}

	wait_ret = gr->wait_sync(gr->egl_display, sync, 0);
	if (wait_ret == EGL_FALSE) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to wait on EGLSyncKHR object");
		/* Continue to try to destroy the sync object. */
	}

	destroy_ret = gr->destroy_sync(gr->egl_display, sync);
	if (destroy_ret == EGL_FALSE) {
		linux_explicit_synchronization_send_server_error(
			gs->surface->synchronization_resource,
			"Failed to destroy EGLSyncKHR object");
	}

	return (wait_ret == EGL_TRUE && destroy_ret == EGL_TRUE) ? 0 : -1;
}

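/* Rewrite 'sconf' so the node is drawn as a solid placeholder color,
 * converted from sRGB to the output's blending space. */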
static void
prepare_placeholder(struct gl_shader_config *sconf,
		    struct weston_paint_node *pnode)
{
	struct weston_color_transform *ctransf;
	struct weston_output *output = pnode->output;
	struct gl_renderer *gr = get_renderer(output->compositor);
	struct gl_shader_config alt = {
		.req = {
			.variant = SHADER_VARIANT_SOLID,
			.input_is_premult = true,
		},
		.projection = sconf->projection,
		.view_alpha = sconf->view_alpha,
		.unicolor = { pnode->solid.r,
			      pnode->solid.g,
			      pnode->solid.b,
			      pnode->solid.a,
		},
	};

	ctransf = output->color_outcome->from_sRGB_to_blend;
	if (!gl_shader_config_set_color_transform(gr, &alt, ctransf)) {
		weston_log("GL-renderer: %s failed to generate a color transformation.\n",
			   __func__);
	}

	*sconf = alt;
}

static void
gl_shader_config_set_input_textures(struct gl_shader_config *sconf,
				    struct gl_surface_state *gs)
{
	struct gl_buffer_state *gb = gs->buffer;
	int i;

	sconf->req.variant = gb->shader_variant;
	sconf->req.color_channel_order = gb->gl_channel_order;
	sconf->req.input_is_premult =
		gl_shader_texture_variant_can_be_premult(gb->shader_variant);

	copy_uniform4f(sconf->unicolor, gb->color);

	assert(gb->num_textures <= SHADER_INPUT_TEX_MAX);
	for (i = 0; i < gb->num_textures; i++)
		sconf->input_tex[i] = gb->textures[i];
	for (; i < SHADER_INPUT_TEX_MAX; i++)
		sconf->input_tex[i] = 0;
}

static bool
gl_shader_config_init_for_paint_node(struct gl_shader_config *sconf,
				     struct weston_paint_node *pnode,
				     GLint filter)
{
	struct gl_renderer *gr = get_renderer(pnode->surface->compositor);
	struct gl_surface_state *gs = get_surface_state(pnode->surface);
	struct gl_output_state *go = get_output_state(pnode->output);
	struct weston_buffer *buffer = gs->buffer_ref.buffer;

	if (!pnode->surf_xform_valid)
		return false;

	*sconf = (struct gl_shader_config) {
		.req.texcoord_input = SHADER_TEXCOORD_INPUT_SURFACE,
		.projection = pnode->view->transform.matrix,
		.surface_to_buffer =
			pnode->view->surface->surface_to_buffer_matrix,
		.view_alpha = pnode->view->alpha,
		.input_tex_filter = filter,
	};

	weston_matrix_multiply(&sconf->projection, &go->output_matrix);

	if (buffer->buffer_origin == ORIGIN_TOP_LEFT) {
		weston_matrix_scale(&sconf->surface_to_buffer,
				    1.0f / buffer->width,
				    1.0f / buffer->height, 1);
	} else {
		weston_matrix_scale(&sconf->surface_to_buffer,
				    1.0f / buffer->width,
				    go->y_flip / buffer->height, 1);
		weston_matrix_translate(&sconf->surface_to_buffer, 0, 1, 0);
	}

	gl_shader_config_set_input_textures(sconf, gs);

	if (!gl_shader_config_set_color_transform(gr, sconf, pnode->surf_xform.transform)) {
		weston_log("GL-renderer: failed to generate a color transformation.\n");
		return false;
	}

	return true;
}

/* A Pixman region is implemented as a "y-x-banded" array of rectangles sorted
 * first vertically and then horizontally. This means that if 2 rectangles with
 * different y coordinates share a group of scanlines, both rectangles will be
 * split into 2 more rectangles with sharing edges. While Pixman coalesces
 * rectangles in horizontal bands whenever possible, this function merges
 * vertical bands.
 */
static int
compress_bands(pixman_box32_t *inrects, int nrects, pixman_box32_t **outrects)
{
	pixman_box32_t *out;
	int i, j, nout;

	assert(nrects > 0);

	/* nrects is an upper bound - we're not too worried about
	 * allocating a little extra
	 */
	out = malloc(sizeof(pixman_box32_t) * nrects);
	out[0] = inrects[0];
	nout = 1;
	for (i = 1; i < nrects; i++) {
		for (j = 0; j < nout; j++) {
			if (inrects[i].x1 == out[j].x1 &&
			    inrects[i].x2 == out[j].x2 &&
			    inrects[i].y1 == out[j].y2) {
				out[j].y2 = inrects[i].y2;
				goto merged;
			}
		}
		out[nout] = inrects[i];
		nout++;
	merged: ;
	}
	*outrects = out;
	return nout;
}

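/* Convert an axis-aligned rect in global coordinates into a 4-vertex polygon
 * in the surface coordinate space of view 'ev'. */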
static void
global_to_surface(pixman_box32_t *rect, struct weston_view *ev,
		  struct clipper_vertex polygon[4])
{
	struct weston_coord_global rect_g[4] = {
		{ .c = weston_coord(rect->x1, rect->y1) },
		{ .c = weston_coord(rect->x2, rect->y1) },
		{ .c = weston_coord(rect->x2, rect->y2) },
		{ .c = weston_coord(rect->x1, rect->y2) },
	};
	struct weston_coord rect_s;
	int i;

	for (i = 0; i < 4; i++) {
		rect_s = weston_coord_global_to_surface(ev, rect_g[i]).c;
		polygon[i].x = (float)rect_s.x;
		polygon[i].y = (float)rect_s.y;
	}
}

/* Transform damage 'region' in global coordinates to damage 'quads' in surface
 * coordinates. 'quads' and 'nquads' are output arguments, set only if 'quads'
 * is NULL on entry; no transformation happens otherwise. Caller must free
 * 'quads' if set. Caller must ensure 'region' is not empty.
 */
static void
transform_damage(const struct weston_paint_node *pnode,
		 pixman_region32_t *region,
		 struct clipper_quad **quads,
		 int *nquads)
{
	pixman_box32_t *rects;
	int nrects, i;
	bool compress, axis_aligned;
	struct clipper_quad *quads_alloc;
	struct clipper_vertex polygon[4];
	struct weston_view *view;

	if (*quads)
		return;

	rects = pixman_region32_rectangles(region, &nrects);
	compress = nrects >= 4;
	if (compress)
		nrects = compress_bands(rects, nrects, &rects);

	assert(nrects > 0);
	*quads = quads_alloc = malloc(nrects * sizeof *quads_alloc);
	*nquads = nrects;

	/* All the damage rects are axis-aligned in global space. This implies
	 * that all the horizontal and vertical edges are respectively parallel
	 * to each other. Because affine transformations preserve parallelism we
	 * can safely assume that if the node's output matrix is affine and
	 * stores standard output transforms (translations, flips and rotations
	 * by 90°), then all the transformed quads are axis-aligned in surface
	 * space. */
	view = pnode->view;
	axis_aligned = pnode->valid_transform;
	for (i = 0; i < nrects; i++) {
		global_to_surface(&rects[i], view, polygon);
		clipper_quad_init(&quads_alloc[i], polygon, axis_aligned);
	}

	if (compress)
		free(rects);
}

/* Set barycentric coordinates of a sub-mesh of 'count' vertices. 8 barycentric
 * coordinates (32 bytes too) are stored unconditionally into
 * 'barycentric_stream'.
 */
static void
store_wireframes(size_t count,
		 uint32_t *barycentric_stream)
{
	const uint32_t x = 0xff0000, y = 0x00ff00, z = 0x0000ff;
	static const uint32_t barycentrics[][8] = {
		{}, {}, {},
		{ x, z, y, 0, 0, 0, 0, 0 },
		{ x, z, x, y, 0, 0, 0, 0 },
		{ x, z, y, x, y, 0, 0, 0 },
		{ x, z, y, z, x, y, 0, 0 },
		{ x, z, y, x, z, x, y, 0 },
		{ x, z, y, x, y, z, x, y },
	};
	int i;

	assert(count < ARRAY_LENGTH(barycentrics));

	for (i = 0; i < 8; i++)
		barycentric_stream[i] = barycentrics[count][i];
}

/* Triangulate a sub-mesh of 'count' vertices as an indexed triangle strip.
 * 'bias' is added to each index. In order to chain sub-meshes, the last index
 * is followed by 2 indices creating 4 degenerate triangles. 'count' must be
 * less than or equal to 8. 16 indices (32 bytes) are stored unconditionally
 * into 'indices'. The return value is the index count, including the 2 chaining
 * indices.
 */
static int
store_indices(size_t count,
	      uint16_t bias,
	      uint16_t *indices)
{
	/* Look-up table of triangle strips with last entry storing the index
	 * count. Padded to 16 elements for compilers to emit packed adds. */
	static const uint16_t strips[][16] = {
		{}, {}, {},
		{ 0, 2, 1, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 },
		{ 0, 3, 1, 2, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 },
		{ 0, 4, 1, 3, 2, 2, 5, 0, 0, 0, 0, 0, 0, 0, 0, 7 },
		{ 0, 5, 1, 4, 2, 3, 3, 6, 0, 0, 0, 0, 0, 0, 0, 8 },
		{ 0, 6, 1, 5, 2, 4, 3, 3, 7, 0, 0, 0, 0, 0, 0, 9 },
		{ 0, 7, 1, 6, 2, 5, 3, 4, 4, 8, 0, 0, 0, 0, 0, 10 },
	};
	int i;

	assert(count < ARRAY_LENGTH(strips));

	for (i = 0; i < 16; i++)
		indices[i] = strips[count][i] + bias;

	return strips[count][15];
}

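/* Tweak the shader config according to the active debug mode: wireframe
 * overlays, damage/opaque tinting or per-batch coloring. */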
static void
set_debug_mode(struct gl_renderer *gr,
	       struct gl_shader_config *sconf,
	       const uint32_t *barycentrics,
	       bool opaque)
{
	/* Debug mode tints indexed by gl_debug_mode enumeration. While tints
	 * are meant to be premultiplied, debug modes can have invalid colors in
	 * order to create visual effects. */
	static const float tints[DEBUG_MODE_LAST][4] = {
		{},                           /* DEBUG_MODE_NONE */
		{ 0.0f, 0.0f, 0.0f, 0.3f },   /* DEBUG_MODE_WIREFRAME */
		{},                           /* DEBUG_MODE_BATCHES */
		{ 0.4f, -0.4f, -0.4f, 0.0f }, /* DEBUG_MODE_DAMAGE */
		{ -0.4f, -0.4f, 0.7f, 0.0f }, /* DEBUG_MODE_OPAQUE */
	};
	static const float batch_tints[][4] = {
		{ 0.9f, 0.0f, 0.0f, 0.9f },
		{ 0.0f, 0.9f, 0.0f, 0.9f },
		{ 0.0f, 0.0f, 0.9f, 0.9f },
		{ 0.9f, 0.9f, 0.0f, 0.9f },
		{ 0.9f, 0.0f, 0.9f, 0.9f },
		{ 0.0f, 0.9f, 0.9f, 0.9f },
		{ 0.9f, 0.9f, 0.9f, 0.9f },
	};
	int i;

	switch (gr->debug_mode) {
	case DEBUG_MODE_WIREFRAME:
		/* Wireframe rendering is based on Celes & Abraham's "Fast and
		 * versatile texture-based wireframe rendering", 2011. */
		sconf->req.wireframe = true;
		sconf->wireframe_tex = gr->wireframe_tex;
		glEnableVertexAttribArray(SHADER_ATTRIB_LOC_BARYCENTRIC);
		glVertexAttribPointer(SHADER_ATTRIB_LOC_BARYCENTRIC, 4,
				      GL_UNSIGNED_BYTE, GL_TRUE, 0,
				      barycentrics);
		FALLTHROUGH;

	case DEBUG_MODE_DAMAGE:
		sconf->req.tint = true;
		copy_uniform4f(sconf->tint, tints[gr->debug_mode]);
		break;

	case DEBUG_MODE_OPAQUE:
		sconf->req.tint = opaque;
		copy_uniform4f(sconf->tint, tints[gr->debug_mode]);
		break;

	case DEBUG_MODE_BATCHES:
		sconf->req.tint = true;
		i = gr->nbatches++ % ARRAY_LENGTH(batch_tints);
		copy_uniform4f(sconf->tint, batch_tints[i]);
		break;

	default:
		unreachable("Invalid debug mode");
	}
}

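/* Draw an indexed triangle strip with the given shader config, falling back
 * to the error shader if the program cannot be used. */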
static void
draw_mesh(struct gl_renderer *gr,
	  struct weston_paint_node *pnode,
	  struct gl_shader_config *sconf,
	  const struct clipper_vertex *positions,
	  const uint32_t *barycentrics,
	  const uint16_t *indices,
	  int nidx,
	  bool opaque)
{
	assert(nidx > 0);

	if (gr->debug_mode)
		set_debug_mode(gr, sconf, barycentrics, opaque);

	if (!gl_renderer_use_program(gr, sconf))
		gl_renderer_send_shader_error(pnode); /* Use fallback shader. */

	glVertexAttribPointer(SHADER_ATTRIB_LOC_POSITION, 2, GL_FLOAT, GL_FALSE,
			      0, positions);
	glDrawElements(GL_TRIANGLE_STRIP, nidx, GL_UNSIGNED_SHORT, indices);

	if (gr->debug_mode == DEBUG_MODE_WIREFRAME)
		glDisableVertexAttribArray(SHADER_ATTRIB_LOC_BARYCENTRIC);
}

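/* Clip the damage quads of a paint node against 'region' and draw the
 * resulting mesh, flushing early in the (unlikely) event that 16-bit indices
 * would wrap around. */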
static void
repaint_region(struct gl_renderer *gr,
	       struct weston_paint_node *pnode,
	       struct clipper_quad *quads,
	       int nquads,
	       pixman_region32_t *region,
	       struct gl_shader_config *sconf,
	       bool opaque)
{
	pixman_box32_t *rects;
	struct clipper_vertex *positions;
	uint32_t *barycentrics = NULL;
	uint16_t *indices;
	int i, j, n, nrects, positions_size, barycentrics_size, indices_size;
	int nvtx = 0, nidx = 0;
	bool wireframe = gr->debug_mode == DEBUG_MODE_WIREFRAME;

	/* Build-time sub-mesh constants. Clipping emits 8 vertices max.
	 * store_indices() stores at most 10 indices. */
	const int nvtx_max = 8;
	const int nidx_max = 10;

	rects = pixman_region32_rectangles(region, &nrects);
	assert((nrects > 0) && (nquads > 0));

	/* Worst case allocation sizes per sub-mesh. */
	n = nquads * nrects;
	positions_size = n * nvtx_max * sizeof *positions;
	barycentrics_size = ROUND_UP_N(n * nvtx_max * sizeof *barycentrics, 32);
	indices_size = ROUND_UP_N(n * nidx_max * sizeof *indices, 32);

	positions = wl_array_add(&gr->position_stream, positions_size);
	indices = wl_array_add(&gr->indices, indices_size);
	if (wireframe)
		barycentrics = wl_array_add(&gr->barycentric_stream,
					    barycentrics_size);

	/* A node's damage mesh is created by clipping damage quads to surface
	 * rects and by chaining the resulting sub-meshes into an indexed
	 * triangle strip. Damage quads are transformed to surface space in a
	 * prior pass for clipping to take place there. A surface rect is always
	 * axis-aligned in surface space. In the common (and fast) case, a
	 * damage quad is axis-aligned and clipping generates an axis-aligned
	 * rectangle. When a damage quad isn't axis-aligned, clipping generates
	 * a convex [3,8]-gon. No vertices are generated if the intersection is
	 * empty.
	 *
	 *   0 -------- 1      Clipped vertices are emitted using quads'
	 *   !  _.-'/ '.       clockwise winding order. Sub-meshes are then
	 *   !_.-'  /   '.     triangulated by zigzagging between the first
	 *   5     /      2    and last emitted vertices, ending up with a
	 *    '.  /   _.-'!    counter-clockwise winding order.
	 *      './_.-'   !
	 *       4 ------ 3    Triangle strip: 0, 5, 1, 4, 2, 3.
	 */
	for (i = 0; i < nquads; i++) {
		for (j = 0; j < nrects; j++) {
			n = clipper_quad_clip_box32(&quads[i], &rects[j],
						    &positions[nvtx]);
			nidx += store_indices(n, nvtx, &indices[nidx]);
			if (wireframe)
				store_wireframes(n, &barycentrics[nvtx]);
			nvtx += n;

			/* Highly unlikely flush to prevent index wraparound.
			 * Subtracting 2 removes the last chaining indices. */
			if ((nvtx + nvtx_max) > UINT16_MAX) {
				draw_mesh(gr, pnode, sconf, positions,
					  barycentrics, indices, nidx - 2,
					  opaque);
				nvtx = nidx = 0;
			}
		}
	}

	if (nvtx)
		draw_mesh(gr, pnode, sconf, positions, barycentrics, indices,
			  nidx - 2, opaque);

	gr->position_stream.size = 0;
	gr->indices.size = 0;
	if (wireframe)
		gr->barycentric_stream.size = 0;
}

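/* Repaint one paint node: intersect its visible region with the output
 * damage, then draw the opaque and blended parts with appropriate shader
 * variants and blending state. */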
static void
draw_paint_node(struct weston_paint_node *pnode,
		pixman_region32_t *damage /* in global coordinates */)
{
	struct gl_renderer *gr = get_renderer(pnode->surface->compositor);
	struct gl_surface_state *gs = get_surface_state(pnode->surface);
	struct gl_buffer_state *gb = gs->buffer;
	struct weston_buffer *buffer = gs->buffer_ref.buffer;
	/* repaint bounding region in global coordinates: */
	pixman_region32_t repaint;
	/* opaque region in surface coordinates: */
	pixman_region32_t surface_opaque;
	/* non-opaque region in surface coordinates: */
	pixman_region32_t surface_blend;
	GLint filter;
	struct gl_shader_config sconf;
	struct clipper_quad *quads = NULL;
	int nquads;

	if (gb->shader_variant == SHADER_VARIANT_NONE &&
	    !buffer->direct_display)
		return;

	pixman_region32_init(&repaint);
	pixman_region32_intersect(&repaint, &pnode->visible, damage);

	if (!pixman_region32_not_empty(&repaint))
		goto out;

	if (!pnode->draw_solid && ensure_surface_buffer_is_ready(gr, gs) < 0)
		goto out;

	if (pnode->needs_filtering)
		filter = GL_LINEAR;
	else
		filter = GL_NEAREST;

	if (!gl_shader_config_init_for_paint_node(&sconf, pnode, filter))
		goto out;

	/* XXX: Should we be using ev->transform.opaque here? */
	if (pnode->is_fully_opaque)
		pixman_region32_init_rect(&surface_opaque, 0, 0,
					  pnode->surface->width,
					  pnode->surface->height);
	else {
		pixman_region32_init(&surface_opaque);
		pixman_region32_copy(&surface_opaque, &pnode->surface->opaque);
	}

	if (pnode->view->geometry.scissor_enabled)
		pixman_region32_intersect(&surface_opaque,
					  &surface_opaque,
					  &pnode->view->geometry.scissor);

	/* blended region is whole surface minus opaque region: */
	pixman_region32_init_rect(&surface_blend, 0, 0,
				  pnode->surface->width, pnode->surface->height);
	if (pnode->view->geometry.scissor_enabled)
		pixman_region32_intersect(&surface_blend, &surface_blend,
					  &pnode->view->geometry.scissor);
	pixman_region32_subtract(&surface_blend, &surface_blend,
				 &surface_opaque);

	if (pnode->draw_solid)
		prepare_placeholder(&sconf, pnode);

	if (pixman_region32_not_empty(&surface_opaque)) {
		struct gl_shader_config alt = sconf;

		if (alt.req.variant == SHADER_VARIANT_RGBA) {
			/* Special case for RGBA textures with possibly
			 * bad data in alpha channel: use the shader
			 * that forces texture alpha = 1.0.
			 * Xwayland surfaces need this.
			 */
			alt.req.variant = SHADER_VARIANT_RGBX;
		}

		if (pnode->view->alpha < 1.0)
			glEnable(GL_BLEND);
		else
			glDisable(GL_BLEND);

		transform_damage(pnode, &repaint, &quads, &nquads);
		repaint_region(gr, pnode, quads, nquads, &surface_opaque, &alt,
			       true);
		gs->used_in_output_repaint = true;
	}

	if (pixman_region32_not_empty(&surface_blend)) {
		glEnable(GL_BLEND);
		transform_damage(pnode, &repaint, &quads, &nquads);
		repaint_region(gr, pnode, quads, nquads, &surface_blend, &sconf,
			       false);
		gs->used_in_output_repaint = true;
	}

	if (quads)
		free(quads);

	pixman_region32_fini(&surface_blend);
	pixman_region32_fini(&surface_opaque);

out:
	pixman_region32_fini(&repaint);
}

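/* Repaint all paint nodes assigned to the output's primary plane (or needing
 * a hole punched) in bottom-to-top order. */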
static void
repaint_views(struct weston_output *output, pixman_region32_t *damage)
|
2012-09-05 21:54:15 -04:00
|
|
|
{
|
2024-05-07 16:22:40 +02:00
|
|
|
struct gl_renderer *gr = get_renderer(output->compositor);
|
2021-05-03 14:06:55 +03:00
|
|
|
struct weston_paint_node *pnode;
|
2012-09-05 21:54:15 -04:00
|
|
|
|
2024-05-07 16:22:40 +02:00
|
|
|
gr->nbatches = 0;
|
|
|
|
|
|
2023-01-03 19:30:24 +01:00
|
|
|
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
|
2024-05-06 13:09:28 +02:00
|
|
|
glEnableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);
|
2023-01-03 19:43:22 +01:00
|
|
|
|
2021-05-03 14:06:55 +03:00
|
|
|
wl_list_for_each_reverse(pnode, &output->paint_node_z_order_list,
|
|
|
|
|
z_order_link) {
|
2024-06-17 17:05:32 +09:00
|
|
|
if (pnode->plane == &output->primary_plane ||
|
|
|
|
|
pnode->need_hole)
|
2021-05-20 17:10:54 +03:00
|
|
|
draw_paint_node(pnode, damage);
|
2021-05-03 14:06:55 +03:00
|
|
|
}
|
2023-01-03 19:43:22 +01:00
|
|
|
|
2024-05-06 13:09:28 +02:00
|
|
|
glDisableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);
|
2012-09-05 21:54:15 -04:00
|
|
|
}
|
|
|
|
|
|
static int
gl_renderer_create_fence_fd(struct weston_output *output);

/* Updates the release fences of surfaces that were used in the current output
 * repaint. Should only be used from gl_renderer_repaint_output, so that the
 * information in gl_surface_state.used_in_output_repaint is accurate.
 */
static void
update_buffer_release_fences(struct weston_compositor *compositor,
			     struct weston_output *output)
{
	struct weston_paint_node *pnode;

	wl_list_for_each_reverse(pnode, &output->paint_node_z_order_list,
				 z_order_link) {
		struct gl_surface_state *gs;
		struct weston_buffer_release *buffer_release;
		int fence_fd;

		if (pnode->plane != &output->primary_plane)
			continue;

		if (pnode->draw_solid)
			continue;

		gs = get_surface_state(pnode->surface);
		buffer_release = gs->buffer_release_ref.buffer_release;

		if (!gs->used_in_output_repaint || !buffer_release)
			continue;

		fence_fd = gl_renderer_create_fence_fd(output);

		/* If we have a buffer_release then it means we support fences,
		 * and we should be able to create the release fence. If we
		 * can't, something has gone horribly wrong, so disconnect the
		 * client.
		 */
		if (fence_fd == -1) {
			linux_explicit_synchronization_send_server_error(
				buffer_release->resource,
				"Failed to create release fence");
			fd_clear(&buffer_release->fence_fd);
			continue;
		}

		/* At the moment it is safe to just replace the fence_fd,
		 * discarding the previous one:
		 *
		 * 1. If the previous fence fd represents a sync fence from
		 *    a previous repaint cycle, that fence fd is now not
		 *    sufficient to provide the release guarantee and should
		 *    be replaced.
		 *
		 * 2. If the fence fd represents a sync fence from another
		 *    output in the same repaint cycle, it's fine to replace
		 *    it since we are rendering to all outputs using the same
		 *    EGL context, so a fence issued for a later output
		 *    rendering is guaranteed to signal after fences for
		 *    previous output renderings.
		 *
		 * Note that the above is only valid if the buffer_release
		 * fences only originate from the GL renderer, which guarantees
		 * a total order of operations and fences. If we introduce
		 * fences from other sources (e.g., plane out-fences), we will
		 * need to merge fences instead.
		 */
		fd_update(&buffer_release->fence_fd, fence_fd);
	}
}

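/* For reference: fd_update() above is a helper defined elsewhere in the
 * tree. Assuming it only implements the close-and-replace semantic implied
 * by the comment above (adopt the new fence fd, close the old one so no
 * descriptor leaks across repaints, with -1 meaning "no fd"), an
 * illustrative sketch of it could look like this. The name is hypothetical
 * and the function is not used by the renderer. */
static inline void
example_fd_update(int *fd, int new_fd)
{
	/* Assumed invariant: *fd is either a valid fd or -1. */
	if (*fd != -1)
		close(*fd);
	*fd = new_fd;
}
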
/* Update the wireframe texture. The texture is either created, deleted or
 * resized depending on the wireframe debugging state and the area.
 */
static void
update_wireframe_tex(struct gl_renderer *gr,
		     const struct weston_geometry *area)
{
	int new_size, i;
	uint8_t *buffer;

	if (gr->debug_mode != DEBUG_MODE_WIREFRAME) {
		if (gr->wireframe_size) {
			glDeleteTextures(1, &gr->wireframe_tex);
			gr->wireframe_size = 0;
		}
		return;
	}

	/* Texture size at mip level 0 should be at least as large as the area
	 * in order to correctly anti-alias triangles covering it entirely. */
	glGetIntegerv(GL_MAX_TEXTURE_SIZE, &new_size);
	new_size = MIN(round_up_pow2_32(MAX(area->width, area->height)),
		       round_down_pow2_32(new_size));
	if (new_size <= gr->wireframe_size)
		return;

	glActiveTexture(GL_TEXTURE0 + TEX_UNIT_WIREFRAME);
	if (gr->wireframe_size == 0) {
		glGenTextures(1, &gr->wireframe_tex);
		glBindTexture(GL_TEXTURE_2D, gr->wireframe_tex);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
				GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
				GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
				GL_LINEAR_MIPMAP_LINEAR);
	} else {
		glBindTexture(GL_TEXTURE_2D, gr->wireframe_tex);
	}
	gr->wireframe_size = new_size;

	/* Generate mip chain with a wireframe thickness of 1.0. */
	buffer = xzalloc(new_size);
	buffer[0] = 0xff;
	for (i = 0; new_size; i++, new_size >>= 1)
		glTexImage2D(GL_TEXTURE_2D, i, GL_LUMINANCE, new_size, 1, 0,
			     GL_LUMINANCE, GL_UNSIGNED_BYTE, buffer);
	free(buffer);

	glActiveTexture(GL_TEXTURE0);
}

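/* Illustrative sketch, not used by the renderer: update_wireframe_tex()
 * relies on round_up_pow2_32()/round_down_pow2_32() helpers defined
 * elsewhere in the tree. Assuming they perform the usual power-of-two
 * rounding for v >= 1, equivalent bit-twiddling versions (hypothetical
 * names) would be: */
static inline uint32_t
example_round_up_pow2_32(uint32_t v)
{
	/* Propagate the highest set bit into all lower bits, then add 1;
	 * e.g. 5 -> 8, and 8 stays 8. */
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

static inline uint32_t
example_round_down_pow2_32(uint32_t v)
{
	/* Round up first, then halve unless v was already a power of two;
	 * e.g. 5 -> 4, and 8 stays 8. */
	uint32_t up = example_round_up_pow2_32(v);

	return (up == v) ? v : up >> 1;
}
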
static void
draw_output_border_texture(struct gl_renderer *gr,
			   struct gl_output_state *go,
			   struct gl_shader_config *sconf,
			   enum gl_renderer_border_side side,
			   int32_t x, int32_t y,
			   int32_t width, int32_t height)
{
	struct gl_border_image *img = &go->borders[side];
	static GLushort indices [] = { 0, 1, 3, 3, 1, 2 };

	if (!img->data) {
		if (img->tex) {
			glDeleteTextures(1, &img->tex);
			img->tex = 0;
		}

		return;
	}

	if (!img->tex) {
		glGenTextures(1, &img->tex);
		glBindTexture(GL_TEXTURE_2D, img->tex);

		glTexParameteri(GL_TEXTURE_2D,
				GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(GL_TEXTURE_2D,
				GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	} else {
		glBindTexture(GL_TEXTURE_2D, img->tex);
	}

	if (go->border_status & (1 << side))
		glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT,
			     img->tex_width, img->height, 0,
			     GL_BGRA_EXT, GL_UNSIGNED_BYTE, img->data);

	sconf->input_tex_filter = GL_NEAREST;
	sconf->input_tex[0] = img->tex;
	gl_renderer_use_program(gr, sconf);

	GLfloat texcoord[] = {
		0.0f, 0.0f,
		(GLfloat)img->width / (GLfloat)img->tex_width, 0.0f,
		(GLfloat)img->width / (GLfloat)img->tex_width, 1.0f,
		0.0f, 1.0f,
	};

	GLfloat position[] = {
		x, y,
		x + width, y,
		x + width, y + height,
		x, y + height
	};

	glVertexAttribPointer(SHADER_ATTRIB_LOC_POSITION, 2, GL_FLOAT, GL_FALSE,
			      0, position);
	glVertexAttribPointer(SHADER_ATTRIB_LOC_TEXCOORD, 2, GL_FLOAT, GL_FALSE,
			      0, texcoord);
	glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
}

static int
output_has_borders(struct weston_output *output)
{
	struct gl_output_state *go = get_output_state(output);

	return go->borders[GL_RENDERER_BORDER_TOP].data ||
	       go->borders[GL_RENDERER_BORDER_RIGHT].data ||
	       go->borders[GL_RENDERER_BORDER_BOTTOM].data ||
	       go->borders[GL_RENDERER_BORDER_LEFT].data;
}

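/* The four border images tile the framebuffer around the composited area
 * like this (top and bottom span the full framebuffer width, left and
 * right only the area height):
 *
 *	+----------------------+
 *	|         TOP          |
 *	+----+------------+----+
 *	| L  |    area    |  R |
 *	+----+------------+----+
 *	|        BOTTOM        |
 *	+----------------------+
 */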
static struct weston_geometry
output_get_border_area(const struct gl_output_state *go,
		       enum gl_renderer_border_side side)
{
	const struct weston_size *fb = &go->fb_size;
	const struct weston_geometry *area = &go->area;

	switch (side) {
	case GL_RENDERER_BORDER_TOP:
		return (struct weston_geometry){
			.x = 0,
			.y = 0,
			.width = fb->width,
			.height = area->y
		};
	case GL_RENDERER_BORDER_LEFT:
		return (struct weston_geometry){
			.x = 0,
			.y = area->y,
			.width = area->x,
			.height = area->height
		};
	case GL_RENDERER_BORDER_RIGHT:
		return (struct weston_geometry){
			.x = area->x + area->width,
			.y = area->y,
			.width = fb->width - area->x - area->width,
			.height = area->height
		};
	case GL_RENDERER_BORDER_BOTTOM:
		return (struct weston_geometry){
			.x = 0,
			.y = area->y + area->height,
			.width = fb->width,
			.height = fb->height - area->y - area->height
		};
	}

	assert(0);
	return (struct weston_geometry){};
}

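/* Draw the output decoration borders. Blending is disabled and a
 * framebuffer-sized projection (flipped via go->y_flip when needed) is set
 * up, then only the sides flagged dirty in border_status are redrawn. */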
static void
draw_output_borders(struct weston_output *output,
		    enum gl_border_status border_status)
{
	struct gl_shader_config sconf = {
		.req = {
			.variant = SHADER_VARIANT_RGBA,
			.input_is_premult = true,
		},
		.view_alpha = 1.0f,
	};
	struct weston_color_transform *ctransf;
	struct gl_output_state *go = get_output_state(output);
	struct gl_renderer *gr = get_renderer(output->compositor);
	const struct weston_size *fb = &go->fb_size;
	unsigned side;

	if (border_status == BORDER_STATUS_CLEAN)
		return; /* Clean. Nothing to do. */

	ctransf = output->color_outcome->from_sRGB_to_output;
	if (!gl_shader_config_set_color_transform(gr, &sconf, ctransf)) {
		weston_log("GL-renderer: %s failed to generate a color transformation.\n", __func__);
		return;
	}

	glDisable(GL_BLEND);
	glViewport(0, 0, fb->width, fb->height);

	weston_matrix_init(&sconf.projection);
	weston_matrix_translate(&sconf.projection,
				-fb->width / 2.0, -fb->height / 2.0, 0);
	weston_matrix_scale(&sconf.projection,
			    2.0 / fb->width, go->y_flip * 2.0 / fb->height, 1);

	glEnableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);
	glEnableVertexAttribArray(SHADER_ATTRIB_LOC_TEXCOORD);

	for (side = 0; side < 4; side++) {
		struct weston_geometry g;

		if (!(border_status & (1 << side)))
			continue;

		g = output_get_border_area(go, side);
		draw_output_border_texture(gr, go, &sconf, side,
					   g.x, g.y, g.width, g.height);
	}

	glDisableVertexAttribArray(SHADER_ATTRIB_LOC_TEXCOORD);
	glDisableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);
}

static void
output_get_border_damage(struct weston_output *output,
			 enum gl_border_status border_status,
			 pixman_region32_t *damage)
{
	struct gl_output_state *go = get_output_state(output);
	unsigned side;

	for (side = 0; side < 4; side++) {
		struct weston_geometry g;

		if (!(border_status & (1 << side)))
			continue;

		g = output_get_border_area(go, side);
		pixman_region32_union_rect(damage, damage,
					   g.x, g.y, g.width, g.height);
	}
}

/**
 * Given a region in Weston's (top-left-origin) global co-ordinate space,
 * translate it to the co-ordinate space used by GL for our output
 * rendering. This requires shifting it into output co-ordinate space:
 * translating for output offset within the global co-ordinate space,
 * multiplying by output scale to get buffer rather than logical size.
 *
 * Finally, if borders are drawn around the output, we translate the area
 * to account for the border region around the outside, and add any
 * damage if the borders have been redrawn.
 *
 * @param output The output whose co-ordinate space we are after
 * @param global_region The affected region in global co-ordinate space
 * @param[out] rects quads in {x,y,w,h} order; caller must free
 * @param[out] nrects Number of quads (4x number of co-ordinates)
 */
static void
pixman_region_to_egl(struct weston_output *output,
		     struct pixman_region32 *global_region,
		     EGLint **rects,
		     EGLint *nrects)
{
	struct gl_output_state *go = get_output_state(output);
	pixman_region32_t transformed;
	struct pixman_box32 *box;
	EGLint *d;
	int i;

	/* Translate from global to output co-ordinate space. */
	pixman_region32_init(&transformed);
	weston_region_global_to_output(&transformed,
				       output,
				       global_region);

	/* If we have borders drawn around the output, shift our output damage
	 * to account for borders being drawn around the outside, adding any
	 * damage resulting from borders being redrawn. */
	if (output_has_borders(output)) {
		pixman_region32_translate(&transformed,
					  go->area.x, go->area.y);
		output_get_border_damage(output, go->border_status,
					 &transformed);
	}

	/* Convert from a Pixman region into {x,y,w,h} quads, potentially
	 * flipping in the Y axis to account for GL's lower-left-origin
	 * coordinate space if the output uses the GL coordinate space. */
	box = pixman_region32_rectangles(&transformed, nrects);
	*rects = malloc(*nrects * 4 * sizeof(EGLint));

	d = *rects;
	for (i = 0; i < *nrects; ++i) {
		*d++ = box[i].x1;
		*d++ = is_y_flipped(go) ?
		       go->fb_size.height - box[i].y2 : box[i].y1;
		*d++ = box[i].x2 - box[i].x1;
		*d++ = box[i].y2 - box[i].y1;
	}

	pixman_region32_fini(&transformed);
}

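/* Worked example for pixman_region_to_egl()'s Y flip above (hypothetical
 * numbers): with a 1920x1080 framebuffer and a damage box
 * {x1=10, y1=20, x2=110, y2=70}, a y-flipped output yields the EGL rect
 * {10, 1080 - 70 = 1010, 100, 50}: the rect is re-anchored at its bottom
 * edge measured from the bottom of the framebuffer, while width and
 * height are unchanged. */
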
static void
blit_shadow_to_output(struct weston_output *output,
		      pixman_region32_t *output_damage)
{
	struct gl_output_state *go = get_output_state(output);
	struct gl_shader_config sconf = {
		.req = {
			.variant = SHADER_VARIANT_RGBA,
			.input_is_premult = true,
		},
		.projection = {
			.d = { /* transpose */
				2.0f, 0.0f, 0.0f, 0.0f,
				0.0f, go->y_flip * 2.0f, 0.0f, 0.0f,
				0.0f, 0.0f, 1.0f, 0.0f,
				-1.0f, -go->y_flip, 0.0f, 1.0f
			},
			.type = WESTON_MATRIX_TRANSFORM_SCALE |
				WESTON_MATRIX_TRANSFORM_TRANSLATE,
		},
		.view_alpha = 1.0f,
		.input_tex_filter = GL_NEAREST,
		.input_tex[0] = go->shadow_tex,
	};
	struct gl_renderer *gr = get_renderer(output->compositor);
	double width = go->area.width;
	double height = go->area.height;
	struct weston_color_transform *ctransf;
	pixman_box32_t *rects;
	int n_rects;
	int i;
	pixman_region32_t translated_damage;
	struct { GLfloat x, y; } position[4];
	struct { GLfloat s, t; } texcoord[4];

	ctransf = output->color_outcome->from_blend_to_output;
	if (!gl_shader_config_set_color_transform(gr, &sconf, ctransf)) {
		weston_log("GL-renderer: %s failed to generate a color transformation.\n", __func__);
		return;
	}

	pixman_region32_init(&translated_damage);

	gl_renderer_use_program(gr, &sconf);
	glDisable(GL_BLEND);

	/* output_damage is in global coordinates */
	pixman_region32_intersect(&translated_damage, output_damage,
				  &output->region);
	/* Convert to output pixel coordinates in-place */
	weston_region_global_to_output(&translated_damage, output,
				       &translated_damage);

	glEnableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);
	glEnableVertexAttribArray(SHADER_ATTRIB_LOC_TEXCOORD);

	rects = pixman_region32_rectangles(&translated_damage, &n_rects);
	for (i = 0; i < n_rects; i++) {
		const GLfloat x1 = rects[i].x1 / width;
		const GLfloat x2 = rects[i].x2 / width;
		const GLfloat y1 = rects[i].y1 / height;
		const GLfloat y2 = rects[i].y2 / height;
		const GLfloat y1_flipped = 1.0f - y1;
		const GLfloat y2_flipped = 1.0f - y2;

		position[0].x = x1;
		position[0].y = y1;
		position[1].x = x2;
		position[1].y = y1;
		position[2].x = x2;
		position[2].y = y2;
		position[3].x = x1;
		position[3].y = y2;

		texcoord[0].s = x1;
		texcoord[0].t = is_y_flipped(go) ? y1_flipped : y1;
		texcoord[1].s = x2;
		texcoord[1].t = is_y_flipped(go) ? y1_flipped : y1;
		texcoord[2].s = x2;
		texcoord[2].t = is_y_flipped(go) ? y2_flipped : y2;
		texcoord[3].s = x1;
		texcoord[3].t = is_y_flipped(go) ? y2_flipped : y2;

		glVertexAttribPointer(SHADER_ATTRIB_LOC_POSITION, 2, GL_FLOAT,
				      GL_FALSE, 0, position);

		glVertexAttribPointer(SHADER_ATTRIB_LOC_TEXCOORD, 2, GL_FLOAT,
				      GL_FALSE, 0, texcoord);
		glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
	}

	glDisableVertexAttribArray(SHADER_ATTRIB_LOC_TEXCOORD);
	glDisableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);

	glBindTexture(GL_TEXTURE_2D, 0);
	pixman_region32_fini(&translated_damage);
}

/* NOTE: We now allow falling back to ARGB gl visuals when XRGB is
 * unavailable, so we're assuming the background has no transparency
 * and that everything with a blend, like drop shadows, will have something
 * opaque (like the background) drawn underneath it.
 *
 * Depending on the underlying hardware, violating that assumption could
 * result in seeing through to another display plane.
 */
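/* Output repaint entry point. In order: pick up the renderbuffer for this
 * frame, reset per-surface used_in_output_repaint flags, compute the
 * output projection matrix, redirect drawing to the shadow framebuffer if
 * one exists, repaint the views, blit the shadow back, draw borders, run
 * capture tasks, swap buffers (with damage when supported), and finally
 * submit timeline/fence bookkeeping and any renderbuffer readback. */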
static void
gl_renderer_repaint_output(struct weston_output *output,
			   pixman_region32_t *output_damage,
			   weston_renderbuffer_t renderbuffer)
{
	struct gl_output_state *go = get_output_state(output);
	struct weston_compositor *compositor = output->compositor;
	struct gl_renderer *gr = get_renderer(compositor);
	static int errored;
	struct weston_paint_node *pnode;
	const int32_t area_y =
		is_y_flipped(go) ? go->fb_size.height - go->area.height - go->area.y : go->area.y;
	struct gl_renderbuffer *rb;

	assert(output->from_blend_to_output_by_backend ||
	       output->color_outcome->from_blend_to_output == NULL ||
	       shadow_exists(go));

	if (use_output(output) < 0)
		return;

	rb = gl_renderer_update_renderbuffers(output, output_damage,
					      renderbuffer);

	/* Clear the used_in_output_repaint flag, so that we can properly track
	 * which surfaces were used in this output repaint. */
	wl_list_for_each_reverse(pnode, &output->paint_node_z_order_list,
				 z_order_link) {
		if (pnode->plane == &output->primary_plane) {
			struct gl_surface_state *gs =
				get_surface_state(pnode->surface);
			gs->used_in_output_repaint = false;
		}
	}

	timeline_begin_render_query(gr, go->render_query);

	/* Calculate the global GL matrix */
	go->output_matrix = output->matrix;
	weston_matrix_translate(&go->output_matrix,
				-(go->area.width / 2.0),
				-(go->area.height / 2.0), 0);
	weston_matrix_scale(&go->output_matrix,
			    2.0 / go->area.width,
			    go->y_flip * 2.0 / go->area.height, 1);

	/* If using shadow, redirect all drawing to it first. */
	if (shadow_exists(go)) {
		glBindFramebuffer(GL_FRAMEBUFFER, go->shadow_fb);
		glViewport(0, 0, go->area.width, go->area.height);
	} else {
		glBindFramebuffer(GL_FRAMEBUFFER, rb->fb);
		glViewport(go->area.x, area_y,
			   go->area.width, go->area.height);
	}

	if (gr->wireframe_dirty) {
		update_wireframe_tex(gr, &go->area);
		gr->wireframe_dirty = false;
	}

	/* Some of the debug modes need an entire repaint to make sure that we
	 * clear any debug left over on this buffer. This precludes the use of
	 * EGL_EXT_swap_buffers_with_damage and EGL_KHR_partial_update, since we
	 * damage the whole area. */
	if (gr->debug_clear) {
		pixman_region32_t undamaged;
		pixman_region32_t *damaged =
			shadow_exists(go) ? output_damage : &rb->damage;
		int debug_mode = gr->debug_mode;

		pixman_region32_init(&undamaged);
		pixman_region32_subtract(&undamaged, &output->region, damaged);
		gr->debug_mode = DEBUG_MODE_NONE;
		repaint_views(output, &undamaged);
		gr->debug_mode = debug_mode;
		pixman_region32_fini(&undamaged);
	}

	if (egl_display_has(gr, EXTENSION_KHR_PARTIAL_UPDATE) &&
	    go->egl_surface != EGL_NO_SURFACE &&
	    !gr->debug_clear) {
		int n_egl_rects;
		EGLint *egl_rects;

		/* For partial_update, we need to pass the region which has
		 * changed since we last rendered into this specific buffer. */
		pixman_region_to_egl(output, &rb->damage,
				     &egl_rects, &n_egl_rects);
		gr->set_damage_region(gr->egl_display, go->egl_surface,
				      egl_rects, n_egl_rects);
		free(egl_rects);
	}

	if (shadow_exists(go)) {
		/* Repaint into shadow. */
		if (compositor->test_data.test_quirks.gl_force_full_redraw_of_shadow_fb)
			repaint_views(output, &output->region);
		else
			repaint_views(output, output_damage);

		glBindFramebuffer(GL_FRAMEBUFFER, rb->fb);
		glViewport(go->area.x, area_y,
			   go->area.width, go->area.height);
		blit_shadow_to_output(output, gr->debug_clear ?
				      &output->region : &rb->damage);
	} else {
		repaint_views(output, &rb->damage);
	}

	draw_output_borders(output, rb->border_damage);

	gl_renderer_do_capture_tasks(gr, output,
				     WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER);
	gl_renderer_do_capture_tasks(gr, output,
				     WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER);
	wl_signal_emit(&output->frame_signal, output_damage);

	timeline_end_render_query(gr);

	if (go->render_sync != EGL_NO_SYNC_KHR)
		gr->destroy_sync(gr->egl_display, go->render_sync);
	go->render_sync = create_render_sync(gr);

	if (go->egl_surface != EGL_NO_SURFACE) {
		EGLBoolean ret;

		if (gl_features_has(gr, FEATURE_SWAP_BUFFERS_WITH_DAMAGE) &&
		    !gr->debug_clear) {
			int n_egl_rects;
			EGLint *egl_rects;

			/* For swap_buffers_with_damage, we need to pass the region
			 * which has changed since the previous SwapBuffers on this
			 * surface - this is output_damage. */
			pixman_region_to_egl(output, output_damage,
					     &egl_rects, &n_egl_rects);
			ret = gr->swap_buffers_with_damage(gr->egl_display,
							   go->egl_surface,
							   egl_rects, n_egl_rects);
			free(egl_rects);
		} else {
			ret = eglSwapBuffers(gr->egl_display, go->egl_surface);
		}

		if (ret == EGL_FALSE && !errored) {
			errored = 1;
			weston_log("Failed in eglSwapBuffers.\n");
			gl_renderer_print_egl_error_state();
		}
	} else {
		glFlush();
	}

	rb->border_damage = BORDER_STATUS_CLEAN;
	go->border_status = BORDER_STATUS_CLEAN;

	/* We have to submit the render sync objects after swap buffers, since
	 * the objects get assigned a valid sync file fd only after a gl flush.
	 */
	timeline_submit_render_sync(gr, output, go->render_sync,
				    go->render_query);

	update_buffer_release_fences(compositor, output);

	if (rb->type == RENDERBUFFER_BUFFER && rb->buffer.data) {
		uint32_t *pixels = rb->buffer.data;
		int width = go->fb_size.width;
		int stride = width * (compositor->read_format->bpp >> 3);
		pixman_box32_t extents;
		struct weston_geometry rect = {
			.x = go->area.x,
			.width = go->area.width,
		};

		/* XXX Needs a bit of rework in order to respect the backend
		 * provided stride. */
		assert(rb->buffer.stride == stride);

		extents = weston_matrix_transform_rect(&output->matrix,
						       rb->damage.extents);

		if (gr->debug_clear) {
			rect.y = go->area.y;
			rect.height = go->area.height;
		} else {
			rect.y = go->area.y + extents.y1;
			rect.height = extents.y2 - extents.y1;
			pixels += rect.width * extents.y1;
		}

		if (gr->gl_version >= gl_version(3, 0) && !gr->debug_clear) {
			glPixelStorei(GL_PACK_ROW_LENGTH, width);
			rect.width = extents.x2 - extents.x1;
			rect.x += extents.x1;
			pixels += extents.x1;
		}

		gl_renderer_do_read_pixels(gr, go, compositor->read_format,
					   pixels, stride, &rect);

		if (gr->gl_version >= gl_version(3, 0))
			glPixelStorei(GL_PACK_ROW_LENGTH, 0);
	}

	pixman_region32_clear(&rb->damage);

	gl_renderer_garbage_collect_programs(gr);
}

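/* Read back pixels from the composited area. The caller passes
 * area-relative coordinates with a top-left origin; they are converted to
 * GL's bottom-left-origin framebuffer coordinates below. For example
 * (hypothetical numbers), with fb_size.height = 1080, area.y = 30 and
 * area.height = 1020, a request at y = 0 reads from GL row
 * 1080 - 30 - 1020 = 30. */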
static int
gl_renderer_read_pixels(struct weston_output *output,
			const struct pixel_format_info *format, void *pixels,
			uint32_t x, uint32_t y,
			uint32_t width, uint32_t height)
{
	struct gl_output_state *go = get_output_state(output);

	x += go->area.x;
	y += go->fb_size.height - go->area.y - go->area.height;

	if (format->gl_format == 0 || format->gl_type == 0)
		return -1;

	if (use_output(output) < 0)
		return -1;

	glPixelStorei(GL_PACK_ALIGNMENT, 1);
	glReadPixels(x, y, width, height, format->gl_format,
		     format->gl_type, pixels);
	glPixelStorei(GL_PACK_ALIGNMENT, 4);

	return 0;
}

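/* Map a sized texture internal format to the generic client-data format
 * that the GLES glTexImage2D/glTexSubImage2D "format" argument expects:
 * single- and dual-channel sized formats map to their generic
 * counterparts, wide RGBA variants collapse to GL_RGBA, and anything else
 * passes through unchanged. */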
static GLenum
gl_format_from_internal(GLenum internal_format)
{
	switch (internal_format) {
	case GL_R8_EXT:
		return GL_RED_EXT;
	case GL_RG8_EXT:
		return GL_RG_EXT;
	case GL_RGBA16_EXT:
	case GL_RGBA16F:
	case GL_RGB10_A2:
		return GL_RGBA;
	default:
		return internal_format;
	}
}

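/* Flush accumulated surface damage for a SHM buffer into its GL textures.
 * Damage is first merged into gb->texture_damage; the upload itself only
 * happens for nodes on the primary plane, either as one full glTexImage2D
 * per plane or as per-rectangle glTexSubImage2D calls using the unpack
 * row-length/skip parameters, with coordinates scaled down by each plane's
 * horizontal and vertical subsampling factors. */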
static void
gl_renderer_flush_damage(struct weston_paint_node *pnode)
{
	struct weston_surface *surface = pnode->surface;
	const struct weston_testsuite_quirks *quirks =
		&surface->compositor->test_data.test_quirks;
	struct weston_buffer *buffer = surface->buffer_ref.buffer;
	struct gl_surface_state *gs = get_surface_state(surface);
	struct gl_buffer_state *gb = gs->buffer;
	pixman_box32_t *rectangles;
	uint8_t *data;
	int i, j, n;

	assert(buffer && gb);

	pixman_region32_union(&gb->texture_damage,
			      &gb->texture_damage, &surface->damage);

	if (pnode->plane != &pnode->output->primary_plane)
		return;

	/* This can happen if a SHM wl_buffer gets destroyed before we flush
	 * damage, because wayland-server just nukes the wl_shm_buffer from
	 * underneath us */
	if (!buffer->shm_buffer)
		return;

	if (!pixman_region32_not_empty(&gb->texture_damage) &&
	    !gb->needs_full_upload)
		goto done;

	data = wl_shm_buffer_get_data(buffer->shm_buffer);

	if (gb->needs_full_upload || quirks->gl_force_full_upload) {
		wl_shm_buffer_begin_access(buffer->shm_buffer);

		for (j = 0; j < gb->num_textures; j++) {
			int hsub = pixel_format_hsub(buffer->pixel_format, j);
			int vsub = pixel_format_vsub(buffer->pixel_format, j);

			glBindTexture(GL_TEXTURE_2D, gb->textures[j]);
			glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
				      gb->pitch / hsub);
			glTexImage2D(GL_TEXTURE_2D, 0,
				     gb->gl_format[j],
				     buffer->width / hsub,
				     buffer->height / vsub,
				     0,
				     gl_format_from_internal(gb->gl_format[j]),
				     gb->gl_pixel_type,
				     data + gb->offset[j]);
		}
		wl_shm_buffer_end_access(buffer->shm_buffer);
		goto done;
	}

	rectangles = pixman_region32_rectangles(&gb->texture_damage, &n);
	wl_shm_buffer_begin_access(buffer->shm_buffer);
	for (i = 0; i < n; i++) {
		pixman_box32_t r;

		r = weston_surface_to_buffer_rect(surface, rectangles[i]);

		for (j = 0; j < gb->num_textures; j++) {
			int hsub = pixel_format_hsub(buffer->pixel_format, j);
			int vsub = pixel_format_vsub(buffer->pixel_format, j);

			glBindTexture(GL_TEXTURE_2D, gb->textures[j]);
			glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT,
				      gb->pitch / hsub);
			glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, r.x1 / hsub);
			glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, r.y1 / vsub);
			glTexSubImage2D(GL_TEXTURE_2D, 0,
					r.x1 / hsub,
					r.y1 / vsub,
					(r.x2 - r.x1) / hsub,
					(r.y2 - r.y1) / vsub,
					gl_format_from_internal(gb->gl_format[j]),
					gb->gl_pixel_type,
					data + gb->offset[j]);
		}
	}
	wl_shm_buffer_end_access(buffer->shm_buffer);

done:
	glPixelStorei(GL_UNPACK_ROW_LENGTH_EXT, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS_EXT, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS_EXT, 0);

	pixman_region32_fini(&gb->texture_damage);
	pixman_region32_init(&gb->texture_damage);
	gb->needs_full_upload = false;

	weston_buffer_reference(&gs->buffer_ref, buffer,
				BUFFER_WILL_NOT_BE_ACCESSED);
	weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
}

static void
destroy_buffer_state(struct gl_buffer_state *gb)
{
	int i;

	glDeleteTextures(gb->num_textures, gb->textures);

	for (i = 0; i < gb->num_images; i++)
		gb->gr->destroy_image(gb->gr->egl_display, gb->images[i]);

	pixman_region32_fini(&gb->texture_damage);
	wl_list_remove(&gb->destroy_listener.link);

	free(gb);
}

static void
handle_buffer_destroy(struct wl_listener *listener, void *data)
{
	struct weston_buffer *buffer = data;
	struct gl_buffer_state *gb =
		container_of(listener, struct gl_buffer_state, destroy_listener);

	assert(gb == buffer->renderer_private);
	buffer->renderer_private = NULL;

	destroy_buffer_state(gb);
}

static void
ensure_textures(struct gl_buffer_state *gb, GLenum target, int num_textures)
{
	int i;

	assert(gb->num_textures == 0);

	for (i = 0; i < num_textures; i++) {
		glGenTextures(1, &gb->textures[i]);
		glBindTexture(target, gb->textures[i]);
		glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
		glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	}
	gb->num_textures = num_textures;
	glBindTexture(target, 0);
}

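/* Attach a SHM buffer to a surface, translating the wl_shm format into GL
 * texture planes. Worked example for the plane offset pre-computation
 * below (hypothetical numbers): a 256x256 YUV420 buffer with a 256-pixel
 * pitch has plane offsets { 0, 256*256 = 65536, 65536 + 128*128 = 81920 },
 * since the two chroma planes are subsampled by 2 in both directions. */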
static void
gl_renderer_attach_shm(struct weston_surface *es, struct weston_buffer *buffer)
{
	struct weston_compositor *ec = es->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	struct gl_surface_state *gs = get_surface_state(es);
	struct gl_buffer_state *gb;
	struct weston_buffer *old_buffer = gs->buffer_ref.buffer;
	GLenum gl_format[3] = {0, 0, 0};
	GLenum gl_pixel_type;
	enum gl_shader_texture_variant shader_variant;
	int pitch;
	int offset[3] = { 0, 0, 0 };
	unsigned int num_planes;
	unsigned int i;
	bool using_glesv2 = gr->gl_version < gl_version(3, 0);
	const struct yuv_format_descriptor *yuv = NULL;

	/* When sampling YUV input textures and converting to RGB by hand, we
	 * have to bind to each plane separately, with a different format. For
	 * example, YUYV will have a single wl_shm input plane, but be bound as
	 * two planes within gl-renderer, one as GR88 and one as ARGB8888.
	 *
	 * The yuv_formats array gives us this translation.
	 */
	for (i = 0; i < ARRAY_LENGTH(yuv_formats); ++i) {
		if (yuv_formats[i].format == buffer->pixel_format->format) {
			yuv = &yuv_formats[i];
			break;
		}
	}

	if (yuv) {
		unsigned int out;
		unsigned int shm_plane_count;
		int shm_offset[3] = { 0 };
		int bpp = buffer->pixel_format->bpp;

		/* XXX: Pitch here is given in pixel units, whereas offset is
		 * given in byte units. This is fragile and will break with
		 * new formats.
		 */
		if (!bpp)
			bpp = pixel_format_get_info(yuv->plane[0].format)->bpp;
		pitch = buffer->stride / (bpp / 8);

		/* well, they all are so far ... */
		gl_pixel_type = GL_UNSIGNED_BYTE;
		shader_variant = yuv->shader_variant;

		/* pre-compute all plane offsets in shm buffer */
		shm_plane_count = pixel_format_get_plane_count(buffer->pixel_format);
		assert(shm_plane_count <= ARRAY_LENGTH(shm_offset));
		for (i = 1; i < shm_plane_count; i++) {
			int hsub, vsub;

			hsub = pixel_format_hsub(buffer->pixel_format, i - 1);
			vsub = pixel_format_vsub(buffer->pixel_format, i - 1);
			shm_offset[i] = shm_offset[i - 1] +
				((pitch / hsub) * (buffer->height / vsub));
		}

		num_planes = yuv->output_planes;
		for (out = 0; out < num_planes; out++) {
			const struct pixel_format_info *sub_info =
				pixel_format_get_info(yuv->plane[out].format);

			assert(sub_info);
			assert(yuv->plane[out].plane_index < (int) shm_plane_count);

			gl_format[out] = sub_info->gl_format;
			offset[out] = shm_offset[yuv->plane[out].plane_index];
		}
	} else {
		int bpp = buffer->pixel_format->bpp;

		assert(pixel_format_get_plane_count(buffer->pixel_format) == 1);
		num_planes = 1;

		if (pixel_format_is_opaque(buffer->pixel_format))
			shader_variant = SHADER_VARIANT_RGBX;
		else
			shader_variant = SHADER_VARIANT_RGBA;

		assert(bpp > 0 && !(bpp & 7));
		pitch = buffer->stride / (bpp / 8);

		gl_format[0] = buffer->pixel_format->gl_format;
		gl_pixel_type = buffer->pixel_format->gl_type;
	}

	for (i = 0; i < ARRAY_LENGTH(gb->gl_format); i++) {
		/* Fall back to GL_RGBA for 10bpc formats on ES2 */
		if (using_glesv2 && gl_format[i] == GL_RGB10_A2) {
			assert(gl_pixel_type == GL_UNSIGNED_INT_2_10_10_10_REV_EXT);
			gl_format[i] = GL_RGBA;
		}

		/* Fall back to old luminance-based formats if we don't have
		 * GL_EXT_texture_rg, which requires different sampling for
		 * two-component formats. */
		if (using_glesv2 &&
		    !gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_RG) &&
		    gl_format[i] == GL_R8_EXT) {
			assert(gl_pixel_type == GL_UNSIGNED_BYTE);
			assert(shader_variant == SHADER_VARIANT_Y_U_V ||
			       shader_variant == SHADER_VARIANT_Y_UV);
			gl_format[i] = GL_LUMINANCE;
		}
		if (using_glesv2 &&
		    !gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_RG) &&
		    gl_format[i] == GL_RG8_EXT) {
			assert(gl_pixel_type == GL_UNSIGNED_BYTE);
			assert(shader_variant == SHADER_VARIANT_Y_UV ||
			       shader_variant == SHADER_VARIANT_Y_XUXV);
			shader_variant = SHADER_VARIANT_Y_XUXV;
			gl_format[i] = GL_LUMINANCE_ALPHA;
		}
	}

	/* If this surface previously had a SHM buffer, its gl_buffer_state will
	 * be speculatively retained. Check to see if we can reuse it rather
	 * than allocating a new one. */
	assert(!gs->buffer ||
	       (old_buffer && old_buffer->type == WESTON_BUFFER_SHM));
	if (gs->buffer &&
	    buffer->width == old_buffer->width &&
	    buffer->height == old_buffer->height &&
	    buffer->pixel_format == old_buffer->pixel_format) {
		gs->buffer->pitch = pitch;
		memcpy(gs->buffer->offset, offset, sizeof(offset));
		return;
	}

	if (gs->buffer)
		destroy_buffer_state(gs->buffer);
	gs->buffer = NULL;

	gb = xzalloc(sizeof(*gb));
	gb->gr = gr;

	wl_list_init(&gb->destroy_listener.link);
	pixman_region32_init(&gb->texture_damage);

	gb->pitch = pitch;
	gb->shader_variant = shader_variant;
	ARRAY_COPY(gb->offset, offset);
	ARRAY_COPY(gb->gl_format, gl_format);
	gb->gl_channel_order = buffer->pixel_format->gl_channel_order;
	gb->gl_pixel_type = gl_pixel_type;
	gb->needs_full_upload = true;

	gs->buffer = gb;
	gs->surface = es;

	ensure_textures(gb, GL_TEXTURE_2D, num_planes);
}

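/* Describe a legacy (non-dmabuf) EGL Wayland buffer: query its size and
 * texture format through the bound display, guess a representative DRM
 * fourcc for it, create one EGLImage per plane, and allocate the matching
 * textures. Returns false if the display is not bound or any query or
 * image creation fails. */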
2022-01-14 01:02:21 +00:00
|
|
|
static bool
|
|
|
|
|
gl_renderer_fill_buffer_info(struct weston_compositor *ec,
|
|
|
|
|
struct weston_buffer *buffer)
|
|
|
|
|
{
|
|
|
|
|
struct gl_renderer *gr = get_renderer(ec);
|
2024-10-10 08:46:22 +02:00
|
|
|
struct gl_buffer_state *gb;
|
2022-01-14 01:55:50 +00:00
|
|
|
EGLint format;
|
2023-06-29 12:35:37 +02:00
|
|
|
uint32_t fourcc = DRM_FORMAT_INVALID;
|
2022-01-20 18:49:05 +00:00
|
|
|
GLenum target;
|
2022-01-15 17:36:02 +00:00
|
|
|
EGLint y_inverted;
|
2022-01-14 01:02:21 +00:00
|
|
|
bool ret = true;
|
2022-01-20 16:05:03 +00:00
|
|
|
int i;
|
|
|
|
|
|
2024-10-10 08:46:22 +02:00
|
|
|
/* Ensure that EGL_WL_bind_wayland_display (and EGL_KHR_image_base) is
|
|
|
|
|
* available and that the Wayland display is bound. */
|
|
|
|
|
if (!gr->display_bound)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
gb = zalloc(sizeof(*gb));
|
2022-01-20 16:05:03 +00:00
|
|
|
if (!gb)
|
|
|
|
|
return false;
|
2022-01-14 01:02:21 +00:00
|
|
|
|
2022-01-20 18:23:26 +00:00
|
|
|
gb->gr = gr;
|
2022-01-20 18:14:07 +00:00
|
|
|
pixman_region32_init(&gb->texture_damage);
|
|
|
|
|
|
2022-01-14 01:02:21 +00:00
|
|
|
buffer->legacy_buffer = (struct wl_buffer *)buffer->resource;
|
|
|
|
|
ret &= gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
|
|
|
|
|
EGL_WIDTH, &buffer->width);
|
|
|
|
|
ret &= gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
|
|
|
|
|
EGL_HEIGHT, &buffer->height);
|
2022-01-14 01:55:50 +00:00
|
|
|
ret &= gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
|
|
|
|
|
EGL_TEXTURE_FORMAT, &format);
|
2022-01-20 16:05:03 +00:00
|
|
|
if (!ret) {
|
|
|
|
|
weston_log("eglQueryWaylandBufferWL failed\n");
|
|
|
|
|
gl_renderer_print_egl_error_state();
|
|
|
|
|
goto err_free;
|
|
|
|
|
}
|
2022-01-14 01:55:50 +00:00
|
|
|
|
|
|
|
|
/* The legacy EGL buffer interface only describes the channels we can
|
|
|
|
|
* sample from; not their depths or order. Take a stab at something
|
|
|
|
|
* which might be representative. Pessimise extremely hard for
|
|
|
|
|
* TEXTURE_EXTERNAL_OES. */
|
|
|
|
|
switch (format) {
|
|
|
|
|
case EGL_TEXTURE_RGB:
|
|
|
|
|
fourcc = DRM_FORMAT_XRGB8888;
|
2022-01-20 16:05:03 +00:00
|
|
|
gb->num_images = 1;
|
|
|
|
|
gb->shader_variant = SHADER_VARIANT_RGBA;
|
2022-01-14 01:55:50 +00:00
|
|
|
break;
|
|
|
|
|
case EGL_TEXTURE_RGBA:
|
|
|
|
|
fourcc = DRM_FORMAT_ARGB8888;
|
2022-01-20 16:05:03 +00:00
|
|
|
gb->num_images = 1;
|
|
|
|
|
gb->shader_variant = SHADER_VARIANT_RGBA;
|
|
|
|
|
break;
|
|
|
|
|
case EGL_TEXTURE_EXTERNAL_WL:
|
|
|
|
|
fourcc = DRM_FORMAT_ARGB8888;
|
|
|
|
|
gb->num_images = 1;
|
|
|
|
|
gb->shader_variant = SHADER_VARIANT_EXTERNAL;
|
2022-01-14 01:55:50 +00:00
|
|
|
break;
|
|
|
|
|
case EGL_TEXTURE_Y_XUXV_WL:
|
|
|
|
|
fourcc = DRM_FORMAT_YUYV;
|
2022-01-20 16:05:03 +00:00
|
|
|
gb->num_images = 2;
|
|
|
|
|
gb->shader_variant = SHADER_VARIANT_Y_XUXV;
|
2022-01-14 01:55:50 +00:00
|
|
|
break;
|
|
|
|
|
case EGL_TEXTURE_Y_UV_WL:
|
|
|
|
|
fourcc = DRM_FORMAT_NV12;
|
2022-01-20 16:05:03 +00:00
|
|
|
gb->num_images = 2;
|
|
|
|
|
gb->shader_variant = SHADER_VARIANT_Y_UV;
|
2022-01-14 01:55:50 +00:00
|
|
|
break;
|
|
|
|
|
case EGL_TEXTURE_Y_U_V_WL:
|
|
|
|
|
fourcc = DRM_FORMAT_YUV420;
|
2022-05-19 23:26:47 +01:00
|
|
|
gb->num_images = 3;
|
2022-01-20 16:05:03 +00:00
|
|
|
gb->shader_variant = SHADER_VARIANT_Y_U_V;
|
2022-01-14 01:55:50 +00:00
|
|
|
break;
|
2022-05-12 17:11:17 +02:00
|
|
|
default:
|
|
|
|
|
assert(0 && "not reached");
|
2022-01-14 01:55:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
buffer->pixel_format = pixel_format_get_info(fourcc);
|
|
|
|
|
assert(buffer->pixel_format);
|
|
|
|
|
buffer->format_modifier = DRM_FORMAT_MOD_INVALID;
|
2022-01-14 01:02:21 +00:00
|
|
|
|
|
|
|
|
/* Assume scanout co-ordinate space i.e. (0,0) is top-left
|
|
|
|
|
* if the query fails */
|
2022-01-15 17:36:02 +00:00
|
|
|
ret = gr->query_buffer(gr->egl_display, buffer->legacy_buffer,
|
|
|
|
|
EGL_WAYLAND_Y_INVERTED_WL, &y_inverted);
|
|
|
|
|
if (!ret || y_inverted)
|
|
|
|
|
buffer->buffer_origin = ORIGIN_TOP_LEFT;
|
|
|
|
|
else
|
|
|
|
|
buffer->buffer_origin = ORIGIN_BOTTOM_LEFT;
|
2022-01-14 01:02:21 +00:00
|
|
|
|
2022-01-20 16:05:03 +00:00
|
|
|
for (i = 0; i < gb->num_images; i++) {
|
|
|
|
|
const EGLint attribs[] = {
|
|
|
|
|
EGL_WAYLAND_PLANE_WL, i,
|
|
|
|
|
EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
|
|
|
|
|
EGL_NONE
|
|
|
|
|
};
|
|
|
|
|
|
2022-01-20 18:23:26 +00:00
|
|
|
gb->images[i] = gr->create_image(gr->egl_display,
|
|
|
|
|
EGL_NO_CONTEXT,
|
|
|
|
|
EGL_WAYLAND_BUFFER_WL,
|
2022-01-20 16:05:03 +00:00
|
|
|
buffer->legacy_buffer,
|
|
|
|
|
attribs);
|
2022-01-20 18:23:26 +00:00
|
|
|
if (gb->images[i] == EGL_NO_IMAGE_KHR) {
|
2022-01-20 16:05:03 +00:00
|
|
|
weston_log("couldn't create EGLImage for plane %d\n", i);
|
|
|
|
|
goto err_img;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-20 18:49:05 +00:00
|
|
|
target = gl_shader_texture_variant_get_target(gb->shader_variant);
|
|
|
|
|
ensure_textures(gb, target, gb->num_images);
|
|
|
|
|
|
2022-01-20 16:05:03 +00:00
|
|
|
buffer->renderer_private = gb;
|
|
|
|
|
gb->destroy_listener.notify = handle_buffer_destroy;
|
|
|
|
|
wl_signal_add(&buffer->destroy_signal, &gb->destroy_listener);
|
2022-01-14 01:55:50 +00:00
|
|
|
return true;
|
2022-01-20 16:05:03 +00:00
|
|
|
|
|
|
|
|
err_img:
|
|
|
|
|
while (--i >= 0)
|
2022-01-20 18:23:26 +00:00
|
|
|
gr->destroy_image(gb->gr->egl_display, gb->images[i]);
|
2022-01-20 16:05:03 +00:00
|
|
|
err_free:
|
|
|
|
|
free(gb);
|
|
|
|
|
return false;
|
2022-01-14 01:02:21 +00:00
|
|
|
}
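
/* For reference, gr->query_buffer above resolves to eglQueryWaylandBufferWL()
 * from EGL_WL_bind_wayland_display. A compiled-out sketch of a direct call
 * follows; note that whether the buffer parameter is typed as
 * struct wl_resource * or struct wl_buffer * depends on the EGL header
 * version in use. */
#if 0
	EGLint width;

	if (eglQueryWaylandBufferWL(gr->egl_display, buffer->legacy_buffer,
				    EGL_WIDTH, &width) != EGL_TRUE)
		weston_log("resource is not an EGL Wayland buffer\n");
#endif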

static void
gl_renderer_destroy_dmabuf(struct linux_dmabuf_buffer *dmabuf)
{
	struct gl_buffer_state *gb =
		linux_dmabuf_buffer_get_user_data(dmabuf);

	linux_dmabuf_buffer_set_user_data(dmabuf, NULL, NULL);
	destroy_buffer_state(gb);
}

static EGLImageKHR
import_simple_dmabuf(struct gl_renderer *gr,
		     const struct dmabuf_attributes *attributes)
{
	EGLint attribs[52];
	int atti = 0;
	bool has_modifier;

	/* This requires the Mesa commit in
	 * Mesa 10.3 (08264e5dad4df448e7718e782ad9077902089a07) or
	 * Mesa 10.2.7 (55d28925e6109a4afd61f109e845a8a51bd17652).
	 * Otherwise Mesa closes the fd behind our back and re-importing
	 * will fail.
	 * https://bugs.freedesktop.org/show_bug.cgi?id=76188
	 */

	attribs[atti++] = EGL_WIDTH;
	attribs[atti++] = attributes->width;
	attribs[atti++] = EGL_HEIGHT;
	attribs[atti++] = attributes->height;
	attribs[atti++] = EGL_LINUX_DRM_FOURCC_EXT;
	attribs[atti++] = attributes->format;
	attribs[atti++] = EGL_IMAGE_PRESERVED_KHR;
	attribs[atti++] = EGL_TRUE;

	if (attributes->modifier != DRM_FORMAT_MOD_INVALID) {
		if (!egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS))
			return NULL;
		has_modifier = true;
	} else {
		has_modifier = false;
	}

	if (attributes->n_planes > 0) {
		attribs[atti++] = EGL_DMA_BUF_PLANE0_FD_EXT;
		attribs[atti++] = attributes->fd[0];
		attribs[atti++] = EGL_DMA_BUF_PLANE0_OFFSET_EXT;
		attribs[atti++] = attributes->offset[0];
		attribs[atti++] = EGL_DMA_BUF_PLANE0_PITCH_EXT;
		attribs[atti++] = attributes->stride[0];
		if (has_modifier) {
			attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT;
			attribs[atti++] = attributes->modifier & 0xFFFFFFFF;
			attribs[atti++] = EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT;
			attribs[atti++] = attributes->modifier >> 32;
		}
	}

	if (attributes->n_planes > 1) {
		attribs[atti++] = EGL_DMA_BUF_PLANE1_FD_EXT;
		attribs[atti++] = attributes->fd[1];
		attribs[atti++] = EGL_DMA_BUF_PLANE1_OFFSET_EXT;
		attribs[atti++] = attributes->offset[1];
		attribs[atti++] = EGL_DMA_BUF_PLANE1_PITCH_EXT;
		attribs[atti++] = attributes->stride[1];
		if (has_modifier) {
			attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_LO_EXT;
			attribs[atti++] = attributes->modifier & 0xFFFFFFFF;
			attribs[atti++] = EGL_DMA_BUF_PLANE1_MODIFIER_HI_EXT;
			attribs[atti++] = attributes->modifier >> 32;
		}
	}

	if (attributes->n_planes > 2) {
		attribs[atti++] = EGL_DMA_BUF_PLANE2_FD_EXT;
		attribs[atti++] = attributes->fd[2];
		attribs[atti++] = EGL_DMA_BUF_PLANE2_OFFSET_EXT;
		attribs[atti++] = attributes->offset[2];
		attribs[atti++] = EGL_DMA_BUF_PLANE2_PITCH_EXT;
		attribs[atti++] = attributes->stride[2];
		if (has_modifier) {
			attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_LO_EXT;
			attribs[atti++] = attributes->modifier & 0xFFFFFFFF;
			attribs[atti++] = EGL_DMA_BUF_PLANE2_MODIFIER_HI_EXT;
			attribs[atti++] = attributes->modifier >> 32;
		}
	}

	if (egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS)) {
		if (attributes->n_planes > 3) {
			attribs[atti++] = EGL_DMA_BUF_PLANE3_FD_EXT;
			attribs[atti++] = attributes->fd[3];
			attribs[atti++] = EGL_DMA_BUF_PLANE3_OFFSET_EXT;
			attribs[atti++] = attributes->offset[3];
			attribs[atti++] = EGL_DMA_BUF_PLANE3_PITCH_EXT;
			attribs[atti++] = attributes->stride[3];
			attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_LO_EXT;
			attribs[atti++] = attributes->modifier & 0xFFFFFFFF;
			attribs[atti++] = EGL_DMA_BUF_PLANE3_MODIFIER_HI_EXT;
			attribs[atti++] = attributes->modifier >> 32;
		}
	}

	attribs[atti++] = EGL_NONE;

	return gr->create_image(gr->egl_display, EGL_NO_CONTEXT,
				EGL_LINUX_DMA_BUF_EXT, NULL, attribs);
}
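
/* Shape of the attribute list built above, shown compiled out for a
 * single-plane linear XRGB8888 buffer of 256x256; the fd and pitch values
 * are placeholders only. */
#if 0
	static const EGLint example_attribs[] = {
		EGL_WIDTH, 256,
		EGL_HEIGHT, 256,
		EGL_LINUX_DRM_FOURCC_EXT, DRM_FORMAT_XRGB8888,
		EGL_IMAGE_PRESERVED_KHR, EGL_TRUE,
		EGL_DMA_BUF_PLANE0_FD_EXT, -1, /* dmabuf fd placeholder */
		EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0,
		EGL_DMA_BUF_PLANE0_PITCH_EXT, 256 * 4,
		EGL_NONE
	};
#endif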

static EGLImageKHR
import_dmabuf_single_plane(struct gl_renderer *gr,
			   const struct pixel_format_info *info,
			   int idx,
			   const struct dmabuf_attributes *attributes,
			   struct yuv_plane_descriptor *descriptor)
{
	struct dmabuf_attributes plane;
	EGLImageKHR image;
	char fmt[4];
	int hsub = pixel_format_hsub(info, idx);
	int vsub = pixel_format_vsub(info, idx);

	plane.width = attributes->width / hsub;
	plane.height = attributes->height / vsub;
	plane.format = descriptor->format;
	plane.n_planes = 1;
	plane.fd[0] = attributes->fd[descriptor->plane_index];
	plane.offset[0] = attributes->offset[descriptor->plane_index];
	plane.stride[0] = attributes->stride[descriptor->plane_index];
	plane.modifier = attributes->modifier;

	image = import_simple_dmabuf(gr, &plane);
	if (image == EGL_NO_IMAGE_KHR) {
		weston_log("Failed to import plane %d as %.4s\n",
			   descriptor->plane_index,
			   dump_format(descriptor->format, fmt));
		return NULL;
	}

	return image;
}
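
/* Worked example of the subsampling arithmetic above: for a 1920x1080 NV12
 * buffer, the luma plane imports at 1920x1080 (hsub = vsub = 1) while the
 * interleaved CbCr plane has hsub = vsub = 2 and therefore imports as a
 * 960x540 image. The per-plane DRM format comes from the descriptor; for
 * NV12's chroma plane that is typically a two-channel format such as
 * DRM_FORMAT_GR88. */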

static bool
import_yuv_dmabuf(struct gl_renderer *gr, struct gl_buffer_state *gb,
		  struct dmabuf_attributes *attributes)
{
	unsigned i;
	int j;
	struct yuv_format_descriptor *format = NULL;
	const struct pixel_format_info *info;
	int plane_count;
	GLenum target;
	char fmt[4];

	for (i = 0; i < ARRAY_LENGTH(yuv_formats); ++i) {
		if (yuv_formats[i].format == attributes->format) {
			format = &yuv_formats[i];
			break;
		}
	}

	if (!format) {
		weston_log("Error during import, and no known conversion for format "
			   "%.4s in the renderer\n",
			   dump_format(attributes->format, fmt));
		return false;
	}

	info = pixel_format_get_info(attributes->format);
	assert(info);
	plane_count = pixel_format_get_plane_count(info);

	if (attributes->n_planes != plane_count) {
		weston_log("%.4s dmabuf must contain %d plane%s (%d provided)\n",
			   dump_format(format->format, fmt),
			   plane_count,
			   (plane_count > 1) ? "s" : "",
			   attributes->n_planes);
		return false;
	}

	for (j = 0; j < format->output_planes; ++j) {
		gb->images[j] = import_dmabuf_single_plane(gr, info, j, attributes,
							   &format->plane[j]);
		if (gb->images[j] == EGL_NO_IMAGE_KHR) {
			while (--j >= 0) {
				gr->destroy_image(gb->gr->egl_display,
						  gb->images[j]);
				gb->images[j] = NULL;
			}
			return false;
		}
	}

	gb->num_images = format->output_planes;
	gb->shader_variant = format->shader_variant;

	target = gl_shader_texture_variant_get_target(gb->shader_variant);
	ensure_textures(gb, target, gb->num_images);

	return true;
}
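
/* Compiled-out sketch of what a yuv_formats[] entry consumed above might
 * look like, inferred purely from the fields this function reads; the real
 * table lives elsewhere in this file and may carry additional fields. */
#if 0
	{
		.format = DRM_FORMAT_NV12,
		.output_planes = 2,
		.shader_variant = SHADER_VARIANT_Y_UV,
		.plane = {
			{ .format = DRM_FORMAT_R8, .plane_index = 0 },
			{ .format = DRM_FORMAT_GR88, .plane_index = 1 },
		},
	},
#endif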

static void
gl_renderer_query_dmabuf_modifiers_full(struct gl_renderer *gr, int format,
					uint64_t **modifiers,
					unsigned **external_only,
					int *num_modifiers);

static struct dmabuf_format*
dmabuf_format_create(struct gl_renderer *gr, uint32_t format)
{
	struct dmabuf_format *dmabuf_format;

	dmabuf_format = calloc(1, sizeof(struct dmabuf_format));
	if (!dmabuf_format)
		return NULL;

	dmabuf_format->format = format;

	gl_renderer_query_dmabuf_modifiers_full(gr, format,
			&dmabuf_format->modifiers,
			&dmabuf_format->external_only,
			&dmabuf_format->num_modifiers);

	if (dmabuf_format->num_modifiers == 0) {
		free(dmabuf_format);
		return NULL;
	}

	wl_list_insert(&gr->dmabuf_formats, &dmabuf_format->link);
	return dmabuf_format;
}

static void
dmabuf_format_destroy(struct dmabuf_format *format)
{
	free(format->modifiers);
	free(format->external_only);
	wl_list_remove(&format->link);
	free(format);
}

static GLenum
choose_texture_target(struct gl_renderer *gr,
		      struct dmabuf_attributes *attributes)
{
	struct dmabuf_format *tmp, *format = NULL;

	wl_list_for_each(tmp, &gr->dmabuf_formats, link) {
		if (tmp->format == attributes->format) {
			format = tmp;
			break;
		}
	}

	if (!format)
		format = dmabuf_format_create(gr, attributes->format);

	if (format) {
		int i;

		for (i = 0; i < format->num_modifiers; ++i) {
			if (format->modifiers[i] == attributes->modifier) {
				if (format->external_only[i])
					return GL_TEXTURE_EXTERNAL_OES;
				else
					return GL_TEXTURE_2D;
			}
		}
	}

	switch (attributes->format & ~DRM_FORMAT_BIG_ENDIAN) {
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_AYUV:
	case DRM_FORMAT_XYUV8888:
		return GL_TEXTURE_EXTERNAL_OES;
	default:
		return GL_TEXTURE_2D;
	}
}
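
/* When this returns GL_TEXTURE_EXTERNAL_OES, sampling the image requires
 * the OES_EGL_image_external GLSL extension; the fragment shader side
 * declares the sampler roughly as:
 *
 *     #extension GL_OES_EGL_image_external : require
 *     uniform samplerExternalOES tex;
 *
 * which is what selecting SHADER_VARIANT_EXTERNAL (see import_dmabuf()
 * below) ends up providing. */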

static struct gl_buffer_state *
import_dmabuf(struct gl_renderer *gr,
	      struct linux_dmabuf_buffer *dmabuf)
{
	EGLImageKHR egl_image;
	struct gl_buffer_state *gb;

	if (!pixel_format_get_info(dmabuf->attributes.format))
		return NULL;

	gb = zalloc(sizeof(*gb));
	if (!gb)
		return NULL;

	gb->gr = gr;
	pixman_region32_init(&gb->texture_damage);
	wl_list_init(&gb->destroy_listener.link);

	egl_image = import_simple_dmabuf(gr, &dmabuf->attributes);
	if (egl_image != EGL_NO_IMAGE_KHR) {
		GLenum target = choose_texture_target(gr, &dmabuf->attributes);

		gb->num_images = 1;
		gb->images[0] = egl_image;

		switch (target) {
		case GL_TEXTURE_2D:
			gb->shader_variant = SHADER_VARIANT_RGBA;
			break;
		default:
			gb->shader_variant = SHADER_VARIANT_EXTERNAL;
		}

		ensure_textures(gb, target, gb->num_images);

		return gb;
	}

	if (!import_yuv_dmabuf(gr, gb, &dmabuf->attributes)) {
		destroy_buffer_state(gb);
		return NULL;
	}

	return gb;
}

static void
gl_renderer_query_dmabuf_formats(struct weston_compositor *wc,
				int **formats, int *num_formats)
{
	struct gl_renderer *gr = get_renderer(wc);
	static const int fallback_formats[] = {
		DRM_FORMAT_ARGB8888,
		DRM_FORMAT_XRGB8888,
		DRM_FORMAT_YUYV,
		DRM_FORMAT_NV12,
		DRM_FORMAT_YUV420,
		DRM_FORMAT_YUV444,
		DRM_FORMAT_XYUV8888,
	};
	bool fallback = false;
	EGLint num;

	assert(egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT));

	if (!egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS) ||
	    !gr->query_dmabuf_formats(gr->egl_display, 0, NULL, &num)) {
		if (gr->gl_version >= gl_version(3, 0) ||
		    gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_RG))
			num = ARRAY_LENGTH(fallback_formats);
		else
			num = 2;
		fallback = true;
	}

	*formats = calloc(num, sizeof(int));
	if (*formats == NULL) {
		*num_formats = 0;
		return;
	}

	if (fallback) {
		memcpy(*formats, fallback_formats, num * sizeof(int));
		*num_formats = num;
		return;
	}

	if (!gr->query_dmabuf_formats(gr->egl_display, num, *formats, &num)) {
		*num_formats = 0;
		free(*formats);
		return;
	}

	*num_formats = num;
}
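
/* gr->query_dmabuf_formats wraps eglQueryDmaBufFormatsEXT() from
 * EGL_EXT_image_dma_buf_import_modifiers, which follows the usual EGL
 * two-call pattern (count query, then data query), sketched compiled out: */
#if 0
	EGLint n;
	EGLint *fmts;

	eglQueryDmaBufFormatsEXT(gr->egl_display, 0, NULL, &n);
	fmts = calloc(n, sizeof(EGLint));
	if (fmts)
		eglQueryDmaBufFormatsEXT(gr->egl_display, n, fmts, &n);
#endif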

static void
gl_renderer_query_dmabuf_modifiers_full(struct gl_renderer *gr, int format,
					uint64_t **modifiers,
					unsigned **external_only,
					int *num_modifiers)
{
	int num;

	assert(egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT));

	if (!egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS) ||
	    !gr->query_dmabuf_modifiers(gr->egl_display, format, 0, NULL,
					NULL, &num) ||
	    num == 0) {
		*num_modifiers = 0;
		return;
	}

	*modifiers = calloc(num, sizeof(uint64_t));
	if (*modifiers == NULL) {
		*num_modifiers = 0;
		return;
	}
	if (external_only) {
		*external_only = calloc(num, sizeof(unsigned));
		if (*external_only == NULL) {
			*num_modifiers = 0;
			free(*modifiers);
			return;
		}
	}
	if (!gr->query_dmabuf_modifiers(gr->egl_display, format,
					num, *modifiers, external_only ?
					*external_only : NULL, &num)) {
		*num_modifiers = 0;
		free(*modifiers);
		if (external_only)
			free(*external_only);
		return;
	}

	*num_modifiers = num;
}

static void
gl_renderer_query_dmabuf_modifiers(struct weston_compositor *wc, int format,
				   uint64_t **modifiers,
				   int *num_modifiers)
{
	struct gl_renderer *gr = get_renderer(wc);

	gl_renderer_query_dmabuf_modifiers_full(gr, format, modifiers, NULL,
						num_modifiers);
}
2014-06-12 16:49:29 +03:00
|
|
|
static bool
|
|
|
|
|
gl_renderer_import_dmabuf(struct weston_compositor *ec,
|
|
|
|
|
struct linux_dmabuf_buffer *dmabuf)
|
|
|
|
|
{
|
|
|
|
|
struct gl_renderer *gr = get_renderer(ec);
|
2022-01-20 17:40:06 +00:00
|
|
|
struct gl_buffer_state *gb;
|
2014-06-12 16:49:29 +03:00
|
|
|
|
2024-10-10 14:02:12 +02:00
|
|
|
assert(egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT));
|
2014-06-12 16:49:29 +03:00
|
|
|
|
2023-12-11 14:27:12 +01:00
|
|
|
/* return if EGL doesn't support import modifiers */
|
|
|
|
|
if (dmabuf->attributes.modifier != DRM_FORMAT_MOD_INVALID)
|
2024-10-10 14:02:12 +02:00
|
|
|
if (!egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT_MODIFIERS))
|
2014-06-12 16:49:29 +03:00
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* reject all flags we do not recognize or handle */
|
2015-11-24 19:28:24 +00:00
|
|
|
if (dmabuf->attributes.flags & ~ZWP_LINUX_BUFFER_PARAMS_V1_FLAGS_Y_INVERT)
|
2014-06-12 16:49:29 +03:00
|
|
|
return false;
|
|
|
|
|
|
2022-01-20 17:40:06 +00:00
|
|
|
gb = import_dmabuf(gr, dmabuf);
|
|
|
|
|
if (!gb)
|
2014-06-12 16:49:29 +03:00
|
|
|
return false;
|
|
|
|
|
|
2022-01-20 17:40:06 +00:00
|
|
|
linux_dmabuf_buffer_set_user_data(dmabuf, gb,
|
2016-01-11 19:04:34 +00:00
|
|
|
gl_renderer_destroy_dmabuf);
|
2014-06-12 16:49:29 +03:00
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}

static struct gl_buffer_state *
ensure_renderer_gl_buffer_state(struct weston_surface *surface,
				struct weston_buffer *buffer)
{
	struct gl_renderer *gr = get_renderer(surface->compositor);
	struct gl_surface_state *gs = get_surface_state(surface);
	struct gl_buffer_state *gb = buffer->renderer_private;

	if (gb) {
		gs->buffer = gb;
		return gb;
	}

	gb = zalloc(sizeof(*gb));
	gb->gr = gr;
	pixman_region32_init(&gb->texture_damage);
	buffer->renderer_private = gb;
	gb->destroy_listener.notify = handle_buffer_destroy;
	wl_signal_add(&buffer->destroy_signal, &gb->destroy_listener);

	gs->buffer = gb;

	return gb;
}

static void
attach_direct_display_placeholder(struct weston_paint_node *pnode)
{
	struct weston_surface *surface = pnode->surface;
	struct weston_buffer *buffer = surface->buffer_ref.buffer;
	struct gl_buffer_state *gb;

	gb = ensure_renderer_gl_buffer_state(surface, buffer);

	/* uses the same color as the content-protection placeholder */
	gb->color[0] = pnode->solid.r;
	gb->color[1] = pnode->solid.g;
	gb->color[2] = pnode->solid.b;
	gb->color[3] = pnode->solid.a;

	gb->shader_variant = SHADER_VARIANT_SOLID;
}

static void
gl_renderer_attach_buffer(struct weston_surface *surface,
			  struct weston_buffer *buffer)
{
	struct gl_renderer *gr = get_renderer(surface->compositor);
	struct gl_surface_state *gs = get_surface_state(surface);
	struct gl_buffer_state *gb;
	GLenum target;
	int i;

	assert(buffer->renderer_private);
	gb = buffer->renderer_private;

	gs->buffer = gb;

	target = gl_shader_texture_variant_get_target(gb->shader_variant);
	for (i = 0; i < gb->num_images; ++i) {
		glActiveTexture(GL_TEXTURE0 + i);
		glBindTexture(target, gb->textures[i]);
		gr->image_target_texture_2d(target, gb->images[i]);
	}
	glActiveTexture(GL_TEXTURE0);
}

static const struct weston_drm_format_array *
gl_renderer_get_supported_formats(struct weston_compositor *ec)
{
	struct gl_renderer *gr = get_renderer(ec);

	return &gr->supported_formats;
}

static int
populate_supported_formats(struct weston_compositor *ec,
			   struct weston_drm_format_array *supported_formats)
{
	struct weston_drm_format *fmt;
	int *formats = NULL;
	uint64_t *modifiers = NULL;
	int num_formats, num_modifiers;
	int i, j;
	int ret = 0;

	/* Use EGL_EXT_image_dma_buf_import_modifiers to query the
	 * list of formats/modifiers of the renderer. */
	gl_renderer_query_dmabuf_formats(ec, &formats, &num_formats);
	if (num_formats == 0)
		return 0;

	for (i = 0; i < num_formats; i++) {
		const struct pixel_format_info *info =
			pixel_format_get_info(formats[i]);

		if (!info || info->hide_from_clients)
			continue;

		fmt = weston_drm_format_array_add_format(supported_formats,
							 formats[i]);
		if (!fmt) {
			ret = -1;
			goto out;
		}

		/* Always add DRM_FORMAT_MOD_INVALID, as EGL implementations
		 * support implicit modifiers. */
		ret = weston_drm_format_add_modifier(fmt, DRM_FORMAT_MOD_INVALID);
		if (ret < 0)
			goto out;

		gl_renderer_query_dmabuf_modifiers(ec, formats[i],
						   &modifiers, &num_modifiers);
		if (num_modifiers == 0)
			continue;

		for (j = 0; j < num_modifiers; j++) {
			/* Skip MOD_INVALID, as it has already been added. */
			if (modifiers[j] == DRM_FORMAT_MOD_INVALID)
				continue;
			ret = weston_drm_format_add_modifier(fmt, modifiers[j]);
			if (ret < 0) {
				free(modifiers);
				goto out;
			}
		}
		free(modifiers);
	}

out:
	free(formats);
	return ret;
}

static void
gl_renderer_attach_solid(struct weston_surface *surface,
			 struct weston_buffer *buffer)
{
	struct gl_buffer_state *gb;

	gb = ensure_renderer_gl_buffer_state(surface, buffer);

	gb->color[0] = buffer->solid.r;
	gb->color[1] = buffer->solid.g;
	gb->color[2] = buffer->solid.b;
	gb->color[3] = buffer->solid.a;

	gb->shader_variant = SHADER_VARIANT_SOLID;
}

static void
gl_renderer_attach(struct weston_paint_node *pnode)
{
	struct weston_surface *es = pnode->surface;
	struct weston_buffer *buffer = es->buffer_ref.buffer;
	struct gl_surface_state *gs = get_surface_state(es);

	if (gs->buffer_ref.buffer == buffer)
		return;

	/* SHM buffers are a little special in that they are allocated
	 * per-surface rather than per-buffer, because we keep a shadow
	 * copy of the SHM data in a GL texture; for these we need to
	 * destroy the buffer state when we're switching to another
	 * buffer type. For all the others, the gl_buffer_state comes
	 * from the weston_buffer itself, and will only be destroyed
	 * along with it. */
	if (gs->buffer && gs->buffer_ref.buffer->type == WESTON_BUFFER_SHM) {
		if (!buffer || buffer->type != WESTON_BUFFER_SHM) {
			destroy_buffer_state(gs->buffer);
			gs->buffer = NULL;
		}
	} else {
		gs->buffer = NULL;
	}

	if (!buffer)
		goto out;

	if (pnode->is_direct) {
		attach_direct_display_placeholder(pnode);
		goto success;
	}

	switch (buffer->type) {
	case WESTON_BUFFER_SHM:
		gl_renderer_attach_shm(es, buffer);
		break;
	case WESTON_BUFFER_DMABUF:
	case WESTON_BUFFER_RENDERER_OPAQUE:
		gl_renderer_attach_buffer(es, buffer);
		break;
	case WESTON_BUFFER_SOLID:
		gl_renderer_attach_solid(es, buffer);
		break;
	default:
		weston_log("unhandled buffer type!\n");
		weston_buffer_send_server_error(buffer,
			"disconnecting due to unhandled buffer type");
		goto out;
	}

success:
	weston_buffer_reference(&gs->buffer_ref, buffer,
				BUFFER_MAY_BE_ACCESSED);
	weston_buffer_release_reference(&gs->buffer_release_ref,
					es->buffer_release_ref.buffer_release);
	return;

out:
	assert(!gs->buffer);
	weston_buffer_reference(&gs->buffer_ref, NULL,
				BUFFER_WILL_NOT_BE_ACCESSED);
	weston_buffer_release_reference(&gs->buffer_release_ref, NULL);
}

static void
gl_renderer_buffer_init(struct weston_compositor *ec,
			struct weston_buffer *buffer)
{
	struct gl_buffer_state *gb;

	if (buffer->type != WESTON_BUFFER_DMABUF ||
	    (buffer->type == WESTON_BUFFER_DMABUF && buffer->direct_display))
		return;

	/* Thanks to linux-dmabuf being totally independent of libweston,
	 * the gl_buffer_state will only be set as userdata on the dmabuf,
	 * not on the weston_buffer. Steal it away into the weston_buffer. */
	assert(!buffer->renderer_private);
	gb = linux_dmabuf_buffer_get_user_data(buffer->dmabuf);
	assert(gb);
	linux_dmabuf_buffer_set_user_data(buffer->dmabuf, NULL, NULL);
	buffer->renderer_private = gb;
	gb->destroy_listener.notify = handle_buffer_destroy;
	wl_signal_add(&buffer->destroy_signal, &gb->destroy_listener);
}

static uint32_t
pack_color(pixman_format_code_t format, float *c)
{
	uint8_t r = round(c[0] * 255.0f);
	uint8_t g = round(c[1] * 255.0f);
	uint8_t b = round(c[2] * 255.0f);
	uint8_t a = round(c[3] * 255.0f);

	switch (format) {
	case PIXMAN_a8b8g8r8:
		return (a << 24) | (b << 16) | (g << 8) | r;
	default:
		assert(0);
		return 0;
	}
}
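
/* Worked example: pack_color(PIXMAN_a8b8g8r8, (float[]){ 1.0f, 0.5f, 0.0f,
 * 1.0f }) yields r = 0xff, g = 0x80, b = 0x00, a = 0xff and so returns
 * 0xff0080ff, i.e. the bytes r, g, b, a in memory order on little-endian,
 * matching the GL_RGBA readback below. */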

static int
gl_renderer_surface_copy_content(struct weston_surface *surface,
				 void *target, size_t size,
				 int src_x, int src_y,
				 int width, int height)
{
	static const GLfloat verts[4 * 2] = {
		0.0f, 0.0f,
		1.0f, 0.0f,
		1.0f, 1.0f,
		0.0f, 1.0f
	};
	static const GLfloat projmat_normal[16] = { /* transpose */
		 2.0f,  0.0f, 0.0f, 0.0f,
		 0.0f,  2.0f, 0.0f, 0.0f,
		 0.0f,  0.0f, 1.0f, 0.0f,
		-1.0f, -1.0f, 0.0f, 1.0f
	};
	static const GLfloat projmat_yinvert[16] = { /* transpose */
		 2.0f,  0.0f, 0.0f, 0.0f,
		 0.0f, -2.0f, 0.0f, 0.0f,
		 0.0f,  0.0f, 1.0f, 0.0f,
		-1.0f,  1.0f, 0.0f, 1.0f
	};
	struct gl_shader_config sconf = {
		.view_alpha = 1.0f,
		.input_tex_filter = GL_NEAREST,
	};
	const pixman_format_code_t format = PIXMAN_a8b8g8r8;
	const GLenum gl_format = GL_RGBA; /* PIXMAN_a8b8g8r8 little-endian */
	struct gl_renderer *gr = get_renderer(surface->compositor);
	struct gl_surface_state *gs;
	struct gl_buffer_state *gb;
	struct weston_buffer *buffer;
	int cw, ch;
	GLuint fbo, rb;
	int ret = -1;

	gs = get_surface_state(surface);
	gb = gs->buffer;
	buffer = gs->buffer_ref.buffer;
	assert(buffer);
	if (buffer->direct_display)
		return -1;

	cw = buffer->width;
	ch = buffer->height;

	switch (buffer->type) {
	case WESTON_BUFFER_SOLID:
		*(uint32_t *)target = pack_color(format, gb->color);
		return 0;
	case WESTON_BUFFER_SHM:
	case WESTON_BUFFER_DMABUF:
	case WESTON_BUFFER_RENDERER_OPAQUE:
		break;
	}

	gl_shader_config_set_input_textures(&sconf, gs);

	if (!gl_fbo_init(GL_RGBA, cw, ch, &fbo, &rb)) {
		weston_log("Failed to init FBO\n");
		goto fbo_init_error;
	}

	glViewport(0, 0, cw, ch);
	glDisable(GL_BLEND);
	if (buffer->buffer_origin == ORIGIN_TOP_LEFT)
		ARRAY_COPY(sconf.projection.d, projmat_normal);
	else
		ARRAY_COPY(sconf.projection.d, projmat_yinvert);
	sconf.projection.type = WESTON_MATRIX_TRANSFORM_SCALE |
				WESTON_MATRIX_TRANSFORM_TRANSLATE;

	if (!gl_renderer_use_program(gr, &sconf))
		goto use_program_error;

	glEnableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);
	glEnableVertexAttribArray(SHADER_ATTRIB_LOC_TEXCOORD);
	glVertexAttribPointer(SHADER_ATTRIB_LOC_POSITION, 2, GL_FLOAT, GL_FALSE,
			      0, verts);
	glVertexAttribPointer(SHADER_ATTRIB_LOC_TEXCOORD, 2, GL_FLOAT, GL_FALSE,
			      0, verts);
	glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
	glDisableVertexAttribArray(SHADER_ATTRIB_LOC_TEXCOORD);
	glDisableVertexAttribArray(SHADER_ATTRIB_LOC_POSITION);

	glReadPixels(src_x, src_y, width, height, gl_format,
		     GL_UNSIGNED_BYTE, target);
	ret = 0;

use_program_error:
	gl_fbo_fini(&fbo, &rb);
fbo_init_error:
	return ret;
}
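
/* Compiled-out caller sketch: the target buffer must hold at least
 * width * height * 4 bytes, since the copy is always read back as 32-bit
 * PIXMAN_a8b8g8r8 / GL_RGBA regardless of the source buffer format. */
#if 0
	size_t size = width * height * 4;
	void *data = malloc(size);

	if (data &&
	    gl_renderer_surface_copy_content(surface, data, size,
					     0, 0, width, height) < 0)
		weston_log("surface copy failed\n");
#endif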

static void
surface_state_destroy(struct gl_surface_state *gs, struct gl_renderer *gr)
{
	wl_list_remove(&gs->surface_destroy_listener.link);
	wl_list_remove(&gs->renderer_destroy_listener.link);

	gs->surface->renderer_state = NULL;

	if (gs->buffer && gs->buffer_ref.buffer->type == WESTON_BUFFER_SHM)
		destroy_buffer_state(gs->buffer);
	gs->buffer = NULL;

	weston_buffer_reference(&gs->buffer_ref, NULL,
				BUFFER_WILL_NOT_BE_ACCESSED);
	weston_buffer_release_reference(&gs->buffer_release_ref, NULL);

	free(gs);
}

static void
surface_state_handle_surface_destroy(struct wl_listener *listener, void *data)
{
	struct gl_surface_state *gs;
	struct gl_renderer *gr;

	gs = container_of(listener, struct gl_surface_state,
			  surface_destroy_listener);

	gr = get_renderer(gs->surface->compositor);

	surface_state_destroy(gs, gr);
}

static void
surface_state_handle_renderer_destroy(struct wl_listener *listener, void *data)
{
	struct gl_surface_state *gs;
	struct gl_renderer *gr;

	gr = data;

	gs = container_of(listener, struct gl_surface_state,
			  renderer_destroy_listener);

	surface_state_destroy(gs, gr);
}

static int
gl_renderer_create_surface(struct weston_surface *surface)
{
	struct gl_surface_state *gs;
	struct gl_renderer *gr = get_renderer(surface->compositor);

	gs = zalloc(sizeof *gs);
	if (gs == NULL)
		return -1;

	/* A buffer is never attached to solid color surfaces, yet
	 * they still go through texcoord computations. Do not divide
	 * by zero there.
	 */
	gs->surface = surface;

	surface->renderer_state = gs;

	gs->surface_destroy_listener.notify =
		surface_state_handle_surface_destroy;
	wl_signal_add(&surface->destroy_signal,
		      &gs->surface_destroy_listener);

	gs->renderer_destroy_listener.notify =
		surface_state_handle_renderer_destroy;
	wl_signal_add(&gr->destroy_signal,
		      &gs->renderer_destroy_listener);

	return 0;
}

void
gl_renderer_log_extensions(struct gl_renderer *gr,
			   const char *name, const char *extensions)
{
	const char *p, *end;
	int l;
	int len;

	if (!weston_log_scope_is_enabled(gr->renderer_scope))
		return;

	l = weston_log_scope_printf(gr->renderer_scope, "%s:", name);
	p = extensions;
	while (*p) {
		end = strchrnul(p, ' ');
		len = end - p;
		if (l + len > 78) {
			l = weston_log_scope_printf(gr->renderer_scope,
						    "\n  %.*s", len, p);
		} else {
			l += weston_log_scope_printf(gr->renderer_scope,
						     " %.*s", len, p);
		}
		for (p = end; isspace(*p); p++)
			;
	}
	weston_log_scope_printf(gr->renderer_scope, "\n");
}

static void
log_egl_info(struct gl_renderer *gr, EGLDisplay egldpy)
{
	const char *str;

	str = eglQueryString(egldpy, EGL_VERSION);
	weston_log("EGL version: %s\n", str ? str : "(null)");

	str = eglQueryString(egldpy, EGL_VENDOR);
	weston_log("EGL vendor: %s\n", str ? str : "(null)");

	str = eglQueryString(egldpy, EGL_CLIENT_APIS);
	weston_log("EGL client APIs: %s\n", str ? str : "(null)");

	str = eglQueryString(egldpy, EGL_EXTENSIONS);
	gl_renderer_log_extensions(gr, "EGL extensions", str ? str : "(null)");
}

static void
log_gl_info(struct gl_renderer *gr)
{
	const char *str;

	str = (char *)glGetString(GL_VERSION);
	weston_log("GL version: %s\n", str ? str : "(null)");

	str = (char *)glGetString(GL_SHADING_LANGUAGE_VERSION);
	weston_log("GLSL version: %s\n", str ? str : "(null)");

	str = (char *)glGetString(GL_VENDOR);
	weston_log("GL vendor: %s\n", str ? str : "(null)");

	str = (char *)glGetString(GL_RENDERER);
	weston_log("GL renderer: %s\n", str ? str : "(null)");

	str = (char *)glGetString(GL_EXTENSIONS);
	gl_renderer_log_extensions(gr, "GL extensions", str ? str : "(null)");
}

static void
gl_renderer_output_set_border(struct weston_output *output,
			      enum gl_renderer_border_side side,
			      int32_t width, int32_t height,
			      int32_t tex_width, unsigned char *data)
{
	struct gl_output_state *go = get_output_state(output);

	if (go->borders[side].width != width ||
	    go->borders[side].height != height)
		/* In this case, we have to blow everything and do a full
		 * repaint. */
		go->border_status |= BORDER_SIZE_CHANGED | BORDER_ALL_DIRTY;

	if (data == NULL) {
		width = 0;
		height = 0;
	}

	go->borders[side].width = width;
	go->borders[side].height = height;
	go->borders[side].tex_width = tex_width;
	go->borders[side].data = data;
	go->border_status |= 1 << side;
}

static bool
gl_renderer_resize_output(struct weston_output *output,
			  const struct weston_size *fb_size,
			  const struct weston_geometry *area)
{
	struct gl_renderer *gr = get_renderer(output->compositor);
	struct gl_output_state *go = get_output_state(output);
	const struct pixel_format_info *shfmt = go->shadow_format;
	bool ret;

	check_compositing_area(fb_size, area);

	go->fb_size = *fb_size;
	go->area = *area;
	gr->wireframe_dirty = true;

	weston_output_update_capture_info(output,
					  WESTON_OUTPUT_CAPTURE_SOURCE_FRAMEBUFFER,
					  area->width, area->height,
					  output->compositor->read_format);

	weston_output_update_capture_info(output,
					  WESTON_OUTPUT_CAPTURE_SOURCE_FULL_FRAMEBUFFER,
					  fb_size->width, fb_size->height,
					  output->compositor->read_format);

	/* Discard renderbuffers as a last step in order to emit discarded
	 * callbacks once the renderer has correctly been updated. */
	if (!gl_renderer_discard_renderbuffers(go, false))
		return false;

	if (!shfmt)
		return true;

	if (shadow_exists(go))
		gl_fbo_texture_fini(&go->shadow_fb, &go->shadow_tex);

	ret = gl_fbo_texture_init(shfmt->gl_format, area->width, area->height,
				  GL_RGBA, shfmt->gl_type, &go->shadow_fb,
				  &go->shadow_tex);

	return ret;
}

static int
gl_renderer_setup(struct weston_compositor *ec);

static EGLSurface
gl_renderer_create_window_surface(struct gl_renderer *gr,
				  EGLNativeWindowType window_for_legacy,
				  void *window_for_platform,
				  const struct pixel_format_info *const *formats,
				  unsigned formats_count)
{
	EGLSurface egl_surface = EGL_NO_SURFACE;
	EGLConfig egl_config;

	egl_config = gl_renderer_get_egl_config(gr, EGL_WINDOW_BIT,
						formats, formats_count);
	if (egl_config == EGL_NO_CONFIG_KHR)
		return EGL_NO_SURFACE;

	log_egl_config_info(gr->egl_display, egl_config);

	if (gr->create_platform_window)
		egl_surface = gr->create_platform_window(gr->egl_display,
							 egl_config,
							 window_for_platform,
							 NULL);
	else
		egl_surface = eglCreateWindowSurface(gr->egl_display,
						     egl_config,
						     window_for_legacy, NULL);

	return egl_surface;
}

static int
gl_renderer_output_create(struct weston_output *output,
			  EGLSurface surface,
			  const struct weston_size *fb_size,
			  const struct weston_geometry *area)
{
	struct gl_output_state *go;
	struct gl_renderer *gr = get_renderer(output->compositor);
	const struct weston_testsuite_quirks *quirks;

	quirks = &output->compositor->test_data.test_quirks;

	go = zalloc(sizeof *go);
	if (go == NULL)
		return -1;

	go->egl_surface = surface;
	go->y_flip = surface == EGL_NO_SURFACE ? 1.0f : -1.0f;

	if (gl_features_has(gr, FEATURE_GPU_TIMELINE))
		gr->gen_queries(1, &go->render_query);

	wl_list_init(&go->timeline_render_point_list);

	go->render_sync = EGL_NO_SYNC_KHR;

	if ((output->color_outcome->from_blend_to_output != NULL &&
	     output->from_blend_to_output_by_backend == false) ||
	    quirks->gl_force_full_redraw_of_shadow_fb) {
		assert(gl_features_has(gr, FEATURE_COLOR_TRANSFORMS));

		go->shadow_format =
			pixel_format_get_info(DRM_FORMAT_ABGR16161616F);
	}

	wl_list_init(&go->renderbuffer_list);

	output->renderer_state = go;

	if (!gl_renderer_resize_output(output, fb_size, area)) {
		weston_log("Output %s failed to create 16F shadow.\n",
			   output->name);
		output->renderer_state = NULL;
		free(go);
		return -1;
	}

	if (shadow_exists(go)) {
		weston_log("Output %s uses 16F shadow.\n",
			   output->name);
	}

	return 0;
}

static int
gl_renderer_output_window_create(struct weston_output *output,
				 const struct gl_renderer_output_options *options)
{
	struct weston_compositor *ec = output->compositor;
	struct gl_renderer *gr = get_renderer(ec);
	EGLSurface egl_surface = EGL_NO_SURFACE;
	int ret;

	egl_surface = gl_renderer_create_window_surface(gr,
							options->window_for_legacy,
							options->window_for_platform,
							options->formats,
							options->formats_count);
	if (egl_surface == EGL_NO_SURFACE) {
		weston_log("failed to create egl surface\n");
		return -1;
	}

	ret = gl_renderer_output_create(output, egl_surface,
					&options->fb_size, &options->area);
	if (ret < 0)
		weston_platform_destroy_egl_surface(gr->egl_display, egl_surface);

	return ret;
}

static int
gl_renderer_output_fbo_create(struct weston_output *output,
			      const struct gl_renderer_fbo_options *options)
{
	return gl_renderer_output_create(output, EGL_NO_SURFACE,
					 &options->fb_size, &options->area);
}
2024-11-19 16:16:14 +00:00
|
|
|
#ifdef HAVE_GBM
|
2023-09-22 09:56:07 +02:00
|
|
|
static void
|
|
|
|
|
gl_renderer_dmabuf_destroy(struct linux_dmabuf_memory *dmabuf)
|
|
|
|
|
{
|
|
|
|
|
struct gl_renderer_dmabuf_memory *gl_renderer_dmabuf;
|
|
|
|
|
struct dmabuf_attributes *attributes;
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
gl_renderer_dmabuf = (struct gl_renderer_dmabuf_memory *)dmabuf;
|
|
|
|
|
|
|
|
|
|
attributes = dmabuf->attributes;
|
|
|
|
|
for (i = 0; i < attributes->n_planes; ++i)
|
|
|
|
|
close(attributes->fd[i]);
|
|
|
|
|
free(dmabuf->attributes);
|
|
|
|
|
|
|
|
|
|
gbm_bo_destroy(gl_renderer_dmabuf->bo);
|
|
|
|
|
free(gl_renderer_dmabuf);
|
|
|
|
|
}
|
2024-11-19 16:16:14 +00:00
|
|
|
#endif
|
2023-09-22 09:56:07 +02:00
|
|
|
|
|
|
|
|
static struct linux_dmabuf_memory *
|
|
|
|
|
gl_renderer_dmabuf_alloc(struct weston_renderer *renderer,
|
|
|
|
|
unsigned int width, unsigned int height,
|
|
|
|
|
uint32_t format,
|
|
|
|
|
const uint64_t *modifiers, const unsigned int count)
|
|
|
|
|
{
|
|
|
|
|
struct gl_renderer *gr = (struct gl_renderer *)renderer;
|
|
|
|
|
struct dmabuf_allocator *allocator = gr->allocator;
|
2024-11-19 16:16:14 +00:00
|
|
|
struct linux_dmabuf_memory *dmabuf = NULL;
|
2023-09-22 09:56:07 +02:00
|
|
|
|
|
|
|
|
if (!allocator)
|
|
|
|
|
return NULL;
|
|
|
|
|
|
2024-11-19 16:16:14 +00:00
|
|
|
#ifdef HAVE_GBM
|
|
|
|
|
struct gl_renderer_dmabuf_memory *gl_renderer_dmabuf;
|
|
|
|
|
struct dmabuf_attributes *attributes;
|
|
|
|
|
struct gbm_bo *bo;
|
|
|
|
|
int i;
|
2023-09-22 09:56:07 +02:00
|
|
|
#ifdef HAVE_GBM_BO_CREATE_WITH_MODIFIERS2
|
|
|
|
|
bo = gbm_bo_create_with_modifiers2(allocator->gbm_device,
|
|
|
|
|
width, height, format,
|
|
|
|
|
modifiers, count,
|
|
|
|
|
GBM_BO_USE_RENDERING);
|
|
|
|
|
#else
|
|
|
|
|
bo = gbm_bo_create_with_modifiers(allocator->gbm_device,
|
|
|
|
|
width, height, format,
|
|
|
|
|
modifiers, count);
|
|
|
|
|
#endif
|
|
|
|
|
if (!bo)
|
|
|
|
|
bo = gbm_bo_create(allocator->gbm_device,
|
|
|
|
|
width, height, format,
|
|
|
|
|
GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);
|
|
|
|
|
if (!bo) {
|
|
|
|
|
weston_log("failed to create gbm_bo\n");
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
gl_renderer_dmabuf = xzalloc(sizeof(*gl_renderer_dmabuf));
|
|
|
|
|
gl_renderer_dmabuf->bo = bo;
|
|
|
|
|
gl_renderer_dmabuf->allocator = allocator;
|
|
|
|
|
|
|
|
|
|
attributes = xzalloc(sizeof(*attributes));
|
|
|
|
|
attributes->width = width;
|
|
|
|
|
attributes->height = height;
|
|
|
|
|
attributes->format = format;
|
|
|
|
|
attributes->n_planes = gbm_bo_get_plane_count(bo);
|
|
|
|
|
for (i = 0; i < attributes->n_planes; ++i) {
|
|
|
|
|
attributes->fd[i] = gbm_bo_get_fd(bo);
|
|
|
|
|
attributes->stride[i] = gbm_bo_get_stride_for_plane(bo, i);
|
|
|
|
|
attributes->offset[i] = gbm_bo_get_offset(bo, i);
|
|
|
|
|
}
|
|
|
|
|
attributes->modifier = gbm_bo_get_modifier(bo);
|
|
|
|
|
|
|
|
|
|
dmabuf = &gl_renderer_dmabuf->base;
|
|
|
|
|
dmabuf->attributes = attributes;
|
|
|
|
|
dmabuf->destroy = gl_renderer_dmabuf_destroy;
|
2024-11-19 16:16:14 +00:00
|
|
|
#endif
|
2023-09-22 09:56:07 +02:00
|
|
|
|
|
|
|
|
return dmabuf;
|
|
|
|
|
}
|
|
|
|
|
|
static void
gl_renderer_output_destroy(struct weston_output *output)
{
	struct gl_renderer *gr = get_renderer(output->compositor);
	struct gl_output_state *go = get_output_state(output);
	struct timeline_render_point *trp, *tmp;

	if (shadow_exists(go))
		gl_fbo_texture_fini(&go->shadow_fb, &go->shadow_tex);

	eglMakeCurrent(gr->egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
		       gr->egl_context);

	weston_platform_destroy_egl_surface(gr->egl_display, go->egl_surface);

	if (!wl_list_empty(&go->timeline_render_point_list))
		weston_log("warning: discarding pending timeline render "
			   "objects at output destruction\n");

	if (gl_features_has(gr, FEATURE_GPU_TIMELINE))
		gr->delete_queries(1, &go->render_query);

	wl_list_for_each_safe(trp, tmp, &go->timeline_render_point_list, link)
		timeline_render_point_destroy(trp);

	if (go->render_sync != EGL_NO_SYNC_KHR)
		gr->destroy_sync(gr->egl_display, go->render_sync);

	gl_renderer_discard_renderbuffers(go, true);

	free(go);
}

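/* Export the output's render sync object as a native fence fd, or return -1
 * if there is no sync object yet or the export fails. The caller takes
 * ownership of the fd. */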
static int
gl_renderer_create_fence_fd(struct weston_output *output)
{
	struct gl_output_state *go = get_output_state(output);
	struct gl_renderer *gr = get_renderer(output->compositor);
	int fd;

	if (go->render_sync == EGL_NO_SYNC_KHR)
		return -1;

	fd = gr->dup_native_fence_fd(gr->egl_display, go->render_sync);
	if (fd == EGL_NO_NATIVE_FENCE_FD_ANDROID)
		return -1;

	return fd;
}

static void
gl_renderer_allocator_destroy(struct dmabuf_allocator *allocator)
{
	if (!allocator)
		return;

#ifdef HAVE_GBM
	if (allocator->gbm_device && allocator->has_own_device)
		gbm_device_destroy(allocator->gbm_device);
#else
	assert(!allocator->has_own_device);
#endif

	free(allocator);
}

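/* Create the dmabuf allocator. On the GBM platform the compositor's own
 * gbm_device is reused; otherwise, when a DRM device is known, a private
 * gbm_device is created from it, and has_own_device records that it must
 * be destroyed along with the allocator. */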
static struct dmabuf_allocator *
gl_renderer_allocator_create(struct gl_renderer *gr,
			     const struct gl_renderer_display_options *options)
{
	struct dmabuf_allocator *allocator;
	struct gbm_device *gbm = NULL;
	bool has_own_device = false;

	if (options->egl_platform == EGL_PLATFORM_GBM_KHR)
		gbm = options->egl_native_display;
#ifdef HAVE_GBM
	if (!gbm && gr->drm_device) {
		int fd = open(gr->drm_device, O_RDWR);
		gbm = gbm_create_device(fd);
		has_own_device = true;
	}
#endif
	if (!gbm)
		return NULL;

	allocator = xzalloc(sizeof(*allocator));
	allocator->gbm_device = gbm;
	allocator->has_own_device = has_own_device;

	return allocator;
}

static void
gl_renderer_destroy(struct weston_compositor *ec)
{
	struct gl_renderer *gr = get_renderer(ec);
	struct dmabuf_format *format, *next_format;
	struct gl_capture_task *gl_task, *tmp;

	wl_signal_emit(&gr->destroy_signal, gr);

	if (gr->display_bound)
		gr->unbind_display(gr->egl_display, ec->wl_display);

	wl_list_for_each_safe(gl_task, tmp, &gr->pending_capture_list, link)
		destroy_capture_task(gl_task);

	gl_renderer_shader_list_destroy(gr);
	if (gr->fallback_shader)
		gl_shader_destroy(gr, gr->fallback_shader);

	if (gr->wireframe_size)
		glDeleteTextures(1, &gr->wireframe_tex);

	/* Work around crash in egl_dri2.c's dri2_make_current() */
	eglMakeCurrent(gr->egl_display,
		       EGL_NO_SURFACE, EGL_NO_SURFACE,
		       EGL_NO_CONTEXT);

	wl_list_for_each_safe(format, next_format, &gr->dmabuf_formats, link)
		dmabuf_format_destroy(format);

	weston_drm_format_array_fini(&gr->supported_formats);

	gl_renderer_allocator_destroy(gr->allocator);

	eglTerminate(gr->egl_display);
	eglReleaseThread();

	wl_array_release(&gr->position_stream);
	wl_array_release(&gr->barycentric_stream);
	wl_array_release(&gr->indices);

	if (gr->debug_mode_binding)
		weston_binding_destroy(gr->debug_mode_binding);

	weston_log_scope_destroy(gr->shader_scope);
	weston_log_scope_destroy(gr->renderer_scope);
	free(gr);
	ec->renderer = NULL;
}

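/* Build the default linux-dmabuf feedback object: a single tranche with
 * renderer preference, keyed to the renderer's DRM device. Aborts if the
 * device node vanished, since there is no way to recover from that. */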
static int
create_default_dmabuf_feedback(struct weston_compositor *ec,
			       struct gl_renderer *gr)
{
	struct stat dev_stat;
	struct weston_dmabuf_feedback_tranche *tranche;
	uint32_t flags = 0;

	if (stat(gr->drm_device, &dev_stat) != 0) {
		weston_log("%s: device disappeared, so we can't recover\n", __func__);
		abort();
	}

	ec->default_dmabuf_feedback =
		weston_dmabuf_feedback_create(dev_stat.st_rdev);
	if (!ec->default_dmabuf_feedback)
		return -1;

	tranche =
		weston_dmabuf_feedback_tranche_create(ec->default_dmabuf_feedback,
						      ec->dmabuf_feedback_format_table,
						      dev_stat.st_rdev, flags,
						      RENDERER_PREF);
	if (!tranche) {
		weston_dmabuf_feedback_destroy(ec->default_dmabuf_feedback);
		ec->default_dmabuf_feedback = NULL;
		return -1;
	}

	return 0;
}

static int
gl_renderer_display_create(struct weston_compositor *ec,
			   const struct gl_renderer_display_options *options)
{
	struct gl_renderer *gr;
	int ret;

	gr = zalloc(sizeof *gr);
	if (gr == NULL)
		return -1;

	gr->compositor = ec;
	wl_list_init(&gr->shader_list);
	gr->platform = options->egl_platform;

	gr->renderer_scope = weston_compositor_add_log_scope(ec, "gl-renderer",
		"GL-renderer verbose messages\n", NULL, NULL, gr);
	if (!gr->renderer_scope)
		goto fail;

	gr->shader_scope = gl_shader_scope_create(gr);
	if (!gr->shader_scope)
		goto fail;

	if (gl_renderer_setup_egl_client_extensions(gr) < 0)
		goto fail;

	gr->base.read_pixels = gl_renderer_read_pixels;
	gr->base.repaint_output = gl_renderer_repaint_output;
	gr->base.resize_output = gl_renderer_resize_output;
	gr->base.create_renderbuffer = gl_renderer_create_renderbuffer;
	gr->base.destroy_renderbuffer = gl_renderer_destroy_renderbuffer;
	gr->base.flush_damage = gl_renderer_flush_damage;
	gr->base.attach = gl_renderer_attach;
	gr->base.destroy = gl_renderer_destroy;
	gr->base.surface_copy_content = gl_renderer_surface_copy_content;
	gr->base.fill_buffer_info = gl_renderer_fill_buffer_info;
	gr->base.buffer_init = gl_renderer_buffer_init;
	gr->base.type = WESTON_RENDERER_GL;

	if (gl_renderer_setup_egl_display(gr, options->egl_native_display) < 0)
		goto fail;

	gr->allocator = gl_renderer_allocator_create(gr, options);
	if (!gr->allocator)
		weston_log("failed to initialize allocator\n");

	weston_drm_format_array_init(&gr->supported_formats);

	log_egl_info(gr, gr->egl_display);

	ec->renderer = &gr->base;

	if (gl_renderer_setup_egl_extensions(ec) < 0)
		goto fail_with_error;

	if (egl_display_has(gr, EXTENSION_WL_BIND_WAYLAND_DISPLAY)) {
		gr->display_bound = gr->bind_display(gr->egl_display,
						     ec->wl_display);
		if (!gr->display_bound)
			weston_log("warning: There is already a Wayland "
				   "display bound to the EGL display.\n");
	}

	if (!egl_display_has(gr, EXTENSION_KHR_SURFACELESS_CONTEXT))
		goto fail_terminate;

	if (!gl_features_has(gr, FEATURE_NO_CONFIG_CONTEXT)) {
		gr->egl_config =
			gl_renderer_get_egl_config(gr,
						   options->egl_surface_type,
						   options->formats,
						   options->formats_count);
		if (gr->egl_config == EGL_NO_CONFIG_KHR) {
			weston_log("failed to choose EGL config\n");
			goto fail_terminate;
		}
	}

	if (gl_renderer_setup(ec) < 0)
		goto fail_terminate;

	ec->capabilities |= WESTON_CAP_ROTATION_ANY;
	ec->capabilities |= WESTON_CAP_CAPTURE_YFLIP;
	ec->capabilities |= WESTON_CAP_VIEW_CLIP_MASK;
	if (gl_features_has(gr, FEATURE_EXPLICIT_SYNC))
		ec->capabilities |= WESTON_CAP_EXPLICIT_SYNC;
	if (gl_features_has(gr, FEATURE_COLOR_TRANSFORMS))
		ec->capabilities |= WESTON_CAP_COLOR_OPS;

	if (gr->allocator)
		gr->base.dmabuf_alloc = gl_renderer_dmabuf_alloc;

	/* No need to check for GL_OES_EGL_image_external because this is gated
	 * by EGL_EXT_image_dma_buf_import_modifiers which depends on it. */
	if (egl_display_has(gr, EXTENSION_EXT_IMAGE_DMA_BUF_IMPORT) &&
	    gl_extensions_has(gr, EXTENSION_OES_EGL_IMAGE)) {
		gr->base.import_dmabuf = gl_renderer_import_dmabuf;
		gr->base.get_supported_formats = gl_renderer_get_supported_formats;
		gr->base.create_renderbuffer_dmabuf =
			gl_renderer_create_renderbuffer_dmabuf;
		ret = populate_supported_formats(ec, &gr->supported_formats);
		if (ret < 0)
			goto fail_terminate;
		if (gr->drm_device) {
			/* We support dma-buf feedback only when the renderer
			 * exposes a DRM device */
			ec->dmabuf_feedback_format_table =
				weston_dmabuf_feedback_format_table_create(&gr->supported_formats);
			if (!ec->dmabuf_feedback_format_table)
				goto fail_terminate;
			ret = create_default_dmabuf_feedback(ec, gr);
			if (ret < 0)
				goto fail_feedback;
		}
	}
	wl_list_init(&gr->dmabuf_formats);

	wl_signal_init(&gr->destroy_signal);

	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_XBGR8888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_ABGR8888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_RGBX8888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_RGBA8888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_BGRX8888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_BGRA8888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_RGB888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_BGR888);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_RGB565);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUV420);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUV444);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_NV12);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_NV16);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_NV24);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_YUYV);
	wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_XYUV8888);
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (gr->gl_version >= gl_version(3, 0) ||
	    gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_TYPE_2_10_10_10_REV)) {
		wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_ABGR2101010);
		wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_XBGR2101010);
	}
	if (gl_features_has(gr, FEATURE_COLOR_TRANSFORMS)) {
		wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_ABGR16161616F);
		wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_XBGR16161616F);
	}
	if (gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_NORM16)) {
		wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_ABGR16161616);
		wl_display_add_shm_format(ec->wl_display, WL_SHM_FORMAT_XBGR16161616);
	}
#endif

	return 0;

fail_with_error:
	gl_renderer_print_egl_error_state();
	if (gr->drm_device) {
		weston_dmabuf_feedback_destroy(ec->default_dmabuf_feedback);
		ec->default_dmabuf_feedback = NULL;
	}
fail_feedback:
	if (gr->drm_device) {
		weston_dmabuf_feedback_format_table_destroy(ec->dmabuf_feedback_format_table);
		ec->dmabuf_feedback_format_table = NULL;
	}
fail_terminate:
	weston_drm_format_array_fini(&gr->supported_formats);
	eglTerminate(gr->egl_display);
fail:
	weston_log_scope_destroy(gr->shader_scope);
	weston_log_scope_destroy(gr->renderer_scope);
	free(gr);
	ec->renderer = NULL;
	return -1;
}

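/* Debug key binding: cycle through the renderer debug modes, set debug_clear
 * for the modes that need the framebuffer cleared before drawing, flag the
 * wireframe data dirty when entering wireframe mode, and damage everything
 * so the new mode takes effect immediately. */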
static void
debug_mode_binding(struct weston_keyboard *keyboard,
		   const struct timespec *time,
		   uint32_t key, void *data)
{
	struct weston_compositor *compositor = data;
	struct gl_renderer *gr = get_renderer(compositor);
	int mode;

	mode = (gr->debug_mode + 1) % DEBUG_MODE_LAST;
	gr->debug_mode = mode;
	gr->debug_clear = mode == DEBUG_MODE_WIREFRAME ||
			  mode == DEBUG_MODE_BATCHES ||
			  mode == DEBUG_MODE_DAMAGE ||
			  mode == DEBUG_MODE_OPAQUE;
	gr->wireframe_dirty = mode == DEBUG_MODE_WIREFRAME;

	weston_compositor_damage_all(compositor);
}

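/* Parse the GL_VERSION string, accepting both the bare "major.minor" form
 * and the "OpenGL ES major.minor" form, and default to 2.0 when it cannot
 * be parsed. */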
static uint32_t
get_gl_version(void)
{
	const char *version;
	int major, minor;

	version = (const char *) glGetString(GL_VERSION);
	if (version &&
	    (sscanf(version, "%d.%d", &major, &minor) == 2 ||
	     sscanf(version, "OpenGL ES %d.%d", &major, &minor) == 2) &&
	    major > 0 && minor >= 0) {
		return gl_version(major, minor);
	}

	weston_log("warning: failed to detect GLES version, defaulting to 2.0.\n");
	return gl_version(2, 0);
}

static int
gl_renderer_setup(struct weston_compositor *ec)
{
	struct gl_renderer *gr = get_renderer(ec);
	const char *extensions;
	EGLBoolean ret;
	PFNGLGETQUERYIVEXTPROC get_query_iv;
	int elapsed_bits;

	EGLint context_attribs[16] = {
		EGL_CONTEXT_CLIENT_VERSION, 0,
	};
	unsigned int nattr = 2;

	if (!eglBindAPI(EGL_OPENGL_ES_API)) {
		weston_log("failed to bind EGL_OPENGL_ES_API\n");
		gl_renderer_print_egl_error_state();
		return -1;
	}

	/*
	 * Being the compositor we require minimum output latency,
	 * so request a high priority context for ourselves - that should
	 * reschedule all of our rendering and its dependencies to be completed
	 * first. If the driver doesn't permit us to create a high priority
	 * context, it will fall back to the default priority (MEDIUM).
	 */
	if (egl_display_has(gr, EXTENSION_IMG_CONTEXT_PRIORITY)) {
		context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_LEVEL_IMG;
		context_attribs[nattr++] = EGL_CONTEXT_PRIORITY_HIGH_IMG;
	}

	assert(nattr < ARRAY_LENGTH(context_attribs));
	context_attribs[nattr] = EGL_NONE;

	/* try to create an OpenGL ES 3 context first */
	context_attribs[1] = 3;
	gr->egl_context = eglCreateContext(gr->egl_display, gr->egl_config,
					   EGL_NO_CONTEXT, context_attribs);
	if (gr->egl_context == NULL) {
		/* and then fall back to OpenGL ES 2 */
		context_attribs[1] = 2;
		gr->egl_context = eglCreateContext(gr->egl_display,
						   gr->egl_config,
						   EGL_NO_CONTEXT,
						   context_attribs);
		if (gr->egl_context == NULL) {
			weston_log("failed to create context\n");
			gl_renderer_print_egl_error_state();
			return -1;
		}
	}

	if (egl_display_has(gr, EXTENSION_IMG_CONTEXT_PRIORITY)) {
		EGLint value = EGL_CONTEXT_PRIORITY_MEDIUM_IMG;

		eglQueryContext(gr->egl_display, gr->egl_context,
				EGL_CONTEXT_PRIORITY_LEVEL_IMG, &value);

		if (value != EGL_CONTEXT_PRIORITY_HIGH_IMG) {
			weston_log("Failed to obtain a high priority context.\n");
			/* Not an error, continue on as normal */
		}
	}

	ret = eglMakeCurrent(gr->egl_display, EGL_NO_SURFACE, EGL_NO_SURFACE,
			     gr->egl_context);
	if (ret == EGL_FALSE) {
		weston_log("Failed to make EGL context current.\n");
		gl_renderer_print_egl_error_state();
		return -1;
	}

	gr->gl_version = get_gl_version();
	log_gl_info(gr);

	extensions = (const char *) glGetString(GL_EXTENSIONS);
	if (!extensions) {
		weston_log("Retrieving GL extension string failed.\n");
		return -1;
	}

	gl_extensions_add(extension_table, extensions, &gr->gl_extensions);

2024-09-30 17:10:27 +02:00
|
|
|
if (gl_extensions_has(gr, EXTENSION_OES_EGL_IMAGE)) {
|
2024-10-11 12:03:08 +02:00
|
|
|
GET_PROC_ADDRESS(gr->image_target_texture_2d,
|
|
|
|
|
"glEGLImageTargetTexture2DOES");
|
|
|
|
|
GET_PROC_ADDRESS(gr->image_target_renderbuffer_storage,
|
|
|
|
|
"glEGLImageTargetRenderbufferStorageOES");
|
2024-09-30 17:10:27 +02:00
|
|
|
}
|
|
|
|
|
|
2024-09-28 17:12:58 +02:00
|
|
|
if (!gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_FORMAT_BGRA8888)) {
|
2012-09-05 22:06:26 -04:00
|
|
|
weston_log("GL_EXT_texture_format_BGRA8888 not available\n");
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
2024-09-28 17:12:58 +02:00
|
|
|
if (gl_extensions_has(gr, EXTENSION_EXT_READ_FORMAT_BGRA))
|
2023-09-11 18:42:22 +02:00
|
|
|
ec->read_format = pixel_format_get_info(DRM_FORMAT_ARGB8888);
|
2012-09-05 22:06:26 -04:00
|
|
|
else
|
2023-09-11 18:42:22 +02:00
|
|
|
ec->read_format = pixel_format_get_info(DRM_FORMAT_ABGR8888);
|
2012-09-05 22:06:26 -04:00
|
|
|
|
2024-10-10 10:14:09 +02:00
|
|
|
if (gr->gl_version < gl_version(3, 0) &&
|
2024-09-28 17:12:58 +02:00
|
|
|
!gl_extensions_has(gr, EXTENSION_EXT_UNPACK_SUBIMAGE)) {
|
2020-12-15 15:10:04 +02:00
|
|
|
weston_log("GL_EXT_unpack_subimage not available.\n");
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
2012-09-05 22:06:26 -04:00
|
|
|
|
2024-10-11 12:19:14 +02:00
|
|
|
if (gl_extensions_has(gr, EXTENSION_OES_MAPBUFFER))
|
|
|
|
|
GET_PROC_ADDRESS(gr->unmap_buffer, "glUnmapBufferOES");
|
|
|
|
|
|
|
|
|
|
if (gl_extensions_has(gr, EXTENSION_EXT_MAP_BUFFER_RANGE))
|
|
|
|
|
GET_PROC_ADDRESS(gr->map_buffer_range, "glMapBufferRangeEXT");
|
|
|
|
|
|
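	/* Disjoint timer queries back the GPU timeline feature below. A
	 * driver may expose the extension yet report zero counter bits for
	 * GL_TIME_ELAPSED_EXT, in which case timing is unusable and the
	 * extension bit is masked out again. */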
	if (gl_extensions_has(gr, EXTENSION_EXT_DISJOINT_TIMER_QUERY)) {
		GET_PROC_ADDRESS(gr->gen_queries, "glGenQueriesEXT");
		GET_PROC_ADDRESS(gr->delete_queries, "glDeleteQueriesEXT");
		GET_PROC_ADDRESS(gr->begin_query, "glBeginQueryEXT");
		GET_PROC_ADDRESS(gr->end_query, "glEndQueryEXT");
#if !defined(NDEBUG)
		GET_PROC_ADDRESS(gr->get_query_object_iv,
				 "glGetQueryObjectivEXT");
#endif
		GET_PROC_ADDRESS(gr->get_query_object_ui64v,
				 "glGetQueryObjectui64vEXT");
		GET_PROC_ADDRESS(get_query_iv, "glGetQueryivEXT");
		get_query_iv(GL_TIME_ELAPSED_EXT, GL_QUERY_COUNTER_BITS_EXT,
			     &elapsed_bits);
		if (elapsed_bits == 0)
			gr->gl_extensions &=
				~EXTENSION_EXT_DISJOINT_TIMER_QUERY;
	}

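	/* Two ways to get readable pixel-buffer objects for asynchronous
	 * glReadPixels(): core MapBufferRange() on ES 3.0+, or the
	 * NV_pixel_buffer_object + EXT_map_buffer_range + OES_mapbuffer
	 * combination on ES 2.0, detailed in the comment below. */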
	/* Async read-back feature. */
	if (gr->gl_version >= gl_version(3, 0) &&
	    egl_display_has(gr, EXTENSION_KHR_GET_ALL_PROC_ADDRESSES)) {
		GET_PROC_ADDRESS(gr->map_buffer_range, "glMapBufferRange");
		GET_PROC_ADDRESS(gr->unmap_buffer, "glUnmapBuffer");
		gr->pbo_usage = GL_STREAM_READ;
		gr->features |= FEATURE_ASYNC_READBACK;
	} else if (gl_extensions_has(gr, EXTENSION_NV_PIXEL_BUFFER_OBJECT) &&
		   gl_extensions_has(gr, EXTENSION_EXT_MAP_BUFFER_RANGE) &&
		   gl_extensions_has(gr, EXTENSION_OES_MAPBUFFER)) {
		/* Reading isn't exposed to BufferData() on ES 2.0 and
		 * NV_pixel_buffer_object mentions that "glMapBufferOES does not
		 * allow reading from the mapped pointer". EXT_map_buffer_range
		 * (which depends on OES_mapbuffer) adds read access support to
		 * MapBufferRangeEXT() without extending BufferData() so we
		 * create a PBO with a write usage hint that ends up being
		 * mapped with a read access. Even though that sounds incorrect,
		 * EXT_map_buffer_range provides examples doing so. Mesa
		 * actually ignores PBOs' usage hint assuming read access. */
		gr->pbo_usage = GL_STREAM_DRAW;
		gr->features |= FEATURE_ASYNC_READBACK;
	}

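	/* Color transforms need glTexImage3D() and linearly filterable float
	 * textures: with KHR_get_all_proc_addresses, ES 3.2 plus
	 * OES_texture_float_linear suffices, while ES 3.0 additionally
	 * requires EXT_color_buffer_half_float. */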
	/* Color transforms feature. */
	if ((gr->gl_version >= gl_version(3, 2) &&
	     egl_display_has(gr, EXTENSION_KHR_GET_ALL_PROC_ADDRESSES) &&
	     gl_extensions_has(gr, EXTENSION_OES_TEXTURE_FLOAT_LINEAR)) ||
	    (gr->gl_version >= gl_version(3, 0) &&
	     egl_display_has(gr, EXTENSION_KHR_GET_ALL_PROC_ADDRESSES) &&
	     gl_extensions_has(gr, EXTENSION_OES_TEXTURE_FLOAT_LINEAR) &&
	     gl_extensions_has(gr, EXTENSION_EXT_COLOR_BUFFER_HALF_FLOAT))) {
		GET_PROC_ADDRESS(gr->tex_image_3d, "glTexImage3D");
		gr->features |= FEATURE_COLOR_TRANSFORMS;
	}

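	/* The GPU timeline instrumentation needs both EGL native fence sync
	 * and disjoint timer queries, so the feature is only enabled when
	 * both are present. */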
	/* GPU timeline feature. */
	if (egl_display_has(gr, EXTENSION_ANDROID_NATIVE_FENCE_SYNC) &&
	    gl_extensions_has(gr, EXTENSION_EXT_DISJOINT_TIMER_QUERY))
		gr->features |= FEATURE_GPU_TIMELINE;

	wl_list_init(&gr->pending_capture_list);

	glActiveTexture(GL_TEXTURE0);

	gr->fallback_shader = gl_renderer_create_fallback_shader(gr);
	if (!gr->fallback_shader) {
		weston_log("Error: compiling fallback shader failed.\n");
		return -1;
	}

	gr->debug_mode_binding =
		weston_compositor_add_debug_binding(ec, KEY_M,
						    debug_mode_binding, ec);

	weston_log("GL ES %d.%d - renderer features:\n",
		   gl_version_major(gr->gl_version),
		   gl_version_minor(gr->gl_version));
	weston_log_continue(STAMP_SPACE "read-back format: %s\n",
			    ec->read_format->drm_format_name);
	weston_log_continue(STAMP_SPACE "glReadPixels supports y-flip: %s\n",
			    yesno(gl_extensions_has(gr, EXTENSION_ANGLE_PACK_REVERSE_ROW_ORDER)));
	weston_log_continue(STAMP_SPACE "glReadPixels supports PBO: %s\n",
			    yesno(gl_features_has(gr, FEATURE_ASYNC_READBACK)));
	weston_log_continue(STAMP_SPACE "wl_shm 10 bpc formats: %s\n",
			    yesno(gr->gl_version >= gl_version(3, 0) ||
				  gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_TYPE_2_10_10_10_REV)));
	weston_log_continue(STAMP_SPACE "wl_shm 16 bpc formats: %s\n",
			    yesno(gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_NORM16)));
	weston_log_continue(STAMP_SPACE "wl_shm half-float formats: %s\n",
			    yesno(gl_features_has(gr, FEATURE_COLOR_TRANSFORMS)));
	weston_log_continue(STAMP_SPACE "internal R and RG formats: %s\n",
			    yesno(gr->gl_version >= gl_version(3, 0) ||
				  gl_extensions_has(gr, EXTENSION_EXT_TEXTURE_RG)));
	weston_log_continue(STAMP_SPACE "OES_EGL_image_external: %s\n",
			    yesno(gl_extensions_has(gr, EXTENSION_OES_EGL_IMAGE_EXTERNAL)));
	weston_log_continue(STAMP_SPACE "GPU timeline: %s\n",
			    yesno(gl_features_has(gr, FEATURE_GPU_TIMELINE)));

	return 0;
}

WL_EXPORT struct gl_renderer_interface gl_renderer_interface = {
	.display_create = gl_renderer_display_create,
	.output_window_create = gl_renderer_output_window_create,
	.output_fbo_create = gl_renderer_output_fbo_create,
	.output_destroy = gl_renderer_output_destroy,
	.output_set_border = gl_renderer_output_set_border,
	.create_fence_fd = gl_renderer_create_fence_fd,
};