mirror of
https://gitlab.freedesktop.org/cairo/cairo.git
synced 2026-01-13 10:50:28 +01:00
drm: code dump, sorry.
Lots upon lots of tiny fixes mixed in with experimental code. :(
This commit is contained in:
parent
c8fba49603
commit
bd672d080c
38 changed files with 5327 additions and 1315 deletions
|
|
@ -169,6 +169,16 @@ enabled_cairo_boilerplate_private += $(cairo_boilerplate_drm_private)
|
|||
enabled_cairo_boilerplate_sources += $(cairo_boilerplate_drm_sources)
|
||||
endif
|
||||
|
||||
unsupported_cairo_boilerplate_headers += $(cairo_boilerplate_drm_xr_headers)
|
||||
all_cairo_boilerplate_headers += $(cairo_boilerplate_drm_xr_headers)
|
||||
all_cairo_boilerplate_private += $(cairo_boilerplate_drm_xr_private)
|
||||
all_cairo_boilerplate_sources += $(cairo_boilerplate_drm_xr_sources)
|
||||
ifeq ($(CAIRO_HAS_DRM_XR_FUNCTIONS),1)
|
||||
enabled_cairo_boilerplate_headers += $(cairo_boilerplate_drm_xr_headers)
|
||||
enabled_cairo_boilerplate_private += $(cairo_boilerplate_drm_xr_private)
|
||||
enabled_cairo_boilerplate_sources += $(cairo_boilerplate_drm_xr_sources)
|
||||
endif
|
||||
|
||||
unsupported_cairo_boilerplate_headers += $(cairo_boilerplate_gallium_headers)
|
||||
all_cairo_boilerplate_headers += $(cairo_boilerplate_gallium_headers)
|
||||
all_cairo_boilerplate_private += $(cairo_boilerplate_gallium_private)
|
||||
|
|
@ -199,6 +209,14 @@ enabled_cairo_boilerplate_private += $(cairo_boilerplate_png_private)
|
|||
enabled_cairo_boilerplate_sources += $(cairo_boilerplate_png_sources)
|
||||
endif
|
||||
|
||||
supported_cairo_boilerplate_headers += $(cairo_boilerplate_glew_headers)
|
||||
all_cairo_boilerplate_headers += $(cairo_boilerplate_glew_headers)
|
||||
all_cairo_boilerplate_private += $(cairo_boilerplate_glew_private)
|
||||
all_cairo_boilerplate_sources += $(cairo_boilerplate_glew_sources)
|
||||
enabled_cairo_boilerplate_headers += $(cairo_boilerplate_glew_headers)
|
||||
enabled_cairo_boilerplate_private += $(cairo_boilerplate_glew_private)
|
||||
enabled_cairo_boilerplate_sources += $(cairo_boilerplate_glew_sources)
|
||||
|
||||
unsupported_cairo_boilerplate_headers += $(cairo_boilerplate_gl_headers)
|
||||
all_cairo_boilerplate_headers += $(cairo_boilerplate_gl_headers)
|
||||
all_cairo_boilerplate_private += $(cairo_boilerplate_gl_private)
|
||||
|
|
|
|||
|
|
@ -46,12 +46,20 @@ _cairo_boilerplate_drm_create_surface (const char *name,
|
|||
void **closure)
|
||||
{
|
||||
cairo_device_t *device;
|
||||
cairo_format_t format;
|
||||
|
||||
device = cairo_drm_device_default ();
|
||||
if (device == NULL)
|
||||
return NULL; /* skip tests if no supported h/w found */
|
||||
|
||||
return *closure = cairo_drm_surface_create (device, content, width, height);
|
||||
switch (content) {
|
||||
case CAIRO_CONTENT_ALPHA: format = CAIRO_FORMAT_A8; break;
|
||||
case CAIRO_CONTENT_COLOR: format = CAIRO_FORMAT_RGB24; break;
|
||||
default:
|
||||
case CAIRO_CONTENT_COLOR_ALPHA: format = CAIRO_FORMAT_ARGB32; break;
|
||||
}
|
||||
|
||||
return *closure = cairo_drm_surface_create (device, format, width, height);
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
@ -59,7 +67,7 @@ _cairo_boilerplate_drm_synchronize (void *closure)
|
|||
{
|
||||
cairo_surface_t *image;
|
||||
|
||||
image = cairo_drm_surface_map (closure);
|
||||
image = cairo_drm_surface_map_to_image (closure);
|
||||
if (cairo_surface_status (image) == CAIRO_STATUS_SUCCESS)
|
||||
cairo_drm_surface_unmap (closure, image);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ CAIRO_HAS_SKIA_SURFACE=0
|
|||
CAIRO_HAS_OS2_SURFACE=0
|
||||
CAIRO_HAS_BEOS_SURFACE=0
|
||||
CAIRO_HAS_DRM_SURFACE=0
|
||||
CAIRO_HAS_DRM_XR_FUNCTIONS=0
|
||||
CAIRO_HAS_GALLIUM_SURFACE=0
|
||||
CAIRO_HAS_XCB_DRM_FUNCTIONS=0
|
||||
CAIRO_HAS_PNG_FUNCTIONS=1
|
||||
|
|
|
|||
|
|
@ -50,6 +50,9 @@ endif
|
|||
ifeq ($(CAIRO_HAS_DRM_SURFACE),1)
|
||||
@echo "#define CAIRO_HAS_DRM_SURFACE 1" >> src/cairo-features.h
|
||||
endif
|
||||
ifeq ($(CAIRO_HAS_DRM_XR_FUNCTIONS),1)
|
||||
@echo "#define CAIRO_HAS_DRM_XR_FUNCTIONS 1" >> src/cairo-features.h
|
||||
endif
|
||||
ifeq ($(CAIRO_HAS_GALLIUM_SURFACE),1)
|
||||
@echo "#define CAIRO_HAS_GALLIUM_SURFACE 1" >> src/cairo-features.h
|
||||
endif
|
||||
|
|
@ -59,6 +62,7 @@ endif
|
|||
ifeq ($(CAIRO_HAS_PNG_FUNCTIONS),1)
|
||||
@echo "#define CAIRO_HAS_PNG_FUNCTIONS 1" >> src/cairo-features.h
|
||||
endif
|
||||
@echo "#define CAIRO_HAS_GLEW_FUNCTIONS 1" >> src/cairo-features.h
|
||||
ifeq ($(CAIRO_HAS_GL_SURFACE),1)
|
||||
@echo "#define CAIRO_HAS_GL_SURFACE 1" >> src/cairo-features.h
|
||||
endif
|
||||
|
|
|
|||
|
|
@ -394,6 +394,7 @@ AC_DEFUN([CAIRO_REPORT],
|
|||
echo " X11-xcb functions: $use_xlib_xcb"
|
||||
echo " XCB-drm functions: $use_xcb_drm"
|
||||
echo " XCB-shm functions: $use_xcb_shm"
|
||||
echo " DRM-Xr functions: $use_drm_xr"
|
||||
echo ""
|
||||
echo "The following features and utilities:"
|
||||
echo " cairo-trace: $use_trace"
|
||||
|
|
|
|||
13
configure.ac
13
configure.ac
|
|
@ -258,6 +258,19 @@ CAIRO_ENABLE_SURFACE_BACKEND(drm, DRM, no, [
|
|||
use_drm="no (requires $drm_REQUIRES, udev is available from git://git.kernel.org/pub/scm/linux/hotplug/udev.git)"])
|
||||
])
|
||||
|
||||
CAIRO_ENABLE_FUNCTIONS(drm_xr, DRM Xr (DDX), no, [
|
||||
if test "x$use_drm" == "xyes"; then
|
||||
drm_xr_REQUIRES="xorg-server >= 1.6 xproto xextproto >= 7.0.99.1 renderproto x11"
|
||||
PKG_CHECK_MODULES(drm_xr, $drm_xr_REQUIRES, ,
|
||||
[AC_MSG_RESULT(no)
|
||||
use_drm_xr="no (requires $drm_xr)"])
|
||||
drm_xr_CFLAGS=`echo "$drm_xr_CFLAGS" | $SED -e 's/-fvisibility=hidden//g'`
|
||||
else
|
||||
use_drm_xr="no (requires --enable-drm)"
|
||||
fi
|
||||
])
|
||||
AM_CONDITIONAL(BUILD_DRM_XR, test "x$use_drm_xr" = "xyes")
|
||||
|
||||
CAIRO_ENABLE_SURFACE_BACKEND(gallium, Gallium3D, no, [
|
||||
if test "x$use_drm" = "xyes"; then
|
||||
AC_ARG_WITH([gallium],
|
||||
|
|
|
|||
|
|
@ -356,6 +356,13 @@ cairo_drm_sources = drm/cairo-drm.c \
|
|||
drm/cairo-drm-radeon-surface.c
|
||||
cairo_gallium_sources = drm/cairo-drm-gallium-surface.c
|
||||
|
||||
if BUILD_DRM_XR
|
||||
cairo_drm_headers += cairo-drm-xr.h
|
||||
cairo_drm_sources += \
|
||||
drm/cairo-drm-xr.c \
|
||||
$(NULL)
|
||||
endif
|
||||
|
||||
cairo_script_headers = cairo-script.h
|
||||
cairo_script_sources = cairo-script-surface.c
|
||||
|
||||
|
|
|
|||
|
|
@ -231,6 +231,20 @@ ifeq ($(CAIRO_HAS_DRM_SURFACE),1)
|
|||
enabled_cairo_pkgconf += cairo-drm.pc
|
||||
endif
|
||||
|
||||
unsupported_cairo_headers += $(cairo_drm_xr_headers)
|
||||
all_cairo_headers += $(cairo_drm_xr_headers)
|
||||
all_cairo_private += $(cairo_drm_xr_private)
|
||||
all_cairo_sources += $(cairo_drm_xr_sources)
|
||||
ifeq ($(CAIRO_HAS_DRM_XR_FUNCTIONS),1)
|
||||
enabled_cairo_headers += $(cairo_drm_xr_headers)
|
||||
enabled_cairo_private += $(cairo_drm_xr_private)
|
||||
enabled_cairo_sources += $(cairo_drm_xr_sources)
|
||||
endif
|
||||
all_cairo_pkgconf += cairo-drm-xr.pc
|
||||
ifeq ($(CAIRO_HAS_DRM_XR_FUNCTIONS),1)
|
||||
enabled_cairo_pkgconf += cairo-drm-xr.pc
|
||||
endif
|
||||
|
||||
unsupported_cairo_headers += $(cairo_gallium_headers)
|
||||
all_cairo_headers += $(cairo_gallium_headers)
|
||||
all_cairo_private += $(cairo_gallium_private)
|
||||
|
|
@ -273,6 +287,14 @@ ifeq ($(CAIRO_HAS_PNG_FUNCTIONS),1)
|
|||
enabled_cairo_pkgconf += cairo-png.pc
|
||||
endif
|
||||
|
||||
supported_cairo_headers += $(cairo_glew_headers)
|
||||
all_cairo_headers += $(cairo_glew_headers)
|
||||
all_cairo_private += $(cairo_glew_private)
|
||||
all_cairo_sources += $(cairo_glew_sources)
|
||||
enabled_cairo_headers += $(cairo_glew_headers)
|
||||
enabled_cairo_private += $(cairo_glew_private)
|
||||
enabled_cairo_sources += $(cairo_glew_sources)
|
||||
|
||||
unsupported_cairo_headers += $(cairo_gl_headers)
|
||||
all_cairo_headers += $(cairo_gl_headers)
|
||||
all_cairo_private += $(cairo_gl_private)
|
||||
|
|
|
|||
|
|
@ -71,6 +71,10 @@ cairo_private cairo_status_t
|
|||
_cairo_boxes_add (cairo_boxes_t *boxes,
|
||||
const cairo_box_t *box);
|
||||
|
||||
cairo_private void
|
||||
_cairo_boxes_extents (const cairo_boxes_t *boxes,
|
||||
cairo_rectangle_int_t *extents);
|
||||
|
||||
cairo_private void
|
||||
_cairo_boxes_clear (cairo_boxes_t *boxes);
|
||||
|
||||
|
|
|
|||
|
|
@ -239,6 +239,37 @@ _cairo_boxes_add (cairo_boxes_t *boxes,
|
|||
return boxes->status;
|
||||
}
|
||||
|
||||
void
|
||||
_cairo_boxes_extents (const cairo_boxes_t *boxes,
|
||||
cairo_rectangle_int_t *extents)
|
||||
{
|
||||
const struct _cairo_boxes_chunk *chunk;
|
||||
cairo_box_t box;
|
||||
int i;
|
||||
|
||||
box.p1.y = box.p1.x = INT_MAX;
|
||||
box.p2.y = box.p2.x = INT_MIN;
|
||||
|
||||
for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
|
||||
const cairo_box_t *b = chunk->base;
|
||||
for (i = 0; i < chunk->count; i++) {
|
||||
if (b[i].p1.x < box.p1.x)
|
||||
box.p1.x = b[i].p1.x;
|
||||
|
||||
if (b[i].p1.y < box.p1.y)
|
||||
box.p1.y = b[i].p1.y;
|
||||
|
||||
if (b[i].p2.x > box.p2.x)
|
||||
box.p2.x = b[i].p2.x;
|
||||
|
||||
if (b[i].p2.y > box.p2.y)
|
||||
box.p2.y = b[i].p2.y;
|
||||
}
|
||||
}
|
||||
|
||||
_cairo_box_round_to_rectangle (&box, extents);
|
||||
}
|
||||
|
||||
void
|
||||
_cairo_boxes_clear (cairo_boxes_t *boxes)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -66,9 +66,10 @@ _cairo_composite_rectangles_init (cairo_composite_rectangles_t *extents,
|
|||
extents->is_bounded = _cairo_operator_bounded_by_either (op);
|
||||
|
||||
_cairo_pattern_get_extents (source, &extents->source);
|
||||
if (extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_SOURCE) {
|
||||
if (! _cairo_rectangle_intersect (&extents->bounded, &extents->source))
|
||||
return FALSE;
|
||||
if (! _cairo_rectangle_intersect (&extents->bounded, &extents->source) &&
|
||||
extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_SOURCE)
|
||||
{
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
return TRUE;
|
||||
|
|
|
|||
66
src/cairo-drm-xr.h
Normal file
66
src/cairo-drm-xr.h
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
/* Cairo - a vector graphics library with display and print output
|
||||
*
|
||||
* Copyright © 2010 Intel Coropration
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it either under the terms of the GNU Lesser General Public
|
||||
* License version 2.1 as published by the Free Software Foundation
|
||||
* (the "LGPL") or, at your option, under the terms of the Mozilla
|
||||
* Public License Version 1.1 (the "MPL"). If you do not alter this
|
||||
* notice, a recipient may use your version of this file under either
|
||||
* the MPL or the LGPL.
|
||||
*
|
||||
* You should have received a copy of the LGPL along with this library
|
||||
* in the file COPYING-LGPL-2.1; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
* You should have received a copy of the MPL along with this library
|
||||
* in the file COPYING-MPL-1.1
|
||||
*
|
||||
* The contents of this file are subject to the Mozilla Public License
|
||||
* Version 1.1 (the "License"); you may not use this file except in
|
||||
* compliance with the License. You may obtain a copy of the License at
|
||||
* http://www.mozilla.org/MPL/
|
||||
*
|
||||
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
|
||||
* OF ANY KIND, either express or implied. See the LGPL or the MPL for
|
||||
* the specific language governing rights and limitations.
|
||||
*
|
||||
* The Original Code is the cairo graphics library.
|
||||
*
|
||||
* The Initial Developer of the Original Code is Chris Wilson.
|
||||
*/
|
||||
|
||||
#ifndef CAIRO_DRM_XR_H
|
||||
#define CAIRO_DRM_XR_H
|
||||
|
||||
#include "cairo.h"
|
||||
|
||||
#if CAIRO_HAS_DRM_XR_FUNCTIONS
|
||||
|
||||
CAIRO_BEGIN_DECLS
|
||||
|
||||
typedef struct _xr_screen xr_screen_t;
|
||||
|
||||
cairo_public xr_screen_t *
|
||||
cairo_drm_xr_enable (ScreenPtr screen, int fd);
|
||||
|
||||
cairo_public void
|
||||
cairo_drm_xr_pixmap_link_bo (xr_screen_t *xr,
|
||||
PixmapPtr pixmap,
|
||||
uint32_t name,
|
||||
cairo_format_t format,
|
||||
int width,
|
||||
int height,
|
||||
int stride);
|
||||
|
||||
cairo_public void
|
||||
cairo_drm_xr_pixmap_unlink_bo (xr_screen_t *xr,
|
||||
PixmapPtr pixmap);
|
||||
|
||||
CAIRO_END_DECLS
|
||||
|
||||
#else /* CAIRO_HAS_DRM_XR_FUNCTIOSN */
|
||||
# error Cairo was not compiled with support for the DRM Xr DDX functions
|
||||
#endif /* CAIRO_HAS_DRM_XR_FUNCTIOSN */
|
||||
|
||||
#endif /* CAIRO_DRM_XR_H */
|
||||
|
|
@ -58,7 +58,7 @@ cairo_drm_device_throttle (cairo_device_t *device);
|
|||
|
||||
cairo_public cairo_surface_t *
|
||||
cairo_drm_surface_create (cairo_device_t *device,
|
||||
cairo_content_t content,
|
||||
cairo_format_t format,
|
||||
int width, int height);
|
||||
|
||||
cairo_public cairo_surface_t *
|
||||
|
|
@ -105,7 +105,7 @@ cairo_drm_surface_get_stride (cairo_surface_t *surface);
|
|||
* will also disassociate the mapping.)
|
||||
*/
|
||||
cairo_public cairo_surface_t *
|
||||
cairo_drm_surface_map (cairo_surface_t *surface);
|
||||
cairo_drm_surface_map_to_image (cairo_surface_t *surface);
|
||||
|
||||
cairo_public void
|
||||
cairo_drm_surface_unmap (cairo_surface_t *drm_surface,
|
||||
|
|
|
|||
|
|
@ -135,6 +135,16 @@ _cairo_fixed_from_26_6 (uint32_t i)
|
|||
#endif
|
||||
}
|
||||
|
||||
static inline cairo_fixed_t
|
||||
_cairo_fixed_from_16_16 (uint32_t i)
|
||||
{
|
||||
#if CAIRO_FIXED_FRAC_BITS > 16
|
||||
return i << (CAIRO_FIXED_FRAC_BITS - 16);
|
||||
#else
|
||||
return i >> (16 - CAIRO_FIXED_FRAC_BITS);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline double
|
||||
_cairo_fixed_to_double (cairo_fixed_t f)
|
||||
{
|
||||
|
|
@ -242,7 +252,7 @@ _cairo_fixed_16_16_from_double (double d)
|
|||
}
|
||||
|
||||
static inline int
|
||||
_cairo_fixed_16_16_floor (cairo_fixed_t f)
|
||||
_cairo_fixed_16_16_floor (cairo_fixed_16_16_t f)
|
||||
{
|
||||
if (f >= 0)
|
||||
return f >> 16;
|
||||
|
|
@ -250,6 +260,12 @@ _cairo_fixed_16_16_floor (cairo_fixed_t f)
|
|||
return -((-f - 1) >> 16) - 1;
|
||||
}
|
||||
|
||||
static inline double
|
||||
_cairo_fixed_16_16_to_double (cairo_fixed_16_16_t f)
|
||||
{
|
||||
return ((double) f) / (double) (1 << 16);
|
||||
}
|
||||
|
||||
#if CAIRO_FIXED_BITS == 32
|
||||
|
||||
static inline cairo_fixed_t
|
||||
|
|
|
|||
|
|
@ -111,7 +111,6 @@ _cairo_freepool_reset (cairo_freepool_t *freepool)
|
|||
freepool->embedded_pool.data = freepool->embedded_data;
|
||||
}
|
||||
|
||||
|
||||
cairo_private void *
|
||||
_cairo_freepool_alloc_from_new_pool (cairo_freepool_t *freepool);
|
||||
|
||||
|
|
|
|||
|
|
@ -84,7 +84,6 @@ _cairo_freelist_free (cairo_freelist_t *freelist, void *voidnode)
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
_cairo_freepool_init (cairo_freepool_t *freepool, unsigned nodesize)
|
||||
{
|
||||
|
|
@ -98,8 +97,7 @@ _cairo_freepool_init (cairo_freepool_t *freepool, unsigned nodesize)
|
|||
freepool->embedded_pool.rem = sizeof (freepool->embedded_data);
|
||||
freepool->embedded_pool.data = freepool->embedded_data;
|
||||
|
||||
VG (VALGRIND_MAKE_MEM_NOACCESS (freepool->embedded_data,
|
||||
sizeof (freepool->embedded_data)));
|
||||
VG (VALGRIND_MAKE_MEM_NOACCESS (freepool->embedded_data, sizeof (freepool->embedded_data)));
|
||||
}
|
||||
|
||||
void
|
||||
|
|
@ -154,8 +152,7 @@ _cairo_freepool_alloc_from_new_pool (cairo_freepool_t *freepool)
|
|||
pool->rem = poolsize - freepool->nodesize;
|
||||
pool->data = (uint8_t *) (pool + 1) + freepool->nodesize;
|
||||
|
||||
VG (VALGRIND_MAKE_MEM_NOACCESS (pool->data, poolsize));
|
||||
VG (VALGRIND_MAKE_MEM_UNDEFINED (pool->data, freepool->nodesize));
|
||||
VG (VALGRIND_MAKE_MEM_NOACCESS (pool->data, pool->rem));
|
||||
|
||||
return pool + 1;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -264,7 +264,7 @@ _pixman_format_to_masks (pixman_format_code_t format,
|
|||
}
|
||||
}
|
||||
|
||||
static pixman_format_code_t
|
||||
pixman_format_code_t
|
||||
_cairo_format_to_pixman_format_code (cairo_format_t format)
|
||||
{
|
||||
pixman_format_code_t ret;
|
||||
|
|
|
|||
|
|
@ -73,19 +73,38 @@ typedef struct _cairo_list {
|
|||
&pos->member != (head); \
|
||||
pos = cairo_list_entry(pos->member.prev, type, member))
|
||||
|
||||
#define cairo_list_foreach_entry_reverse_safe(pos, n, type, head, member) \
|
||||
for (pos = cairo_list_entry((head)->prev, type, member),\
|
||||
n = cairo_list_entry (pos->member.prev, type, member);\
|
||||
&pos->member != (head); \
|
||||
pos = n, n = cairo_list_entry (n->member.prev, type, member))
|
||||
|
||||
#ifdef CAIRO_LIST_DEBUG
|
||||
static inline void
|
||||
_cairo_list_validate (const cairo_list_t *link)
|
||||
{
|
||||
assert (link->next->prev == link);
|
||||
assert (link->prev->next == link);
|
||||
}
|
||||
static inline void
|
||||
cairo_list_validate (const cairo_list_t *head)
|
||||
{
|
||||
cairo_list_t *link;
|
||||
|
||||
cairo_list_foreach (link, head) {
|
||||
assert (link->next->prev == link);
|
||||
assert (link->prev->next == link);
|
||||
}
|
||||
cairo_list_foreach (link, head)
|
||||
_cairo_list_validate (link);
|
||||
}
|
||||
static inline cairo_bool_t
|
||||
cairo_list_is_empty (const cairo_list_t *head);
|
||||
static inline void
|
||||
cairo_list_validate_is_empty (const cairo_list_t *head)
|
||||
{
|
||||
assert (head->next == NULL || (cairo_list_is_empty (head) && head->next == head->prev));
|
||||
}
|
||||
#else
|
||||
#define _cairo_list_validate(link)
|
||||
#define cairo_list_validate(head)
|
||||
#define cairo_list_validate_is_empty(head)
|
||||
#endif
|
||||
|
||||
static inline void
|
||||
|
|
@ -110,6 +129,7 @@ static inline void
|
|||
cairo_list_add (cairo_list_t *entry, cairo_list_t *head)
|
||||
{
|
||||
cairo_list_validate (head);
|
||||
cairo_list_validate_is_empty (entry);
|
||||
__cairo_list_add (entry, head, head->next);
|
||||
cairo_list_validate (head);
|
||||
}
|
||||
|
|
@ -118,6 +138,7 @@ static inline void
|
|||
cairo_list_add_tail (cairo_list_t *entry, cairo_list_t *head)
|
||||
{
|
||||
cairo_list_validate (head);
|
||||
cairo_list_validate_is_empty (entry);
|
||||
__cairo_list_add (entry, head->prev, head);
|
||||
cairo_list_validate (head);
|
||||
}
|
||||
|
|
@ -157,10 +178,8 @@ cairo_list_move_tail (cairo_list_t *entry, cairo_list_t *head)
|
|||
static inline void
|
||||
cairo_list_swap (cairo_list_t *entry, cairo_list_t *other)
|
||||
{
|
||||
cairo_list_validate (head);
|
||||
__cairo_list_add (entry, other->prev, other->next);
|
||||
cairo_list_init (other);
|
||||
cairo_list_validate (head);
|
||||
}
|
||||
|
||||
static inline cairo_bool_t
|
||||
|
|
|
|||
|
|
@ -2180,6 +2180,9 @@ cairo_private cairo_surface_t *
|
|||
_cairo_image_surface_create_for_pixman_image (pixman_image_t *pixman_image,
|
||||
pixman_format_code_t pixman_format);
|
||||
|
||||
pixman_format_code_t
|
||||
_cairo_format_to_pixman_format_code (cairo_format_t format);
|
||||
|
||||
cairo_private cairo_bool_t
|
||||
_pixman_format_from_masks (cairo_format_masks_t *masks,
|
||||
pixman_format_code_t *format_ret);
|
||||
|
|
@ -2519,6 +2522,10 @@ _cairo_pattern_is_opaque (const cairo_pattern_t *pattern,
|
|||
cairo_private cairo_bool_t
|
||||
_cairo_pattern_is_clear (const cairo_pattern_t *pattern);
|
||||
|
||||
cairo_private_no_warn cairo_filter_t
|
||||
_cairo_pattern_analyze_filter (const cairo_pattern_t *pattern,
|
||||
double *pad_out);
|
||||
|
||||
enum {
|
||||
CAIRO_PATTERN_ACQUIRE_NONE = 0x0,
|
||||
CAIRO_PATTERN_ACQUIRE_NO_REFLECT = 0x1
|
||||
|
|
|
|||
|
|
@ -40,6 +40,25 @@
|
|||
#include "cairo-error-private.h"
|
||||
#include "cairo-rtree-private.h"
|
||||
|
||||
static void
|
||||
i915_emit_glyph_rectangle_zero (i915_device_t *device,
|
||||
i915_shader_t *shader,
|
||||
int x1, int y1,
|
||||
int x2, int y2,
|
||||
intel_glyph_t *glyph)
|
||||
{
|
||||
float *v;
|
||||
|
||||
/* Each vertex is:
|
||||
* 2 vertex coordinates
|
||||
*/
|
||||
|
||||
v = i915_add_rectangle (device);
|
||||
*v++ = x2; *v++ = y2;
|
||||
*v++ = x1; *v++ = y2;
|
||||
*v++ = x1; *v++ = y1;
|
||||
}
|
||||
|
||||
static void
|
||||
i915_emit_glyph_rectangle_constant (i915_device_t *device,
|
||||
i915_shader_t *shader,
|
||||
|
|
@ -91,6 +110,7 @@ i915_emit_glyph_rectangle_general (i915_device_t *device,
|
|||
*v++ = x2; *v++ = y2;
|
||||
s = x2, t = y2;
|
||||
switch (shader->source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
case VS_CONSTANT:
|
||||
break;
|
||||
case VS_LINEAR:
|
||||
|
|
@ -111,6 +131,7 @@ i915_emit_glyph_rectangle_general (i915_device_t *device,
|
|||
*v++ = x1; *v++ = y2;
|
||||
s = x1, t = y2;
|
||||
switch (shader->source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
case VS_CONSTANT:
|
||||
break;
|
||||
case VS_LINEAR:
|
||||
|
|
@ -131,6 +152,7 @@ i915_emit_glyph_rectangle_general (i915_device_t *device,
|
|||
*v++ = x1; *v++ = y1;
|
||||
s = x1, t = y2;
|
||||
switch (shader->source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
case VS_CONSTANT:
|
||||
break;
|
||||
case VS_LINEAR:
|
||||
|
|
@ -168,7 +190,7 @@ i915_surface_mask_internal (i915_surface_t *dst,
|
|||
cairo_region_t *clip_region = NULL;
|
||||
cairo_status_t status;
|
||||
|
||||
i915_shader_init (&shader, dst, op);
|
||||
i915_shader_init (&shader, dst, op, 1.);
|
||||
|
||||
status = i915_shader_acquire_pattern (&shader, &shader.source,
|
||||
source, &extents->bounded);
|
||||
|
|
@ -176,6 +198,7 @@ i915_surface_mask_internal (i915_surface_t *dst,
|
|||
return status;
|
||||
|
||||
shader.mask.type.vertex = VS_TEXTURE_16;
|
||||
shader.mask.type.pattern = PATTERN_TEXTURE;
|
||||
shader.mask.type.fragment = FS_TEXTURE;
|
||||
shader.mask.base.content = mask->intel.drm.base.content;
|
||||
shader.mask.base.texfmt = TEXCOORDFMT_2D_16;
|
||||
|
|
@ -188,20 +211,19 @@ i915_surface_mask_internal (i915_surface_t *dst,
|
|||
i915_texture_extend (CAIRO_EXTEND_NONE);
|
||||
|
||||
cairo_matrix_init_translate (&shader.mask.base.matrix,
|
||||
-extents->bounded.x + NEAREST_BIAS,
|
||||
-extents->bounded.y + NEAREST_BIAS);
|
||||
-extents->bounded.x,
|
||||
-extents->bounded.y);
|
||||
cairo_matrix_scale (&shader.mask.base.matrix,
|
||||
1. / mask->intel.drm.width,
|
||||
1. / mask->intel.drm.height);
|
||||
|
||||
shader.mask.base.bo = to_intel_bo (mask->intel.drm.bo);
|
||||
shader.mask.base.bo = intel_bo_reference (to_intel_bo (mask->intel.drm.bo));
|
||||
shader.mask.base.offset[0] = 0;
|
||||
shader.mask.base.map[0] = mask->map0;
|
||||
shader.mask.base.map[1] = mask->map1;
|
||||
|
||||
if (clip != NULL) {
|
||||
status = _cairo_clip_get_region (clip, &clip_region);
|
||||
assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
|
||||
|
||||
if (clip_region != NULL && cairo_region_num_rectangles (clip_region) == 1)
|
||||
clip_region = NULL;
|
||||
|
|
@ -240,7 +262,7 @@ i915_surface_mask_internal (i915_surface_t *dst,
|
|||
extents->bounded.y + extents->bounded.height);
|
||||
}
|
||||
|
||||
if ((extents->is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK) == 0)
|
||||
if (! extents->is_bounded)
|
||||
status = i915_fixup_unbounded (dst, extents, clip);
|
||||
|
||||
CLEANUP_DEVICE:
|
||||
|
|
@ -300,16 +322,32 @@ i915_surface_glyphs (void *abstract_surface,
|
|||
have_clip = TRUE;
|
||||
}
|
||||
|
||||
if (overlap || (extents.is_bounded & CAIRO_OPERATOR_BOUND_BY_MASK) == 0) {
|
||||
cairo_content_t content;
|
||||
if (clip != NULL) {
|
||||
status = _cairo_clip_get_region (clip, &clip_region);
|
||||
if (unlikely (_cairo_status_is_error (status) ||
|
||||
status == CAIRO_INT_STATUS_NOTHING_TO_DO))
|
||||
{
|
||||
if (have_clip)
|
||||
_cairo_clip_fini (&local_clip);
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
content = CAIRO_CONTENT_ALPHA;
|
||||
if (i915_surface_needs_tiling (surface)) {
|
||||
ASSERT_NOT_REACHED;
|
||||
return CAIRO_INT_STATUS_UNSUPPORTED;
|
||||
}
|
||||
|
||||
if (overlap || ! extents.is_bounded) {
|
||||
cairo_format_t format;
|
||||
|
||||
format = CAIRO_FORMAT_A8;
|
||||
if (scaled_font->options.antialias == CAIRO_ANTIALIAS_SUBPIXEL)
|
||||
content |= CAIRO_CONTENT_COLOR;
|
||||
format = CAIRO_FORMAT_ARGB32;
|
||||
|
||||
mask = (i915_surface_t *)
|
||||
i915_surface_create_internal (&i915_device (surface)->intel.base,
|
||||
CAIRO_CONTENT_ALPHA,
|
||||
format,
|
||||
extents.bounded.width,
|
||||
extents.bounded.height,
|
||||
I915_TILING_DEFAULT,
|
||||
|
|
@ -317,16 +355,13 @@ i915_surface_glyphs (void *abstract_surface,
|
|||
if (unlikely (mask->intel.drm.base.status))
|
||||
return mask->intel.drm.base.status;
|
||||
|
||||
status = _cairo_surface_paint (&mask->intel.drm.base,
|
||||
CAIRO_OPERATOR_CLEAR,
|
||||
&_cairo_pattern_clear.base,
|
||||
NULL);
|
||||
status = i915_surface_clear (mask);
|
||||
if (unlikely (status)) {
|
||||
cairo_surface_destroy (&mask->intel.drm.base);
|
||||
return status;
|
||||
}
|
||||
|
||||
i915_shader_init (&shader, mask, CAIRO_OPERATOR_ADD);
|
||||
i915_shader_init (&shader, mask, CAIRO_OPERATOR_ADD, 1.);
|
||||
|
||||
status = i915_shader_acquire_pattern (&shader, &shader.source,
|
||||
&_cairo_pattern_white.base,
|
||||
|
|
@ -339,7 +374,7 @@ i915_surface_glyphs (void *abstract_surface,
|
|||
mask_x = -extents.bounded.x;
|
||||
mask_y = -extents.bounded.y;
|
||||
} else {
|
||||
i915_shader_init (&shader, surface, op);
|
||||
i915_shader_init (&shader, surface, op, 1.);
|
||||
|
||||
status = i915_shader_acquire_pattern (&shader, &shader.source,
|
||||
source, &extents.bounded);
|
||||
|
|
@ -348,7 +383,6 @@ i915_surface_glyphs (void *abstract_surface,
|
|||
|
||||
if (clip != NULL) {
|
||||
status = _cairo_clip_get_region (clip, &clip_region);
|
||||
assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
|
||||
|
||||
if (clip_region != NULL && cairo_region_num_rectangles (clip_region) == 1)
|
||||
clip_region = NULL;
|
||||
|
|
@ -370,6 +404,9 @@ i915_surface_glyphs (void *abstract_surface,
|
|||
i915_texture_extend (CAIRO_EXTEND_NONE);
|
||||
|
||||
switch (shader.source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
emit_func = i915_emit_glyph_rectangle_zero;
|
||||
break;
|
||||
case VS_CONSTANT:
|
||||
emit_func = i915_emit_glyph_rectangle_constant;
|
||||
break;
|
||||
|
|
@ -466,8 +503,13 @@ i915_surface_glyphs (void *abstract_surface,
|
|||
last_bo = cache->buffer.bo;
|
||||
}
|
||||
|
||||
x1 += mask_x; x2 += mask_x;
|
||||
y1 += mask_y; y2 += mask_y;
|
||||
x2 = x1 + glyph->width;
|
||||
y2 = y1 + glyph->height;
|
||||
|
||||
if (mask_x)
|
||||
x1 += mask_x, x2 += mask_x;
|
||||
if (mask_y)
|
||||
y1 += mask_y, y2 += mask_y;
|
||||
|
||||
/* XXX clip glyph */
|
||||
emit_func (device, &shader, x1, y1, x2, y2, glyph);
|
||||
|
|
|
|||
|
|
@ -36,6 +36,8 @@
|
|||
#include "cairo-drm-intel-ioctl-private.h"
|
||||
#include "cairo-freelist-private.h"
|
||||
|
||||
#include <setjmp.h>
|
||||
|
||||
#define I915_VERBOSE 1
|
||||
|
||||
#define I915_MAX_TEX_INDIRECT 4
|
||||
|
|
@ -652,17 +654,22 @@ struct i915_device {
|
|||
|
||||
cairo_bool_t debug;
|
||||
|
||||
i915_shader_t *shader; /* note: only valid during geometry emission */
|
||||
|
||||
struct i915_batch {
|
||||
intel_bo_t *target_bo[I915_MAX_RELOCS];
|
||||
size_t gtt_size;
|
||||
size_t gtt_avail_size;
|
||||
size_t est_gtt_size;
|
||||
size_t total_gtt_size;
|
||||
|
||||
uint16_t fences;
|
||||
uint16_t fences_avail;
|
||||
uint16_t reloc_count;
|
||||
uint16_t exec_count;
|
||||
uint16_t used;
|
||||
|
||||
struct drm_i915_gem_exec_object2 exec[I915_MAX_RELOCS];
|
||||
int exec_count;
|
||||
|
||||
struct drm_i915_gem_relocation_entry reloc[I915_MAX_RELOCS];
|
||||
uint16_t reloc_count;
|
||||
|
||||
uint16_t used;
|
||||
} batch;
|
||||
|
||||
uint32_t vbo;
|
||||
|
|
@ -677,8 +684,6 @@ struct i915_device {
|
|||
uint32_t last_vbo_offset;
|
||||
uint32_t last_vbo_space;
|
||||
|
||||
i915_shader_t *current_shader;
|
||||
|
||||
i915_surface_t *current_target;
|
||||
uint32_t current_size;
|
||||
uint32_t current_diffuse;
|
||||
|
|
@ -691,9 +696,12 @@ struct i915_device {
|
|||
uint32_t current_blend;
|
||||
uint32_t current_constants[8*4];
|
||||
uint32_t current_n_constants;
|
||||
uint32_t current_samplers[2*(3+3*4)];
|
||||
uint32_t current_samplers[2*4];
|
||||
uint32_t current_maps[4*4];
|
||||
uint32_t current_n_samplers;
|
||||
uint32_t current_n_maps;
|
||||
uint32_t last_source_fragment;
|
||||
uint32_t clear_alpha;
|
||||
|
||||
cairo_list_t image_caches[2];
|
||||
|
||||
|
|
@ -709,6 +717,7 @@ enum {
|
|||
};
|
||||
|
||||
typedef enum {
|
||||
VS_ZERO,
|
||||
VS_CONSTANT,
|
||||
VS_LINEAR,
|
||||
VS_TEXTURE,
|
||||
|
|
@ -744,6 +753,7 @@ struct i915_surface {
|
|||
uint32_t map0, map1;
|
||||
uint32_t colorbuf;
|
||||
|
||||
cairo_bool_t deferred_clear;
|
||||
uint32_t offset;
|
||||
uint32_t is_current_texture;
|
||||
|
||||
|
|
@ -787,8 +797,10 @@ struct i915_shader {
|
|||
|
||||
cairo_operator_t op;
|
||||
uint32_t blend;
|
||||
float opacity;
|
||||
cairo_content_t content;
|
||||
|
||||
cairo_bool_t committed;
|
||||
cairo_bool_t need_combine;
|
||||
|
||||
i915_add_rectangle_func_t add_rectangle;
|
||||
|
|
@ -834,6 +846,8 @@ struct i915_shader {
|
|||
i915_packed_pixel_t pixel;
|
||||
} surface;
|
||||
} source, mask, clip, dst;
|
||||
|
||||
jmp_buf unwind;
|
||||
};
|
||||
|
||||
enum i915_shader_linear_mode {
|
||||
|
|
@ -862,11 +876,12 @@ i915_clip_and_composite_spans (i915_surface_t *dst,
|
|||
i915_spans_func_t draw_func,
|
||||
void *draw_closure,
|
||||
const cairo_composite_rectangles_t*extents,
|
||||
cairo_clip_t *clip);
|
||||
cairo_clip_t *clip,
|
||||
double opacity);
|
||||
|
||||
cairo_private cairo_surface_t *
|
||||
i915_surface_create_internal (cairo_drm_device_t *base_dev,
|
||||
cairo_content_t content,
|
||||
cairo_format_t format,
|
||||
int width, int height,
|
||||
uint32_t tiling,
|
||||
cairo_bool_t gpu_target);
|
||||
|
|
@ -904,8 +919,9 @@ i915_tiling_stride (int format, uint32_t stride)
|
|||
{
|
||||
uint32_t tile_width;
|
||||
|
||||
/* use 64B alignment so that the buffer may be used as a scanout */
|
||||
if (format == I915_TILING_NONE)
|
||||
return (stride + 31) & -32;
|
||||
return (stride + 63) & -64;
|
||||
|
||||
tile_width = 512;
|
||||
/* XXX Currently the kernel enforces a tile_width of 512 for TILING_Y.
|
||||
|
|
@ -943,7 +959,7 @@ i915_tiling_size (uint32_t tiling, uint32_t size)
|
|||
return fence;
|
||||
}
|
||||
|
||||
static inline cairo_bool_t cairo_pure
|
||||
static inline cairo_bool_t cairo_const
|
||||
i915_texture_filter_is_nearest (cairo_filter_t filter)
|
||||
{
|
||||
switch (filter) {
|
||||
|
|
@ -959,7 +975,7 @@ i915_texture_filter_is_nearest (cairo_filter_t filter)
|
|||
}
|
||||
}
|
||||
|
||||
static inline uint32_t cairo_pure
|
||||
static inline uint32_t cairo_const
|
||||
i915_texture_filter (cairo_filter_t filter)
|
||||
{
|
||||
switch (filter) {
|
||||
|
|
@ -979,7 +995,7 @@ i915_texture_filter (cairo_filter_t filter)
|
|||
}
|
||||
}
|
||||
|
||||
static inline uint32_t cairo_pure
|
||||
static inline uint32_t cairo_const
|
||||
i915_texture_extend (cairo_extend_t extend)
|
||||
{
|
||||
switch (extend) {
|
||||
|
|
@ -1003,7 +1019,7 @@ i915_texture_extend (cairo_extend_t extend)
|
|||
}
|
||||
}
|
||||
|
||||
static inline uint32_t cairo_pure
|
||||
static inline uint32_t cairo_const
|
||||
BUF_tiling (uint32_t tiling)
|
||||
{
|
||||
switch (tiling) {
|
||||
|
|
@ -1015,7 +1031,8 @@ BUF_tiling (uint32_t tiling)
|
|||
}
|
||||
|
||||
#define OUT_DWORD(dword) i915_batch_emit_dword (device, dword)
|
||||
#define OUT_RELOC(surface, read, write) i915_batch_emit_reloc (device, to_intel_bo (surface->intel.drm.bo), surface->offset, read, write)
|
||||
#define OUT_RELOC(surface, read, write) i915_batch_emit_reloc (device, to_intel_bo (surface->intel.drm.bo), surface->offset, read, write, FALSE)
|
||||
#define OUT_RELOC_FENCED(surface, read, write) i915_batch_emit_reloc (device, to_intel_bo (surface->intel.drm.bo), surface->offset, read, write, TRUE)
|
||||
|
||||
#define FS_LOCALS \
|
||||
uint32_t *_shader_start
|
||||
|
|
@ -1039,26 +1056,54 @@ i915_batch_space (i915_device_t *device)
|
|||
}
|
||||
|
||||
static inline cairo_bool_t
|
||||
i915_check_aperture_size (const i915_device_t *device, int relocs, size_t size)
|
||||
i915_check_aperture_size (const i915_device_t *device, int relocs, size_t est_size, size_t size)
|
||||
{
|
||||
return device->batch.reloc_count + relocs < I915_MAX_RELOCS &&
|
||||
device->batch.gtt_size + size <= device->intel.gtt_avail_size;
|
||||
return device->batch.reloc_count + relocs < I915_MAX_RELOCS - 2 &&
|
||||
device->batch.est_gtt_size + est_size <= device->batch.gtt_avail_size &&
|
||||
device->batch.total_gtt_size + size <= device->intel.gtt_avail_size;
|
||||
}
|
||||
|
||||
static inline cairo_bool_t
|
||||
i915_check_aperture (const i915_device_t *device, intel_bo_t **bo_array, int count)
|
||||
{
|
||||
uint32_t relocs = 0, size = 0;
|
||||
uint32_t relocs = 0, est_size = 0, size = 0;
|
||||
|
||||
while (count--) {
|
||||
const intel_bo_t *bo = *bo_array++;
|
||||
if (bo->exec == NULL) {
|
||||
relocs++;
|
||||
size += bo->base.size;
|
||||
if (!bo->busy)
|
||||
est_size += bo->base.size;
|
||||
}
|
||||
}
|
||||
|
||||
return i915_check_aperture_size (device, relocs, size);
|
||||
return i915_check_aperture_size (device, relocs, est_size, size);
|
||||
}
|
||||
|
||||
static inline cairo_bool_t
|
||||
i915_check_aperture_and_fences (const i915_device_t *device, intel_bo_t **bo_array, int count)
|
||||
{
|
||||
uint32_t relocs = 0, est_size = 0, size = 0;
|
||||
uint32_t fences = 0;
|
||||
|
||||
while (count--) {
|
||||
const intel_bo_t *bo = *bo_array++;
|
||||
if (bo->exec == NULL) {
|
||||
relocs++;
|
||||
size += bo->base.size;
|
||||
if (!bo->busy)
|
||||
est_size += bo->base.size;
|
||||
if (bo->tiling != I915_TILING_NONE)
|
||||
fences++;
|
||||
} else if (bo->tiling != I915_TILING_NONE) {
|
||||
if ((bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0)
|
||||
fences++;
|
||||
}
|
||||
}
|
||||
|
||||
return i915_check_aperture_size (device, relocs, est_size, size) &&
|
||||
device->batch.fences + fences <= device->batch.fences_avail;
|
||||
}
|
||||
|
||||
#define BATCH_PTR(device) &(device)->batch_base[(device)->batch.used]
|
||||
|
|
@ -1073,7 +1118,8 @@ i915_batch_add_reloc (i915_device_t *device, uint32_t pos,
|
|||
intel_bo_t *bo,
|
||||
uint32_t offset,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domain);
|
||||
uint32_t write_domain,
|
||||
cairo_bool_t needs_fence);
|
||||
|
||||
static inline void
|
||||
i915_batch_fill_reloc (i915_device_t *device, uint32_t pos,
|
||||
|
|
@ -1084,7 +1130,8 @@ i915_batch_fill_reloc (i915_device_t *device, uint32_t pos,
|
|||
{
|
||||
i915_batch_add_reloc (device, pos,
|
||||
bo, offset,
|
||||
read_domains, write_domain);
|
||||
read_domains, write_domain,
|
||||
FALSE);
|
||||
device->batch_base[pos] = bo->offset + offset;
|
||||
}
|
||||
|
||||
|
|
@ -1093,14 +1140,19 @@ i915_batch_emit_reloc (i915_device_t *device,
|
|||
intel_bo_t *bo,
|
||||
uint32_t offset,
|
||||
uint32_t read_domains,
|
||||
uint32_t write_domain)
|
||||
uint32_t write_domain,
|
||||
cairo_bool_t needs_fence)
|
||||
{
|
||||
i915_batch_add_reloc (device, device->batch.used,
|
||||
bo, offset,
|
||||
read_domains, write_domain);
|
||||
read_domains, write_domain,
|
||||
needs_fence);
|
||||
i915_batch_emit_dword (device, bo->offset + offset);
|
||||
}
|
||||
|
||||
cairo_private void
|
||||
i915_vbo_flush (i915_device_t *device);
|
||||
|
||||
cairo_private void
|
||||
i915_vbo_finish (i915_device_t *device);
|
||||
|
||||
|
|
@ -1114,6 +1166,7 @@ i915_add_rectangle (i915_device_t *device)
|
|||
uint32_t size;
|
||||
|
||||
assert (device->floats_per_vertex);
|
||||
assert (device->rectangle_size == 3*device->floats_per_vertex*sizeof(float));
|
||||
|
||||
size = device->rectangle_size;
|
||||
if (unlikely (device->vbo_offset + size > I915_VBO_SIZE))
|
||||
|
|
@ -1131,10 +1184,17 @@ i915_device (i915_surface_t *surface)
|
|||
return (i915_device_t *) surface->intel.drm.base.device;
|
||||
}
|
||||
|
||||
cairo_private cairo_status_t
|
||||
i915_surface_clear (i915_surface_t *dst);
|
||||
|
||||
cairo_private void
|
||||
i915_set_dst (i915_device_t *device, i915_surface_t *dst);
|
||||
|
||||
cairo_private void
|
||||
i915_shader_init (i915_shader_t *shader,
|
||||
i915_surface_t *dst,
|
||||
cairo_operator_t op);
|
||||
cairo_operator_t op,
|
||||
double opacity);
|
||||
|
||||
cairo_private cairo_status_t
|
||||
i915_shader_acquire_pattern (i915_shader_t *shader,
|
||||
|
|
@ -1168,4 +1228,43 @@ i915_fixup_unbounded (i915_surface_t *dst,
|
|||
const cairo_composite_rectangles_t *extents,
|
||||
cairo_clip_t *clip);
|
||||
|
||||
static inline cairo_bool_t
|
||||
i915_surface_needs_tiling (i915_surface_t *dst)
|
||||
{
|
||||
return dst->intel.drm.width > 2048 || dst->intel.drm.height > 2048;
|
||||
}
|
||||
|
||||
cairo_private cairo_status_t
|
||||
i915_surface_copy_subimage (i915_device_t *device,
|
||||
i915_surface_t *src,
|
||||
const cairo_rectangle_int_t *extents,
|
||||
cairo_bool_t flush,
|
||||
i915_surface_t **clone_out);
|
||||
|
||||
static inline uint32_t
|
||||
pack_float (float f)
|
||||
{
|
||||
union {
|
||||
float f;
|
||||
uint32_t ui;
|
||||
} t;
|
||||
t.f = f;
|
||||
return t.ui;
|
||||
}
|
||||
|
||||
static inline cairo_status_t
|
||||
i915_surface_fallback_flush (i915_surface_t *surface)
|
||||
{
|
||||
cairo_status_t status;
|
||||
|
||||
if (unlikely (surface->intel.drm.fallback != NULL))
|
||||
return intel_surface_flush (&surface->intel);
|
||||
|
||||
status = CAIRO_STATUS_SUCCESS;
|
||||
if (unlikely (surface->deferred_clear))
|
||||
status = i915_surface_clear (surface);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
#endif /* CAIRO_DRM_I915_PRIVATE_H */
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -78,8 +78,6 @@ struct _i915_spans {
|
|||
unsigned int count;
|
||||
} head, *tail;
|
||||
|
||||
int rectangle_size;
|
||||
|
||||
unsigned int vbo_offset;
|
||||
float *vbo_base;
|
||||
};
|
||||
|
|
@ -96,12 +94,10 @@ i915_accumulate_rectangle (i915_spans_t *spans)
|
|||
float *vertices;
|
||||
uint32_t size;
|
||||
|
||||
size = spans->rectangle_size;
|
||||
size = spans->device->rectangle_size;
|
||||
if (unlikely (spans->vbo_offset + size > I915_VBO_SIZE)) {
|
||||
struct vbo *vbo;
|
||||
|
||||
intel_bo_unmap (spans->tail->bo);
|
||||
|
||||
vbo = malloc (sizeof (struct vbo));
|
||||
if (unlikely (vbo == NULL)) {
|
||||
/* throw error! */
|
||||
|
|
@ -111,7 +107,9 @@ i915_accumulate_rectangle (i915_spans_t *spans)
|
|||
spans->tail = vbo;
|
||||
|
||||
vbo->next = NULL;
|
||||
vbo->bo = intel_bo_create (&spans->device->intel, I915_VBO_SIZE, FALSE);
|
||||
vbo->bo = intel_bo_create (&spans->device->intel,
|
||||
I915_VBO_SIZE, I915_VBO_SIZE,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
vbo->count = 0;
|
||||
|
||||
spans->vbo_offset = 0;
|
||||
|
|
@ -125,6 +123,25 @@ i915_accumulate_rectangle (i915_spans_t *spans)
|
|||
return vertices;
|
||||
}
|
||||
|
||||
static void
|
||||
i915_span_zero (i915_spans_t *spans,
|
||||
int x0, int x1, int y0, int y1,
|
||||
int alpha)
|
||||
{
|
||||
float *vertices;
|
||||
|
||||
vertices = spans->get_rectangle (spans);
|
||||
|
||||
*vertices++ = x1;
|
||||
*vertices++ = y1;
|
||||
|
||||
*vertices++ = x0;
|
||||
*vertices++ = y1;
|
||||
|
||||
*vertices++ = x0;
|
||||
*vertices++ = y0;
|
||||
}
|
||||
|
||||
static void
|
||||
i915_span_constant (i915_spans_t *spans,
|
||||
int x0, int x1, int y0, int y1,
|
||||
|
|
@ -264,6 +281,7 @@ i915_span_generic (i915_spans_t *spans,
|
|||
*vertices++ = x1; *vertices++ = y1;
|
||||
s = x1, t = y1;
|
||||
switch (spans->shader.source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
case VS_CONSTANT:
|
||||
break;
|
||||
case VS_LINEAR:
|
||||
|
|
@ -294,6 +312,7 @@ i915_span_generic (i915_spans_t *spans,
|
|||
*vertices++ = x0; *vertices++ = y1;
|
||||
s = x0, t = y1;
|
||||
switch (spans->shader.source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
case VS_CONSTANT:
|
||||
break;
|
||||
case VS_LINEAR:
|
||||
|
|
@ -324,6 +343,7 @@ i915_span_generic (i915_spans_t *spans,
|
|||
*vertices++ = x0; *vertices++ = y0;
|
||||
s = x0, t = y0;
|
||||
switch (spans->shader.source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
case VS_CONSTANT:
|
||||
break;
|
||||
case VS_LINEAR:
|
||||
|
|
@ -351,6 +371,78 @@ i915_span_generic (i915_spans_t *spans,
|
|||
}
|
||||
}
|
||||
|
||||
static cairo_status_t
|
||||
i915_zero_spans_mono (void *abstract_renderer,
|
||||
int y, int height,
|
||||
const cairo_half_open_span_t *half,
|
||||
unsigned num_spans)
|
||||
{
|
||||
i915_spans_t *spans = abstract_renderer;
|
||||
int x0, x1;
|
||||
|
||||
if (num_spans == 0)
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
|
||||
do {
|
||||
while (num_spans && half[0].coverage < 128)
|
||||
half++, num_spans--;
|
||||
if (num_spans == 0)
|
||||
break;
|
||||
|
||||
x0 = x1 = half[0].x;
|
||||
while (num_spans--) {
|
||||
half++;
|
||||
|
||||
x1 = half[0].x;
|
||||
if (half[0].coverage < 128)
|
||||
break;
|
||||
}
|
||||
|
||||
i915_span_zero (spans,
|
||||
x0, x1,
|
||||
y, y + height,
|
||||
0);
|
||||
} while (num_spans);
|
||||
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static cairo_status_t
|
||||
i915_zero_spans (void *abstract_renderer,
|
||||
int y, int height,
|
||||
const cairo_half_open_span_t *half,
|
||||
unsigned num_spans)
|
||||
{
|
||||
i915_spans_t *spans = abstract_renderer;
|
||||
int x0, x1;
|
||||
|
||||
if (num_spans == 0)
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
|
||||
do {
|
||||
while (num_spans && half[0].coverage == 0)
|
||||
half++, num_spans--;
|
||||
if (num_spans == 0)
|
||||
break;
|
||||
|
||||
x0 = x1 = half[0].x;
|
||||
while (num_spans--) {
|
||||
half++;
|
||||
|
||||
x1 = half[0].x;
|
||||
if (half[0].coverage == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
i915_span_zero (spans,
|
||||
x0, x1,
|
||||
y, y + height,
|
||||
0);
|
||||
} while (num_spans);
|
||||
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static cairo_status_t
|
||||
i915_bounded_spans_mono (void *abstract_renderer,
|
||||
int y, int height,
|
||||
|
|
@ -491,6 +583,7 @@ i915_spans_init (i915_spans_t *spans,
|
|||
const cairo_pattern_t *pattern,
|
||||
cairo_antialias_t antialias,
|
||||
cairo_clip_t *clip,
|
||||
double opacity,
|
||||
const cairo_composite_rectangles_t *extents)
|
||||
{
|
||||
cairo_status_t status;
|
||||
|
|
@ -542,7 +635,8 @@ i915_spans_init (i915_spans_t *spans,
|
|||
assert (! extents->is_bounded);
|
||||
spans->get_rectangle = i915_accumulate_rectangle;
|
||||
spans->head.bo = intel_bo_create (&spans->device->intel,
|
||||
I915_VBO_SIZE, FALSE);
|
||||
I915_VBO_SIZE, I915_VBO_SIZE,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
if (unlikely (spans->head.bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
|
|
@ -550,7 +644,7 @@ i915_spans_init (i915_spans_t *spans,
|
|||
}
|
||||
spans->vbo_offset = 0;
|
||||
|
||||
i915_shader_init (&spans->shader, dst, op);
|
||||
i915_shader_init (&spans->shader, dst, op, opacity);
|
||||
if (spans->need_clip_surface)
|
||||
i915_shader_set_clip (&spans->shader, clip);
|
||||
|
||||
|
|
@ -559,7 +653,6 @@ i915_spans_init (i915_spans_t *spans,
|
|||
if (unlikely (status))
|
||||
return status;
|
||||
|
||||
spans->rectangle_size = 3 * (2 + i915_shader_num_texcoords (&spans->shader));
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
@ -568,9 +661,6 @@ i915_spans_fini (i915_spans_t *spans)
|
|||
{
|
||||
i915_shader_fini (&spans->shader);
|
||||
|
||||
if (spans->tail->bo && spans->tail->bo->virtual)
|
||||
intel_bo_unmap (spans->tail->bo);
|
||||
|
||||
if (spans->head.bo != NULL) {
|
||||
struct vbo *vbo, *next;
|
||||
|
||||
|
|
@ -591,19 +681,25 @@ i915_clip_and_composite_spans (i915_surface_t *dst,
|
|||
i915_spans_func_t draw_func,
|
||||
void *draw_closure,
|
||||
const cairo_composite_rectangles_t*extents,
|
||||
cairo_clip_t *clip)
|
||||
cairo_clip_t *clip,
|
||||
double opacity)
|
||||
{
|
||||
i915_spans_t spans;
|
||||
i915_device_t *device;
|
||||
cairo_status_t status;
|
||||
struct vbo *vbo;
|
||||
|
||||
if (i915_surface_needs_tiling (dst)) {
|
||||
ASSERT_NOT_REACHED;
|
||||
return CAIRO_INT_STATUS_UNSUPPORTED;
|
||||
}
|
||||
|
||||
if (op == CAIRO_OPERATOR_CLEAR) {
|
||||
pattern = &_cairo_pattern_white.base;
|
||||
op = CAIRO_OPERATOR_DEST_OUT;
|
||||
}
|
||||
|
||||
status = i915_spans_init (&spans, dst, op, pattern, antialias, clip, extents);
|
||||
status = i915_spans_init (&spans, dst, op, pattern, antialias, clip, opacity, extents);
|
||||
if (unlikely (status))
|
||||
return status;
|
||||
|
||||
|
|
@ -615,13 +711,28 @@ i915_clip_and_composite_spans (i915_surface_t *dst,
|
|||
if (unlikely (status))
|
||||
goto CLEANUP_SPANS;
|
||||
|
||||
if (dst->deferred_clear) {
|
||||
status = i915_surface_clear (dst);
|
||||
if (unlikely (status))
|
||||
goto CLEANUP_SPANS;
|
||||
}
|
||||
|
||||
device = i915_device (dst);
|
||||
status = i915_shader_commit (&spans.shader, device);
|
||||
if (unlikely (status))
|
||||
goto CLEANUP_DEVICE;
|
||||
|
||||
if (!spans.shader.need_combine && ! spans.need_clip_surface) {
|
||||
if (! spans.shader.need_combine && ! spans.need_clip_surface) {
|
||||
switch (spans.shader.source.type.vertex) {
|
||||
case VS_ZERO:
|
||||
spans.span = i915_span_zero;
|
||||
if (extents->is_bounded) {
|
||||
if (antialias == CAIRO_ANTIALIAS_NONE)
|
||||
spans.renderer.render_rows = i915_zero_spans_mono;
|
||||
else
|
||||
spans.renderer.render_rows = i915_zero_spans;
|
||||
}
|
||||
break;
|
||||
case VS_CONSTANT:
|
||||
spans.span = i915_span_constant;
|
||||
break;
|
||||
|
|
@ -644,8 +755,6 @@ i915_clip_and_composite_spans (i915_surface_t *dst,
|
|||
|
||||
status = draw_func (draw_closure, &spans.renderer, spans.extents);
|
||||
if (spans.clip_region != NULL && status == CAIRO_STATUS_SUCCESS) {
|
||||
intel_bo_unmap (spans.tail->bo);
|
||||
|
||||
i915_vbo_finish (device);
|
||||
|
||||
OUT_DWORD (_3DSTATE_SCISSOR_ENABLE_CMD | ENABLE_SCISSOR_RECT);
|
||||
|
|
@ -656,7 +765,8 @@ i915_clip_and_composite_spans (i915_surface_t *dst,
|
|||
|
||||
OUT_DWORD (_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S (0) | I1_LOAD_S (1) | 1);
|
||||
i915_batch_emit_reloc (device, vbo->bo, 0,
|
||||
I915_GEM_DOMAIN_VERTEX, 0);
|
||||
I915_GEM_DOMAIN_VERTEX, 0,
|
||||
FALSE);
|
||||
OUT_DWORD ((device->floats_per_vertex << S1_VERTEX_WIDTH_SHIFT) |
|
||||
(device->floats_per_vertex << S1_VERTEX_PITCH_SHIFT) |
|
||||
vbo->count);
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -71,8 +71,6 @@ i965_glyphs_accumulate_rectangle (i965_glyphs_t *glyphs)
|
|||
if (unlikely (glyphs->vbo_offset + size > I965_VERTEX_SIZE)) {
|
||||
struct i965_vbo *vbo;
|
||||
|
||||
intel_bo_unmap (glyphs->tail->bo);
|
||||
|
||||
vbo = malloc (sizeof (struct i965_vbo));
|
||||
if (unlikely (vbo == NULL)) {
|
||||
/* throw error! */
|
||||
|
|
@ -83,7 +81,8 @@ i965_glyphs_accumulate_rectangle (i965_glyphs_t *glyphs)
|
|||
|
||||
vbo->next = NULL;
|
||||
vbo->bo = intel_bo_create (&glyphs->shader.device->intel,
|
||||
I965_VERTEX_SIZE, FALSE);
|
||||
I965_VERTEX_SIZE, I965_VERTEX_SIZE,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
vbo->count = 0;
|
||||
|
||||
glyphs->vbo_offset = 0;
|
||||
|
|
@ -152,8 +151,8 @@ i965_surface_mask_internal (i965_surface_t *dst,
|
|||
shader.mask.base.extend = i965_extend (CAIRO_EXTEND_NONE);
|
||||
|
||||
cairo_matrix_init_translate (&shader.mask.base.matrix,
|
||||
-extents->bounded.x + NEAREST_BIAS,
|
||||
-extents->bounded.y + NEAREST_BIAS);
|
||||
-extents->bounded.x,
|
||||
-extents->bounded.y);
|
||||
cairo_matrix_scale (&shader.mask.base.matrix,
|
||||
1. / mask->intel.drm.width,
|
||||
1. / mask->intel.drm.height);
|
||||
|
|
@ -266,15 +265,15 @@ i965_surface_glyphs (void *abstract_surface,
|
|||
}
|
||||
|
||||
if (overlap || ! extents.is_bounded) {
|
||||
cairo_content_t content;
|
||||
cairo_format_t format;
|
||||
|
||||
content = CAIRO_CONTENT_ALPHA;
|
||||
format = CAIRO_FORMAT_A8;
|
||||
if (scaled_font->options.antialias == CAIRO_ANTIALIAS_SUBPIXEL)
|
||||
content |= CAIRO_CONTENT_COLOR;
|
||||
format = CAIRO_FORMAT_ARGB32;
|
||||
|
||||
mask = (i965_surface_t *)
|
||||
i965_surface_create_internal (&i965_device (surface)->intel.base,
|
||||
content,
|
||||
format,
|
||||
extents.bounded.width,
|
||||
extents.bounded.height,
|
||||
I965_TILING_DEFAULT,
|
||||
|
|
@ -331,7 +330,8 @@ i965_surface_glyphs (void *abstract_surface,
|
|||
} else {
|
||||
glyphs.get_rectangle = i965_glyphs_accumulate_rectangle;
|
||||
glyphs.head.bo = intel_bo_create (&device->intel,
|
||||
I965_VERTEX_SIZE, FALSE);
|
||||
I965_VERTEX_SIZE, I965_VERTEX_SIZE,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
if (unlikely (glyphs.head.bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
|
|
@ -431,16 +431,19 @@ i965_surface_glyphs (void *abstract_surface,
|
|||
last_bo = cache->buffer.bo;
|
||||
}
|
||||
|
||||
x1 += mask_x; x2 += mask_x;
|
||||
y1 += mask_y; y2 += mask_y;
|
||||
x2 = x1 + glyph->width;
|
||||
y2 = y1 + glyph->height;
|
||||
|
||||
if (mask_x)
|
||||
x1 += mask_x, x2 += mask_x;
|
||||
if (mask_y)
|
||||
y1 += mask_y, y2 += mask_y;
|
||||
|
||||
i965_add_glyph_rectangle (&glyphs, x1, y1, x2, y2, glyph);
|
||||
}
|
||||
|
||||
if (mask != NULL && clip_region != NULL) {
|
||||
intel_bo_unmap (glyphs.tail->bo);
|
||||
if (mask != NULL && clip_region != NULL)
|
||||
i965_clipped_vertices (device, &glyphs.head, clip_region);
|
||||
}
|
||||
|
||||
status = CAIRO_STATUS_SUCCESS;
|
||||
FINISH:
|
||||
|
|
@ -449,9 +452,6 @@ i965_surface_glyphs (void *abstract_surface,
|
|||
CLEANUP_GLYPHS:
|
||||
i965_shader_fini (&glyphs.shader);
|
||||
|
||||
if (glyphs.tail->bo && glyphs.tail->bo->virtual)
|
||||
intel_bo_unmap (glyphs.tail->bo);
|
||||
|
||||
if (glyphs.head.bo != NULL) {
|
||||
struct i965_vbo *vbo, *next;
|
||||
|
||||
|
|
|
|||
|
|
@ -646,7 +646,7 @@ i965_shader_add_rectangle (const i965_shader_t *shader,
|
|||
|
||||
cairo_private cairo_surface_t *
|
||||
i965_surface_create_internal (cairo_drm_device_t *base_dev,
|
||||
cairo_content_t content,
|
||||
cairo_format_t format,
|
||||
int width, int height,
|
||||
uint32_t tiling,
|
||||
cairo_bool_t gpu_target);
|
||||
|
|
|
|||
|
|
@ -271,7 +271,6 @@ i965_surface_clone (i965_device_t *device,
|
|||
|
||||
status = intel_bo_put_image (&device->intel,
|
||||
to_intel_bo (clone->intel.drm.bo),
|
||||
clone->intel.drm.stride,
|
||||
image,
|
||||
0, 0,
|
||||
image->width, image->height,
|
||||
|
|
@ -317,7 +316,6 @@ i965_surface_clone_subimage (i965_device_t *device,
|
|||
|
||||
status = intel_bo_put_image (to_intel_device (clone->intel.drm.base.device),
|
||||
to_intel_bo (clone->intel.drm.bo),
|
||||
clone->intel.drm.stride,
|
||||
image,
|
||||
extents->x, extents->y,
|
||||
extents->width, extents->height,
|
||||
|
|
@ -668,8 +666,6 @@ i965_shader_acquire_surface (i965_shader_t *shader,
|
|||
src->base.matrix = pattern->base.matrix;
|
||||
if (src_x | src_y)
|
||||
cairo_matrix_translate (&src->base.matrix, src_x, src_x);
|
||||
if (src->base.filter == BRW_MAPFILTER_NEAREST)
|
||||
cairo_matrix_translate (&src->base.matrix, NEAREST_BIAS, NEAREST_BIAS);
|
||||
cairo_matrix_init_scale (&m, 1. / src->base.width, 1. / src->base.height);
|
||||
cairo_matrix_multiply (&src->base.matrix, &src->base.matrix, &m);
|
||||
|
||||
|
|
@ -793,8 +789,7 @@ i965_shader_set_clip (i965_shader_t *shader,
|
|||
1. / s->intel.drm.height);
|
||||
|
||||
cairo_matrix_translate (&shader->clip.base.matrix,
|
||||
NEAREST_BIAS - clip_x,
|
||||
NEAREST_BIAS - clip_y);
|
||||
-clip_x, -clip_y);
|
||||
}
|
||||
|
||||
static cairo_bool_t
|
||||
|
|
@ -888,9 +883,6 @@ i965_shader_setup_dst (i965_shader_t *shader)
|
|||
cairo_matrix_init_scale (&channel->base.matrix,
|
||||
1. / s->intel.drm.width,
|
||||
1. / s->intel.drm.height);
|
||||
cairo_matrix_translate (&channel->base.matrix,
|
||||
NEAREST_BIAS,
|
||||
NEAREST_BIAS);
|
||||
|
||||
channel->surface.surface = &clone->intel.drm.base;
|
||||
|
||||
|
|
@ -2827,7 +2819,6 @@ i965_clipped_vertices (i965_device_t *device,
|
|||
size = vertex_count * device->vertex_size;
|
||||
ptr = intel_bo_map (&device->intel, vbo->bo);
|
||||
memcpy (device->vertex.data + device->vertex.used, ptr, size);
|
||||
intel_bo_unmap (vbo->bo);
|
||||
device->vertex.committed = device->vertex.used += size;
|
||||
|
||||
for (i = 0; i < num_rectangles; i++) {
|
||||
|
|
|
|||
|
|
@ -87,8 +87,6 @@ i965_spans_accumulate_rectangle (i965_spans_t *spans)
|
|||
if (unlikely (spans->vbo_offset + size > I965_VERTEX_SIZE)) {
|
||||
struct i965_vbo *vbo;
|
||||
|
||||
intel_bo_unmap (spans->tail->bo);
|
||||
|
||||
vbo = malloc (sizeof (struct i965_vbo));
|
||||
if (unlikely (vbo == NULL)) {
|
||||
/* throw error! */
|
||||
|
|
@ -98,7 +96,9 @@ i965_spans_accumulate_rectangle (i965_spans_t *spans)
|
|||
spans->tail = vbo;
|
||||
|
||||
vbo->next = NULL;
|
||||
vbo->bo = intel_bo_create (&spans->device->intel, I965_VERTEX_SIZE, FALSE);
|
||||
vbo->bo = intel_bo_create (&spans->device->intel,
|
||||
I965_VERTEX_SIZE, I965_VERTEX_SIZE,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
vbo->count = 0;
|
||||
|
||||
spans->vbo_offset = 0;
|
||||
|
|
@ -326,7 +326,8 @@ i965_spans_init (i965_spans_t *spans,
|
|||
} else {
|
||||
spans->get_rectangle = i965_spans_accumulate_rectangle;
|
||||
spans->head.bo = intel_bo_create (&spans->device->intel,
|
||||
I965_VERTEX_SIZE, FALSE);
|
||||
I965_VERTEX_SIZE, I965_VERTEX_SIZE,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
if (unlikely (spans->head.bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
|
|
@ -344,9 +345,6 @@ i965_spans_fini (i965_spans_t *spans)
|
|||
{
|
||||
i965_shader_fini (&spans->shader);
|
||||
|
||||
if (spans->tail->bo && spans->tail->bo->virtual)
|
||||
intel_bo_unmap (spans->tail->bo);
|
||||
|
||||
if (spans->head.bo != NULL) {
|
||||
struct i965_vbo *vbo, *next;
|
||||
|
||||
|
|
@ -397,10 +395,8 @@ i965_clip_and_composite_spans (i965_surface_t *dst,
|
|||
goto CLEANUP_DEVICE;
|
||||
|
||||
status = draw_func (draw_closure, &spans.renderer, spans.extents);
|
||||
if (spans.clip_region != NULL && status == CAIRO_STATUS_SUCCESS) {
|
||||
intel_bo_unmap (spans.tail->bo);
|
||||
if (spans.clip_region != NULL && status == CAIRO_STATUS_SUCCESS)
|
||||
i965_clipped_vertices (device, &spans.head, spans.clip_region);
|
||||
}
|
||||
|
||||
CLEANUP_DEVICE:
|
||||
cairo_device_release (dst->intel.drm.base.device);
|
||||
|
|
|
|||
|
|
@ -168,7 +168,9 @@ i965_stream_commit (i965_device_t *device,
|
|||
|
||||
assert (stream->used);
|
||||
|
||||
bo = intel_bo_create (&device->intel, stream->used, FALSE);
|
||||
bo = intel_bo_create (&device->intel,
|
||||
stream->used, stream->used,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
|
||||
/* apply pending relocations */
|
||||
for (n = 0; n < stream->num_pending_relocations; n++) {
|
||||
|
|
@ -373,22 +375,25 @@ i965_exec (i965_device_t *device, uint32_t offset)
|
|||
|
||||
/* XXX any write target within the batch should now be in error */
|
||||
for (i = 0; i < device->exec.count; i++) {
|
||||
intel_bo_t *bo = device->exec.bo[i];
|
||||
cairo_bool_t ret;
|
||||
|
||||
device->exec.bo[i]->offset = device->exec.exec[i].offset;
|
||||
device->exec.bo[i]->exec = NULL;
|
||||
device->exec.bo[i]->batch_read_domains = 0;
|
||||
device->exec.bo[i]->batch_write_domain = 0;
|
||||
bo->offset = device->exec.exec[i].offset;
|
||||
bo->exec = NULL;
|
||||
bo->batch_read_domains = 0;
|
||||
bo->batch_write_domain = 0;
|
||||
|
||||
if (device->exec.bo[i]->purgeable) {
|
||||
ret = intel_bo_madvise (&device->intel,
|
||||
device->exec.bo[i],
|
||||
I915_MADV_DONTNEED);
|
||||
if (bo->virtual)
|
||||
intel_bo_unmap (bo);
|
||||
bo->cpu = FALSE;
|
||||
|
||||
if (bo->purgeable)
|
||||
ret = intel_bo_madvise (&device->intel, bo, I915_MADV_DONTNEED);
|
||||
/* ignore immediate notification of purging */
|
||||
}
|
||||
|
||||
cairo_list_init (&device->exec.bo[i]->link);
|
||||
intel_bo_destroy (&device->intel, device->exec.bo[i]);
|
||||
cairo_list_del (&bo->cache_list);
|
||||
cairo_list_init (&bo->link);
|
||||
intel_bo_destroy (&device->intel, bo);
|
||||
}
|
||||
cairo_list_init (&device->flush);
|
||||
|
||||
|
|
@ -496,7 +501,8 @@ i965_device_flush (i965_device_t *device)
|
|||
|
||||
bo = intel_bo_create (&device->intel,
|
||||
device->general.used,
|
||||
FALSE);
|
||||
device->general.used,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
if (unlikely (bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
|
|
@ -547,7 +553,9 @@ i965_device_flush (i965_device_t *device)
|
|||
if (aligned <= 8192)
|
||||
max = aligned;
|
||||
|
||||
bo = intel_bo_create (&device->intel, max, FALSE);
|
||||
bo = intel_bo_create (&device->intel,
|
||||
max, max,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
if (unlikely (bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
|
|
@ -615,7 +623,9 @@ i965_device_flush (i965_device_t *device)
|
|||
if (device->surface.used)
|
||||
i965_stream_commit (device, &device->surface);
|
||||
|
||||
bo = intel_bo_create (&device->intel, device->batch.used, FALSE);
|
||||
bo = intel_bo_create (&device->intel,
|
||||
device->batch.used, device->batch.used,
|
||||
FALSE, I915_TILING_NONE, 0);
|
||||
if (unlikely (bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
|
|
@ -638,8 +648,6 @@ i965_device_flush (i965_device_t *device)
|
|||
aligned = 0;
|
||||
}
|
||||
|
||||
intel_glyph_cache_unmap (&device->intel);
|
||||
|
||||
status = i965_exec (device, aligned);
|
||||
|
||||
i965_stream_reset (&device->vertex);
|
||||
|
|
@@ -654,6 +662,29 @@ i965_device_flush (i965_device_t *device)
    return status;
}

static cairo_surface_t *
i965_surface_create_similar (void *abstract_other,
			     cairo_content_t content,
			     int width, int height)
{
    i965_surface_t *other;
    cairo_format_t format;

    if (width > 8192 || height > 8192)
	return NULL;

    other = abstract_other;
    if (content == other->intel.drm.base.content)
	format = other->intel.drm.format;
    else
	format = _cairo_format_from_content (content);

    return i965_surface_create_internal ((cairo_drm_device_t *) other->intel.drm.base.device,
					 format,
					 width, height,
					 I965_TILING_DEFAULT, TRUE);
}

static cairo_status_t
i965_surface_finish (void *abstract_surface)
{
@ -1462,7 +1493,7 @@ CLEANUP_BOXES:
|
|||
static const cairo_surface_backend_t i965_surface_backend = {
|
||||
CAIRO_SURFACE_TYPE_DRM,
|
||||
|
||||
_cairo_drm_surface_create_similar,
|
||||
i965_surface_create_similar,
|
||||
i965_surface_finish,
|
||||
intel_surface_acquire_source_image,
|
||||
intel_surface_release_source_image,
|
||||
|
|
@ -1494,10 +1525,12 @@ static const cairo_surface_backend_t i965_surface_backend = {
|
|||
|
||||
static void
|
||||
i965_surface_init (i965_surface_t *surface,
|
||||
cairo_content_t content,
|
||||
cairo_drm_device_t *device)
|
||||
cairo_drm_device_t *device,
|
||||
cairo_format_t format,
|
||||
int width, int height)
|
||||
{
|
||||
intel_surface_init (&surface->intel, &i965_surface_backend, device, content);
|
||||
intel_surface_init (&surface->intel, &i965_surface_backend, device,
|
||||
format, width, height);
|
||||
surface->stream = 0;
|
||||
}
|
||||
|
||||
|
|
@ -1523,7 +1556,7 @@ i965_tiling_height (uint32_t tiling, int height)
|
|||
|
||||
cairo_surface_t *
|
||||
i965_surface_create_internal (cairo_drm_device_t *base_dev,
|
||||
cairo_content_t content,
|
||||
cairo_format_t format,
|
||||
int width, int height,
|
||||
uint32_t tiling,
|
||||
cairo_bool_t gpu_target)
|
||||
|
|
@ -1535,47 +1568,36 @@ i965_surface_create_internal (cairo_drm_device_t *base_dev,
|
|||
if (unlikely (surface == NULL))
|
||||
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
|
||||
|
||||
i965_surface_init (surface, content, base_dev);
|
||||
i965_surface_init (surface, base_dev, format, width, height);
|
||||
|
||||
if (width && height) {
|
||||
uint32_t size;
|
||||
|
||||
surface->intel.drm.width = width;
|
||||
surface->intel.drm.height = height;
|
||||
uint32_t size, stride;
|
||||
intel_bo_t *bo;
|
||||
|
||||
width = (width + 3) & -4;
|
||||
surface->intel.drm.stride = cairo_format_stride_for_width (surface->intel.drm.format,
|
||||
width);
|
||||
surface->intel.drm.stride = (surface->intel.drm.stride + 63) & ~63;
|
||||
|
||||
#if 0
|
||||
/* check for tiny surfaces for which tiling is irrelevant */
|
||||
if (height * surface->intel.drm.stride < 4096)
|
||||
tiling = I915_TILING_NONE;
|
||||
#endif
|
||||
surface->intel.drm.stride = i965_tiling_stride (tiling,
|
||||
surface->intel.drm.stride);
|
||||
stride = cairo_format_stride_for_width (surface->intel.drm.format, width);
|
||||
stride = (stride + 63) & ~63;
|
||||
stride = i965_tiling_stride (tiling, stride);
|
||||
surface->intel.drm.stride = stride;
|
||||
|
||||
height = i965_tiling_height (tiling, height);
|
||||
assert (height <= I965_MAX_SIZE);
|
||||
|
||||
size = surface->intel.drm.stride * height;
|
||||
if (tiling != I915_TILING_NONE)
|
||||
size = (size + 4095) & -4096;
|
||||
|
||||
surface->intel.drm.bo = &intel_bo_create (to_intel_device (&base_dev->base),
|
||||
size, gpu_target)->base;
|
||||
if (surface->intel.drm.bo == NULL) {
|
||||
size = stride * height;
|
||||
bo = intel_bo_create (to_intel_device (&base_dev->base),
|
||||
size, size,
|
||||
gpu_target, tiling, stride);
|
||||
if (bo == NULL) {
|
||||
status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
|
||||
free (surface);
|
||||
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
|
||||
}
|
||||
|
||||
intel_bo_set_tiling (to_intel_device (&base_dev->base),
|
||||
to_intel_bo (surface->intel.drm.bo),
|
||||
tiling, surface->intel.drm.stride);
|
||||
bo->tiling = tiling;
|
||||
bo->stride = stride;
|
||||
surface->intel.drm.bo = &bo->base;
|
||||
|
||||
assert (surface->intel.drm.bo->size >= (size_t) surface->intel.drm.stride*height);
|
||||
assert (bo->base.size >= (size_t) stride*height);
|
||||
}
|
||||
|
||||
return &surface->intel.drm.base;
|
||||
|
|
@@ -1583,9 +1605,21 @@ i965_surface_create_internal (cairo_drm_device_t *base_dev,

static cairo_surface_t *
i965_surface_create (cairo_drm_device_t *device,
		     cairo_content_t content, int width, int height)
		     cairo_format_t format, int width, int height)
{
    return i965_surface_create_internal (device, content, width, height,
    switch (format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_INVALID:
    default:
    case CAIRO_FORMAT_A1:
	return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    }

    return i965_surface_create_internal (device, format, width, height,
					 I965_TILING_DEFAULT, TRUE);
}
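With the format-based entry points, backend callers pick the pixel format directly and unsupported formats come back as an error surface instead of tripping an assert. A minimal usage sketch, assuming the i965_surface_create() hook above is reached through the device's surface-create vtable entry; the dimensions and error handling are illustrative only:

/* Sketch: request an ARGB32 render target and check the result. */
cairo_surface_t *surface;

surface = i965_surface_create (device, CAIRO_FORMAT_ARGB32, 256, 256);
if (cairo_surface_status (surface)) {
    /* e.g. CAIRO_STATUS_INVALID_FORMAT for A1, or NO_MEMORY */
    cairo_surface_destroy (surface);
    return NULL;
}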
@ -1597,7 +1631,6 @@ i965_surface_create_for_name (cairo_drm_device_t *base_dev,
|
|||
{
|
||||
i965_device_t *device;
|
||||
i965_surface_t *surface;
|
||||
cairo_content_t content;
|
||||
cairo_status_t status_ignored;
|
||||
int min_stride;
|
||||
|
||||
|
|
@ -1610,14 +1643,9 @@ i965_surface_create_for_name (cairo_drm_device_t *base_dev,
|
|||
|
||||
switch (format) {
|
||||
case CAIRO_FORMAT_ARGB32:
|
||||
content = CAIRO_CONTENT_COLOR_ALPHA;
|
||||
break;
|
||||
case CAIRO_FORMAT_RGB16_565:
|
||||
case CAIRO_FORMAT_RGB24:
|
||||
content = CAIRO_CONTENT_COLOR;
|
||||
break;
|
||||
case CAIRO_FORMAT_A8:
|
||||
content = CAIRO_CONTENT_ALPHA;
|
||||
break;
|
||||
case CAIRO_FORMAT_INVALID:
|
||||
default:
|
||||
|
|
@ -1629,7 +1657,7 @@ i965_surface_create_for_name (cairo_drm_device_t *base_dev,
|
|||
if (unlikely (surface == NULL))
|
||||
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
|
||||
|
||||
i965_surface_init (surface, content, base_dev);
|
||||
i965_surface_init (surface, base_dev, format, width, height);
|
||||
|
||||
device = (i965_device_t *) base_dev;
|
||||
surface->intel.drm.bo = &intel_bo_create_for_name (&device->intel, name)->base;
|
||||
|
|
@ -1639,8 +1667,6 @@ i965_surface_create_for_name (cairo_drm_device_t *base_dev,
|
|||
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
|
||||
}
|
||||
|
||||
surface->intel.drm.width = width;
|
||||
surface->intel.drm.height = height;
|
||||
surface->intel.drm.stride = stride;
|
||||
|
||||
return &surface->intel.drm.base;
@ -720,6 +720,7 @@ debug_copy_blit (struct debug_stream *stream,
|
|||
uint32_t *ptr = (uint32_t *)(stream->ptr + stream->offset);
|
||||
uint32_t j = 0;
|
||||
|
||||
fprintf (stderr, "%04x: ", stream->offset);
|
||||
fprintf (stderr, "%s (%d dwords):\n", name, len);
|
||||
fprintf (stderr, "\t0x%08x\n", ptr[j++]);
|
||||
|
||||
|
|
|
|||
|
|
@ -32,6 +32,22 @@
|
|||
|
||||
#include "cairo-drm-intel-command-private.h"
|
||||
|
||||
#define I915_PARAM_IRQ_ACTIVE 1
|
||||
#define I915_PARAM_ALLOW_BATCHBUFFER 2
|
||||
#define I915_PARAM_LAST_DISPATCH 3
|
||||
#define I915_PARAM_CHIPSET_ID 4
|
||||
#define I915_PARAM_HAS_GEM 5
|
||||
#define I915_PARAM_NUM_FENCES_AVAIL 6
|
||||
#define I915_PARAM_HAS_OVERLAY 7
|
||||
#define I915_PARAM_HAS_PAGEFLIPPING 8
|
||||
#define I915_PARAM_HAS_EXECBUF2 9
|
||||
|
||||
struct intel_getparam {
|
||||
int param;
|
||||
int *value;
|
||||
};
|
||||
|
||||
|
||||
/** @{
|
||||
* Intel memory domains
|
||||
*
|
||||
|
|
@ -331,7 +347,9 @@ struct drm_i915_gem_get_aperture {
|
|||
uint64_t aper_available_size;
|
||||
};
|
||||
|
||||
#define DRM_I915_GETPARAM 0x06
|
||||
|
||||
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, struct intel_getparam)
|
||||
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
|
||||
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
|
||||
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
|
||||
|
|
@ -414,4 +432,11 @@ struct drm_i915_gem_execbuffer2 {
|
|||
#define DRM_I915_GEM_EXECBUFFER2 0x29
|
||||
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
|
||||
|
||||
struct drm_i915_gem_real_size {
|
||||
uint32_t handle;
|
||||
uint64_t size;
|
||||
};
|
||||
#define DRM_I915_GEM_REAL_SIZE 0x2a
|
||||
#define DRM_IOCTL_I915_GEM_REAL_SIZE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_REAL_SIZE, struct drm_i915_gem_real_size)
|
||||
|
||||
#endif /* CAIRO_DRM_INTEL_IOCTL_PRIVATE_H */
@ -42,11 +42,8 @@
|
|||
|
||||
#include "cairo-drm-intel-ioctl-private.h"
|
||||
|
||||
#define NEAREST_BIAS (-.375)
|
||||
|
||||
#define INTEL_TILING_DEFAULT I915_TILING_Y
|
||||
|
||||
|
||||
#define INTEL_BO_CACHE_BUCKETS 12 /* cache surfaces up to 16 MiB */
|
||||
|
||||
#define INTEL_GLYPH_CACHE_WIDTH 1024
|
||||
|
|
@ -57,24 +54,28 @@
|
|||
typedef struct _intel_bo {
|
||||
cairo_drm_bo_t base;
|
||||
|
||||
cairo_list_t link;
|
||||
cairo_list_t cache_list;
|
||||
|
||||
uint32_t offset;
|
||||
void *virtual;
|
||||
|
||||
uint32_t tiling;
|
||||
uint32_t swizzle;
|
||||
uint32_t stride;
|
||||
cairo_bool_t purgeable;
|
||||
uint32_t batch_read_domains;
|
||||
uint32_t batch_write_domain;
|
||||
|
||||
uint32_t opaque0;
|
||||
uint32_t opaque1;
|
||||
|
||||
struct drm_i915_gem_exec_object2 *exec;
|
||||
uint32_t batch_read_domains;
|
||||
uint32_t batch_write_domain;
|
||||
uint32_t full_size;
|
||||
uint16_t stride;
|
||||
uint16_t _stride;
|
||||
uint32_t bucket :4;
|
||||
uint32_t tiling :4;
|
||||
uint32_t _tiling :4;
|
||||
uint32_t purgeable :1;
|
||||
uint32_t busy :1;
|
||||
uint32_t cpu :1;
|
||||
|
||||
cairo_list_t link;
|
||||
struct drm_i915_gem_exec_object2 *exec;
|
||||
void *virtual;
|
||||
} intel_bo_t;
|
||||
|
||||
#define INTEL_BATCH_SIZE (64*1024)
|
||||
|
|
@@ -82,11 +83,10 @@ typedef struct _intel_bo {
#define INTEL_MAX_RELOCS 2048

static inline void
intel_bo_mark_purgeable (intel_bo_t *bo,
			 cairo_bool_t purgeable)
intel_bo_mark_purgeable (intel_bo_t *bo)
{
    if (bo->base.name == 0)
	bo->purgeable = purgeable;
	bo->purgeable = 1;
}

typedef struct _intel_vertex_buffer intel_vertex_buffer_t;
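Marking a buffer purgeable only pays off if the cache later revalidates it before reuse. A small sketch of the round trip, assuming the intel_bo_madvise() helper declared in this header; bo and device stand for any cached object and its owning device:

/* Release path: unnamed buffers are flagged purgeable and the kernel is
 * told it may discard their backing pages under memory pressure. */
intel_bo_mark_purgeable (bo);
(void) intel_bo_madvise (device, bo, I915_MADV_DONTNEED);

/* Reuse path: ask for the pages back; if the kernel already purged them
 * the object must be closed and freed rather than recycled. */
if (! intel_bo_madvise (device, bo, I915_MADV_WILLNEED)) {
    _cairo_drm_bo_close (&device->base, &bo->base);
    _cairo_freepool_free (&device->bo_pool, bo);
}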
@ -168,6 +168,7 @@ typedef struct _intel_glyph {
|
|||
intel_buffer_cache_t *cache;
|
||||
void **owner;
|
||||
float texcoord[3];
|
||||
int width, height;
|
||||
} intel_glyph_t;
|
||||
|
||||
typedef struct _intel_gradient_cache {
|
||||
|
|
@ -200,11 +201,11 @@ typedef struct _intel_device {
|
|||
size_t bo_cache_size;
|
||||
size_t bo_max_cache_size_high;
|
||||
size_t bo_max_cache_size_low;
|
||||
cairo_list_t bo_in_flight;
|
||||
|
||||
cairo_mutex_t mutex;
|
||||
intel_batch_t batch;
|
||||
|
||||
cairo_bool_t glyph_cache_mapped;
|
||||
intel_buffer_cache_t glyph_cache[2];
|
||||
cairo_list_t fonts;
|
||||
|
||||
|
|
@ -242,13 +243,23 @@ intel_bo_reference (intel_bo_t *bo)
|
|||
cairo_private cairo_bool_t
|
||||
intel_bo_madvise (intel_device_t *device, intel_bo_t *bo, int madv);
|
||||
|
||||
|
||||
static cairo_always_inline void
|
||||
intel_bo_destroy (intel_device_t *device, intel_bo_t *bo)
|
||||
{
|
||||
cairo_drm_bo_destroy (&device->base.base, &bo->base);
|
||||
}
|
||||
|
||||
static inline void
|
||||
intel_bo_in_flight_add (intel_device_t *device,
|
||||
intel_bo_t *bo)
|
||||
{
|
||||
if (bo->base.name == 0 && bo->exec != NULL && cairo_list_is_empty (&bo->cache_list))
|
||||
cairo_list_add (&bo->cache_list, &device->bo_in_flight);
|
||||
}
|
||||
|
||||
cairo_private int
|
||||
intel_get (int fd, int param);
|
||||
|
||||
cairo_private cairo_bool_t
|
||||
intel_info (int fd, uint64_t *gtt_size);
|
||||
|
||||
|
|
@ -260,23 +271,24 @@ intel_device_fini (intel_device_t *dev);
|
|||
|
||||
cairo_private intel_bo_t *
|
||||
intel_bo_create (intel_device_t *dev,
|
||||
uint32_t size,
|
||||
cairo_bool_t gpu_target);
|
||||
uint32_t max_size,
|
||||
uint32_t real_size,
|
||||
cairo_bool_t gpu_target,
|
||||
uint32_t tiling,
|
||||
uint32_t stride);
|
||||
|
||||
cairo_private intel_bo_t *
|
||||
intel_bo_create_for_name (intel_device_t *dev, uint32_t name);
|
||||
|
||||
cairo_private void
|
||||
intel_bo_set_tiling (const intel_device_t *dev,
|
||||
intel_bo_t *bo,
|
||||
uint32_t tiling,
|
||||
uint32_t stride);
|
||||
intel_bo_t *bo);
|
||||
|
||||
cairo_private cairo_bool_t
|
||||
intel_bo_is_inactive (const intel_device_t *device,
|
||||
const intel_bo_t *bo);
|
||||
intel_bo_t *bo);
|
||||
|
||||
cairo_private void
|
||||
cairo_private cairo_bool_t
|
||||
intel_bo_wait (const intel_device_t *device, const intel_bo_t *bo);
|
||||
|
||||
cairo_private void
|
||||
|
|
@ -318,7 +330,7 @@ intel_bo_get_image (const intel_device_t *device,
|
|||
|
||||
cairo_private cairo_status_t
|
||||
intel_bo_put_image (intel_device_t *dev,
|
||||
intel_bo_t *bo, int stride,
|
||||
intel_bo_t *bo,
|
||||
cairo_image_surface_t *src,
|
||||
int src_x, int src_y,
|
||||
int width, int height,
|
||||
|
|
@ -328,7 +340,8 @@ cairo_private void
|
|||
intel_surface_init (intel_surface_t *surface,
|
||||
const cairo_surface_backend_t *backend,
|
||||
cairo_drm_device_t *device,
|
||||
cairo_content_t content);
|
||||
cairo_format_t format,
|
||||
int width, int height);
|
||||
|
||||
cairo_private cairo_status_t
|
||||
intel_buffer_cache_init (intel_buffer_cache_t *cache,
|
||||
|
|
@ -353,9 +366,6 @@ intel_scaled_glyph_fini (cairo_scaled_glyph_t *scaled_glyph,
|
|||
cairo_private void
|
||||
intel_scaled_font_fini (cairo_scaled_font_t *scaled_font);
|
||||
|
||||
cairo_private void
|
||||
intel_glyph_cache_unmap (intel_device_t *device);
|
||||
|
||||
cairo_private void
|
||||
intel_glyph_cache_unpin (intel_device_t *device);
|
||||
|
||||
|
|
@ -404,17 +414,6 @@ intel_dump_batchbuffer (const void *batch,
|
|||
uint32_t length,
|
||||
int devid);
|
||||
|
||||
static inline float cairo_const
|
||||
texcoord_2d_16 (double x, double y)
|
||||
{
|
||||
union {
|
||||
uint32_t ui;
|
||||
float f;
|
||||
} u;
|
||||
u.ui = (_cairo_half_from_float (y) << 16) | _cairo_half_from_float (x);
|
||||
return u.f;
|
||||
}
|
||||
|
||||
static inline uint32_t cairo_const
|
||||
MS3_tiling (uint32_t tiling)
|
||||
{
|
||||
|
|
@@ -426,6 +425,17 @@ MS3_tiling (uint32_t tiling)
    }
}

static inline float cairo_const
texcoord_2d_16 (double x, double y)
{
    union {
	uint32_t ui;
	float f;
    } u;
    u.ui = (_cairo_half_from_float (y) << 16) | _cairo_half_from_float (x);
    return u.f;
}

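texcoord_2d_16() packs two half-precision coordinates into the bit pattern of one float, so a 2D texture coordinate fits in a single vertex dword. A short usage sketch, with sf_x/sf_y and node->x/node->y borrowed from the glyph-cache code later in this commit; how the resulting dword is emitted into the vertex stream is backend-specific and not shown:

/* Normalise a glyph's atlas position and pack it for the vertex buffer. */
float packed;

packed = texcoord_2d_16 (sf_x * node->x, sf_y * node->y);
/* packed now carries two IEEE half floats: x in bits 0-15, y in bits 16-31. */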
#define PCI_CHIP_I810 0x7121
#define PCI_CHIP_I810_DC100 0x7123
#define PCI_CHIP_I810_E 0x7125
@ -53,9 +53,18 @@ intel_surface_finish (void *abstract_surface)
|
|||
{
|
||||
intel_surface_t *surface = abstract_surface;
|
||||
|
||||
intel_bo_in_flight_add (to_intel_device (surface->drm.base.device),
|
||||
to_intel_bo (surface->drm.bo));
|
||||
return _cairo_drm_surface_finish (&surface->drm);
|
||||
}
|
||||
|
||||
static void
|
||||
surface_finish_and_destroy (cairo_surface_t *surface)
|
||||
{
|
||||
cairo_surface_finish (surface);
|
||||
cairo_surface_destroy (surface);
|
||||
}
|
||||
|
||||
cairo_status_t
|
||||
intel_surface_acquire_source_image (void *abstract_surface,
|
||||
cairo_image_surface_t **image_out,
|
||||
|
|
@ -64,8 +73,7 @@ intel_surface_acquire_source_image (void *abstract_surface,
|
|||
intel_surface_t *surface = abstract_surface;
|
||||
cairo_surface_t *image;
|
||||
cairo_status_t status;
|
||||
|
||||
/* XXX batch flush */
|
||||
void *ptr;
|
||||
|
||||
if (surface->drm.fallback != NULL) {
|
||||
image = surface->drm.fallback;
|
||||
|
|
@ -83,14 +91,20 @@ intel_surface_acquire_source_image (void *abstract_surface,
|
|||
return status;
|
||||
}
|
||||
|
||||
image = intel_bo_get_image (to_intel_device (surface->drm.base.device),
|
||||
to_intel_bo (surface->drm.bo),
|
||||
&surface->drm);
|
||||
status = image->status;
|
||||
if (unlikely (status))
|
||||
return status;
|
||||
ptr = intel_bo_map (to_intel_device (surface->drm.base.device),
|
||||
to_intel_bo (surface->drm.bo));
|
||||
if (unlikely (ptr == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
_cairo_surface_attach_snapshot (&surface->drm.base, image, cairo_surface_destroy);
|
||||
image = cairo_image_surface_create_for_data (ptr,
|
||||
surface->drm.format,
|
||||
surface->drm.width,
|
||||
surface->drm.height,
|
||||
surface->drm.stride);
|
||||
if (unlikely (image->status))
|
||||
return image->status;
|
||||
|
||||
_cairo_surface_attach_snapshot (&surface->drm.base, image, surface_finish_and_destroy);
|
||||
|
||||
DONE:
|
||||
*image_out = (cairo_image_surface_t *) cairo_surface_reference (image);
|
||||
|
|
@ -132,10 +146,8 @@ intel_surface_map_to_image (void *abstract_surface)
|
|||
surface->drm.width,
|
||||
surface->drm.height,
|
||||
surface->drm.stride);
|
||||
if (unlikely (image->status)) {
|
||||
intel_bo_unmap (to_intel_bo (surface->drm.bo));
|
||||
if (unlikely (image->status))
|
||||
return image;
|
||||
}
|
||||
|
||||
surface->drm.fallback = image;
|
||||
}
|
||||
|
|
@ -159,8 +171,6 @@ intel_surface_flush (void *abstract_surface)
|
|||
cairo_surface_destroy (surface->drm.fallback);
|
||||
surface->drm.fallback = NULL;
|
||||
|
||||
intel_bo_unmap (to_intel_bo (surface->drm.bo));
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
|
|
@ -271,34 +281,21 @@ void
|
|||
intel_surface_init (intel_surface_t *surface,
|
||||
const cairo_surface_backend_t *backend,
|
||||
cairo_drm_device_t *device,
|
||||
cairo_content_t content)
|
||||
cairo_format_t format,
|
||||
int width, int height)
|
||||
{
|
||||
_cairo_surface_init (&surface->drm.base,
|
||||
backend,
|
||||
&device->base,
|
||||
content);
|
||||
_cairo_drm_surface_init (&surface->drm, device);
|
||||
|
||||
switch (content) {
|
||||
case CAIRO_CONTENT_ALPHA:
|
||||
surface->drm.format = CAIRO_FORMAT_A8;
|
||||
break;
|
||||
case CAIRO_CONTENT_COLOR:
|
||||
surface->drm.format = CAIRO_FORMAT_RGB24;
|
||||
break;
|
||||
default:
|
||||
ASSERT_NOT_REACHED;
|
||||
case CAIRO_CONTENT_COLOR_ALPHA:
|
||||
surface->drm.format = CAIRO_FORMAT_ARGB32;
|
||||
break;
|
||||
}
|
||||
_cairo_content_from_format (format));
|
||||
_cairo_drm_surface_init (&surface->drm, format, width, height);
|
||||
|
||||
surface->snapshot_cache_entry.hash = 0;
|
||||
}
|
||||
|
||||
static cairo_surface_t *
|
||||
intel_surface_create (cairo_drm_device_t *device,
|
||||
cairo_content_t content,
|
||||
cairo_format_t format,
|
||||
int width, int height)
|
||||
{
|
||||
intel_surface_t *surface;
|
||||
|
|
@ -308,12 +305,10 @@ intel_surface_create (cairo_drm_device_t *device,
|
|||
if (unlikely (surface == NULL))
|
||||
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
|
||||
|
||||
intel_surface_init (surface, &intel_surface_backend, device, content);
|
||||
intel_surface_init (surface, &intel_surface_backend, device,
|
||||
format, width, height);
|
||||
|
||||
if (width && height) {
|
||||
surface->drm.width = width;
|
||||
surface->drm.height = height;
|
||||
|
||||
/* Vol I, p134: size restrictions for textures */
|
||||
width = (width + 3) & -4;
|
||||
height = (height + 1) & -2;
|
||||
|
|
@ -321,7 +316,8 @@ intel_surface_create (cairo_drm_device_t *device,
|
|||
cairo_format_stride_for_width (surface->drm.format, width);
|
||||
surface->drm.bo = &intel_bo_create (to_intel_device (&device->base),
|
||||
surface->drm.stride * height,
|
||||
TRUE)->base;
|
||||
surface->drm.stride * height,
|
||||
TRUE, I915_TILING_NONE, surface->drm.stride)->base;
|
||||
if (surface->drm.bo == NULL) {
|
||||
status = _cairo_drm_surface_finish (&surface->drm);
|
||||
free (surface);
|
||||
|
|
@ -339,7 +335,6 @@ intel_surface_create_for_name (cairo_drm_device_t *device,
|
|||
int width, int height, int stride)
|
||||
{
|
||||
intel_surface_t *surface;
|
||||
cairo_content_t content;
|
||||
cairo_status_t status;
|
||||
|
||||
switch (format) {
|
||||
|
|
@ -348,14 +343,9 @@ intel_surface_create_for_name (cairo_drm_device_t *device,
|
|||
case CAIRO_FORMAT_A1:
|
||||
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
|
||||
case CAIRO_FORMAT_ARGB32:
|
||||
content = CAIRO_CONTENT_COLOR_ALPHA;
|
||||
break;
|
||||
case CAIRO_FORMAT_RGB16_565:
|
||||
case CAIRO_FORMAT_RGB24:
|
||||
content = CAIRO_CONTENT_COLOR;
|
||||
break;
|
||||
case CAIRO_FORMAT_A8:
|
||||
content = CAIRO_CONTENT_ALPHA;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -366,11 +356,10 @@ intel_surface_create_for_name (cairo_drm_device_t *device,
|
|||
if (unlikely (surface == NULL))
|
||||
return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
|
||||
|
||||
intel_surface_init (surface, &intel_surface_backend, device, content);
|
||||
intel_surface_init (surface, &intel_surface_backend,
|
||||
device, format, width, height);
|
||||
|
||||
if (width && height) {
|
||||
surface->drm.width = width;
|
||||
surface->drm.height = height;
|
||||
surface->drm.stride = stride;
|
||||
|
||||
surface->drm.bo = &intel_bo_create_for_name (to_intel_device (&device->base),
|
||||
|
|
@ -394,14 +383,7 @@ intel_surface_enable_scan_out (void *abstract_surface)
|
|||
if (unlikely (surface->drm.bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_INVALID_SIZE);
|
||||
|
||||
if (to_intel_bo (surface->drm.bo)->tiling == I915_TILING_Y) {
|
||||
intel_bo_set_tiling (to_intel_device (surface->drm.base.device),
|
||||
to_intel_bo (surface->drm.bo),
|
||||
I915_TILING_X, surface->drm.stride);
|
||||
}
|
||||
|
||||
if (unlikely (to_intel_bo (surface->drm.bo)->tiling == I915_TILING_Y))
|
||||
return _cairo_error (CAIRO_STATUS_INVALID_FORMAT); /* XXX */
|
||||
to_intel_bo (surface->drm.bo)->tiling = I915_TILING_X;
|
||||
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
}
@@ -49,16 +49,38 @@
#define IMAGE_CACHE_WIDTH 1024
#define IMAGE_CACHE_HEIGHT 1024

int
intel_get (int fd, int param)
{
    struct intel_getparam gp;
    int value;

    gp.param = param;
    gp.value = &value;
    if (ioctl (fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
	return 0;

    VG (VALGRIND_MAKE_MEM_DEFINED (&value, sizeof (value)));

    return value;
}

cairo_bool_t
intel_info (int fd, uint64_t *gtt_size)
{
    struct drm_i915_gem_get_aperture info;
    int ret;

    ret = ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &info);
    if (ret == -1)
    if (! intel_get (fd, I915_PARAM_HAS_GEM))
	return FALSE;

    if (! intel_get (fd, I915_PARAM_HAS_EXECBUF2))
	return FALSE;

    if (ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &info) < 0)
	return FALSE;

    VG (VALGRIND_MAKE_MEM_DEFINED (&info, sizeof (info)));

    if (gtt_size != NULL)
	*gtt_size = info.aper_size;
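intel_info() is now the single capability probe: it requires GEM and execbuf2 support and reports the aperture size. A minimal sketch of how a device constructor might consume it; the fd comes from the opened DRM node, and the cache split mirrors _intel_device_init_bo_cache() further down in this commit:

/* Refuse devices without GEM/execbuf2, then size the bo cache from the
 * reported aperture. */
uint64_t gtt_size;

if (! intel_info (fd, &gtt_size))
    return NULL;	/* no GEM, no execbuf2, or no aperture info */

device->gtt_max_size = gtt_size;
device->bo_max_cache_size_high = gtt_size / 2;
device->bo_max_cache_size_low  = gtt_size / 4;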
@ -75,6 +97,15 @@ intel_bo_write (const intel_device_t *device,
|
|||
struct drm_i915_gem_pwrite pwrite;
|
||||
int ret;
|
||||
|
||||
assert (bo->tiling == I915_TILING_NONE);
|
||||
assert (size);
|
||||
assert (offset < bo->base.size);
|
||||
assert (size+offset <= bo->base.size);
|
||||
|
||||
intel_bo_set_tiling (device, bo);
|
||||
|
||||
assert (bo->_tiling == I915_TILING_NONE);
|
||||
|
||||
memset (&pwrite, 0, sizeof (pwrite));
|
||||
pwrite.handle = bo->base.handle;
|
||||
pwrite.offset = offset;
|
||||
|
|
@ -83,6 +114,9 @@ intel_bo_write (const intel_device_t *device,
|
|||
do {
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
assert (ret == 0);
|
||||
|
||||
bo->busy = FALSE;
|
||||
}
|
||||
|
||||
void
|
||||
|
|
@ -95,6 +129,15 @@ intel_bo_read (const intel_device_t *device,
|
|||
struct drm_i915_gem_pread pread;
|
||||
int ret;
|
||||
|
||||
assert (bo->tiling == I915_TILING_NONE);
|
||||
assert (size);
|
||||
assert (offset < bo->base.size);
|
||||
assert (size+offset <= bo->base.size);
|
||||
|
||||
intel_bo_set_tiling (device, bo);
|
||||
|
||||
assert (bo->_tiling == I915_TILING_NONE);
|
||||
|
||||
memset (&pread, 0, sizeof (pread));
|
||||
pread.handle = bo->base.handle;
|
||||
pread.offset = offset;
|
||||
|
|
@ -103,25 +146,48 @@ intel_bo_read (const intel_device_t *device,
|
|||
do {
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
assert (ret == 0);
|
||||
|
||||
bo->cpu = TRUE;
|
||||
bo->busy = FALSE;
|
||||
}
|
||||
|
||||
void *
|
||||
intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
|
||||
{
|
||||
struct drm_i915_gem_set_domain set_domain;
|
||||
int ret;
|
||||
uint32_t domain;
|
||||
int ret;
|
||||
|
||||
assert (bo->virtual == NULL);
|
||||
intel_bo_set_tiling (device, bo);
|
||||
|
||||
if (bo->tiling != I915_TILING_NONE) {
|
||||
struct drm_i915_gem_mmap_gtt mmap_arg;
|
||||
void *ptr;
|
||||
if (bo->virtual != NULL)
|
||||
return bo->virtual;
|
||||
|
||||
if (bo->cpu && bo->tiling == I915_TILING_NONE) {
|
||||
struct drm_i915_gem_mmap mmap_arg;
|
||||
|
||||
mmap_arg.handle = bo->base.handle;
|
||||
mmap_arg.offset = 0;
|
||||
mmap_arg.size = bo->base.size;
|
||||
mmap_arg.addr_ptr = 0;
|
||||
|
||||
do {
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
if (unlikely (ret != 0)) {
|
||||
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bo->virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
|
||||
domain = I915_GEM_DOMAIN_CPU;
|
||||
} else {
|
||||
struct drm_i915_gem_mmap_gtt mmap_arg;
|
||||
void *ptr;
|
||||
|
||||
/* Get the fake offset back... */
|
||||
mmap_arg.handle = bo->base.handle;
|
||||
do {
|
||||
ret = ioctl (device->base.fd,
|
||||
DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
|
||||
|
|
@ -141,27 +207,11 @@ intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
|
|||
}
|
||||
|
||||
bo->virtual = ptr;
|
||||
} else {
|
||||
struct drm_i915_gem_mmap mmap_arg;
|
||||
|
||||
mmap_arg.handle = bo->base.handle;
|
||||
mmap_arg.offset = 0;
|
||||
mmap_arg.size = bo->base.size;
|
||||
mmap_arg.addr_ptr = 0;
|
||||
|
||||
do {
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
if (unlikely (ret != 0)) {
|
||||
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bo->virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
|
||||
domain = I915_GEM_DOMAIN_GTT;
|
||||
}
|
||||
|
||||
domain = bo->tiling == I915_TILING_NONE ?
|
||||
I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
|
||||
VG (VALGRIND_MAKE_MEM_DEFINED (bo->virtual, bo->base.size));
|
||||
|
||||
set_domain.handle = bo->base.handle;
|
||||
set_domain.read_domains = domain;
|
||||
set_domain.write_domain = domain;
|
||||
|
|
@ -178,6 +228,7 @@ intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
bo->busy = FALSE;
|
||||
return bo->virtual;
|
||||
}
|
||||
|
||||
|
|
@ -189,19 +240,23 @@ intel_bo_unmap (intel_bo_t *bo)
|
|||
}
|
||||
|
||||
cairo_bool_t
|
||||
intel_bo_is_inactive (const intel_device_t *device, const intel_bo_t *bo)
|
||||
intel_bo_is_inactive (const intel_device_t *device, intel_bo_t *bo)
|
||||
{
|
||||
struct drm_i915_gem_busy busy;
|
||||
|
||||
if (! bo->busy)
|
||||
return TRUE;
|
||||
|
||||
/* Is this buffer busy for our intended usage pattern? */
|
||||
busy.handle = bo->base.handle;
|
||||
busy.busy = 1;
|
||||
ioctl (device->base.fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
|
||||
|
||||
bo->busy = busy.busy;
|
||||
return ! busy.busy;
|
||||
}
|
||||
|
||||
void
|
||||
cairo_bool_t
|
||||
intel_bo_wait (const intel_device_t *device, const intel_bo_t *bo)
|
||||
{
|
||||
struct drm_i915_gem_set_domain set_domain;
|
||||
|
|
@ -214,6 +269,8 @@ intel_bo_wait (const intel_device_t *device, const intel_bo_t *bo)
|
|||
do {
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
|
||||
return ret == 0;
|
||||
}
|
||||
|
||||
static inline int
|
||||
|
|
@ -238,11 +295,8 @@ intel_bo_cache_remove (intel_device_t *device,
|
|||
|
||||
cairo_list_del (&bo->cache_list);
|
||||
|
||||
if (device->bo_cache[bucket].num_entries-- >
|
||||
device->bo_cache[bucket].min_entries)
|
||||
{
|
||||
device->bo_cache_size -= bo->base.size;
|
||||
}
|
||||
device->bo_cache[bucket].num_entries--;
|
||||
device->bo_cache_size -= 4096 * (1 << bucket);
|
||||
|
||||
_cairo_freepool_free (&device->bo_pool, bo);
|
||||
}
|
||||
|
|
@ -261,6 +315,36 @@ intel_bo_madvise (intel_device_t *device,
|
|||
return madv.retained;
|
||||
}
|
||||
|
||||
static void
|
||||
intel_bo_set_real_size (intel_device_t *device,
|
||||
intel_bo_t *bo,
|
||||
size_t size)
|
||||
{
|
||||
struct drm_i915_gem_real_size arg;
|
||||
int ret;
|
||||
|
||||
return;
|
||||
|
||||
if (size == bo->base.size)
|
||||
return;
|
||||
|
||||
arg.handle = bo->base.handle;
|
||||
arg.size = size;
|
||||
do {
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_REAL_SIZE, &arg);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
|
||||
if (ret == 0) {
|
||||
if (size > bo->base.size) {
|
||||
assert (bo->exec == NULL);
|
||||
bo->cpu = TRUE;
|
||||
bo->busy = FALSE;
|
||||
}
|
||||
|
||||
bo->base.size = size;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
intel_bo_cache_purge (intel_device_t *device)
|
||||
{
|
||||
|
|
@@ -282,89 +366,131 @@ intel_bo_cache_purge (intel_device_t *device)

intel_bo_t *
intel_bo_create (intel_device_t *device,
		 uint32_t size,
		 cairo_bool_t gpu_target)
		 uint32_t max_size,
		 uint32_t real_size,
		 cairo_bool_t gpu_target,
		 uint32_t tiling,
		 uint32_t stride)
{
    intel_bo_t *bo = NULL;
    intel_bo_t *bo;
    uint32_t cache_size;
    struct drm_i915_gem_create create;
    int bucket;
    int ret;

    cache_size = pot ((size + 4095) & -4096);
    max_size = (max_size + 4095) & -4096;
    real_size = (real_size + 4095) & -4096;
    cache_size = pot (max_size);
    bucket = ffs (cache_size / 4096) - 1;
    if (bucket >= INTEL_BO_CACHE_BUCKETS)
	cache_size = max_size;

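    /* Worked example of the bucket arithmetic above (sizes illustrative,
     * and assuming pot() rounds up to the next power of two):
     * a request of max_size = 70000 bytes is page-rounded to 73728,
     * pot() lifts it to 131072, and
     * ffs (131072 / 4096) - 1 = ffs (32) - 1 = 6 - 1 = 5, i.e. bucket 5.
     * Buckets 0..INTEL_BO_CACHE_BUCKETS-1 each hold objects of 4096 << bucket
     * bytes; anything whose power-of-two size would exceed the last bucket
     * falls back to cache_size = max_size and bypasses the bucket cache. */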
if (gpu_target) {
|
||||
intel_bo_t *first = NULL;
|
||||
|
||||
cairo_list_foreach_entry (bo, intel_bo_t,
|
||||
&device->bo_in_flight,
|
||||
cache_list)
|
||||
{
|
||||
assert (bo->exec != NULL);
|
||||
if (tiling && bo->_tiling &&
|
||||
(bo->_tiling != tiling || bo->_stride != stride))
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (real_size <= bo->base.size) {
|
||||
if (real_size >= bo->base.size/2) {
|
||||
cairo_list_del (&bo->cache_list);
|
||||
bo = intel_bo_reference (bo);
|
||||
goto DONE;
|
||||
}
|
||||
|
||||
if (first == NULL)
|
||||
first = bo;
|
||||
}
|
||||
}
|
||||
|
||||
if (first != NULL) {
|
||||
cairo_list_del (&first->cache_list);
|
||||
bo = intel_bo_reference (first);
|
||||
goto DONE;
|
||||
}
|
||||
}
|
||||
|
||||
bo = NULL;
|
||||
|
||||
CAIRO_MUTEX_LOCK (device->bo_mutex);
|
||||
if (bucket < INTEL_BO_CACHE_BUCKETS) {
|
||||
size = cache_size;
|
||||
|
||||
int loop = MIN (3, INTEL_BO_CACHE_BUCKETS - bucket);
|
||||
/* Our goal is to avoid clflush which occur on CPU->GPU
|
||||
* transitions, so we want to minimise reusing CPU
|
||||
* write buffers. However, by the time a buffer is freed
|
||||
* it is most likely in the GPU domain anyway (readback is rare!).
|
||||
*/
|
||||
retry:
|
||||
if (gpu_target) {
|
||||
do {
|
||||
cairo_list_foreach_entry_reverse (bo,
|
||||
intel_bo_t,
|
||||
&device->bo_cache[bucket].list,
|
||||
cache_list)
|
||||
do {
|
||||
if (gpu_target) {
|
||||
intel_bo_t *next;
|
||||
|
||||
cairo_list_foreach_entry_reverse_safe (bo, next,
|
||||
intel_bo_t,
|
||||
&device->bo_cache[bucket].list,
|
||||
cache_list)
|
||||
{
|
||||
if (real_size > bo->base.size)
|
||||
continue;
|
||||
|
||||
/* For a gpu target, by the time our batch fires, the
|
||||
* GPU will have finished using this buffer. However,
|
||||
* changing tiling may require a fence deallocation and
|
||||
* cause serialisation...
|
||||
*/
|
||||
|
||||
if (device->bo_cache[bucket].num_entries-- >
|
||||
device->bo_cache[bucket].min_entries)
|
||||
if (tiling && bo->_tiling &&
|
||||
(bo->_tiling != tiling || bo->_stride != stride))
|
||||
{
|
||||
device->bo_cache_size -= bo->base.size;
|
||||
continue;
|
||||
}
|
||||
|
||||
device->bo_cache[bucket].num_entries--;
|
||||
device->bo_cache_size -= 4096 * (1 << bucket);
|
||||
cairo_list_del (&bo->cache_list);
|
||||
|
||||
if (! intel_bo_madvise (device, bo, I915_MADV_WILLNEED)) {
|
||||
_cairo_drm_bo_close (&device->base, &bo->base);
|
||||
_cairo_freepool_free (&device->bo_pool, bo);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
goto DONE;
|
||||
} else
|
||||
goto INIT;
|
||||
}
|
||||
}
|
||||
|
||||
/* As it is unlikely to trigger clflush, we can use the
|
||||
* first available buffer into which we fit.
|
||||
*/
|
||||
} while (++bucket < INTEL_BO_CACHE_BUCKETS);
|
||||
} else {
|
||||
if (! cairo_list_is_empty (&device->bo_cache[bucket].list)) {
|
||||
while (! cairo_list_is_empty (&device->bo_cache[bucket].list)) {
|
||||
bo = cairo_list_first_entry (&device->bo_cache[bucket].list,
|
||||
intel_bo_t, cache_list);
|
||||
if (intel_bo_is_inactive (device, bo)) {
|
||||
if (device->bo_cache[bucket].num_entries-- >
|
||||
device->bo_cache[bucket].min_entries)
|
||||
{
|
||||
device->bo_cache_size -= bo->base.size;
|
||||
}
|
||||
device->bo_cache[bucket].num_entries--;
|
||||
device->bo_cache_size -= 4096 * (1 << bucket);
|
||||
cairo_list_del (&bo->cache_list);
|
||||
|
||||
if (! intel_bo_madvise (device, bo, I915_MADV_WILLNEED)) {
|
||||
_cairo_drm_bo_close (&device->base, &bo->base);
|
||||
_cairo_freepool_free (&device->bo_pool, bo);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
goto DONE;
|
||||
}
|
||||
} else
|
||||
goto SIZE;
|
||||
} else
|
||||
break;
|
||||
}
|
||||
}
|
||||
} while (--loop && ++bucket);
|
||||
}
|
||||
|
||||
if (device->bo_cache_size > device->bo_max_cache_size_high) {
|
||||
cairo_bool_t not_empty;
|
||||
|
||||
intel_bo_cache_purge (device);
|
||||
|
||||
/* trim caches by discarding the most recent buffer in each bucket */
|
||||
while (device->bo_cache_size > device->bo_max_cache_size_low) {
|
||||
do {
|
||||
not_empty = FALSE;
|
||||
for (bucket = INTEL_BO_CACHE_BUCKETS; bucket--; ) {
|
||||
if (device->bo_cache[bucket].num_entries >
|
||||
device->bo_cache[bucket].min_entries)
|
||||
|
|
@ -373,30 +499,36 @@ intel_bo_create (intel_device_t *device,
|
|||
intel_bo_t, cache_list);
|
||||
|
||||
intel_bo_cache_remove (device, bo, bucket);
|
||||
not_empty = TRUE;
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (not_empty && device->bo_cache_size > device->bo_max_cache_size_low);
|
||||
}
|
||||
|
||||
/* no cached buffer available, allocate fresh */
|
||||
bo = _cairo_freepool_alloc (&device->bo_pool);
|
||||
if (unlikely (bo == NULL)) {
|
||||
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
|
||||
goto UNLOCK;
|
||||
CAIRO_MUTEX_UNLOCK (device->bo_mutex);
|
||||
return bo;
|
||||
}
|
||||
|
||||
cairo_list_init (&bo->cache_list);
|
||||
|
||||
bo->base.name = 0;
|
||||
bo->base.size = size;
|
||||
|
||||
bo->offset = 0;
|
||||
bo->virtual = NULL;
|
||||
bo->cpu = TRUE;
|
||||
|
||||
bo->tiling = I915_TILING_NONE;
|
||||
bo->stride = 0;
|
||||
bo->swizzle = I915_BIT_6_SWIZZLE_NONE;
|
||||
bucket = ffs (cache_size / 4096) - 1;
|
||||
if (bucket > INTEL_BO_CACHE_BUCKETS)
|
||||
bucket = INTEL_BO_CACHE_BUCKETS;
|
||||
bo->bucket = bucket;
|
||||
bo->_tiling = I915_TILING_NONE;
|
||||
bo->_stride = 0;
|
||||
bo->purgeable = 0;
|
||||
bo->busy = FALSE;
|
||||
|
||||
bo->opaque0 = 0;
|
||||
bo->opaque1 = 0;
|
||||
|
|
@ -406,23 +538,27 @@ intel_bo_create (intel_device_t *device,
|
|||
bo->batch_write_domain = 0;
|
||||
cairo_list_init (&bo->link);
|
||||
|
||||
create.size = size;
|
||||
create.size = cache_size;
|
||||
create.handle = 0;
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_CREATE, &create);
|
||||
if (unlikely (ret != 0)) {
|
||||
_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
|
||||
_cairo_freepool_free (&device->bo_pool, bo);
|
||||
bo = NULL;
|
||||
goto UNLOCK;
|
||||
CAIRO_MUTEX_UNLOCK (device->bo_mutex);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bo->base.handle = create.handle;
|
||||
bo->full_size = bo->base.size = create.size;
|
||||
|
||||
DONE:
|
||||
SIZE:
|
||||
intel_bo_set_real_size (device, bo, real_size);
|
||||
INIT:
|
||||
CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
|
||||
UNLOCK:
|
||||
CAIRO_MUTEX_UNLOCK (device->bo_mutex);
|
||||
|
||||
DONE:
|
||||
bo->tiling = tiling;
|
||||
bo->stride = stride;
|
||||
return bo;
|
||||
}
|
||||
|
||||
|
|
@ -449,9 +585,13 @@ intel_bo_create_for_name (intel_device_t *device, uint32_t name)
|
|||
CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
|
||||
cairo_list_init (&bo->cache_list);
|
||||
|
||||
bo->full_size = bo->base.size;
|
||||
bo->offset = 0;
|
||||
bo->virtual = NULL;
|
||||
bo->purgeable = 0;
|
||||
bo->busy = TRUE;
|
||||
bo->cpu = FALSE;
|
||||
bo->bucket = INTEL_BO_CACHE_BUCKETS;
|
||||
|
||||
bo->opaque0 = 0;
|
||||
bo->opaque1 = 0;
|
||||
|
|
@ -471,8 +611,7 @@ intel_bo_create_for_name (intel_device_t *device, uint32_t name)
|
|||
goto FAIL;
|
||||
}
|
||||
|
||||
bo->tiling = get_tiling.tiling_mode;
|
||||
bo->swizzle = get_tiling.swizzle_mode;
|
||||
bo->_tiling = bo->tiling = get_tiling.tiling_mode;
|
||||
// bo->stride = get_tiling.stride; /* XXX not available from get_tiling */
|
||||
|
||||
return bo;
|
||||
|
|
@ -491,24 +630,26 @@ intel_bo_release (void *_dev, void *_bo)
|
|||
intel_bo_t *bo = _bo;
|
||||
int bucket;
|
||||
|
||||
assert (bo->virtual == NULL);
|
||||
if (bo->virtual != NULL)
|
||||
intel_bo_unmap (bo);
|
||||
|
||||
bucket = INTEL_BO_CACHE_BUCKETS;
|
||||
if (bo->base.size & -bo->base.size)
|
||||
bucket = ffs (bo->base.size / 4096) - 1;
|
||||
assert (bo->exec == NULL);
|
||||
assert (cairo_list_is_empty (&bo->cache_list));
|
||||
|
||||
bucket = bo->bucket;
|
||||
|
||||
CAIRO_MUTEX_LOCK (device->bo_mutex);
|
||||
if (bo->base.name == 0 &&
|
||||
bucket < INTEL_BO_CACHE_BUCKETS &&
|
||||
intel_bo_madvise (device, bo, I915_MADV_DONTNEED))
|
||||
{
|
||||
if (++device->bo_cache[bucket].num_entries >
|
||||
device->bo_cache[bucket].min_entries)
|
||||
{
|
||||
device->bo_cache_size += bo->base.size;
|
||||
}
|
||||
device->bo_cache[bucket].num_entries++;
|
||||
device->bo_cache_size += 4096 * (1 << bucket);
|
||||
|
||||
cairo_list_add_tail (&bo->cache_list, &device->bo_cache[bucket].list);
|
||||
if (bo->busy)
|
||||
cairo_list_add_tail (&bo->cache_list, &device->bo_cache[bucket].list);
|
||||
else
|
||||
cairo_list_add (&bo->cache_list, &device->bo_cache[bucket].list);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
@ -520,36 +661,26 @@ intel_bo_release (void *_dev, void *_bo)
|
|||
|
||||
void
|
||||
intel_bo_set_tiling (const intel_device_t *device,
|
||||
intel_bo_t *bo,
|
||||
uint32_t tiling,
|
||||
uint32_t stride)
|
||||
intel_bo_t *bo)
|
||||
{
|
||||
struct drm_i915_gem_set_tiling set_tiling;
|
||||
int ret;
|
||||
|
||||
if (bo->tiling == tiling &&
|
||||
(tiling == I915_TILING_NONE || bo->stride == stride))
|
||||
{
|
||||
if (bo->tiling == bo->_tiling &&
|
||||
(bo->tiling == I915_TILING_NONE || bo->stride == bo->_stride))
|
||||
return;
|
||||
}
|
||||
|
||||
assert (bo->exec == NULL);
|
||||
|
||||
if (bo->virtual)
|
||||
intel_bo_unmap (bo);
|
||||
|
||||
do {
|
||||
set_tiling.handle = bo->base.handle;
|
||||
set_tiling.tiling_mode = tiling;
|
||||
set_tiling.stride = stride;
|
||||
set_tiling.tiling_mode = bo->tiling;
|
||||
set_tiling.stride = bo->stride;
|
||||
|
||||
ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
|
||||
} while (ret == -1 && errno == EINTR);
|
||||
if (ret == 0) {
|
||||
bo->tiling = set_tiling.tiling_mode;
|
||||
bo->swizzle = set_tiling.swizzle_mode;
|
||||
bo->stride = set_tiling.stride;
|
||||
}
|
||||
|
||||
assert (ret == 0);
|
||||
bo->_tiling = bo->tiling;
|
||||
bo->_stride = bo->stride;
|
||||
}
|
||||
|
||||
cairo_surface_t *
|
||||
|
|
@ -568,26 +699,11 @@ intel_bo_get_image (const intel_device_t *device,
|
|||
if (unlikely (image->base.status))
|
||||
return &image->base;
|
||||
|
||||
if (bo->tiling == I915_TILING_NONE) {
|
||||
if (image->stride == surface->stride) {
|
||||
size = surface->stride * surface->height;
|
||||
intel_bo_read (device, bo, 0, size, image->data);
|
||||
} else {
|
||||
int offset;
|
||||
intel_bo_set_tiling (device, bo);
|
||||
|
||||
size = surface->width;
|
||||
if (surface->format != CAIRO_FORMAT_A8)
|
||||
size *= 4;
|
||||
|
||||
offset = 0;
|
||||
row = surface->height;
|
||||
dst = image->data;
|
||||
while (row--) {
|
||||
intel_bo_read (device, bo, offset, size, dst);
|
||||
offset += surface->stride;
|
||||
dst += image->stride;
|
||||
}
|
||||
}
|
||||
if (bo->tiling == I915_TILING_NONE && image->stride == surface->stride) {
|
||||
size = surface->stride * surface->height;
|
||||
intel_bo_read (device, bo, 0, size, image->data);
|
||||
} else {
|
||||
const uint8_t *src;
|
||||
|
||||
|
|
@ -606,16 +722,14 @@ intel_bo_get_image (const intel_device_t *device,
|
|||
dst += image->stride;
|
||||
src += surface->stride;
|
||||
}
|
||||
|
||||
intel_bo_unmap (bo);
|
||||
}
|
||||
|
||||
return &image->base;
|
||||
}
|
||||
|
||||
static cairo_status_t
|
||||
_intel_bo_put_a1_image (intel_device_t *dev,
|
||||
intel_bo_t *bo, int stride,
|
||||
_intel_bo_put_a1_image (intel_device_t *device,
|
||||
intel_bo_t *bo,
|
||||
cairo_image_surface_t *src,
|
||||
int src_x, int src_y,
|
||||
int width, int height,
|
||||
|
|
@ -628,13 +742,13 @@ _intel_bo_put_a1_image (intel_device_t *dev,
|
|||
|
||||
data = src->data + src_y * src->stride;
|
||||
|
||||
if (bo->tiling == I915_TILING_NONE && width == stride) {
|
||||
if (bo->tiling == I915_TILING_NONE && width == bo->stride) {
|
||||
uint8_t *p;
|
||||
int size;
|
||||
|
||||
size = stride * height;
|
||||
size = bo->stride * height;
|
||||
if (size > (int) sizeof (buf)) {
|
||||
a8 = _cairo_malloc_ab (stride, height);
|
||||
a8 = _cairo_malloc_ab (bo->stride, height);
|
||||
if (a8 == NULL)
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
}
|
||||
|
|
@ -649,11 +763,11 @@ _intel_bo_put_a1_image (intel_device_t *dev,
|
|||
}
|
||||
|
||||
data += src->stride;
|
||||
p += stride;
|
||||
p += bo->stride;
|
||||
}
|
||||
|
||||
intel_bo_write (dev, bo,
|
||||
dst_y * stride + dst_x, /* XXX bo_offset */
|
||||
intel_bo_write (device, bo,
|
||||
dst_y * bo->stride + dst_x, /* XXX bo_offset */
|
||||
size, a8);
|
||||
} else {
|
||||
uint8_t *dst;
|
||||
|
|
@ -664,14 +778,14 @@ _intel_bo_put_a1_image (intel_device_t *dev,
|
|||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
}
|
||||
|
||||
dst = intel_bo_map (dev, bo);
|
||||
dst = intel_bo_map (device, bo);
|
||||
if (dst == NULL) {
|
||||
if (a8 != buf)
|
||||
free (a8);
|
||||
return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
|
||||
}
|
||||
|
||||
dst += dst_y * stride + dst_x; /* XXX bo_offset */
|
||||
dst += dst_y * bo->stride + dst_x; /* XXX bo_offset */
|
||||
while (height--) {
|
||||
for (x = 0; x < width; x++) {
|
||||
int i = src_x + x;
|
||||
|
|
@ -681,10 +795,9 @@ _intel_bo_put_a1_image (intel_device_t *dev,
|
|||
}
|
||||
|
||||
memcpy (dst, a8, width);
|
||||
dst += stride;
|
||||
dst += bo->stride;
|
||||
data += src->stride;
|
||||
}
|
||||
intel_bo_unmap (bo);
|
||||
}
|
||||
|
||||
if (a8 != buf)
|
||||
|
|
@ -694,8 +807,8 @@ _intel_bo_put_a1_image (intel_device_t *dev,
|
|||
}
|
||||
|
||||
cairo_status_t
|
||||
intel_bo_put_image (intel_device_t *dev,
|
||||
intel_bo_t *bo, int stride,
|
||||
intel_bo_put_image (intel_device_t *device,
|
||||
intel_bo_t *bo,
|
||||
cairo_image_surface_t *src,
|
||||
int src_x, int src_y,
|
||||
int width, int height,
|
||||
|
|
@ -705,7 +818,9 @@ intel_bo_put_image (intel_device_t *dev,
|
|||
int size;
|
||||
int offset;
|
||||
|
||||
offset = dst_y * stride;
|
||||
intel_bo_set_tiling (device, bo);
|
||||
|
||||
offset = dst_y * bo->stride;
|
||||
data = src->data + src_y * src->stride;
|
||||
switch (src->format) {
|
||||
case CAIRO_FORMAT_ARGB32:
|
||||
|
|
@ -725,8 +840,7 @@ intel_bo_put_image (intel_device_t *dev,
|
|||
size = width;
|
||||
break;
|
||||
case CAIRO_FORMAT_A1:
|
||||
return _intel_bo_put_a1_image (dev,
|
||||
bo, stride, src,
|
||||
return _intel_bo_put_a1_image (device, bo, src,
|
||||
src_x, src_y,
|
||||
width, height,
|
||||
dst_x, dst_y);
|
||||
|
|
@ -735,28 +849,21 @@ intel_bo_put_image (intel_device_t *dev,
|
|||
return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
|
||||
}
|
||||
|
||||
if (bo->tiling == I915_TILING_NONE) {
|
||||
if (src->stride == stride) {
|
||||
intel_bo_write (dev, bo, offset, stride * height, data);
|
||||
} else while (height--) {
|
||||
intel_bo_write (dev, bo, offset, size, data);
|
||||
offset += stride;
|
||||
data += src->stride;
|
||||
}
|
||||
if (bo->tiling == I915_TILING_NONE && src->stride == bo->stride) {
|
||||
intel_bo_write (device, bo, offset, bo->stride * height, data);
|
||||
} else {
|
||||
uint8_t *dst;
|
||||
|
||||
dst = intel_bo_map (dev, bo);
|
||||
dst = intel_bo_map (device, bo);
|
||||
if (unlikely (dst == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
|
||||
|
||||
dst += offset;
|
||||
while (height--) {
|
||||
memcpy (dst, data, size);
|
||||
dst += stride;
|
||||
dst += bo->stride;
|
||||
data += src->stride;
|
||||
}
|
||||
intel_bo_unmap (bo);
|
||||
}
|
||||
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
|
|
@ -771,6 +878,7 @@ _intel_device_init_bo_cache (intel_device_t *device)
|
|||
device->bo_cache_size = 0;
|
||||
device->bo_max_cache_size_high = device->gtt_max_size / 2;
|
||||
device->bo_max_cache_size_low = device->gtt_max_size / 4;
|
||||
cairo_list_init (&device->bo_in_flight);
|
||||
|
||||
for (i = 0; i < INTEL_BO_CACHE_BUCKETS; i++) {
|
||||
struct _intel_bo_cache *cache = &device->bo_cache[i];
|
||||
|
|
@ -805,7 +913,6 @@ _intel_snapshot_cache_entry_destroy (void *closure)
|
|||
snapshot_cache_entry);
|
||||
|
||||
surface->snapshot_cache_entry.hash = 0;
|
||||
cairo_surface_destroy (&surface->drm.base);
|
||||
}
|
||||
|
||||
cairo_status_t
|
||||
|
|
@ -839,7 +946,6 @@ intel_device_init (intel_device_t *device, int fd)
|
|||
if (unlikely (status))
|
||||
return status;
|
||||
|
||||
device->glyph_cache_mapped = FALSE;
|
||||
for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++) {
|
||||
device->glyph_cache[n].buffer.bo = NULL;
|
||||
cairo_list_init (&device->glyph_cache[n].rtree.pinned);
|
||||
|
|
@ -925,25 +1031,6 @@ intel_throttle (intel_device_t *device)
|
|||
ioctl (device->base.fd, DRM_IOCTL_I915_GEM_THROTTLE);
|
||||
}
|
||||
|
||||
void
|
||||
intel_glyph_cache_unmap (intel_device_t *device)
|
||||
{
|
||||
int n;
|
||||
|
||||
if (likely (! device->glyph_cache_mapped))
|
||||
return;
|
||||
|
||||
for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++) {
|
||||
if (device->glyph_cache[n].buffer.bo != NULL &&
|
||||
device->glyph_cache[n].buffer.bo->virtual != NULL)
|
||||
{
|
||||
intel_bo_unmap (device->glyph_cache[n].buffer.bo);
|
||||
}
|
||||
}
|
||||
|
||||
device->glyph_cache_mapped = FALSE;
|
||||
}
|
||||
|
||||
void
|
||||
intel_glyph_cache_unpin (intel_device_t *device)
|
||||
{
|
||||
|
|
@ -984,6 +1071,8 @@ intel_glyph_cache_add_glyph (intel_device_t *device,
|
|||
if (unlikely (status))
|
||||
return status;
|
||||
|
||||
/* XXX streaming upload? */
|
||||
|
||||
height = glyph_surface->height;
|
||||
src = glyph_surface->data;
|
||||
dst = cache->buffer.bo->virtual;
|
||||
|
|
@ -1002,10 +1091,8 @@ intel_glyph_cache_add_glyph (intel_device_t *device,
|
|||
|
||||
if (width > (int) sizeof (buf)) {
|
||||
a8 = malloc (width);
|
||||
if (unlikely (a8 == NULL)) {
|
||||
intel_bo_unmap (cache->buffer.bo);
|
||||
if (unlikely (a8 == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
}
|
||||
}
|
||||
|
||||
dst += node->x;
|
||||
|
|
@ -1051,9 +1138,6 @@ intel_glyph_cache_add_glyph (intel_device_t *device,
|
|||
return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
|
||||
}
|
||||
|
||||
/* leave mapped! */
|
||||
device->glyph_cache_mapped = TRUE;
|
||||
|
||||
scaled_glyph->surface_private = node;
|
||||
|
||||
glyph= (intel_glyph_t *) node;
|
||||
|
|
@ -1064,14 +1148,17 @@ intel_glyph_cache_add_glyph (intel_device_t *device,
|
|||
sf_x = 1. / cache->buffer.width;
|
||||
sf_y = 1. / cache->buffer.height;
|
||||
glyph->texcoord[0] =
|
||||
texcoord_2d_16 (sf_x * (node->x + glyph_surface->width + NEAREST_BIAS),
|
||||
sf_y * (node->y + glyph_surface->height + NEAREST_BIAS));
|
||||
texcoord_2d_16 (sf_x * (node->x + glyph_surface->width),
|
||||
sf_y * (node->y + glyph_surface->height));
|
||||
glyph->texcoord[1] =
|
||||
texcoord_2d_16 (sf_x * (node->x + NEAREST_BIAS),
|
||||
sf_y * (node->y + glyph_surface->height + NEAREST_BIAS));
|
||||
texcoord_2d_16 (sf_x * node->x,
|
||||
sf_y * (node->y + glyph_surface->height));
|
||||
glyph->texcoord[2] =
|
||||
texcoord_2d_16 (sf_x * (node->x + NEAREST_BIAS),
|
||||
sf_y * (node->y + NEAREST_BIAS));
|
||||
texcoord_2d_16 (sf_x * node->x,
|
||||
sf_y * node->y);
|
||||
|
||||
glyph->width = glyph_surface->width;
|
||||
glyph->height = glyph_surface->height;
|
||||
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
}
|
||||
|
|
@ -1190,9 +1277,6 @@ intel_get_glyph (intel_device_t *device,
|
|||
|
||||
assert (cache->buffer.bo->exec != NULL);
|
||||
|
||||
if (cache->buffer.bo->virtual != NULL)
|
||||
intel_bo_unmap (cache->buffer.bo);
|
||||
|
||||
_cairo_rtree_reset (&cache->rtree);
|
||||
intel_bo_destroy (device, cache->buffer.bo);
|
||||
cache->buffer.bo = NULL;
|
||||
|
|
@ -1225,6 +1309,7 @@ intel_buffer_cache_init (intel_buffer_cache_t *cache,
|
|||
int width, int height)
|
||||
{
|
||||
const uint32_t tiling = I915_TILING_Y;
|
||||
uint32_t stride, size;
|
||||
|
||||
assert ((width & 3) == 0);
|
||||
assert ((height & 1) == 0);
|
||||
|
|
@ -1233,6 +1318,7 @@ intel_buffer_cache_init (intel_buffer_cache_t *cache,
|
|||
cache->buffer.height = height;
|
||||
|
||||
switch (format) {
|
||||
default:
|
||||
case CAIRO_FORMAT_A1:
|
||||
case CAIRO_FORMAT_RGB16_565:
|
||||
case CAIRO_FORMAT_RGB24:
|
||||
|
|
@ -1241,25 +1327,28 @@ intel_buffer_cache_init (intel_buffer_cache_t *cache,
|
|||
return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
|
||||
case CAIRO_FORMAT_ARGB32:
|
||||
cache->buffer.map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
|
||||
cache->buffer.stride = width * 4;
|
||||
stride = width * 4;
|
||||
break;
|
||||
case CAIRO_FORMAT_A8:
|
||||
cache->buffer.map0 = MAPSURF_8BIT | MT_8BIT_I8;
|
||||
cache->buffer.stride = width;
|
||||
stride = width;
|
||||
break;
|
||||
}
|
||||
cache->buffer.map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
|
||||
((width - 1) << MS3_WIDTH_SHIFT);
|
||||
cache->buffer.map1 = ((cache->buffer.stride / 4) - 1) << MS4_PITCH_SHIFT;
|
||||
|
||||
size = height * stride;
|
||||
cache->buffer.bo = intel_bo_create (device,
|
||||
height * cache->buffer.stride, FALSE);
|
||||
size, size,
|
||||
FALSE, tiling, stride);
|
||||
if (unlikely (cache->buffer.bo == NULL))
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
|
||||
intel_bo_set_tiling (device, cache->buffer.bo, tiling, cache->buffer.stride);
|
||||
cache->buffer.stride = stride;
|
||||
|
||||
cache->buffer.map0 |= MS3_tiling (cache->buffer.bo->tiling);
|
||||
cache->buffer.offset = 0;
|
||||
cache->buffer.map0 |= MS3_tiling (tiling);
|
||||
cache->buffer.map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
|
||||
((width - 1) << MS3_WIDTH_SHIFT);
|
||||
cache->buffer.map1 = ((stride / 4) - 1) << MS4_PITCH_SHIFT;
|
||||
|
||||
cache->ref_count = 0;
|
||||
cairo_list_init (&cache->link);
|
||||
|
|
@ -1272,16 +1361,8 @@ intel_snapshot_cache_insert (intel_device_t *device,
|
|||
intel_surface_t *surface)
|
||||
{
|
||||
cairo_status_t status;
|
||||
int bpp;
|
||||
|
||||
bpp = 1;
|
||||
if (surface->drm.format != CAIRO_FORMAT_A8)
|
||||
bpp = 4;
|
||||
|
||||
surface->snapshot_cache_entry.hash = (unsigned long) surface;
|
||||
surface->snapshot_cache_entry.size =
|
||||
surface->drm.width * surface->drm.height * bpp;
|
||||
|
||||
surface->snapshot_cache_entry.size = surface->drm.bo->size;
|
||||
if (surface->snapshot_cache_entry.size >
|
||||
device->snapshot_cache_max_size)
|
||||
{
|
||||
|
|
@ -1291,6 +1372,7 @@ intel_snapshot_cache_insert (intel_device_t *device,
|
|||
if (device->snapshot_cache.freeze_count == 0)
|
||||
_cairo_cache_freeze (&device->snapshot_cache);
|
||||
|
||||
surface->snapshot_cache_entry.hash = (unsigned long) surface;
|
||||
status = _cairo_cache_insert (&device->snapshot_cache,
|
||||
&surface->snapshot_cache_entry);
|
||||
if (unlikely (status)) {
|
||||
|
|
@ -1298,8 +1380,6 @@ intel_snapshot_cache_insert (intel_device_t *device,
|
|||
return status;
|
||||
}
|
||||
|
||||
cairo_surface_reference (&surface->drm.base);
|
||||
|
||||
return CAIRO_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
|
|
@ -1314,7 +1394,7 @@ intel_surface_detach_snapshot (cairo_surface_t *abstract_surface)
|
|||
device = (intel_device_t *) surface->drm.base.device;
|
||||
_cairo_cache_remove (&device->snapshot_cache,
|
||||
&surface->snapshot_cache_entry);
|
||||
surface->snapshot_cache_entry.hash = 0;
|
||||
assert (surface->snapshot_cache_entry.hash == 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1470,12 +1550,13 @@ intel_gradient_render (intel_device_t *device,
|
|||
|
||||
pixman_image_unref (gradient);
|
||||
|
||||
buffer->bo = intel_bo_create (device, 4*width, FALSE);
|
||||
buffer->bo = intel_bo_create (device,
|
||||
4*width, 4*width,
|
||||
FALSE, I915_TILING_NONE, 4*width);
|
||||
if (unlikely (buffer->bo == NULL)) {
|
||||
pixman_image_unref (image);
|
||||
return _cairo_error (CAIRO_STATUS_NO_MEMORY);
|
||||
}
|
||||
intel_bo_set_tiling (device, buffer->bo, I915_TILING_NONE, 0);
|
||||
|
||||
intel_bo_write (device, buffer->bo, 0, 4*width, pixman_image_get_data (image));
|
||||
pixman_image_unref (image);
|
||||
|
|
@ -1486,8 +1567,7 @@ intel_gradient_render (intel_device_t *device,
|
|||
buffer->stride = 4*width;
|
||||
buffer->format = CAIRO_FORMAT_ARGB32;
|
||||
buffer->map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
|
||||
buffer->map0 |= MS3_tiling (buffer->bo->tiling);
|
||||
buffer->map0 |= ((width - 1) << MS3_WIDTH_SHIFT);
|
||||
buffer->map0 |= ((width - 1) << MS3_WIDTH_SHIFT);
|
||||
buffer->map1 = (width - 1) << MS4_PITCH_SHIFT;
|
||||
|
||||
if (device->gradient_cache.size < GRADIENT_CACHE_SIZE) {
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ typedef void
|
|||
|
||||
typedef cairo_surface_t *
|
||||
(*cairo_drm_surface_create_func_t) (cairo_drm_device_t *device,
|
||||
cairo_content_t content,
|
||||
cairo_format_t format,
|
||||
int width, int height);
|
||||
|
||||
typedef cairo_surface_t *
|
||||
|
|
@@ -172,16 +172,12 @@ _cairo_drm_bo_close (const cairo_drm_device_t *dev,

cairo_private void
_cairo_drm_surface_init (cairo_drm_surface_t *surface,
                         cairo_drm_device_t *device);
                         cairo_format_t format,
                         int width, int height);

cairo_private cairo_status_t
_cairo_drm_surface_finish (cairo_drm_surface_t *surface);

cairo_private cairo_surface_t *
_cairo_drm_surface_create_similar (void *abstract_src,
                                   cairo_content_t content,
                                   int width,
                                   int height);
cairo_private void
_cairo_drm_surface_get_font_options (void *abstract_surface,
                                     cairo_font_options_t *options);
@@ -283,34 +283,21 @@ static const cairo_surface_backend_t radeon_surface_backend = {

static void
radeon_surface_init (radeon_surface_t *surface,
                     cairo_content_t content,
                     cairo_drm_device_t *device)
                     cairo_drm_device_t *device,
                     cairo_format_t format,
                     int width, int height)
{
    _cairo_surface_init (&surface->base.base,
                         &radeon_surface_backend,
                         &device->base,
                         content);
    _cairo_drm_surface_init (&surface->base, device);

    switch (content) {
    case CAIRO_CONTENT_ALPHA:
        surface->base.format = CAIRO_FORMAT_A8;
        break;
    case CAIRO_CONTENT_COLOR:
        surface->base.format = CAIRO_FORMAT_RGB24;
        break;
    default:
        ASSERT_NOT_REACHED;
    case CAIRO_CONTENT_COLOR_ALPHA:
        surface->base.format = CAIRO_FORMAT_ARGB32;
        break;
    }
                         _cairo_content_from_format (format));
    _cairo_drm_surface_init (&surface->base, format, width, height);
}

static cairo_surface_t *
radeon_surface_create_internal (cairo_drm_device_t *device,
                                cairo_content_t content,
                                int width, int height)
                                cairo_format_t format,
                                int width, int height)
{
    radeon_surface_t *surface;
    cairo_status_t status;
@@ -319,12 +306,9 @@ radeon_surface_create_internal (cairo_drm_device_t *device,
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    radeon_surface_init (surface, content, device);
    radeon_surface_init (surface, device, format, width, height);

    if (width && height) {
        surface->base.width = width;
        surface->base.height = height;

        surface->base.stride =
            cairo_format_stride_for_width (surface->base.format, width);

@@ -344,10 +328,22 @@ radeon_surface_create_internal (cairo_drm_device_t *device,

static cairo_surface_t *
radeon_surface_create (cairo_drm_device_t *device,
                       cairo_content_t content,
                       int width, int height)
                       cairo_format_t format,
                       int width, int height)
{
    return radeon_surface_create_internal (device, content, width, height);
    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
    case CAIRO_FORMAT_RGB16_565:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
        break;
    }

    return radeon_surface_create_internal (device, format, width, height);
}

static cairo_surface_t *
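From the caller's side, the switch above means an unsupported format is no longer mapped onto the nearest content; it comes back as a surface in the CAIRO_STATUS_INVALID_FORMAT error state. A hedged usage sketch of that error path, assuming a valid DRM device is already at hand:

#include <cairo.h>
#include <cairo-drm.h>
#include <stdio.h>

/* 'device' is assumed to be a valid cairo_device_t from the DRM backend. */
static void
try_format (cairo_device_t *device, cairo_format_t format)
{
    cairo_surface_t *surface;

    surface = cairo_drm_surface_create (device, format, 256, 256);
    if (cairo_surface_status (surface) == CAIRO_STATUS_INVALID_FORMAT)
        fprintf (stderr, "format %d not supported by this backend\n", format);

    /* the error surface is still a valid object and must be destroyed */
    cairo_surface_destroy (surface);
}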
@@ -358,7 +354,6 @@ radeon_surface_create_for_name (cairo_drm_device_t *device,
{
    radeon_surface_t *surface;
    cairo_status_t status;
    cairo_content_t content;

    switch (format) {
    default:
@@ -367,13 +362,8 @@ radeon_surface_create_for_name (cairo_drm_device_t *device,
    case CAIRO_FORMAT_RGB16_565:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    case CAIRO_FORMAT_ARGB32:
        content = CAIRO_CONTENT_COLOR_ALPHA;
        break;
    case CAIRO_FORMAT_RGB24:
        content = CAIRO_CONTENT_COLOR;
        break;
    case CAIRO_FORMAT_A8:
        content = CAIRO_CONTENT_ALPHA;
        break;
    }

@@ -384,11 +374,9 @@ radeon_surface_create_for_name (cairo_drm_device_t *device,
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    radeon_surface_init (surface, content, device);
    radeon_surface_init (surface, device, format, width, height);

    if (width && height) {
        surface->base.width = width;
        surface->base.height = height;
        surface->base.stride = stride;

        surface->base.bo = radeon_bo_create_for_name (to_radeon_device (&device->base),
@@ -36,28 +36,15 @@

#include "cairo-error-private.h"

cairo_surface_t *
_cairo_drm_surface_create_similar (void *abstract_surface,
                                   cairo_content_t content,
                                   int width,
                                   int height)
{
    cairo_drm_surface_t *surface = abstract_surface;
    cairo_drm_device_t *device = (cairo_drm_device_t *) surface->base.device;

    if (width > device->max_surface_size || height > device->max_surface_size)
        return NULL;

    return device->surface.create (device, content, width, height);
}

void
_cairo_drm_surface_init (cairo_drm_surface_t *surface,
                         cairo_drm_device_t *device)
                         cairo_format_t format,
                         int width, int height)
{
    surface->bo = NULL;
    surface->width = 0;
    surface->height = 0;
    surface->format = format;
    surface->width = width;
    surface->height = height;
    surface->stride = 0;

    surface->fallback = NULL;
@@ -100,15 +87,12 @@ _cairo_drm_surface_get_extents (void *abstract_surface,

cairo_surface_t *
cairo_drm_surface_create (cairo_device_t *abstract_device,
                          cairo_content_t content,
                          cairo_format_t format,
                          int width, int height)
{
    cairo_drm_device_t *device = (cairo_drm_device_t *) abstract_device;
    cairo_surface_t *surface;

    if (! CAIRO_CONTENT_VALID (content))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_CONTENT));

    if (device != NULL && device->base.status)
    {
        surface = _cairo_surface_create_in_error (device->base.status);

@@ -118,8 +102,7 @@ cairo_drm_surface_create (cairo_device_t *abstract_device,
        width == 0 || width > device->max_surface_size ||
        height == 0 || height > device->max_surface_size)
    {
        surface = cairo_image_surface_create (_cairo_format_from_content (content),
                                              width, height);
        surface = cairo_image_surface_create (format, width, height);
    }
    else if (device->base.finished)
    {
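cairo_drm_surface_create () now takes a cairo_format_t directly and falls back to a plain image surface when the device or the requested size is unusable. A hedged usage sketch against the public API; cairo_drm_device_default () is assumed to be the device entry point, and the CAIRO_SURFACE_TYPE_DRM check is assumed to be how a caller can tell which path was taken:

#include <cairo.h>
#include <cairo-drm.h>
#include <stdio.h>

int
main (void)
{
    cairo_device_t *device;
    cairo_surface_t *surface;
    cairo_t *cr;

    device = cairo_drm_device_default ();    /* NULL if no supported GPU */

    /* falls back to a plain image surface when the device or size is unusable */
    surface = cairo_drm_surface_create (device, CAIRO_FORMAT_ARGB32, 640, 480);

    cr = cairo_create (surface);
    cairo_set_source_rgb (cr, 1, 1, 1);
    cairo_paint (cr);
    cairo_destroy (cr);

    if (cairo_surface_get_type (surface) == CAIRO_SURFACE_TYPE_DRM)
        printf ("rendered through the DRM backend\n");
    else
        printf ("fell back to an image surface\n");

    cairo_surface_destroy (surface);
    return 0;
}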
@@ -127,7 +110,9 @@ cairo_drm_surface_create (cairo_device_t *abstract_device,
    }
    else
    {
        surface = device->surface.create (device, content, width, height);
        surface = device->surface.create (device, format, width, height);
        if (surface->status == CAIRO_STATUS_INVALID_SIZE)
            surface = cairo_image_surface_create (format, width, height);
    }

    return surface;
@@ -334,7 +319,7 @@ cairo_drm_surface_get_stride (cairo_surface_t *abstract_surface)

/* XXX drm or general surface layer? naming? */
cairo_surface_t *
cairo_drm_surface_map (cairo_surface_t *abstract_surface)
cairo_drm_surface_map_to_image (cairo_surface_t *abstract_surface)
{
    cairo_drm_surface_t *surface;
    cairo_drm_device_t *device;
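The rename from cairo_drm_surface_map () to cairo_drm_surface_map_to_image () makes it explicit that the result is an image surface aliasing the GPU buffer. A hedged sketch of CPU access through the map/unmap pair; the unmap counterpart keeps its existing name, and the pixel tweak is purely illustrative:

#include <cairo.h>
#include <cairo-drm.h>
#include <string.h>

/* 'drm_surface' is assumed to come from cairo_drm_surface_create();
 * mapping it gives CPU-side access to the pixels. */
static void
clear_first_row (cairo_surface_t *drm_surface)
{
    cairo_surface_t *image;
    unsigned char *data;

    image = cairo_drm_surface_map_to_image (drm_surface);
    if (cairo_surface_status (image) != CAIRO_STATUS_SUCCESS)
        return;

    cairo_surface_flush (image);
    data = cairo_image_surface_get_data (image);
    memset (data, 0, cairo_image_surface_get_stride (image));
    cairo_surface_mark_dirty (image);

    /* hand the pixels back to the GPU surface */
    cairo_drm_surface_unmap (drm_surface, image);
}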
2377	src/drm/cairo-drm-xr.c (new file)
File diff suppressed because it is too large