spans: Pass multiple rows of identical spans to the renderers.

It is quite common amongst our geometry to have rows of repeated span
data, for example a rounded rectangle will have repeating data between
the top and bottom rounded corners. By passing the repeat length to the
renderers, they may be able to use that information more efficiently,
and the scan converters can avoid recomputing the same span data.
This commit is contained in:
Chris Wilson 2010-01-19 17:11:11 +00:00
parent a04873c077
commit 9cd9137843
7 changed files with 349 additions and 261 deletions

View file

@ -1981,6 +1981,8 @@ typedef struct _cairo_gl_surface_span_renderer {
cairo_gl_composite_setup_t setup;
int xmin, xmax;
cairo_operator_t op;
cairo_antialias_t antialias;
@ -1988,7 +1990,6 @@ typedef struct _cairo_gl_surface_span_renderer {
cairo_gl_context_t *ctx;
cairo_region_t *clip;
cairo_composite_rectangles_t composite_rectangles;
GLuint vbo;
void *vbo_base;
unsigned int vbo_size;
@ -2019,11 +2020,11 @@ _cairo_gl_span_renderer_flush (cairo_gl_surface_span_renderer_t *renderer)
cairo_region_get_rectangle (renderer->clip, i, &rect);
glScissor (rect.x, rect.y, rect.width, rect.height);
glDrawArrays (GL_LINES, 0, count);
glDrawArrays (GL_QUADS, 0, count);
}
glDisable (GL_SCISSOR_TEST);
} else {
glDrawArrays (GL_LINES, 0, count);
glDrawArrays (GL_QUADS, 0, count);
}
}
@ -2104,72 +2105,87 @@ _cairo_gl_emit_span_vertex (cairo_gl_surface_span_renderer_t *renderer,
static void
_cairo_gl_emit_span (cairo_gl_surface_span_renderer_t *renderer,
int x1, int x2, int y, uint8_t alpha)
int x, int y1, int y2,
uint8_t alpha)
{
float *vertices = _cairo_gl_span_renderer_get_vbo (renderer, 2);
_cairo_gl_emit_span_vertex (renderer, x1, y, alpha, vertices);
_cairo_gl_emit_span_vertex (renderer, x2, y, alpha,
_cairo_gl_emit_span_vertex (renderer, x, y1, alpha, vertices);
_cairo_gl_emit_span_vertex (renderer, x, y2, alpha,
vertices + renderer->vertex_size / 4);
}
/* Emits the contents of the span renderer rows as GL_LINES with the span's
* alpha.
*
* Unlike the image surface, which is compositing into a temporary, we emit
* coverage even for alpha == 0, in case we're using an unbounded operator.
* But it means we avoid having to do the fixup.
*/
/* Emit a filled rectangle (x1,y1)-(x2,y2) with uniform coverage by
 * issuing two span vertices per edge; the vertex pairs are consumed
 * later as GL_QUADS by the flush path.
 * NOTE(review): the second call swaps y2/y1 — presumably deliberate so
 * the four vertices wind correctly for a quad; confirm against
 * _cairo_gl_emit_span's vertex ordering. */
static void
_cairo_gl_emit_rectangle (cairo_gl_surface_span_renderer_t *renderer,
int x1, int y1,
int x2, int y2,
int coverage)
{
_cairo_gl_emit_span (renderer, x1, y1, y2, coverage);
_cairo_gl_emit_span (renderer, x2, y2, y1, coverage);
}
static cairo_status_t
_cairo_gl_surface_span_renderer_render_row (
void *abstract_renderer,
int y,
const cairo_half_open_span_t *spans,
unsigned num_spans)
_cairo_gl_render_bounded_spans (void *abstract_renderer,
int y, int height,
const cairo_half_open_span_t *spans,
unsigned num_spans)
{
cairo_gl_surface_span_renderer_t *renderer = abstract_renderer;
int xmin = renderer->composite_rectangles.mask.x;
int xmax = xmin + renderer->composite_rectangles.width;
int prev_x = xmin;
int prev_alpha = 0;
unsigned i;
int x_translate;
/* Make sure we're within y-range. */
if (y < renderer->composite_rectangles.mask.y ||
y >= renderer->composite_rectangles.mask.y +
renderer->composite_rectangles.height)
if (num_spans == 0)
return CAIRO_STATUS_SUCCESS;
x_translate = renderer->composite_rectangles.dst.x -
renderer->composite_rectangles.mask.x;
y += renderer->composite_rectangles.dst.y -
renderer->composite_rectangles.mask.y;
do {
if (spans[0].coverage) {
_cairo_gl_emit_rectangle (renderer,
spans[0].x, y,
spans[1].x, y + height,
spans[0].coverage);
}
/* Find the first span within x-range. */
for (i=0; i < num_spans && spans[i].x < xmin; i++) {}
if (i>0)
prev_alpha = spans[i-1].coverage;
spans++;
} while (--num_spans > 1);
/* Set the intermediate spans. */
for (; i < num_spans; i++) {
int x = spans[i].x;
return CAIRO_STATUS_SUCCESS;
}
if (x >= xmax)
break;
static cairo_status_t
_cairo_gl_render_unbounded_spans (void *abstract_renderer,
int y, int height,
const cairo_half_open_span_t *spans,
unsigned num_spans)
{
cairo_gl_surface_span_renderer_t *renderer = abstract_renderer;
_cairo_gl_emit_span (renderer,
prev_x + x_translate, x + x_translate, y,
prev_alpha);
prev_x = x;
prev_alpha = spans[i].coverage;
if (num_spans == 0) {
_cairo_gl_emit_rectangle (renderer,
renderer->xmin, y,
renderer->xmax, y + height,
0);
return CAIRO_STATUS_SUCCESS;
}
if (prev_x < xmax) {
_cairo_gl_emit_span (renderer,
prev_x + x_translate, xmax + x_translate, y,
prev_alpha);
if (spans[0].x != renderer->xmin) {
_cairo_gl_emit_rectangle (renderer,
renderer->xmin, y,
spans[0].x, y + height,
0);
}
do {
_cairo_gl_emit_rectangle (renderer,
spans[0].x, y,
spans[1].x, y + height,
spans[0].coverage);
spans++;
} while (--num_spans > 1);
if (spans[0].x != renderer->xmax) {
_cairo_gl_emit_rectangle (renderer,
spans[0].x, y,
renderer->xmax, y + height,
0);
}
return CAIRO_STATUS_SUCCESS;
@ -2244,8 +2260,6 @@ _cairo_gl_surface_create_span_renderer (cairo_operator_t op,
cairo_gl_surface_t *dst = abstract_dst;
cairo_gl_surface_span_renderer_t *renderer;
cairo_status_t status;
int width = rects->width;
int height = rects->height;
cairo_surface_attributes_t *src_attributes;
GLenum err;
@ -2255,31 +2269,30 @@ _cairo_gl_surface_create_span_renderer (cairo_operator_t op,
renderer->base.destroy = _cairo_gl_surface_span_renderer_destroy;
renderer->base.finish = _cairo_gl_surface_span_renderer_finish;
renderer->base.render_row =
_cairo_gl_surface_span_renderer_render_row;
if (_cairo_operator_bounded_by_mask (op))
renderer->base.render_rows = _cairo_gl_render_bounded_spans;
else
renderer->base.render_rows = _cairo_gl_render_unbounded_spans;
renderer->xmin = rects->mask.x;
renderer->xmax = rects->mask.x + rects->width;
renderer->op = op;
renderer->antialias = antialias;
renderer->dst = dst;
renderer->clip = clip_region;
renderer->composite_rectangles = *rects;
status = _cairo_gl_operand_init (&renderer->setup.src, src, dst,
rects->src.x, rects->src.y,
rects->dst.x, rects->dst.y,
width, height);
rects->width, rects->height);
if (unlikely (status)) {
cairo_status_t status_ignored;
status_ignored = _cairo_gl_context_acquire (dst->base.device, &renderer->ctx);
_cairo_gl_surface_span_renderer_destroy (renderer);
free (renderer);
return _cairo_span_renderer_create_in_error (status);
}
status = _cairo_gl_context_acquire (dst->base.device, &renderer->ctx);
if (unlikely (status)) {
_cairo_gl_surface_span_renderer_destroy (renderer);
_cairo_gl_operand_destroy (&renderer->setup.src);
free (renderer);
return _cairo_span_renderer_create_in_error (status);
}
_cairo_gl_set_destination (dst);

View file

@ -1393,11 +1393,13 @@ typedef struct _cairo_image_surface_span_renderer {
const cairo_pattern_t *pattern;
cairo_antialias_t antialias;
uint8_t *mask_data;
uint32_t mask_stride;
cairo_image_surface_t *src;
cairo_surface_attributes_t src_attributes;
cairo_image_surface_t *mask;
cairo_image_surface_t *dst;
cairo_composite_rectangles_t composite_rectangles;
} cairo_image_surface_span_renderer_t;
@ -1406,66 +1408,46 @@ _cairo_image_surface_span_render_row (
int y,
const cairo_half_open_span_t *spans,
unsigned num_spans,
cairo_image_surface_t *mask,
const cairo_composite_rectangles_t *rects)
uint8_t *data,
uint32_t stride)
{
int xmin = rects->mask.x;
int xmax = xmin + rects->width;
uint8_t *row;
int prev_x = xmin;
int prev_alpha = 0;
unsigned i;
/* Make sure we're within y-range. */
y -= rects->mask.y;
if (y < 0 || y >= rects->height)
if (num_spans == 0)
return;
row = (uint8_t*)(mask->data) + y*(size_t)mask->stride - xmin;
row = data + y * stride;
for (i = 0; i < num_spans - 1; i++) {
if (! spans[i].coverage)
continue;
/* Find the first span within x-range. */
for (i=0; i < num_spans && spans[i].x < xmin; i++) {}
if (i>0)
prev_alpha = spans[i-1].coverage;
/* Set the intermediate spans. */
for (; i < num_spans; i++) {
int x = spans[i].x;
if (x >= xmax)
break;
if (prev_alpha != 0) {
/* We implement setting rendering the most common single
* pixel wide span case to avoid the overhead of a memset
* call. Open coding setting longer spans didn't show a
* noticeable improvement over memset. */
if (x == prev_x + 1) {
row[prev_x] = prev_alpha;
}
else {
memset(row + prev_x, prev_alpha, x - prev_x);
}
/* We implement setting the most common single pixel wide
* span case to avoid the overhead of a memset call.
* Open coding setting longer spans didn't show a
* noticeable improvement over memset.
*/
if (spans[i+1].x == spans[i].x + 1) {
row[spans[i].x] = spans[i].coverage;
} else {
memset (row + spans[i].x,
spans[i].coverage,
spans[i+1].x - spans[i].x);
}
prev_x = x;
prev_alpha = spans[i].coverage;
}
if (prev_alpha != 0 && prev_x < xmax) {
memset(row + prev_x, prev_alpha, xmax - prev_x);
}
}
static cairo_status_t
_cairo_image_surface_span_renderer_render_row (
_cairo_image_surface_span_renderer_render_rows (
void *abstract_renderer,
int y,
int height,
const cairo_half_open_span_t *spans,
unsigned num_spans)
{
cairo_image_surface_span_renderer_t *renderer = abstract_renderer;
_cairo_image_surface_span_render_row (y, spans, num_spans, renderer->mask, &renderer->composite_rectangles);
while (height--)
_cairo_image_surface_span_render_row (y++, spans, num_spans, renderer->mask_data, renderer->mask_stride);
return CAIRO_STATUS_SUCCESS;
}
@ -1520,11 +1502,11 @@ _cairo_image_surface_span_renderer_finish (void *abstract_renderer)
&dst->base,
src_attributes,
src->width, src->height,
rects->width, rects->height,
width, height,
rects->src.x, rects->src.y,
0, 0, /* mask.x, mask.y */
rects->dst.x, rects->dst.y,
rects->width, rects->height,
width, height,
dst->clip_region);
}
}
@ -1570,7 +1552,7 @@ _cairo_image_surface_create_span_renderer (cairo_operator_t op,
renderer->base.destroy = _cairo_image_surface_span_renderer_destroy;
renderer->base.finish = _cairo_image_surface_span_renderer_finish;
renderer->base.render_row = _cairo_image_surface_span_renderer_render_row;
renderer->base.render_rows = _cairo_image_surface_span_renderer_render_rows;
renderer->op = op;
renderer->pattern = pattern;
renderer->antialias = antialias;
@ -1607,6 +1589,9 @@ _cairo_image_surface_create_span_renderer (cairo_operator_t op,
_cairo_image_surface_span_renderer_destroy (renderer);
return _cairo_span_renderer_create_in_error (status);
}
renderer->mask_data = renderer->mask->data - rects->mask.x - rects->mask.y * renderer->mask->stride;
renderer->mask_stride = renderer->mask->stride;
return &renderer->base;
}

View file

@ -47,26 +47,24 @@ typedef struct _cairo_half_open_span {
* surfaces if they want to composite spans instead of trapezoids. */
typedef struct _cairo_span_renderer cairo_span_renderer_t;
struct _cairo_span_renderer {
/* Private status variable. */
cairo_status_t status;
/* Called to destroy the renderer. */
cairo_destroy_func_t destroy;
/* Render the spans on row y of the source by whatever compositing
* method is required. The function should ignore spans outside
* the bounding box set by the init() function. */
cairo_status_t (*render_row)(
void *abstract_renderer,
int y,
const cairo_half_open_span_t *coverages,
unsigned num_coverages);
/* Render the spans on row y of the destination by whatever compositing
* method is required. */
cairo_warn cairo_status_t
(*render_rows) (void *abstract_renderer,
int y, int height,
const cairo_half_open_span_t *coverages,
unsigned num_coverages);
/* Called after all rows have been rendered to perform whatever
* final rendering step is required. This function is called just
* once before the renderer is destroyed. */
cairo_status_t (*finish)(
void *abstract_renderer);
/* Private status variable. */
cairo_status_t status;
cairo_status_t (*finish) (void *abstract_renderer);
};
/* Scan converter interface. */

View file

@ -277,13 +277,15 @@ _cairo_scan_converter_create_in_error (cairo_status_t status)
}
static cairo_status_t
_cairo_nil_span_renderer_render_row (
_cairo_nil_span_renderer_render_rows (
void *abstract_renderer,
int y,
int height,
const cairo_half_open_span_t *coverages,
unsigned num_coverages)
{
(void) y;
(void) height;
(void) coverages;
(void) num_coverages;
return _cairo_span_renderer_status (abstract_renderer);
@ -312,7 +314,7 @@ _cairo_span_renderer_set_error (
ASSERT_NOT_REACHED;
}
if (renderer->status == CAIRO_STATUS_SUCCESS) {
renderer->render_row = _cairo_nil_span_renderer_render_row;
renderer->render_rows = _cairo_nil_span_renderer_render_rows;
renderer->finish = _cairo_nil_span_renderer_finish;
renderer->status = error;
}

View file

@ -129,27 +129,29 @@ blit_with_span_renderer(
cairo_span_renderer_t *span_renderer,
struct pool *span_pool,
int y,
int height,
int xmin,
int xmax);
static glitter_status_t
blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y);
blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y, int height);
#define GLITTER_BLIT_COVERAGES_ARGS \
cairo_span_renderer_t *span_renderer, \
struct pool *span_pool
#define GLITTER_BLIT_COVERAGES(cells, y, xmin, xmax) do { \
#define GLITTER_BLIT_COVERAGES(cells, y, height,xmin, xmax) do { \
cairo_status_t status = blit_with_span_renderer (cells, \
span_renderer, \
span_pool, \
y, xmin, xmax); \
y, height, \
xmin, xmax); \
if (unlikely (status)) \
return status; \
} while (0)
#define GLITTER_BLIT_COVERAGES_EMPTY(y, xmin, xmax) do { \
cairo_status_t status = blit_empty_with_span_renderer (span_renderer, y); \
#define GLITTER_BLIT_COVERAGES_EMPTY(y, height, xmin, xmax) do { \
cairo_status_t status = blit_empty_with_span_renderer (span_renderer, y, height); \
if (unlikely (status)) \
return status; \
} while (0)
@ -310,8 +312,8 @@ typedef int grid_area_t;
#define UNROLL3(x) x x x
struct quorem {
int quo;
int rem;
int32_t quo;
int32_t rem;
};
/* Header for a chunk of memory in a memory pool. */
@ -383,6 +385,7 @@ struct edge {
/* Original sign of the edge: +1 for downwards, -1 for upwards
* edges. */
int dir;
int vertical;
};
/* Number of subsample rows per y-bucket. Must be GRID_Y. */
@ -703,7 +706,6 @@ static void
cell_list_fini(struct cell_list *cells)
{
pool_fini (cells->cell_pool.base);
cell_list_init (cells);
}
/* Empty the cell list. This is called at the start of every pixel
@ -716,6 +718,26 @@ cell_list_reset (struct cell_list *cells)
pool_reset (cells->cell_pool.base);
}
/* Allocate a fresh, zero-coverage cell for x-coordinate @x from the
 * list's memory pool and splice it in at *cursor, in front of @tail.
 * Returns NULL if the pool allocation fails.  Factored out of
 * cell_list_find() so the common found-in-place path stays small. */
static struct cell *
cell_list_alloc (struct cell_list *cells,
struct cell **cursor,
struct cell *tail,
int x)
{
struct cell *cell;

/* Cells come from a pool, so there is no individual free. */
cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
if (unlikely (NULL == cell))
return NULL;

*cursor = cell;
cell->next = tail;
cell->x = x;
cell->uncovered_area = 0;
cell->covered_height = 0;
return cell;
}
/* Find a cell at the given x-coordinate. Returns %NULL if a new cell
* needed to be allocated but couldn't be. Cells must be found with
* non-decreasing x-coordinate until the cell list is rewound using
@ -738,22 +760,10 @@ cell_list_find (struct cell_list *cells, int x)
}
cells->cursor = cursor;
if (tail->x == x) {
if (tail->x == x)
return tail;
} else {
struct cell *cell;
cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
if (unlikely (NULL == cell))
return NULL;
*cursor = cell;
cell->next = tail;
cell->x = x;
cell->uncovered_area = 0;
cell->covered_height = 0;
return cell;
}
return cell_list_alloc (cells, cursor, tail, x);
}
/* Find two cells at x1 and x2. This is exactly equivalent
@ -833,9 +843,8 @@ cell_list_find_pair(struct cell_list *cells, int x1, int x2)
/* Add an unbounded subpixel span covering subpixels >= x to the
* coverage cells. */
static glitter_status_t
cell_list_add_unbounded_subspan(
struct cell_list *cells,
grid_scaled_x_t x)
cell_list_add_unbounded_subspan (struct cell_list *cells,
grid_scaled_x_t x)
{
struct cell *cell;
int ix, fx;
@ -908,20 +917,24 @@ cell_list_render_edge(
struct edge *edge,
int sign)
{
struct quorem x1 = edge->x;
struct quorem x2 = x1;
grid_scaled_y_t y1, y2, dy;
grid_scaled_x_t dx;
int ix1, ix2;
grid_scaled_x_t fx1, fx2;
x2.quo += edge->dxdy_full.quo;
x2.rem += edge->dxdy_full.rem;
if (x2.rem >= 0) {
++x2.quo;
x2.rem -= edge->dy;
struct quorem x1 = edge->x;
struct quorem x2 = x1;
if (! edge->vertical) {
x2.quo += edge->dxdy_full.quo;
x2.rem += edge->dxdy_full.rem;
if (x2.rem >= 0) {
++x2.quo;
x2.rem -= edge->dy;
}
edge->x = x2;
}
edge->x = x2;
GRID_X_TO_INT_FRAC(x1.quo, ix1, fx1);
GRID_X_TO_INT_FRAC(x2.quo, ix2, fx2);
@ -1046,10 +1059,9 @@ polygon_fini (struct polygon *polygon)
* receive new edges and clip them to the vertical range
* [ymin,ymax). */
static glitter_status_t
polygon_reset(
struct polygon *polygon,
grid_scaled_y_t ymin,
grid_scaled_y_t ymax)
polygon_reset (struct polygon *polygon,
grid_scaled_y_t ymin,
grid_scaled_y_t ymax)
{
unsigned h = ymax - ymin;
unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + EDGE_Y_BUCKET_HEIGHT-1,
@ -1116,30 +1128,38 @@ polygon_add_edge (struct polygon *polygon,
dx = edge->line.p2.x - edge->line.p1.x;
dy = edge->line.p2.y - edge->line.p1.y;
e->dy = dy;
e->dxdy = floored_divrem (dx, dy);
if (ymin <= edge->top)
ytop = edge->top;
else
ytop = ymin;
if (ytop == edge->line.p1.y) {
e->x.quo = edge->line.p1.x;
e->x.rem = 0;
} else {
e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
e->x.quo += edge->line.p1.x;
}
e->dir = edge->dir;
ytop = edge->top >= ymin ? edge->top : ymin;
ybot = edge->bottom <= ymax ? edge->bottom : ymax;
e->ytop = ytop;
ybot = edge->bottom < ymax ? edge->bottom : ymax;
e->height_left = ybot - ytop;
if (e->height_left >= GRID_Y) {
e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy);
} else {
if (dx == 0) {
e->vertical = TRUE;
e->x.quo = edge->line.p1.x;
e->x.rem = 0;
e->dxdy.quo = 0;
e->dxdy.rem = 0;
e->dxdy_full.quo = 0;
e->dxdy_full.rem = 0;
} else {
e->vertical = FALSE;
e->dxdy = floored_divrem (dx, dy);
if (ytop == edge->line.p1.y) {
e->x.quo = edge->line.p1.x;
e->x.rem = 0;
} else {
e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
e->x.quo += edge->line.p1.x;
}
if (e->height_left >= GRID_Y) {
e->dxdy_full = floored_muldivrem (GRID_Y, dx, dy);
} else {
e->dxdy_full.quo = 0;
e->dxdy_full.rem = 0;
}
}
_polygon_insert_edge_into_its_y_bucket (polygon, e);
@ -1162,31 +1182,30 @@ active_list_init(struct active_list *active)
active_list_reset(active);
}
/* Tear down the active edge list; the list owns no allocations of its
 * own, so this simply resets it to empty.
 * NOTE(review): this function is removed by the commit shown in this
 * diff (its only work duplicated active_list_reset). */
static void
active_list_fini(
struct active_list *active)
{
active_list_reset(active);
}
/* Merge the edges in an unsorted list of edges into a sorted
* list. The sort order is edges ascending by edge->x.quo. Returns
* the new head of the sorted list. */
static struct edge *
merge_unsorted_edges(struct edge *sorted_head, struct edge *unsorted_head)
{
struct edge *head = unsorted_head;
struct edge **cursor = &sorted_head;
int x;
while (NULL != head) {
struct edge *prev = *cursor;
struct edge *next = head->next;
x = head->x.quo;
if (sorted_head == NULL) {
sorted_head = unsorted_head;
unsorted_head = unsorted_head->next;
sorted_head->next = NULL;
if (unsorted_head == NULL)
return sorted_head;
}
if (NULL == prev || x < prev->x.quo) {
do {
struct edge *next = unsorted_head->next;
struct edge *prev = *cursor;
x = unsorted_head->x.quo;
if (x < prev->x.quo)
cursor = &sorted_head;
}
while (1) {
UNROLL3({
@ -1197,26 +1216,28 @@ merge_unsorted_edges(struct edge *sorted_head, struct edge *unsorted_head)
});
}
head->next = *cursor;
*cursor = head;
unsorted_head->next = *cursor;
*cursor = unsorted_head;
unsorted_head = next;
} while (unsorted_head != NULL);
head = next;
}
return sorted_head;
}
/* Test if the edges on the active list can be safely advanced by a
* full row without intersections or any edges ending. */
inline static int
active_list_can_step_full_row(
struct active_list *active)
active_list_can_step_full_row (struct active_list *active)
{
const struct edge *e;
int prev_x = INT_MIN;
/* Recomputes the minimum height of all edges on the active
* list if we have been dropping edges. */
if (active->min_height <= 0) {
struct edge *e = active->head;
int min_height = INT_MAX;
e = active->head;
while (NULL != e) {
if (e->height_left < min_height)
min_height = e->height_left;
@ -1226,27 +1247,29 @@ active_list_can_step_full_row(
active->min_height = min_height;
}
/* Check for intersections only if no edges end during the next
* row. */
if (active->min_height >= GRID_Y) {
grid_scaled_x_t prev_x = INT_MIN;
struct edge *e = active->head;
while (NULL != e) {
struct quorem x = e->x;
if (active->min_height < GRID_Y)
return 0;
/* Check for intersections as no edges end during the next row. */
e = active->head;
while (NULL != e) {
struct quorem x = e->x;
if (! e->vertical) {
x.quo += e->dxdy_full.quo;
x.rem += e->dxdy_full.rem;
if (x.rem >= 0)
++x.quo;
if (x.quo <= prev_x)
return 0;
prev_x = x.quo;
e = e->next;
}
return 1;
if (x.quo <= prev_x)
return 0;
prev_x = x.quo;
e = e->next;
}
return 0;
return 1;
}
/* Merges edges on the given subpixel row from the polygon to the
@ -1278,8 +1301,10 @@ active_list_merge_edges_from_polygon(
ptail = &tail->next;
}
}
active->head = merge_unsorted_edges(active->head, subrow_edges);
active->min_height = min_height;
if (subrow_edges) {
active->head = merge_unsorted_edges(active->head, subrow_edges);
active->min_height = min_height;
}
}
/* Advance the edges on the active list by one subsample row by
@ -1440,11 +1465,13 @@ apply_nonzero_fill_rule_and_step_edges (struct active_list *active,
}
}
right_edge->x.quo += right_edge->dxdy_full.quo;
right_edge->x.rem += right_edge->dxdy_full.rem;
if (right_edge->x.rem >= 0) {
++right_edge->x.quo;
right_edge->x.rem -= right_edge->dy;
if (! right_edge->vertical) {
right_edge->x.quo += right_edge->dxdy_full.quo;
right_edge->x.rem += right_edge->dxdy_full.rem;
if (right_edge->x.rem >= 0) {
++right_edge->x.quo;
right_edge->x.rem -= right_edge->dy;
}
}
}
@ -1497,11 +1524,13 @@ apply_evenodd_fill_rule_and_step_edges (struct active_list *active,
break;
}
right_edge->x.quo += right_edge->dxdy_full.quo;
right_edge->x.rem += right_edge->dxdy_full.rem;
if (right_edge->x.rem >= 0) {
++right_edge->x.quo;
right_edge->x.rem -= right_edge->dy;
if (! right_edge->vertical) {
right_edge->x.quo += right_edge->dxdy_full.quo;
right_edge->x.rem += right_edge->dxdy_full.rem;
if (right_edge->x.rem >= 0) {
++right_edge->x.quo;
right_edge->x.rem -= right_edge->dy;
}
}
}
@ -1538,8 +1567,14 @@ blit_span(
}
}
#define GLITTER_BLIT_COVERAGES(coverages, y, xmin, xmax) \
blit_cells(coverages, raster_pixels + (y)*raster_stride, xmin, xmax)
#define GLITTER_BLIT_COVERAGES(coverages, y, height, xmin, xmax) \
do { \
int __y = y; \
int __h = height; \
do { \
blit_cells(coverages, raster_pixels + (__y)*raster_stride, xmin, xmax); \
} while (--__h); \
} while (0)
static void
blit_cells(
@ -1598,7 +1633,6 @@ static void
_glitter_scan_converter_fini(glitter_scan_converter_t *converter)
{
polygon_fini(converter->polygon);
active_list_fini(converter->active);
cell_list_fini(converter->coverages);
converter->xmin=0;
converter->ymin=0;
@ -1712,16 +1746,44 @@ glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
#endif
#ifndef GLITTER_BLIT_COVERAGES_EMPTY
# define GLITTER_BLIT_COVERAGES_EMPTY(y, xmin, xmax)
# define GLITTER_BLIT_COVERAGES_EMPTY(y0, y1, xmin, xmax)
#endif
/* Return TRUE iff every edge currently on the active list is vertical
 * (e->vertical set).  Used by the renderer to detect runs of identical
 * rows that can be emitted as one tall span. */
static cairo_bool_t
active_list_is_vertical (struct active_list *active)
{
struct edge *e;

for (e = active->head; e != NULL; e = e->next) {
/* One sloped edge is enough to change the next row's spans. */
if (! e->vertical)
return FALSE;
}

return TRUE;
}
/* Advance every active edge by @count full pixel rows at once,
 * unlinking any edge whose remaining height reaches zero.  Only valid
 * when all edges are vertical (x never changes), which the caller
 * checks via active_list_is_vertical(). */
static void
step_edges (struct active_list *active, int count)
{
struct edge **cursor = &active->head;
struct edge *edge;

for (edge = *cursor; edge != NULL; edge = *cursor) {
/* GRID_Y subsample rows per pixel row. */
edge->height_left -= GRID_Y * count;
if (edge->height_left)
cursor = &edge->next;
else
*cursor = edge->next; /* edge exhausted: drop it from the list */
}
}
}
glitter_status_t
glitter_scan_converter_render(
glitter_scan_converter_t *converter,
int nonzero_fill,
GLITTER_BLIT_COVERAGES_ARGS)
{
int i;
int i, j;
int ymax_i = converter->ymax / GRID_Y;
int ymin_i = converter->ymin / GRID_Y;
int xmin_i, xmax_i;
@ -1739,23 +1801,25 @@ glitter_scan_converter_render(
GLITTER_BLIT_COVERAGES_BEGIN;
/* Render each pixel row. */
for (i=0; i<h; i++) {
for (i = 0; i < h; i = j) {
int do_full_step = 0;
glitter_status_t status = 0;
j = i + 1;
/* Determine if we can ignore this row or use the full pixel
* stepper. */
if (GRID_Y == EDGE_Y_BUCKET_HEIGHT && ! polygon->y_buckets[i]) {
if (! active->head) {
GLITTER_BLIT_COVERAGES_EMPTY (i+ymin_i, xmin_i, xmax_i);
for (; j < h && ! polygon->y_buckets[j]; j++)
;
GLITTER_BLIT_COVERAGES_EMPTY (i+ymin_i, j-i, xmin_i, xmax_i);
continue;
}
do_full_step = active_list_can_step_full_row (active);
}
cell_list_reset (coverages);
if (do_full_step) {
/* Step by a full pixel row's worth. */
if (nonzero_fill) {
@ -1765,9 +1829,22 @@ glitter_scan_converter_render(
status = apply_evenodd_fill_rule_and_step_edges (active,
coverages);
}
if (active_list_is_vertical (active)) {
while (j < h &&
polygon->y_buckets[j] == NULL &&
active->min_height >= 2*GRID_Y)
{
active->min_height -= GRID_Y;
j++;
}
if (j != i + 1)
step_edges (active, j - (i + 1));
}
} else {
/* Subsample this row. */
grid_scaled_y_t suby;
/* Subsample this row. */
for (suby = 0; suby < GRID_Y; suby++) {
grid_scaled_y_t y = (i+ymin_i)*GRID_Y + suby;
@ -1788,13 +1865,13 @@ glitter_scan_converter_render(
if (unlikely (status))
return status;
GLITTER_BLIT_COVERAGES(coverages, i+ymin_i, xmin_i, xmax_i);
GLITTER_BLIT_COVERAGES(coverages, i+ymin_i, j-i, xmin_i, xmax_i);
cell_list_reset (coverages);
if (! active->head) {
if (! active->head)
active->min_height = INT_MAX;
} else {
else
active->min_height -= GRID_Y;
}
}
/* Clean up the coverage blitter. */
@ -1808,21 +1885,20 @@ glitter_scan_converter_render(
* scan converter subclass. */
static glitter_status_t
blit_with_span_renderer(
struct cell_list *cells,
cairo_span_renderer_t *renderer,
struct pool *span_pool,
int y,
int xmin,
int xmax)
blit_with_span_renderer (struct cell_list *cells,
cairo_span_renderer_t *renderer,
struct pool *span_pool,
int y, int height,
int xmin, int xmax)
{
struct cell *cell = cells->head;
int prev_x = xmin;
int cover = 0;
cairo_half_open_span_t *spans;
unsigned num_spans;
if (cell == NULL)
return CAIRO_STATUS_SUCCESS;
return blit_empty_with_span_renderer (renderer, y, height);
/* Skip cells to the left of the clip region. */
while (cell != NULL && cell->x < xmin) {
@ -1834,12 +1910,12 @@ blit_with_span_renderer(
/* Count number of cells remaining. */
{
struct cell *next = cell;
num_spans = 0;
while (next) {
num_spans = 1;
while (next != NULL) {
next = next->next;
++num_spans;
}
num_spans = 2*num_spans + 1;
num_spans = 2*num_spans;
}
/* Allocate enough spans for the row. */
@ -1854,6 +1930,7 @@ blit_with_span_renderer(
for (; cell != NULL; cell = cell->next) {
int x = cell->x;
int area;
if (x >= xmax)
break;
@ -1873,20 +1950,26 @@ blit_with_span_renderer(
prev_x = x+1;
}
if (prev_x < xmax) {
if (prev_x <= xmax) {
spans[num_spans].x = prev_x;
spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
++num_spans;
}
if (prev_x < xmax && cover) {
spans[num_spans].x = xmax;
spans[num_spans].coverage = 0;
++num_spans;
}
/* Dump them into the renderer. */
return renderer->render_row (renderer, y, spans, num_spans);
return renderer->render_rows (renderer, y, height, spans, num_spans);
}
static glitter_status_t
blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y)
blit_empty_with_span_renderer (cairo_span_renderer_t *renderer, int y, int height)
{
return renderer->render_row (renderer, y, NULL, 0);
return renderer->render_rows (renderer, y, height, NULL, 0);
}
struct _cairo_tor_scan_converter {

View file

@ -1874,6 +1874,9 @@ typedef struct _cairo_win32_surface_span_renderer {
const cairo_pattern_t *pattern;
cairo_antialias_t antialias;
uint8_t *mask_data;
uint32_t mask_stride;
cairo_image_surface_t *mask;
cairo_win32_surface_t *dst;
cairo_region_t *clip_region;
@ -1885,11 +1888,13 @@ static cairo_status_t
_cairo_win32_surface_span_renderer_render_row (
void *abstract_renderer,
int y,
int height,
const cairo_half_open_span_t *spans,
unsigned num_spans)
{
cairo_win32_surface_span_renderer_t *renderer = abstract_renderer;
_cairo_image_surface_span_render_row (y, spans, num_spans, renderer->mask, &renderer->composite_rectangles);
while (height--)
_cairo_image_surface_span_render_row (y++, spans, num_spans, renderer->mask_data, renderer->mask_stride);
return CAIRO_STATUS_SUCCESS;
}
@ -1986,8 +1991,7 @@ _cairo_win32_surface_create_span_renderer (cairo_operator_t op,
renderer->base.destroy = _cairo_win32_surface_span_renderer_destroy;
renderer->base.finish = _cairo_win32_surface_span_renderer_finish;
renderer->base.render_row =
_cairo_win32_surface_span_renderer_render_row;
renderer->base.render_row = _cairo_win32_surface_span_renderer_render_rows;
renderer->op = op;
renderer->pattern = pattern;
renderer->antialias = antialias;
@ -2008,6 +2012,9 @@ _cairo_win32_surface_create_span_renderer (cairo_operator_t op,
_cairo_win32_surface_span_renderer_destroy (renderer);
return _cairo_span_renderer_create_in_error (status);
}
renderer->mask_data = renderer->mask->data - rects->mask.x - rects->mask.y * renderer->mask->stride;
renderer->mask_stride = renderer->mask->stride;
return &renderer->base;
}

View file

@ -2236,8 +2236,8 @@ cairo_private void
_cairo_image_surface_span_render_row (int y,
const cairo_half_open_span_t *spans,
unsigned num_spans,
cairo_image_surface_t *mask,
const cairo_composite_rectangles_t *rects);
uint8_t *data,
uint32_t stride);
cairo_private cairo_image_transparency_t
_cairo_image_analyze_transparency (cairo_image_surface_t *image);