Add WARN_CFLAGS, autodetection for 64/128 bit ints and cairo_wideint.[ch]

Check status return from _cairo_gstate_glyph_extents
Quiet compiler warnings about uninitialized variables
Switch to alternate exact line intersection code.
Add 64/128-bit wide integer arithmetic.
Switch to stdint.h types (and new wide types).
This commit is contained in:
Keith Packard 2004-05-28 12:37:15 +00:00
parent 878c76807a
commit 41f549a870
15 changed files with 2785 additions and 77 deletions

View file

@ -1,3 +1,47 @@
2004-05-28 Keith Packard <keithp@keithp.com>
* configure.in:
* src/Makefile.am:
Add WARN_CFLAGS, autodetection for 64/128 bit ints and
cairo_wideint.[ch]
* src/cairo_gstate.c: (_cairo_gstate_show_glyphs):
Check status return from _cairo_gstate_glyph_extents
* src/cairo_pattern.c: (_cairo_image_data_set_radial),
(_cairo_pattern_get_image):
* src/cairo_png_surface.c: (_cairo_png_surface_copy_page):
* src/cairo_surface.c: (_cairo_surface_composite):
Quiet compiler warnings about uninitialized variables
* src/cairo_traps.c: (_det16_32), (_det32_64),
(_fixed_16_16_to_fixed_32_32), (_line_segs_intersect_ceil):
Switch to alternate exact line intersection code.
* src/cairo_wideint.c: (_cairo_uint64_divrem),
(_cairo_uint32_to_uint64), (_cairo_int32_to_int64),
(_cairo_uint32s_to_uint64), (_cairo_uint64_add),
(_cairo_uint64_sub), (_cairo_uint32x32_64_mul),
(_cairo_uint64_mul), (_cairo_uint64_lsl), (_cairo_uint64_rsl),
(_cairo_uint64_rsa), (_cairo_uint64_lt), (_cairo_uint64_eq),
(_cairo_int64_lt), (_cairo_uint64_not), (_cairo_uint64_negate),
(_cairo_leading_zeros32), (_cairo_uint64x32_normalized_divrem),
(_cairo_int64_divrem), (_cairo_uint128_divrem),
(_cairo_uint32_to_uint128), (_cairo_int32_to_int128),
(_cairo_uint64_to_uint128), (_cairo_int64_to_int128),
(_cairo_uint128_add), (_cairo_uint128_sub), (uint64_lo),
(uint64_hi), (uint64_shift32), (_cairo_uint64x64_128_mul),
(_cairo_uint128_mul), (_cairo_uint128_lsl), (_cairo_uint128_rsl),
(_cairo_uint128_rsa), (_cairo_uint128_lt), (_cairo_int128_lt),
(_cairo_uint128_eq), (_cairo_uint128x64_normalized_divrem),
(_cairo_leading_zeros64), (_cairo_int128_negate),
(_cairo_int128_not), (_cairo_int128_divrem):
* src/cairo_wideint.h:
Add 64/128-bit wide integer arithmetic.
* src/cairoint.h:
Switch to stdint.h types (and new wide types).
2004-05-24 David Reveman <c99drn@cs.umu.se>
* src/cairo.c (cairo_restore): Moved CAIRO_CHECK_SANITY below

View file

@ -227,6 +227,22 @@ AC_SUBST(CAIRO_LIBS)
dnl ===========================================================================
dnl Checks for precise integer types
AC_CHECK_TYPES([uint64_t, uint128_t])
dnl Use lots of warning flags with GCC
WARN_CFLAGS=""
if test "x$GCC" = "xyes"; then
WARN_CFLAGS="-Wall -Wpointer-arith -Wstrict-prototypes \
-Wmissing-prototypes -Wmissing-declarations \
-Wnested-externs -fno-strict-aliasing"
fi
AC_SUBST(WARN_CFLAGS)
dnl ===========================================================================
AC_OUTPUT([
cairo.pc
Makefile

View file

@ -50,6 +50,8 @@ libcairo_la_SOURCES = \
cairo_surface.c \
cairo_traps.c \
cairo_pattern.c \
cairo_wideint.c \
cairo_wideint.h \
$(libcairo_ps_sources) \
$(libcairo_png_sources) \
$(libcairo_xlib_sources)\
@ -59,6 +61,6 @@ libcairo_la_SOURCES = \
libcairo_la_LDFLAGS = -version-info @VERSION_INFO@ -no-undefined
INCLUDES = -I$(srcdir) $(CAIRO_CFLAGS) $(FONTCONFIG_CFLAGS) $(XRENDER_CFLAGS) $(XCB_CFLAGS) $(PNG_CFLAGS) $(GL_CFLAGS)
INCLUDES = -I$(srcdir) $(WARN_CFLAGS) $(CAIRO_CFLAGS) $(FONTCONFIG_CFLAGS) $(XRENDER_CFLAGS) $(XCB_CFLAGS) $(PNG_CFLAGS) $(GL_CFLAGS)
libcairo_la_LIBADD = $(CAIRO_LIBS) $(FONTCONFIG_LIBS) $(XRENDER_LIBS) $(XCB_LIBS) $(PS_LIBS) $(PNG_LIBS) $(GL_LIBS) -lm

View file

@ -601,6 +601,7 @@ _cairo_image_data_set_radial (cairo_pattern_t *pattern,
} else {
aligned_circles = 1;
r1 = 1.0 / (r1 - r0);
r1_2 = c0_c1 = 0.0; /* shut up compiler */
}
cairo_matrix_get_affine (&pattern->matrix, &a, &b, &c, &d, &tx, &ty);
@ -738,6 +739,9 @@ _cairo_pattern_get_image (cairo_pattern_t *pattern, cairo_box_t *box)
}
break;
default:
surface = NULL;
break;
}
return (cairo_image_surface_t *) surface;

View file

@ -236,7 +236,7 @@ _cairo_surface_composite (cairo_operator_t operator,
unsigned int height)
{
cairo_int_status_t status;
cairo_image_surface_t *src_image, *mask_image, *dst_image;
cairo_image_surface_t *src_image, *mask_image = 0, *dst_image;
status = dst->backend->composite (operator,
src, mask, dst,

View file

@ -52,12 +52,6 @@ _compare_cairo_edge_by_slope (const void *av, const void *bv);
static cairo_fixed_16_16_t
_compute_x (cairo_line_t *line, cairo_fixed_t y);
static double
_compute_inverse_slope (cairo_line_t *l);
static double
_compute_x_intercept (cairo_line_t *l, double inverse_slope);
static int
_line_segs_intersect_ceil (cairo_line_t *left, cairo_line_t *right, cairo_fixed_t *y_ret);
@ -327,40 +321,108 @@ _compare_cairo_edge_by_current_x_slope (const void *av, const void *bv)
sub-computations -- just a bunch of determinants. I haven't
looked at complexity, (both are probably similar and it probably
doesn't matter much anyway).
*/
static double
_det (double a, double b, double c, double d)
static const cairo_fixed_32_32_t
_det16_32 (cairo_fixed_16_16_t a,
cairo_fixed_16_16_t b,
cairo_fixed_16_16_t c,
cairo_fixed_16_16_t d)
{
return a * d - b * c;
return _cairo_int64_sub (_cairo_int32x32_64_mul (a, d),
_cairo_int32x32_64_mul (b, c));
}
/*
 * 2x2 determinant (a*d - b*c) of 32.32 fixed-point values, computed
 * exactly: each 64x64 product needs up to 128 bits, and the final
 * subtraction cannot overflow 128 bits.
 */
static const cairo_fixed_64_64_t
_det32_64 (cairo_fixed_32_32_t a,
           cairo_fixed_32_32_t b,
           cairo_fixed_32_32_t c,
           cairo_fixed_32_32_t d)
{
    return _cairo_int128_sub (_cairo_int64x64_128_mul (a, d),
                              _cairo_int64x64_128_mul (b, c));
}
/*
 * Widen a 16.16 fixed-point value to 32.32: sign-extend to 64 bits,
 * then shift left 16 so the binary point lines up.  The numeric value
 * is unchanged; only the representation gains fraction bits.
 */
static const cairo_fixed_32_32_t
_fixed_16_16_to_fixed_32_32 (cairo_fixed_16_16_t a)
{
    return _cairo_int64_lsl (_cairo_int32_to_int64 (a), 16);
}
static int
_lines_intersect (cairo_line_t *l1, cairo_line_t *l2, cairo_fixed_t *y_intersection)
_line_segs_intersect_ceil (cairo_line_t *l1, cairo_line_t *l2, cairo_fixed_t *y_intersection)
{
double dx1 = cairo_fixed_to_double (l1->p1.x - l1->p2.x);
double dy1 = cairo_fixed_to_double (l1->p1.y - l1->p2.y);
cairo_fixed_16_16_t dx1, dx2, dy1, dy2;
cairo_fixed_32_32_t den_det;
cairo_fixed_32_32_t l1_det, l2_det;
cairo_fixed_64_64_t num_det;
cairo_fixed_32_32_t intersect_32_32;
cairo_fixed_48_16_t intersect_48_16;
cairo_fixed_16_16_t intersect_16_16;
cairo_quorem128_t qr;
double dx2 = cairo_fixed_to_double (l2->p1.x - l2->p2.x);
double dy2 = cairo_fixed_to_double (l2->p1.y - l2->p2.y);
double l1_det, l2_det;
double den_det = _det (dx1, dy1, dx2, dy2);
if (den_det == 0)
dx1 = l1->p1.x - l1->p2.x;
dy1 = l1->p1.y - l1->p2.y;
dx2 = l2->p1.x - l2->p2.x;
dy2 = l2->p1.y - l2->p2.y;
den_det = _det16_32 (dx1, dy1,
dx2, dy2);
if (_cairo_int64_eq (den_det, _cairo_int32_to_int64(0)))
return 0;
l1_det = _det (l1->p1.x, l1->p1.y,
l1->p2.x, l1->p2.y);
l2_det = _det (l2->p1.x, l2->p1.y,
l2->p2.x, l2->p2.y);
l1_det = _det16_32 (l1->p1.x, l1->p1.y,
l1->p2.x, l1->p2.y);
l2_det = _det16_32 (l2->p1.x, l2->p1.y,
l2->p2.x, l2->p2.y);
*y_intersection = _det (l1_det, dy1,
l2_det, dy2) / den_det;
num_det = _det32_64 (l1_det, _fixed_16_16_to_fixed_32_32 (dy1),
l2_det, _fixed_16_16_to_fixed_32_32 (dy2));
/*
* Ok, this one is a bit tricky in fixed point, the denominator
* needs to be left with 32-bits of fraction so that the
* result of the divide ends up with 32-bits of fraction (64 - 32 = 32)
*/
qr = _cairo_int128_divrem (num_det, _cairo_int64_to_int128 (den_det));
intersect_32_32 = _cairo_int128_to_int64 (qr.quo);
/*
* Find the ceiling of the quotient -- divrem returns
* the quotient truncated towards zero, so if the
* quotient should be positive (num_den and den_det have same sign)
* bump the quotient up by one.
*/
if (_cairo_int128_ne (qr.rem, _cairo_int32_to_int128 (0)) &&
(_cairo_int128_ge (num_det, _cairo_int32_to_int128 (0)) ==
_cairo_int64_ge (den_det, _cairo_int32_to_int64 (0))))
{
intersect_32_32 = _cairo_int64_add (intersect_32_32,
_cairo_int32_to_int64 (1));
}
/*
* Now convert from 32.32 to 48.16 and take the ceiling;
* this requires adding in 15 1 bits and shifting the result
*/
intersect_32_32 = _cairo_int64_add (intersect_32_32,
_cairo_int32_to_int64 ((1 << 16) - 1));
intersect_48_16 = _cairo_int64_rsa (intersect_32_32, 16);
/*
* And drop the top bits
*/
intersect_16_16 = _cairo_int64_to_int32 (intersect_48_16);
*y_intersection = intersect_16_16;
return 1;
}
*/
static cairo_fixed_16_16_t
_compute_x (cairo_line_t *line, cairo_fixed_t y)
{
@ -371,6 +433,7 @@ _compute_x (cairo_line_t *line, cairo_fixed_t y)
return line->p1.x + (ex / dy);
}
#if 0
static double
_compute_inverse_slope (cairo_line_t *l)
{
@ -460,6 +523,7 @@ _line_segs_intersect_ceil (cairo_line_t *l1, cairo_line_t *l2, cairo_fixed_t *y_
return 1;
}
#endif
/* The algorithm here is pretty simple:

986
src/cairo-wideint.c Normal file
View file

@ -0,0 +1,986 @@
/*
* $Id: cairo-wideint.c,v 1.1 2004-05-28 19:37:15 keithp Exp $
*
* Copyright © 2004 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "cairoint.h"
#if !HAVE_UINT64_T || !HAVE_UINT128_T
static const unsigned char top_bit[256] =
{
0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
};
#endif
#if HAVE_UINT64_T
#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))
/*
 * Quotient and remainder in one call, native 64-bit path.
 * NOTE(review): den == 0 is a hardware divide-by-zero here --
 * callers must guarantee a non-zero denominator.
 */
const cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr;

    qr.quo = num / den;
    qr.rem = num % den;
    return qr;
}
#else
/* Zero-extend a 32-bit value into the two-word 64-bit representation. */
const cairo_uint64_t
_cairo_uint32_to_uint64 (uint32_t i)
{
    cairo_uint64_t v;

    v.hi = 0;
    v.lo = i;
    return v;
}
/* Sign-extend a 32-bit value: the high word is all copies of the sign bit. */
const cairo_int64_t
_cairo_int32_to_int64 (int32_t i)
{
    cairo_int64_t v;

    v.lo = i;
    v.hi = (i < 0) ? -1 : 0;
    return v;
}
/* Assemble a 64-bit value from explicit high and low 32-bit words. */
static const cairo_uint64_t
_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
{
    cairo_uint64_t v;

    v.hi = h;
    v.lo = l;
    return v;
}
/* 64-bit add: word-wise sums, with the carry out of the low word
 * (detected by unsigned wrap-around) propagated into the high word. */
const cairo_uint64_t
_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t sum;

    sum.lo = a.lo + b.lo;
    sum.hi = a.hi + b.hi + (sum.lo < a.lo ? 1 : 0);
    return sum;
}
/* 64-bit subtract: word-wise differences, with a borrow taken from the
 * high word when the low-word subtraction wrapped. */
const cairo_uint64_t
_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t diff;

    diff.lo = a.lo - b.lo;
    diff.hi = a.hi - b.hi - (diff.lo > a.lo ? 1 : 0);
    return diff;
}
#define uint32_lo(i) ((i) & 0xffff)
#define uint32_hi(i) ((i) >> 16)
#define uint32_carry16 ((1) << 16)
/*
 * Full 32x32 -> 64-bit multiply built from four 16x16 -> 32-bit
 * partial products (schoolbook multiplication on 16-bit "digits"):
 *   a*b = r3<<32 + (r1+r2)<<16 + r0
 */
const cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
    cairo_uint64_t s;
    uint16_t ah, al, bh, bl;
    uint32_t r0, r1, r2, r3;

    al = uint32_lo (a);
    ah = uint32_hi (a);
    bl = uint32_lo (b);
    bh = uint32_hi (b);

    r0 = (uint32_t) al * bl;
    r1 = (uint32_t) al * bh;
    r2 = (uint32_t) ah * bl;
    r3 = (uint32_t) ah * bh;

    /* r1 <= (2^16-1)^2 so adding hi(r0) <= 2^16-1 cannot wrap */
    r1 += uint32_hi(r0);    /* no carry possible */
    r1 += r2;	    	    /* but this can carry */
    if (r1 < r2)	    /* check: wrap means a carry into bit 48 */
	r3 += uint32_carry16;

    s.hi = r3 + uint32_hi(r1);
    s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
    return s;
}
/*
 * 64x64 -> 64-bit multiply (result truncated to 64 bits): lo*lo
 * supplies all 64 bits; the two cross products only affect the high
 * word, and hi*hi would land entirely above bit 63, so it is dropped.
 */
const cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t s;

    s = _cairo_uint32x32_64_mul (a.lo, b.lo);
    s.hi += a.lo * b.hi + a.hi * b.lo;
    return s;
}
/*
 * Logical shift left by 0..63 bits.  shift >= 32 first moves the low
 * word up wholesale; the remaining 0..31-bit shift stitches the words
 * together.  The inner `if (shift)` guard avoids the undefined 32-bit
 * shift by (32 - 0).
 * NOTE(review): shift must be in [0, 63]; larger values misbehave.
 */
const cairo_uint64_t
_cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
	a.hi = a.lo;
	a.lo = 0;
	shift -= 32;
    }
    if (shift)
    {
	a.hi = a.hi << shift | a.lo >> (32 - shift);
	a.lo = a.lo << shift;
    }
    return a;
}
/*
 * Logical (zero-filling) shift right by 0..63 bits; mirror image of
 * _cairo_uint64_lsl.  The inner guard again avoids a shift by 32.
 * NOTE(review): shift must be in [0, 63]; larger values misbehave.
 */
const cairo_uint64_t
_cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
	a.lo = a.hi;
	a.hi = 0;
	shift -= 32;
    }
    if (shift)
    {
	a.lo = a.lo >> shift | a.hi << (32 - shift);
	a.hi = a.hi >> shift;
    }
    return a;
}
#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))
/*
 * Arithmetic (sign-preserving) shift right by 0..63 bits.
 * _cairo_uint32_rsa performs the signed shift on a single word; for
 * shift >= 32 the high word collapses to 32 copies of the sign bit.
 */
const cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
    if (shift >= 32)
    {
	a.lo = a.hi;
	a.hi = _cairo_uint32_rsa (a.hi, 31);	/* replicate the sign bit */
	shift -= 32;
    }
    if (shift)
    {
	a.lo = a.lo >> shift | a.hi << (32 - shift);
	a.hi = _cairo_uint32_rsa (a.hi, shift);
    }
    return a;
}
/* Unsigned 64-bit less-than: the high words decide unless equal. */
const int
_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
{
    if (a.hi != b.hi)
	return a.hi < b.hi;
    return a.lo < b.lo;
}
/* 64-bit equality: both words must match. */
const int
_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
{
    return (a.lo == b.lo) && (a.hi == b.hi);
}
/* Signed 64-bit less-than: if the signs differ, the negative operand
 * is smaller; otherwise the unsigned comparison gives the right
 * answer in two's complement. */
const int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
    int a_neg = _cairo_int64_negative (a);
    int b_neg = _cairo_int64_negative (b);

    if (a_neg != b_neg)
	return a_neg;
    return _cairo_uint64_lt (a, b);
}
/* Bitwise complement of both words. */
const cairo_uint64_t
_cairo_uint64_not (cairo_uint64_t a)
{
    cairo_uint64_t r;

    r.hi = ~a.hi;
    r.lo = ~a.lo;
    return r;
}
/*
 * Two's-complement negation: ~a + 1, with the increment's carry
 * propagated from the low word into the high word.
 */
const cairo_uint64_t
_cairo_uint64_negate (cairo_uint64_t a)
{
    a.lo = ~a.lo;
    a.hi = ~a.hi;
    if (++a.lo == 0)	/* low word wrapped to 0: carry into high word */
	++a.hi;
    return a;
}
/*
* The design of this algorithm comes from GCC,
* but the actual implementation is new
*/
/*
 * Count leading zero bits of a 32-bit value: locate the byte holding
 * the topmost set bit with three compares, then use the top_bit[]
 * table for the position within that byte.
 * NOTE(review): returns 32 for i == 0 (top_bit[0] == 0) -- callers
 * relying on this should confirm.
 */
static const int
_cairo_leading_zeros32 (uint32_t i)
{
    int top;

    if (i < 0x100)
	top = 0;
    else if (i < 0x10000)
	top = 8;
    else if (i < 0x1000000)
	top = 16;
    else
	top = 24;
    top = top + top_bit [i >> top];
    return 32 - top;
}
typedef struct _cairo_uquorem32_t {
uint32_t quo;
uint32_t rem;
} cairo_uquorem32_t;
/*
* den >= num.hi
*/
/*
 * 64 / 32 -> 32-bit quotient and remainder, for a normalized
 * denominator (top bit of den set, den > num.hi so the quotient fits
 * in 32 bits).  Classic schoolbook division on 16-bit digits, in the
 * style of GCC longlong.h's __udiv_qrnnd_c: each quotient digit is
 * estimated from the high denominator digit d1, then corrected by at
 * most two decrements.
 */
static const cairo_uquorem32_t
_cairo_uint64x32_normalized_divrem (cairo_uint64_t num, uint32_t den)
{
    cairo_uquorem32_t qr;
    uint32_t q0, q1, r0, r1;
    uint16_t d0, d1;
    uint32_t t;

    d0 = den & 0xffff;
    d1 = den >> 16;

    /* high quotient digit: estimate from num.hi / d1, then fix up */
    q1 = num.hi / d1;
    r1 = num.hi % d1;
    t = q1 * d0;
    r1 = (r1 << 16) | (num.lo >> 16);
    if (r1 < t)
    {
	q1--;
	r1 += den;
	/* second correction only if r1 += den did not wrap */
	if (r1 >= den && r1 < t)
	{
	    q1--;
	    r1 += den;
	}
    }
    r1 -= t;

    /* low quotient digit: same estimate/correct step on the remainder */
    q0 = r1 / d1;
    r0 = r1 % d1;
    t = q0 * d0;
    r0 = (r0 << 16) | (num.lo & 0xffff);
    if (r0 < t)
    {
	q0--;
	r0 += den;
	if (r0 >= den && r0 < t)
	{
	    q0--;
	    r0 += den;
	}
    }
    r0 -= t;

    qr.quo = (q1 << 16) | q0;
    qr.rem = r0;
    return qr;
}
/*
 * General 64 / 64 division for the two-word representation, following
 * the case structure of GCC's __udivdi3 (comments like "0q = nn / 0d"
 * name the shape: quotient words produced / numerator words / non-zero
 * denominator words).  The denominator is normalized (shifted until
 * its top bit is set) before calling the normalized divide, and the
 * remainder is shifted back afterwards.
 */
const cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem32_t qr32;
    cairo_uquorem64_t qr;
    int norm;
    uint32_t q1, q0, r1, r0;

    if (den.hi == 0)
    {
	if (den.lo > num.hi)
	{
	    /* 0q = nn / 0d */
	    norm = _cairo_leading_zeros32 (den.lo);
	    if (norm)
	    {
		den.lo <<= norm;
		num = _cairo_uint64_lsl (num, norm);
	    }
	    q1 = 0;
	}
	else
	{
	    /* qq = NN / 0d */

	    /* deliberate divide-by-zero to trap when den == 0 */
	    if (den.lo == 0)
		den.lo = 1 / den.lo;

	    norm = _cairo_leading_zeros32 (den.lo);
	    if (norm)
	    {
		cairo_uint64_t num1;
		den.lo <<= norm;
		/* divide the top 64 normalized bits to get the high
		 * quotient word; the remainder becomes the new num.hi */
		num1 = _cairo_uint64_rsl (num, 32 - norm);
		qr32 = _cairo_uint64x32_normalized_divrem (num1, den.lo);
		q1 = qr32.quo;
		num.hi = qr32.rem;
		num.lo <<= norm;
	    }
	    else
	    {
		/* den.lo already has its top bit set, so the high
		 * quotient word can only be 1 */
		num.hi -= den.lo;
		q1 = 1;
	    }
	}
	qr32 = _cairo_uint64x32_normalized_divrem (num, den.lo);
	q0 = qr32.quo;
	r1 = 0;
	r0 = qr32.rem >> norm;	/* undo the normalization shift */
    }
    else
    {
	if (den.hi > num.hi)
	{
	    /* 00 = nn / DD : quotient is zero, remainder is num */
	    q0 = q1 = 0;
	    r0 = num.lo;
	    r1 = num.hi;
	}
	else
	{
	    /* 0q = NN / dd */
	    norm = _cairo_leading_zeros32 (den.hi);
	    if (norm == 0)
	    {
		/* den.hi's top bit is set: quotient is 0 or 1 */
		if (num.hi > den.hi || num.lo >= den.lo)
		{
		    q0 = 1;
		    num = _cairo_uint64_sub (num, den);
		}
		else
		    q0 = 0;
		q1 = 0;
		r0 = num.lo;
		r1 = num.hi;
	    }
	    else
	    {
		cairo_uint64_t num1;
		cairo_uint64_t part;

		/* estimate the quotient from the top words, then
		 * correct by at most one after checking the full
		 * product (part) against the numerator */
		num1 = _cairo_uint64_rsl (num, 32 - norm);
		den = _cairo_uint64_lsl (den, norm);

		qr32 = _cairo_uint64x32_normalized_divrem (num1, den.hi);
		part = _cairo_uint32x32_64_mul (qr32.quo, den.lo);

		q0 = qr32.quo;

		num.lo <<= norm;
		num.hi = qr32.rem;

		if (_cairo_uint64_gt (part, num))
		{
		    q0--;
		    part = _cairo_uint64_sub (part, den);
		}

		q1 = 0;

		num = _cairo_uint64_sub (num, part);
		num = _cairo_uint64_rsl (num, norm);	/* de-normalize */
		r0 = num.lo;
		r1 = num.hi;
	    }
	}
    }
    qr.quo.lo = q0;
    qr.quo.hi = q1;
    qr.rem.lo = r0;
    qr.rem.hi = r1;
    return qr;
}
#endif /* !HAVE_UINT64_T */
/*
 * Signed 64-bit divide via the unsigned routine: divide the
 * magnitudes, then restore signs using C's truncating semantics --
 * the remainder takes the numerator's sign, and the quotient is
 * negative exactly when the operand signs differ.
 */
const cairo_quorem64_t
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
{
    int neg_num = _cairo_int64_negative (num);
    int neg_den = _cairo_int64_negative (den);
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (neg_num)
	num = _cairo_int64_negate (num);
    if (neg_den)
	den = _cairo_int64_negate (den);

    uqr = _cairo_uint64_divrem (num, den);

    qr.rem = neg_num ? (cairo_int64_t) _cairo_int64_negate (uqr.rem)
		     : (cairo_int64_t) uqr.rem;
    qr.quo = (neg_num != neg_den) ? (cairo_int64_t) _cairo_int64_negate (uqr.quo)
				  : (cairo_int64_t) uqr.quo;
    return qr;
}
#if HAVE_UINT128_T
/* Quotient and remainder in one call, native 128-bit path. */
const cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t result;

    result.rem = num % den;
    result.quo = num / den;
    return result;
}
#else
/* Zero-extend 32 -> 128 bits. */
const cairo_uint128_t
_cairo_uint32_to_uint128 (uint32_t i)
{
    cairo_uint128_t v;

    v.hi = _cairo_uint32_to_uint64 (0);
    v.lo = _cairo_uint32_to_uint64 (i);
    return v;
}
/* Sign-extend 32 -> 128 bits: the high half is all sign bits. */
const cairo_int128_t
_cairo_int32_to_int128 (int32_t i)
{
    cairo_int128_t v;

    v.hi = _cairo_int32_to_int64 ((i < 0) ? -1 : 0);
    v.lo = _cairo_int32_to_int64 (i);
    return v;
}
/* Zero-extend 64 -> 128 bits. */
const cairo_uint128_t
_cairo_uint64_to_uint128 (cairo_uint64_t i)
{
    cairo_uint128_t v;

    v.hi = _cairo_uint32_to_uint64 (0);
    v.lo = i;
    return v;
}
/* Sign-extend 64 -> 128 bits: the high half is all sign bits. */
const cairo_int128_t
_cairo_int64_to_int128 (cairo_int64_t i)
{
    cairo_int128_t v;

    v.hi = _cairo_int32_to_int64 (_cairo_int64_negative (i) ? -1 : 0);
    v.lo = i;
    return v;
}
/* 128-bit add: half-wise sums, carrying into the high half when the
 * low-half sum wrapped below one of its addends. */
const cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t sum;
    cairo_uint64_t one = _cairo_uint32_to_uint64 (1);

    sum.hi = _cairo_uint64_add (a.hi, b.hi);
    sum.lo = _cairo_uint64_add (a.lo, b.lo);
    if (_cairo_uint64_lt (sum.lo, a.lo))
	sum.hi = _cairo_uint64_add (sum.hi, one);
    return sum;
}
/* 128-bit subtract: half-wise differences, borrowing from the high
 * half when the low-half subtraction wrapped. */
const cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t diff;
    cairo_uint64_t one = _cairo_uint32_to_uint64 (1);

    diff.hi = _cairo_uint64_sub (a.hi, b.hi);
    diff.lo = _cairo_uint64_sub (a.lo, b.lo);
    if (_cairo_uint64_gt (diff.lo, a.lo))
	diff.hi = _cairo_uint64_sub (diff.hi, one);
    return diff;
}
#if HAVE_UINT64_T
#define uint64_lo32(i) ((i) & 0xffffffff)
#define uint64_hi32(i) ((i) >> 32)
#define uint64_lo(i) ((i) & 0xffffffff)
#define uint64_hi(i) ((i) >> 32)
#define uint64_shift32(i) ((i) << 32)
#define uint64_carry32 (((uint64_t) 1) << 32)
#else
#define uint64_lo32(i) ((i).lo)
#define uint64_hi32(i) ((i).hi)
/* Keep only the low 32 bits (two-word fallback for the native macro). */
static const cairo_uint64_t
uint64_lo (cairo_uint64_t i)
{
    cairo_uint64_t v;

    v.hi = 0;
    v.lo = i.lo;
    return v;
}
/* The high 32 bits, shifted down into the low word. */
static const cairo_uint64_t
uint64_hi (cairo_uint64_t i)
{
    cairo_uint64_t v;

    v.hi = 0;
    v.lo = i.hi;
    return v;
}
/* i << 32: the low word moves up wholesale; the low word becomes 0. */
static const cairo_uint64_t
uint64_shift32 (cairo_uint64_t i)
{
    cairo_uint64_t v;

    v.hi = i.lo;
    v.lo = 0;
    return v;
}
static const cairo_uint64_t uint64_carry32 = { 0, 1 };
#endif
/*
 * Full 64x64 -> 128-bit multiply: the same schoolbook scheme as
 * _cairo_uint32x32_64_mul, one level up -- 32-bit "digits" with
 * 64-bit partial products.
 */
const cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint128_t s;
    uint32_t ah, al, bh, bl;
    cairo_uint64_t r0, r1, r2, r3;

    al = uint64_lo32 (a);
    ah = uint64_hi32 (a);
    bl = uint64_lo32 (b);
    bh = uint64_hi32 (b);

    r0 = _cairo_uint32x32_64_mul (al, bl);
    r1 = _cairo_uint32x32_64_mul (al, bh);
    r2 = _cairo_uint32x32_64_mul (ah, bl);
    r3 = _cairo_uint32x32_64_mul (ah, bh);

    r1 = _cairo_uint64_add (r1, uint64_hi (r0));    /* no carry possible */
    r1 = _cairo_uint64_add (r1, r2);	    	    /* but this can carry */
    if (_cairo_uint64_lt (r1, r2))		    /* check: wrap => carry */
	r3 = _cairo_uint64_add (r3, uint64_carry32);

    s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
    s.lo = _cairo_uint64_add (uint64_shift32 (r1),
			      uint64_lo (r0));
    return s;
}
/*
 * 128x128 -> 128-bit multiply (truncated): lo*lo supplies all 128
 * bits; the two cross products only contribute to the high half.
 */
const cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t s;

    s = _cairo_uint64x64_128_mul (a.lo, b.lo);
    s.hi = _cairo_uint64_add (s.hi,
			      _cairo_uint64_mul (a.lo, b.hi));
    s.hi = _cairo_uint64_add (s.hi,
			      _cairo_uint64_mul (a.hi, b.lo));
    return s;
}
/*
 * Logical shift left by 0..127 bits; same two-phase structure as the
 * 64-bit version.  _cairo_uint64_add is used to combine the two
 * shifted halves -- safe because their set-bit ranges are disjoint.
 */
const cairo_uint128_t
_cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
	a.hi = a.lo;
	a.lo = _cairo_uint32_to_uint64 (0);
	shift -= 64;
    }
    if (shift)
    {
	a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
				  _cairo_uint64_rsl (a.lo, (64 - shift)));
	a.lo = _cairo_uint64_lsl (a.lo, shift);
    }
    return a;
}
/*
 * Logical (zero-filling) shift right by 0..127 bits; mirror image of
 * _cairo_uint128_lsl, with the same disjoint-bits add trick.
 */
const cairo_uint128_t
_cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
	a.lo = a.hi;
	a.hi = _cairo_uint32_to_uint64 (0);
	shift -= 64;
    }
    if (shift)
    {
	a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
				  _cairo_uint64_lsl (a.hi, (64 - shift)));
	a.hi = _cairo_uint64_rsl (a.hi, shift);
    }
    return a;
}
/*
 * Arithmetic (sign-preserving) shift right by 0..127 bits; for
 * shift >= 64 the high half collapses to 64 copies of the sign bit.
 * NOTE(review): parameter is cairo_int128_t but the declared return
 * type is cairo_uint128_t -- the two are the same struct typedef
 * here, but confirm against the header declaration.
 */
const cairo_uint128_t
_cairo_uint128_rsa (cairo_int128_t a, int shift)
{
    if (shift >= 64)
    {
	a.lo = a.hi;
	a.hi = _cairo_uint64_rsa (a.hi, 64-1);	/* replicate the sign bit */
	shift -= 64;
    }
    if (shift)
    {
	a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
				  _cairo_uint64_lsl (a.hi, (64 - shift)));
	a.hi = _cairo_uint64_rsa (a.hi, shift);
    }
    return a;
}
/* Unsigned 128-bit less-than: the high halves decide unless equal. */
const int
_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
{
    if (!_cairo_uint64_eq (a.hi, b.hi))
	return _cairo_uint64_lt (a.hi, b.hi);
    return _cairo_uint64_lt (a.lo, b.lo);
}
/* Signed 128-bit less-than: if the signs differ, the negative operand
 * is smaller; otherwise the unsigned comparison is correct in two's
 * complement. */
const int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
    int a_neg = _cairo_int128_negative (a);
    int b_neg = _cairo_int128_negative (b);

    if (a_neg != b_neg)
	return a_neg;
    return _cairo_uint128_lt (a, b);
}
/* 128-bit equality: both halves must match. */
const int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
    return (_cairo_uint64_eq (a.lo, b.lo) &&
	    _cairo_uint64_eq (a.hi, b.hi));
}
/*
* The design of this algorithm comes from GCC,
* but the actual implementation is new
*/
/*
* den >= num.hi
*/
/*
 * 128 / 64 -> 64-bit quotient and remainder, for a normalized
 * denominator (top bit of den set, den > num.hi).  Identical
 * structure to _cairo_uint64x32_normalized_divrem one level up:
 * 32-bit quotient digits, 64-bit intermediate remainders.
 */
static const cairo_uquorem64_t
_cairo_uint128x64_normalized_divrem (cairo_uint128_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr64;
    cairo_uquorem64_t qr;
    uint32_t q0, q1;
    cairo_uint64_t r0, r1;
    uint32_t d0, d1;
    cairo_uint64_t t;

    d0 = uint64_lo32 (den);
    d1 = uint64_hi32 (den);

    /* high quotient digit: estimate from num.hi / d1, then fix up */
    qr64 = _cairo_uint64_divrem (num.hi, _cairo_uint32_to_uint64 (d1));
    q1 = _cairo_uint64_to_uint32 (qr64.quo);
    r1 = qr64.rem;

    t = _cairo_uint32x32_64_mul (q1, d0);

    r1 = _cairo_uint64_add (_cairo_uint64_lsl (r1, 32),
			    _cairo_uint64_rsl (num.lo, 32));
    if (_cairo_uint64_lt (r1, t))
    {
	q1--;
	r1 = _cairo_uint64_add (r1, den);
	/* second correction only if r1 += den did not wrap */
	if (_cairo_uint64_ge (r1, den) && _cairo_uint64_lt (r1, t))
	{
	    q1--;
	    r1 = _cairo_uint64_add (r1, den);
	}
    }
    r1 = _cairo_uint64_sub (r1, t);

    /* low quotient digit: same estimate/correct step on the remainder */
    qr64 = _cairo_uint64_divrem (r1, _cairo_uint32_to_uint64 (d1));
    q0 = _cairo_uint64_to_uint32 (qr64.quo);
    r0 = qr64.rem;

    t = _cairo_uint32x32_64_mul (q0, d0);

    r0 = _cairo_uint64_add (_cairo_uint64_lsl (r0, 32),
			    _cairo_uint32_to_uint64 (_cairo_uint64_to_uint32 (num.lo)));
    if (_cairo_uint64_lt (r0, t))
    {
	q0--;
	r0 = _cairo_uint64_add (r0, den);
	if (_cairo_uint64_ge (r0, den) && _cairo_uint64_lt (r0, t))
	{
	    q0--;
	    r0 = _cairo_uint64_add (r0, den);
	}
    }
    r0 = _cairo_uint64_sub (r0, t);

    qr.quo = _cairo_uint32s_to_uint64 (q1, q0);
    qr.rem = r0;
    return qr;
}
#if HAVE_UINT64_T
/*
 * Count leading zero bits of a native 64-bit value: binary-search
 * down to the top byte, then finish with the top_bit[] table.
 * (0x10000 << 16 == 2^32, the 64/32-bit halfway point.)
 */
static const int
_cairo_leading_zeros64 (cairo_uint64_t q)
{
    int top = 0;

    if (q >= (uint64_t) 0x10000 << 16)
    {
	top += 32;
	q >>= 32;
    }
    if (q >= (uint64_t) 0x10000)
    {
	top += 16;
	q >>= 16;
    }
    if (q >= (uint64_t) 0x100)
    {
	top += 8;
	q >>= 8;
    }
    top += top_bit [q];
    return 64 - top;
}
#else
/* Two-word variant: count within whichever word holds the top set bit. */
static const int
_cairo_leading_zeros64 (cairo_uint64_t d)
{
    if (d.hi != 0)
	return _cairo_leading_zeros32 (d.hi);
    return _cairo_leading_zeros32 (d.lo) + 32;
}
#endif
/*
 * General 128 / 128 division: the exact structure of the two-word
 * _cairo_uint64_divrem, promoted to 64-bit halves.  The denominator
 * is normalized (shifted until its top bit is set) before the
 * normalized divide, and the remainder shifted back afterwards.
 */
const cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem64_t qr64;
    cairo_uquorem128_t qr;
    int norm;
    cairo_uint64_t q1, q0, r1, r0;

    if (_cairo_uint64_eq (den.hi, _cairo_uint32_to_uint64 (0)))
    {
	if (_cairo_uint64_gt (den.lo, num.hi))
	{
	    /* 0q = nn / 0d */
	    norm = _cairo_leading_zeros64 (den.lo);
	    if (norm)
	    {
		den.lo = _cairo_uint64_lsl (den.lo, norm);
		num = _cairo_uint128_lsl (num, norm);
	    }
	    q1 = _cairo_uint32_to_uint64 (0);
	}
	else
	{
	    /* qq = NN / 0d */

	    /* deliberate divide-by-zero to trap when den == 0 */
	    if (_cairo_uint64_eq (den.lo, _cairo_uint32_to_uint64 (0)))
		den.lo = _cairo_uint64_divrem (_cairo_uint32_to_uint64 (1),
					       den.lo).quo;

	    norm = _cairo_leading_zeros64 (den.lo);
	    if (norm)
	    {
		cairo_uint128_t num1;
		den.lo = _cairo_uint64_lsl (den.lo, norm);
		/* divide the top 128 normalized bits to get the high
		 * quotient half; the remainder becomes the new num.hi */
		num1 = _cairo_uint128_rsl (num, 64 - norm);
		qr64 = _cairo_uint128x64_normalized_divrem (num1, den.lo);
		q1 = qr64.quo;
		num.hi = qr64.rem;
		num.lo = _cairo_uint64_lsl (num.lo, norm);
	    }
	    else
	    {
		/* den.lo already has its top bit set, so the high
		 * quotient half can only be 1 */
		num.hi = _cairo_uint64_sub (num.hi, den.lo);
		q1 = _cairo_uint32_to_uint64 (1);
	    }
	}
	qr64 = _cairo_uint128x64_normalized_divrem (num, den.lo);
	q0 = qr64.quo;
	r1 = _cairo_uint32_to_uint64 (0);
	r0 = _cairo_uint64_rsl (qr64.rem, norm);	/* de-normalize */
    }
    else
    {
	if (_cairo_uint64_gt (den.hi, num.hi))
	{
	    /* 00 = nn / DD : quotient is zero, remainder is num */
	    q0 = q1 = _cairo_uint32_to_uint64 (0);
	    r0 = num.lo;
	    r1 = num.hi;
	}
	else
	{
	    /* 0q = NN / dd */
	    norm = _cairo_leading_zeros64 (den.hi);
	    if (norm == 0)
	    {
		/* den.hi's top bit is set: quotient is 0 or 1 */
		if (_cairo_uint64_gt (num.hi, den.hi) ||
		    _cairo_uint64_ge (num.lo, den.lo))
		{
		    q0 = _cairo_uint32_to_uint64 (1);
		    num = _cairo_uint128_sub (num, den);
		}
		else
		    q0 = _cairo_uint32_to_uint64 (0);

		q1 = _cairo_uint32_to_uint64 (0);
		r0 = num.lo;
		r1 = num.hi;
	    }
	    else
	    {
		cairo_uint128_t num1;
		cairo_uint128_t part;

		/* estimate the quotient from the top halves, then
		 * correct by at most one after checking the full
		 * product (part) against the numerator */
		num1 = _cairo_uint128_rsl (num, 64 - norm);
		den = _cairo_uint128_lsl (den, norm);

		qr64 = _cairo_uint128x64_normalized_divrem (num1, den.hi);
		part = _cairo_uint64x64_128_mul (qr64.quo, den.lo);

		q0 = qr64.quo;

		num.lo = _cairo_uint64_lsl (num.lo, norm);
		num.hi = qr64.rem;

		if (_cairo_uint128_gt (part, num))
		{
		    q0 = _cairo_uint64_sub (q0, _cairo_uint32_to_uint64 (1));
		    part = _cairo_uint128_sub (part, den);
		}

		q1 = _cairo_uint32_to_uint64 (0);

		num = _cairo_uint128_sub (num, part);
		num = _cairo_uint128_rsl (num, norm);	/* de-normalize */
		r0 = num.lo;
		r1 = num.hi;
	    }
	}
    }
    qr.quo.lo = q0;
    qr.quo.hi = q1;
    qr.rem.lo = r0;
    qr.rem.hi = r1;
    return qr;
}
/* Two's-complement negation: -a == ~a + 1. */
const cairo_int128_t
_cairo_int128_negate (cairo_int128_t a)
{
    a.hi = _cairo_uint64_not (a.hi);
    a.lo = _cairo_uint64_not (a.lo);
    return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
}
/* Bitwise complement of both halves. */
const cairo_int128_t
_cairo_int128_not (cairo_int128_t a)
{
    a.hi = _cairo_uint64_not (a.hi);
    a.lo = _cairo_uint64_not (a.lo);
    return a;
}
#endif /* !HAVE_UINT128_T */
/*
 * Signed 128-bit divide via the unsigned routine: divide the
 * magnitudes, then restore signs using C's truncating semantics --
 * the remainder takes the numerator's sign, and the quotient is
 * negative exactly when the operand signs differ.
 */
const cairo_quorem128_t
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
{
    int neg_num = _cairo_int128_negative (num);
    int neg_den = _cairo_int128_negative (den);
    cairo_uquorem128_t uqr;
    cairo_quorem128_t qr;

    if (neg_num)
	num = _cairo_int128_negate (num);
    if (neg_den)
	den = _cairo_int128_negate (den);

    uqr = _cairo_uint128_divrem (num, den);

    qr.rem = neg_num ? _cairo_int128_negate (uqr.rem) : uqr.rem;
    qr.quo = (neg_num != neg_den) ? _cairo_int128_negate (uqr.quo) : uqr.quo;
    return qr;
}

272
src/cairo-wideint.h Normal file
View file

@ -0,0 +1,272 @@
/*
* $Id: cairo-wideint.h,v 1.1 2004-05-28 19:37:15 keithp Exp $
*
* Copyright © 2004 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CAIRO_WIDEINT_H
#define CAIRO_WIDEINT_H
#include <stdint.h>
/*
* 64-bit datatypes. Two separate implementations, one using
* built-in 64-bit signed/unsigned types, the other implemented
* as a pair of 32-bit ints
*/
#define I __internal_linkage
#if !HAVE_UINT64_T
typedef struct _cairo_uint64 {
uint32_t lo, hi;
} cairo_uint64_t, cairo_int64_t;
const cairo_uint64_t I _cairo_uint32_to_uint64 (uint32_t i);
#define _cairo_uint64_to_uint32(a) ((a).lo)
const cairo_uint64_t I _cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint32x32_64_mul (uint32_t a, uint32_t b);
const cairo_uint64_t I _cairo_uint64_lsl (cairo_uint64_t a, int shift);
const cairo_uint64_t I _cairo_uint64_rsl (cairo_uint64_t a, int shift);
const cairo_uint64_t I _cairo_uint64_rsa (cairo_uint64_t a, int shift);
const int _cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b);
const int _cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint64_negate (cairo_uint64_t a);
#define _cairo_uint64_negative(a) (((int32_t) ((a).hi)) < 0)
const cairo_uint64_t I _cairo_uint64_not (cairo_uint64_t a);
#define _cairo_uint64_to_int64(i) (i)
#define _cairo_int64_to_uint64(i) (i)
const cairo_int64_t I _cairo_int32_to_int64(int32_t i);
#define _cairo_int64_to_int32(a) ((int32_t) _cairo_uint64_to_uint32(a))
#define _cairo_int64_add(a,b) _cairo_uint64_add (a,b)
#define _cairo_int64_sub(a,b) _cairo_uint64_sub (a,b)
#define _cairo_int64_mul(a,b) _cairo_uint64_mul (a,b)
/* Signed 32x32 -> 64 multiply via the unsigned multiply.
 * Fixed: the original expansion had an unbalanced extra ')'.
 * NOTE(review): casting both operands to uint32_t omits the sign
 * correction terms, so the high word is wrong when either operand is
 * negative -- verify callers, or promote this to a real function that
 * subtracts (b << 32) when a < 0 and (a << 32) when b < 0. */
#define _cairo_int32x32_64_mul(a,b) _cairo_uint32x32_64_mul ((uint32_t) (a), (uint32_t) (b))
const int _cairo_int64_lt (cairo_uint64_t a, cairo_uint64_t b);
#define _cairo_int64_eq(a,b) _cairo_uint64_eq (a,b)
#define _cairo_int64_lsl(a,b) _cairo_uint64_lsl (a,b)
#define _cairo_int64_rsl(a,b) _cairo_uint64_rsl (a,b)
#define _cairo_int64_rsa(a,b) _cairo_uint64_rsa (a,b)
#define _cairo_int64_negate(a) _cairo_uint64_negate(a)
#define _cairo_int64_negative(a) (((int32_t) ((a).hi)) < 0)
#define _cairo_int64_not(a) _cairo_uint64_not(a)
#else
typedef uint64_t cairo_uint64_t;
typedef int64_t cairo_int64_t;
#define _cairo_uint32_to_uint64(i) ((uint64_t) (i))
#define _cairo_uint64_to_uint32(i) ((uint32_t) (i))
#define _cairo_uint64_add(a,b) ((a) + (b))
#define _cairo_uint64_sub(a,b) ((a) - (b))
#define _cairo_uint64_mul(a,b) ((a) * (b))
#define _cairo_uint32x32_64_mul(a,b) ((uint64_t) (a) * (b))
#define _cairo_uint64_lsl(a,b) ((a) << (b))
#define _cairo_uint64_rsl(a,b) ((uint64_t) (a) >> (b))
#define _cairo_uint64_rsa(a,b) ((uint64_t) ((int64_t) (a) >> (b)))
#define _cairo_uint64_lt(a,b) ((a) < (b))
#define _cairo_uint64_eq(a,b) ((a) == (b))
#define _cairo_uint64_negate(a) ((uint64_t) -((int64_t) (a)))
#define _cairo_uint64_negative(a) ((int64_t) (a) < 0)
#define _cairo_uint64_not(a) (~(a))
#define _cairo_uint64_to_int64(i) ((int64_t) (i))
#define _cairo_int64_to_uint64(i) ((uint64_t) (i))
#define _cairo_int32_to_int64(i) ((int64_t) (i))
#define _cairo_int64_to_int32(i) ((int32_t) (i))
#define _cairo_int64_add(a,b) ((a) + (b))
#define _cairo_int64_sub(a,b) ((a) - (b))
#define _cairo_int64_mul(a,b) ((a) * (b))
#define _cairo_int32x32_64_mul(a,b) ((int64_t) (a) * (b))
#define _cairo_int64_lt(a,b) ((a) < (b))
#define _cairo_int64_eq(a,b) ((a) == (b))
#define _cairo_int64_lsl(a,b) ((a) << (b))
#define _cairo_int64_rsl(a,b) ((int64_t) ((uint64_t) (a) >> (b)))
#define _cairo_int64_rsa(a,b) ((int64_t) (a) >> (b))
#define _cairo_int64_negate(a) (-(a))
#define _cairo_int64_negative(a) ((a) < 0)
#define _cairo_int64_not(a) (~(a))
#endif
/*
* 64-bit comparisons derived from lt or eq
*/
#define _cairo_uint64_le(a,b) (!_cairo_uint64_gt(a,b))
#define _cairo_uint64_ne(a,b) (!_cairo_uint64_eq(a,b))
#define _cairo_uint64_ge(a,b) (!_cairo_uint64_lt(a,b))
#define _cairo_uint64_gt(a,b) _cairo_uint64_lt(b,a)
#define _cairo_int64_le(a,b) (!_cairo_int64_gt(a,b))
#define _cairo_int64_ne(a,b) (!_cairo_int64_eq(a,b))
#define _cairo_int64_ge(a,b) (!_cairo_int64_lt(a,b))
#define _cairo_int64_gt(a,b) _cairo_int64_lt(b,a)
/*
* As the C implementation always computes both, create
* a function which returns both for the 'native' type as well
*/
typedef struct _cairo_uquorem64 {
cairo_uint64_t quo;
cairo_uint64_t rem;
} cairo_uquorem64_t;
typedef struct _cairo_quorem64 {
cairo_int64_t quo;
cairo_int64_t rem;
} cairo_quorem64_t;
const cairo_uquorem64_t I
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den);
const cairo_quorem64_t I
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den);
/*
* 128-bit datatypes. Again, provide two implementations in
* case the machine has a native 128-bit datatype. GCC supports int128_t
* on ia64
*/
#if !HAVE_UINT128_T
typedef struct cairo_uint128 {
cairo_uint64_t lo, hi;
} cairo_uint128_t, cairo_int128_t;
const cairo_uint128_t I _cairo_uint32_to_uint128 (uint32_t i);
const cairo_uint128_t I _cairo_uint64_to_uint128 (cairo_uint64_t i);
#define _cairo_uint128_to_uint64(a) ((a).lo)
#define _cairo_uint128_to_uint32(a) _cairo_uint64_to_uint32(_cairo_uint128_to_uint64(a))
const cairo_uint128_t I _cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint128_t I _cairo_uint128_lsl (cairo_uint128_t a, int shift);
const cairo_uint128_t I _cairo_uint128_rsl (cairo_uint128_t a, int shift);
const cairo_uint128_t I _cairo_uint128_rsa (cairo_uint128_t a, int shift);
const int _cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b);
const int _cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint128_negate (cairo_uint128_t a);
#define _cairo_uint128_negative(a) (_cairo_uint64_negative(a.hi))
const cairo_uint128_t I _cairo_uint128_not (cairo_uint128_t a);
/* sign/unsign conversions are no-ops: the struct carries no sign of its own.
 * (name fixed: the native-int128 branch defines _cairo_uint128_to_int128,
 * so the struct branch must use the identical spelling) */
#define _cairo_uint128_to_int128(i)	(i)
#define _cairo_int128_to_uint128(i)	(i)

const cairo_int128_t I	_cairo_int32_to_int128 (int32_t i);
const cairo_int128_t I	_cairo_int64_to_int128 (cairo_int64_t i);

/* no trailing ';' -- these must expand to plain expressions so that
 * _cairo_int128_to_int32 can nest _cairo_int128_to_int64 */
#define _cairo_int128_to_int64(a)   ((cairo_int64_t) (a).lo)
#define _cairo_int128_to_int32(a)   _cairo_int64_to_int32(_cairo_int128_to_int64(a))
#define _cairo_int128_add(a,b) _cairo_uint128_add(a,b)
#define _cairo_int128_sub(a,b) _cairo_uint128_sub(a,b)
#define _cairo_int128_mul(a,b) _cairo_uint128_mul(a,b)
#define _cairo_int64x64_128_mul(a,b) _cairo_uint64x64_128_mul ((cairo_uint64_t) (a), (cairo_uint64_t) (b))
#define _cairo_int128_lsl(a,b) _cairo_uint128_lsl(a,b)
#define _cairo_int128_rsl(a,b) _cairo_uint128_rsl(a,b)
#define _cairo_int128_rsa(a,b) _cairo_uint128_rsa(a,b)
const int _cairo_int128_lt (cairo_int128_t a, cairo_int128_t b);
#define _cairo_int128_eq(a,b) _cairo_uint128_eq (a,b)
#define _cairo_int128_negate(a) _cairo_uint128_negate(a)
#define _cairo_int128_negative(a) (_cairo_uint128_negative(a))
#define _cairo_int128_not(a) _cairo_uint128_not(a)
#else /* !HAVE_UINT128_T */
typedef uint128_t cairo_uint128_t;
typedef int128_t cairo_int128_t;
#define _cairo_uint32_to_uint128(i) ((uint128_t) (i))
#define _cairo_uint64_to_uint128(i) ((uint128_t) (i))
#define _cairo_uint128_to_uint64(i) ((uint64_t) (i))
#define _cairo_uint128_to_uint32(i) ((uint32_t) (i))
#define _cairo_uint128_add(a,b) ((a) + (b))
#define _cairo_uint128_sub(a,b) ((a) - (b))
#define _cairo_uint128_mul(a,b) ((a) * (b))
#define _cairo_uint64x64_128_mul(a,b) ((uint128_t) (a) * (b))
#define _cairo_uint128_lsl(a,b) ((a) << (b))
#define _cairo_uint128_rsl(a,b) ((uint128_t) (a) >> (b))
#define _cairo_uint128_rsa(a,b) ((uint128_t) ((int128_t) (a) >> (b)))
#define _cairo_uint128_lt(a,b) ((a) < (b))
#define _cairo_uint128_eq(a,b) ((a) == (b))
#define _cairo_uint128_negate(a) ((uint128_t) -((int128_t) (a)))
#define _cairo_uint128_negative(a) ((int128_t) (a) < 0)
#define _cairo_uint128_not(a) (~(a))
#define _cairo_uint128_to_int128(i) ((int128_t) (i))
#define _cairo_int128_to_uint128(i) ((uint128_t) (i))
#define _cairo_int32_to_int128(i) ((int128_t) (i))
#define _cairo_int64_to_int128(i) ((int128_t) (i))
#define _cairo_int128_to_int64(i) ((int64_t) (i))
#define _cairo_int128_to_int32(i) ((int32_t) (i))
#define _cairo_int128_add(a,b) ((a) + (b))
#define _cairo_int128_sub(a,b) ((a) - (b))
#define _cairo_int128_mul(a,b) ((a) * (b))
#define _cairo_int64x64_128_mul(a,b) ((int128_t) (a) * (b))
#define _cairo_int128_lt(a,b) ((a) < (b))
#define _cairo_int128_eq(a,b) ((a) == (b))
#define _cairo_int128_lsl(a,b) ((a) << (b))
#define _cairo_int128_rsl(a,b) ((int128_t) ((uint128_t) (a) >> (b)))
#define _cairo_int128_rsa(a,b) ((int128_t) (a) >> (b))
#define _cairo_int128_negate(a) (-(a))
#define _cairo_int128_negative(a) ((a) < 0)
#define _cairo_int128_not(a) (~(a))
#endif /* HAVE_UINT128_T */
typedef struct _cairo_uquorem128 {
cairo_uint128_t quo;
cairo_uint128_t rem;
} cairo_uquorem128_t;
typedef struct _cairo_quorem128 {
cairo_int128_t quo;
cairo_int128_t rem;
} cairo_quorem128_t;
const cairo_uquorem128_t I
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den);
const cairo_quorem128_t I
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den);
#define _cairo_uint128_le(a,b) (!_cairo_uint128_gt(a,b))
#define _cairo_uint128_ne(a,b) (!_cairo_uint128_eq(a,b))
#define _cairo_uint128_ge(a,b) (!_cairo_uint128_lt(a,b))
#define _cairo_uint128_gt(a,b) _cairo_uint128_lt(b,a)
#define _cairo_int128_le(a,b) (!_cairo_int128_gt(a,b))
#define _cairo_int128_ne(a,b) (!_cairo_int128_eq(a,b))
#define _cairo_int128_ge(a,b) (!_cairo_int128_lt(a,b))
#define _cairo_int128_gt(a,b) _cairo_int128_lt(b,a)
#undef I
#endif /* CAIRO_WIDEINT_H */

View file

@ -601,6 +601,7 @@ _cairo_image_data_set_radial (cairo_pattern_t *pattern,
} else {
aligned_circles = 1;
r1 = 1.0 / (r1 - r0);
r1_2 = c0_c1 = 0.0; /* shut up compiler */
}
cairo_matrix_get_affine (&pattern->matrix, &a, &b, &c, &d, &tx, &ty);
@ -738,6 +739,9 @@ _cairo_pattern_get_image (cairo_pattern_t *pattern, cairo_box_t *box)
}
break;
default:
surface = NULL;
break;
}
return (cairo_image_surface_t *) surface;

View file

@ -292,6 +292,9 @@ _cairo_png_surface_copy_page (void *abstract_surface)
depth = 1;
png_color_type = PNG_COLOR_TYPE_GRAY;
break;
default:
status = CAIRO_STATUS_NULL_POINTER;
goto BAIL;
}
png_set_IHDR (png, info,

View file

@ -236,7 +236,7 @@ _cairo_surface_composite (cairo_operator_t operator,
unsigned int height)
{
cairo_int_status_t status;
cairo_image_surface_t *src_image, *mask_image, *dst_image;
cairo_image_surface_t *src_image, *mask_image = 0, *dst_image;
status = dst->backend->composite (operator,
src, mask, dst,

View file

@ -52,12 +52,6 @@ _compare_cairo_edge_by_slope (const void *av, const void *bv);
static cairo_fixed_16_16_t
_compute_x (cairo_line_t *line, cairo_fixed_t y);
static double
_compute_inverse_slope (cairo_line_t *l);
static double
_compute_x_intercept (cairo_line_t *l, double inverse_slope);
static int
_line_segs_intersect_ceil (cairo_line_t *left, cairo_line_t *right, cairo_fixed_t *y_ret);
@ -327,40 +321,108 @@ _compare_cairo_edge_by_current_x_slope (const void *av, const void *bv)
sub-computations -- just a bunch of determinants. I haven't
looked at complexity, (both are probably similar and it probably
doesn't matter much anyway).
*/
static double
_det (double a, double b, double c, double d)
static const cairo_fixed_32_32_t
_det16_32 (cairo_fixed_16_16_t a,
cairo_fixed_16_16_t b,
cairo_fixed_16_16_t c,
cairo_fixed_16_16_t d)
{
return a * d - b * c;
return _cairo_int64_sub (_cairo_int32x32_64_mul (a, d),
_cairo_int32x32_64_mul (b, c));
}
static const cairo_fixed_64_64_t
_det32_64 (cairo_fixed_32_32_t a,
cairo_fixed_32_32_t b,
cairo_fixed_32_32_t c,
cairo_fixed_32_32_t d)
{
return _cairo_int128_sub (_cairo_int64x64_128_mul (a, d),
_cairo_int64x64_128_mul (b, c));
}
static const cairo_fixed_32_32_t
_fixed_16_16_to_fixed_32_32 (cairo_fixed_16_16_t a)
{
return _cairo_int64_lsl (_cairo_int32_to_int64 (a), 16);
}
static int
_lines_intersect (cairo_line_t *l1, cairo_line_t *l2, cairo_fixed_t *y_intersection)
_line_segs_intersect_ceil (cairo_line_t *l1, cairo_line_t *l2, cairo_fixed_t *y_intersection)
{
double dx1 = cairo_fixed_to_double (l1->p1.x - l1->p2.x);
double dy1 = cairo_fixed_to_double (l1->p1.y - l1->p2.y);
cairo_fixed_16_16_t dx1, dx2, dy1, dy2;
cairo_fixed_32_32_t den_det;
cairo_fixed_32_32_t l1_det, l2_det;
cairo_fixed_64_64_t num_det;
cairo_fixed_32_32_t intersect_32_32;
cairo_fixed_48_16_t intersect_48_16;
cairo_fixed_16_16_t intersect_16_16;
cairo_quorem128_t qr;
double dx2 = cairo_fixed_to_double (l2->p1.x - l2->p2.x);
double dy2 = cairo_fixed_to_double (l2->p1.y - l2->p2.y);
double l1_det, l2_det;
double den_det = _det (dx1, dy1, dx2, dy2);
if (den_det == 0)
dx1 = l1->p1.x - l1->p2.x;
dy1 = l1->p1.y - l1->p2.y;
dx2 = l2->p1.x - l2->p2.x;
dy2 = l2->p1.y - l2->p2.y;
den_det = _det16_32 (dx1, dy1,
dx2, dy2);
if (_cairo_int64_eq (den_det, _cairo_int32_to_int64(0)))
return 0;
l1_det = _det (l1->p1.x, l1->p1.y,
l1->p2.x, l1->p2.y);
l2_det = _det (l2->p1.x, l2->p1.y,
l2->p2.x, l2->p2.y);
l1_det = _det16_32 (l1->p1.x, l1->p1.y,
l1->p2.x, l1->p2.y);
l2_det = _det16_32 (l2->p1.x, l2->p1.y,
l2->p2.x, l2->p2.y);
*y_intersection = _det (l1_det, dy1,
l2_det, dy2) / den_det;
num_det = _det32_64 (l1_det, _fixed_16_16_to_fixed_32_32 (dy1),
l2_det, _fixed_16_16_to_fixed_32_32 (dy2));
/*
* Ok, this one is a bit tricky in fixed point, the denominator
* needs to be left with 32-bits of fraction so that the
* result of the divide ends up with 32-bits of fraction (64 - 32 = 32)
*/
qr = _cairo_int128_divrem (num_det, _cairo_int64_to_int128 (den_det));
intersect_32_32 = _cairo_int128_to_int64 (qr.quo);
/*
* Find the ceiling of the quotient -- divrem returns
* the quotient truncated towards zero, so if the
* quotient should be positive (num_den and den_det have same sign)
* bump the quotient up by one.
*/
if (_cairo_int128_ne (qr.rem, _cairo_int32_to_int128 (0)) &&
(_cairo_int128_ge (num_det, _cairo_int32_to_int128 (0)) ==
_cairo_int64_ge (den_det, _cairo_int32_to_int64 (0))))
{
intersect_32_32 = _cairo_int64_add (intersect_32_32,
_cairo_int32_to_int64 (1));
}
/*
* Now convert from 32.32 to 48.16 and take the ceiling;
* this requires adding in 15 1 bits and shifting the result
*/
intersect_32_32 = _cairo_int64_add (intersect_32_32,
_cairo_int32_to_int64 ((1 << 16) - 1));
intersect_48_16 = _cairo_int64_rsa (intersect_32_32, 16);
/*
* And drop the top bits
*/
intersect_16_16 = _cairo_int64_to_int32 (intersect_48_16);
*y_intersection = intersect_16_16;
return 1;
}
*/
static cairo_fixed_16_16_t
_compute_x (cairo_line_t *line, cairo_fixed_t y)
{
@ -371,6 +433,7 @@ _compute_x (cairo_line_t *line, cairo_fixed_t y)
return line->p1.x + (ex / dy);
}
#if 0
static double
_compute_inverse_slope (cairo_line_t *l)
{
@ -460,6 +523,7 @@ _line_segs_intersect_ceil (cairo_line_t *l1, cairo_line_t *l2, cairo_fixed_t *y_
return 1;
}
#endif
/* The algorithm here is pretty simple:

986
src/cairo_wideint.c Normal file
View file

@ -0,0 +1,986 @@
/*
* $Id: cairo_wideint.c,v 1.1 2004-05-28 19:37:15 keithp Exp $
*
* Copyright © 2004 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "cairoint.h"
#if !HAVE_UINT64_T || !HAVE_UINT128_T
static const unsigned char top_bit[256] =
{
0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
};
#endif
#if HAVE_UINT64_T
#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))
/* Divide num by den using the native 64-bit operators, returning both
 * the (truncated) quotient and the remainder in one struct. */
const cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t result;

    result.rem = num % den;
    result.quo = num / den;
    return result;
}
#else
/* Zero-extend a 32-bit value into the two-word 64-bit representation. */
const cairo_uint64_t
_cairo_uint32_to_uint64 (uint32_t i)
{
    cairo_uint64_t wide;

    wide.hi = 0;
    wide.lo = i;
    return wide;
}
/* Sign-extend a 32-bit value into the two-word 64-bit representation:
 * the high word is all-ones for negative inputs, zero otherwise. */
const cairo_int64_t
_cairo_int32_to_int64 (int32_t i)
{
    cairo_int64_t wide;

    wide.lo = i;
    wide.hi = (i < 0) ? 0xffffffff : 0;
    return wide;
}
/* Assemble a 64-bit value from its two 32-bit halves. */
static const cairo_uint64_t
_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
{
    cairo_uint64_t wide;

    wide.hi = h;
    wide.lo = l;
    return wide;
}
/* Two-word addition: add the halves, then propagate the carry out of
 * the low word (detected by unsigned wrap-around). */
const cairo_uint64_t
_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t sum;

    sum.lo = a.lo + b.lo;
    sum.hi = a.hi + b.hi + (sum.lo < a.lo);
    return sum;
}
/* Two-word subtraction: subtract the halves, then propagate the borrow
 * out of the low word (a.lo < b.lo iff the low subtraction wrapped). */
const cairo_uint64_t
_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t diff;

    diff.lo = a.lo - b.lo;
    diff.hi = a.hi - b.hi - (a.lo < b.lo);
    return diff;
}
/* split a uint32_t into 16-bit digits; carry16 is one unit of the high digit */
#define uint32_lo(i)	((i) & 0xffff)
#define uint32_hi(i)	((i) >> 16)
#define uint32_carry16	((1) << 16)

/*
 * 32x32 -> 64 bit multiply built from four 16x16 -> 32 partial products.
 */
const cairo_uint64_t
_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
{
    cairo_uint64_t s;
    uint16_t ah, al, bh, bl;
    uint32_t r0, r1, r2, r3;

    al = uint32_lo (a);
    ah = uint32_hi (a);
    bl = uint32_lo (b);
    bh = uint32_hi (b);

    r0 = (uint32_t) al * bl;	/* contributes at bit 0 */
    r1 = (uint32_t) al * bh;	/* contributes at bit 16 */
    r2 = (uint32_t) ah * bl;	/* contributes at bit 16 */
    r3 = (uint32_t) ah * bh;	/* contributes at bit 32 */

    r1 += uint32_hi(r0);    /* no carry possible */
    r1 += r2;		    /* but this can carry */
    if (r1 < r2)	    /* check */
	r3 += uint32_carry16;	/* a carry out of r1 is worth 2^48 == carry16 at bit 32 */

    s.hi = r3 + uint32_hi(r1);
    s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
    return s;
}
/* Full 64x64 multiply, result truncated mod 2^64: the cross terms
 * a.lo*b.hi and a.hi*b.lo can only affect the high word. */
const cairo_uint64_t
_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint64_t prod = _cairo_uint32x32_64_mul (a.lo, b.lo);

    prod.hi += a.lo * b.hi;
    prod.hi += a.hi * b.lo;
    return prod;
}
/*
 * Logical shift left by 0..63 bits.  (shift >= 64 is outside the
 * contract: the 32-bit word shifts below would be undefined.)
 */
const cairo_uint64_t
_cairo_uint64_lsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
	/* move the whole low word up, then shift the residue below */
	a.hi = a.lo;
	a.lo = 0;
	shift -= 32;
    }
    if (shift)
    {
	a.hi = a.hi << shift | a.lo >> (32 - shift);
	a.lo = a.lo << shift;
    }
    return a;
}
/*
 * Logical shift right by 0..63 bits, filling with zeros.
 */
const cairo_uint64_t
_cairo_uint64_rsl (cairo_uint64_t a, int shift)
{
    if (shift >= 32)
    {
	/* move the whole high word down, then shift the residue below */
	a.lo = a.hi;
	a.hi = 0;
	shift -= 32;
    }
    if (shift)
    {
	a.lo = a.lo >> shift | a.hi << (32 - shift);
	a.hi = a.hi >> shift;
    }
    return a;
}
/* 32-bit arithmetic right shift; relies on '>>' of a negative int32_t
 * being arithmetic (implementation-defined but universal) -- TODO confirm
 * on all target compilers */
#define _cairo_uint32_rsa(a,n)	((uint32_t) (((int32_t) (a)) >> (n)))

/*
 * Arithmetic shift right by 0..63 bits, replicating the sign bit.
 */
const cairo_int64_t
_cairo_uint64_rsa (cairo_int64_t a, int shift)
{
    if (shift >= 32)
    {
	a.lo = a.hi;
	a.hi = _cairo_uint32_rsa (a.hi, 31);	/* fill high word with the sign */
	shift -= 32;
    }
    if (shift)
    {
	/* a.lo must combine with the *unshifted* a.hi before a.hi moves */
	a.lo = a.lo >> shift | a.hi << (32 - shift);
	a.hi = _cairo_uint32_rsa (a.hi, shift);
    }
    return a;
}
/* Unsigned two-word compare: order by high word, break ties on the low word. */
const int
_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
{
    if (a.hi != b.hi)
	return a.hi < b.hi;
    return a.lo < b.lo;
}
/* Equality of two-word values: no bit may differ in either half. */
const int
_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
{
    return ((a.hi ^ b.hi) | (a.lo ^ b.lo)) == 0;
}
/* Signed compare: if the signs differ the negative operand is smaller;
 * otherwise two's-complement order agrees with unsigned order. */
const int
_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
{
    int a_neg = _cairo_int64_negative (a);
    int b_neg = _cairo_int64_negative (b);

    if (a_neg != b_neg)
	return a_neg;
    return _cairo_uint64_lt (a, b);
}
/* Bitwise complement of both halves. */
const cairo_uint64_t
_cairo_uint64_not (cairo_uint64_t a)
{
    cairo_uint64_t flipped;

    flipped.lo = ~a.lo;
    flipped.hi = ~a.hi;
    return flipped;
}
/* Two's-complement negation: complement both words, then add one,
 * carrying into the high word if the low word wraps to zero. */
const cairo_uint64_t
_cairo_uint64_negate (cairo_uint64_t a)
{
    a.hi = ~a.hi;
    a.lo = ~a.lo + 1;
    if (a.lo == 0)
	a.hi++;
    return a;
}
/*
 * The design of this algorithm comes from GCC,
 * but the actual implementation is new
 */
/* Count leading zero bits of i (returns 32 for i == 0): locate the
 * byte holding the top set bit, then finish with the top_bit table. */
static const int
_cairo_leading_zeros32 (uint32_t i)
{
    int top;

    if (i < 0x100)
	top = 0;
    else if (i < 0x10000)
	top = 8;
    else if (i < 0x1000000)
	top = 16;
    else
	top = 24;
    /* top_bit[b] is the 1-based position of b's highest set bit */
    top = top + top_bit [i >> top];
    return 32 - top;
}
/* quotient/remainder pair for the 32-bit helper below */
typedef struct _cairo_uquorem32_t {
    uint32_t	quo;
    uint32_t	rem;
} cairo_uquorem32_t;

/*
 * den >= num.hi
 *
 * Schoolbook division of a 64-bit numerator by a *normalized* 32-bit
 * divisor (top bit of den set), producing the quotient 16 bits at a
 * time from the divisor's 16-bit digits d1:d0.  Each trial quotient
 * digit may be at most 2 too large, hence the two correction steps.
 */
static const cairo_uquorem32_t
_cairo_uint64x32_normalized_divrem (cairo_uint64_t num, uint32_t den)
{
    cairo_uquorem32_t qr;
    uint32_t q0, q1, r0, r1;
    uint16_t d0, d1;
    uint32_t t;

    d0 = den & 0xffff;
    d1 = den >> 16;

    /* high quotient digit: divide by the top divisor digit, then correct */
    q1 = num.hi / d1;
    r1 = num.hi % d1;
    t = q1 * d0;
    r1 = (r1 << 16) | (num.lo >> 16);
    if (r1 < t)
    {
	q1--;
	r1 += den;
	/* only correct again if the first add did not already overflow */
	if (r1 >= den && r1 < t)
	{
	    q1--;
	    r1 += den;
	}
    }
    r1 -= t;

    /* low quotient digit: same scheme on the remaining bits */
    q0 = r1 / d1;
    r0 = r1 % d1;
    t = q0 * d0;
    r0 = (r0 << 16) | (num.lo & 0xffff);
    if (r0 < t)
    {
	q0--;
	r0 += den;
	if (r0 >= den && r0 < t)
	{
	    q0--;
	    r0 += den;
	}
    }
    r0 -= t;

    qr.quo = (q1 << 16) | q0;
    qr.rem = r0;
    return qr;
}
/*
 * Full 64/64 unsigned divide for the two-word representation.
 * Strategy (after libgcc): shift num and den left until den's top
 * bit is set ("normalize"), reduce to one or two calls of the
 * normalized 64/32 helper, then shift the remainder back down.
 * Case labels: n/d = zero word, N/D = non-zero word, q = quotient word.
 */
const cairo_uquorem64_t
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
{
    cairo_uquorem32_t qr32;
    cairo_uquorem64_t qr;
    int norm;
    uint32_t q1, q0, r1, r0;

    if (den.hi == 0)
    {
	if (den.lo > num.hi)
	{
	    /* 0q = nn / 0d */
	    norm = _cairo_leading_zeros32 (den.lo);
	    if (norm)
	    {
		den.lo <<= norm;
		num = _cairo_uint64_lsl (num, norm);
	    }
	    q1 = 0;
	}
	else
	{
	    /* qq = NN / 0d */

	    /* deliberate divide-by-zero to raise a trap, as in libgcc */
	    if (den.lo == 0)
		den.lo = 1 / den.lo;

	    norm = _cairo_leading_zeros32 (den.lo);
	    if (norm)
	    {
		cairo_uint64_t num1;
		den.lo <<= norm;
		/* divide the top 64 normalized bits first */
		num1 = _cairo_uint64_rsl (num, 32 - norm);
		qr32 = _cairo_uint64x32_normalized_divrem (num1, den.lo);
		q1 = qr32.quo;
		num.hi = qr32.rem;
		num.lo <<= norm;
	    }
	    else
	    {
		/* den.lo already normalized; high quotient word is 1 */
		num.hi -= den.lo;
		q1 = 1;
	    }
	}
	/* low quotient word; un-normalize the remainder */
	qr32 = _cairo_uint64x32_normalized_divrem (num, den.lo);
	q0 = qr32.quo;
	r1 = 0;
	r0 = qr32.rem >> norm;
    }
    else
    {
	if (den.hi > num.hi)
	{
	    /* 00 = nn / DD */
	    q0 = q1 = 0;
	    r0 = num.lo;
	    r1 = num.hi;
	}
	else
	{
	    /* 0q = NN / dd */
	    norm = _cairo_leading_zeros32 (den.hi);
	    if (norm == 0)
	    {
		/* den already normalized: quotient is 0 or 1 */
		if (num.hi > den.hi || num.lo >= den.lo)
		{
		    q0 = 1;
		    num = _cairo_uint64_sub (num, den);
		}
		else
		    q0 = 0;

		q1 = 0;
		r0 = num.lo;
		r1 = num.hi;
	    }
	    else
	    {
		cairo_uint64_t num1;
		cairo_uint64_t part;

		num1 = _cairo_uint64_rsl (num, 32 - norm);
		den = _cairo_uint64_lsl (den, norm);

		/* trial quotient from the top words, then correct by
		 * comparing the full partial product against num */
		qr32 = _cairo_uint64x32_normalized_divrem (num1, den.hi);
		part = _cairo_uint32x32_64_mul (qr32.quo, den.lo);

		q0 = qr32.quo;

		num.lo <<= norm;
		num.hi = qr32.rem;

		if (_cairo_uint64_gt (part, num))
		{
		    q0--;
		    part = _cairo_uint64_sub (part, den);
		}

		q1 = 0;

		num = _cairo_uint64_sub (num, part);
		num = _cairo_uint64_rsl (num, norm);
		r0 = num.lo;
		r1 = num.hi;
	    }
	}
    }
    qr.quo.lo = q0;
    qr.quo.hi = q1;
    qr.rem.lo = r0;
    qr.rem.hi = r1;
    return qr;
}
#endif /* !HAVE_UINT64_T */
/*
 * Signed divide in terms of the unsigned one: divide the magnitudes
 * and re-apply the signs.  C semantics: quotient truncates toward
 * zero, remainder takes the sign of the dividend.
 *
 * Fix: the former "(cairo_int64_t)" casts on the quotient were casts
 * to a struct type in the !HAVE_UINT64_T configuration, which is not
 * valid C; cairo_int64_t and cairo_uint64_t are the same typedef in
 * both configurations, so the casts are dropped with no behavior change.
 */
const cairo_quorem64_t
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
{
    int num_neg = _cairo_int64_negative (num);
    int den_neg = _cairo_int64_negative (den);
    cairo_uquorem64_t uqr;
    cairo_quorem64_t qr;

    if (num_neg)
	num = _cairo_int64_negate (num);
    if (den_neg)
	den = _cairo_int64_negate (den);
    uqr = _cairo_uint64_divrem (num, den);
    if (num_neg)
	qr.rem = _cairo_int64_negate (uqr.rem);
    else
	qr.rem = uqr.rem;
    if (num_neg != den_neg)
	qr.quo = _cairo_int64_negate (uqr.quo);
    else
	qr.quo = uqr.quo;
    return qr;
}
#if HAVE_UINT128_T
/* Divide using the native 128-bit operators, returning quotient and
 * remainder together. */
const cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem128_t result;

    result.rem = num % den;
    result.quo = num / den;
    return result;
}
#else
/* Zero-extend a 32-bit value into the two-word 128-bit representation. */
const cairo_uint128_t
_cairo_uint32_to_uint128 (uint32_t i)
{
    cairo_uint128_t wide;

    wide.hi = _cairo_uint32_to_uint64 (0);
    wide.lo = _cairo_uint32_to_uint64 (i);
    return wide;
}
/* Sign-extend a 32-bit value to 128 bits: the high word replicates the sign. */
const cairo_int128_t
_cairo_int32_to_int128 (int32_t i)
{
    cairo_int128_t wide;

    wide.lo = _cairo_int32_to_int64 (i);
    wide.hi = _cairo_int32_to_int64 ((i < 0) ? -1 : 0);
    return wide;
}
/* Zero-extend a 64-bit value into the two-word 128-bit representation. */
const cairo_uint128_t
_cairo_uint64_to_uint128 (cairo_uint64_t i)
{
    cairo_uint128_t wide;

    wide.hi = _cairo_uint32_to_uint64 (0);
    wide.lo = i;
    return wide;
}
/* Sign-extend a 64-bit value to 128 bits: the high word replicates the sign. */
const cairo_int128_t
_cairo_int64_to_int128 (cairo_int64_t i)
{
    cairo_int128_t wide;

    wide.lo = i;
    wide.hi = _cairo_int32_to_int64 (_cairo_int64_negative (i) ? -1 : 0);
    return wide;
}
/* Two-word 128-bit addition; a wrapped low word signals a carry into
 * the high word. */
const cairo_uint128_t
_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t sum;

    sum.lo = _cairo_uint64_add (a.lo, b.lo);
    sum.hi = _cairo_uint64_add (a.hi, b.hi);
    if (_cairo_uint64_lt (sum.lo, a.lo))	/* carry out of the low word */
	sum.hi = _cairo_uint64_add (sum.hi, _cairo_uint32_to_uint64 (1));
    return sum;
}
/* Two-word 128-bit subtraction; a.lo < b.lo means the low word borrowed. */
const cairo_uint128_t
_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t diff;

    diff.lo = _cairo_uint64_sub (a.lo, b.lo);
    diff.hi = _cairo_uint64_sub (a.hi, b.hi);
    if (_cairo_uint64_lt (a.lo, b.lo))		/* borrow out of the low word */
	diff.hi = _cairo_uint64_sub (diff.hi, _cairo_uint32_to_uint64 (1));
    return diff;
}
#if HAVE_UINT64_T

/* 64-bit word split into 32-bit halves.  The *32 forms yield a bare
 * uint32_t; the unsuffixed forms keep the value widened to 64 bits. */
#define uint64_lo32(i)	((i) & 0xffffffff)
#define uint64_hi32(i)	((i) >> 32)
#define uint64_lo(i)	((i) & 0xffffffff)
#define uint64_hi(i)	((i) >> 32)
#define uint64_shift32(i)	((i) << 32)
#define uint64_carry32	(((uint64_t) 1) << 32)

#else

#define uint64_lo32(i)	((i).lo)
#define uint64_hi32(i)	((i).hi)

/* low half, widened back to a 64-bit value */
static const cairo_uint64_t
uint64_lo (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.lo;
    s.hi = 0;
    return s;
}

/* high half, moved down to the low word */
static const cairo_uint64_t
uint64_hi (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = i.hi;
    s.hi = 0;
    return s;
}

/* i << 32 (low word moved up, zeros below) */
static const cairo_uint64_t
uint64_shift32 (cairo_uint64_t i)
{
    cairo_uint64_t s;

    s.lo = 0;
    s.hi = i.lo;
    return s;
}

/* { lo = 0, hi = 1 } == 2^32 */
static const cairo_uint64_t uint64_carry32 = { 0, 1 };

#endif
/*
 * 64x64 -> 128 bit multiply from four 32x32 -> 64 partial products,
 * mirroring _cairo_uint32x32_64_mul one level up.
 */
const cairo_uint128_t
_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
{
    cairo_uint128_t	s;
    uint32_t		ah, al, bh, bl;
    cairo_uint64_t	r0, r1, r2, r3;

    al = uint64_lo32 (a);
    ah = uint64_hi32 (a);
    bl = uint64_lo32 (b);
    bh = uint64_hi32 (b);

    r0 = _cairo_uint32x32_64_mul (al, bl);	/* contributes at bit 0 */
    r1 = _cairo_uint32x32_64_mul (al, bh);	/* contributes at bit 32 */
    r2 = _cairo_uint32x32_64_mul (ah, bl);	/* contributes at bit 32 */
    r3 = _cairo_uint32x32_64_mul (ah, bh);	/* contributes at bit 64 */

    r1 = _cairo_uint64_add (r1, uint64_hi (r0));    /* no carry possible */
    r1 = _cairo_uint64_add (r1, r2);	    	    /* but this can carry */
    if (_cairo_uint64_lt (r1, r2))	    	    /* check */
	r3 = _cairo_uint64_add (r3, uint64_carry32);	/* carry out of r1 == 2^32 at bit 64 */

    s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
    s.lo = _cairo_uint64_add (uint64_shift32 (r1),
			      uint64_lo (r0));
    return s;
}
/*
 * Full 128x128 multiply, result truncated mod 2^128: the cross terms
 * a.lo*b.hi and a.hi*b.lo can only affect the high 64 bits.
 */
const cairo_uint128_t
_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
{
    cairo_uint128_t	s;

    s = _cairo_uint64x64_128_mul (a.lo, b.lo);
    s.hi = _cairo_uint64_add (s.hi,
			      _cairo_uint64_mul (a.lo, b.hi));
    s.hi = _cairo_uint64_add (s.hi,
			      _cairo_uint64_mul (a.hi, b.lo));
    return s;
}
/*
 * Logical shift left by 0..127 bits (same word-then-bit scheme as the
 * 64-bit version, with 64-bit words).
 */
const cairo_uint128_t
_cairo_uint128_lsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
	a.hi = a.lo;
	a.lo = _cairo_uint32_to_uint64 (0);
	shift -= 64;
    }
    if (shift)
    {
	a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
				  _cairo_uint64_rsl (a.lo, (64 - shift)));
	a.lo = _cairo_uint64_lsl (a.lo, shift);
    }
    return a;
}
/*
 * Logical shift right by 0..127 bits, filling with zeros.
 */
const cairo_uint128_t
_cairo_uint128_rsl (cairo_uint128_t a, int shift)
{
    if (shift >= 64)
    {
	a.lo = a.hi;
	a.hi = _cairo_uint32_to_uint64 (0);
	shift -= 64;
    }
    if (shift)
    {
	/* the shifted-out high bits cannot overlap, so 'add' acts as 'or' */
	a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
				  _cairo_uint64_lsl (a.hi, (64 - shift)));
	a.hi = _cairo_uint64_rsl (a.hi, shift);
    }
    return a;
}
/*
 * Arithmetic shift right by 0..127 bits, replicating the sign bit.
 */
const cairo_uint128_t
_cairo_uint128_rsa (cairo_int128_t a, int shift)
{
    if (shift >= 64)
    {
	a.lo = a.hi;
	a.hi = _cairo_uint64_rsa (a.hi, 64-1);	/* fill with the sign bit */
	shift -= 64;
    }
    if (shift)
    {
	/* combine a.lo with the *unshifted* a.hi before a.hi moves */
	a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
				  _cairo_uint64_lsl (a.hi, (64 - shift)));
	a.hi = _cairo_uint64_rsa (a.hi, shift);
    }
    return a;
}
/* Unsigned two-word compare: order by high word, break ties on the low word. */
const int
_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
{
    if (!_cairo_uint64_eq (a.hi, b.hi))
	return _cairo_uint64_lt (a.hi, b.hi);
    return _cairo_uint64_lt (a.lo, b.lo);
}
/* Signed compare: a differing sign decides; otherwise two's-complement
 * order agrees with unsigned order. */
const int
_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
{
    int a_neg = _cairo_int128_negative (a);
    int b_neg = _cairo_int128_negative (b);

    if (a_neg != b_neg)
	return a_neg;
    return _cairo_uint128_lt (a, b);
}
/* Equality of two-word 128-bit values: both halves must match. */
const int
_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
{
    return _cairo_uint64_eq (a.lo, b.lo) &&
	   _cairo_uint64_eq (a.hi, b.hi);
}
/*
 * The design of this algorithm comes from GCC,
 * but the actual implementation is new
 */
/*
 * den >= num.hi
 *
 * 128/64 division with a *normalized* divisor (top bit of den set),
 * producing the quotient 32 bits at a time from den's 32-bit digits
 * d1:d0 -- the same scheme as _cairo_uint64x32_normalized_divrem, one
 * word size up.  Each trial digit may be at most 2 too large, hence
 * the two correction steps.
 */
static const cairo_uquorem64_t
_cairo_uint128x64_normalized_divrem (cairo_uint128_t num, cairo_uint64_t den)
{
    cairo_uquorem64_t qr64;
    cairo_uquorem64_t qr;
    uint32_t q0, q1;
    cairo_uint64_t r0, r1;
    uint32_t d0, d1;
    cairo_uint64_t t;

    d0 = uint64_lo32 (den);
    d1 = uint64_hi32 (den);

    /* high quotient digit: divide by the top divisor digit, then correct */
    qr64 = _cairo_uint64_divrem (num.hi, _cairo_uint32_to_uint64 (d1));
    q1 = _cairo_uint64_to_uint32 (qr64.quo);
    r1 = qr64.rem;

    t = _cairo_uint32x32_64_mul (q1, d0);

    r1 = _cairo_uint64_add (_cairo_uint64_lsl (r1, 32),
			    _cairo_uint64_rsl (num.lo, 32));
    if (_cairo_uint64_lt (r1, t))
    {
	q1--;
	r1 = _cairo_uint64_add (r1, den);
	/* only correct again if the first add did not already overflow */
	if (_cairo_uint64_ge (r1, den) && _cairo_uint64_lt (r1, t))
	{
	    q1--;
	    r1 = _cairo_uint64_add (r1, den);
	}
    }

    r1 = _cairo_uint64_sub (r1, t);

    /* low quotient digit: same scheme on the remaining bits */
    qr64 = _cairo_uint64_divrem (r1, _cairo_uint32_to_uint64 (d1));

    q0 = _cairo_uint64_to_uint32 (qr64.quo);
    r0 = qr64.rem;

    t = _cairo_uint32x32_64_mul (q0, d0);

    r0 = _cairo_uint64_add (_cairo_uint64_lsl (r0, 32),
			    _cairo_uint32_to_uint64 (_cairo_uint64_to_uint32 (num.lo)));
    if (_cairo_uint64_lt (r0, t))
    {
	q0--;
	r0 = _cairo_uint64_add (r0, den);
	if (_cairo_uint64_ge (r0, den) && _cairo_uint64_lt (r0, t))
	{
	    q0--;
	    r0 = _cairo_uint64_add (r0, den);
	}
    }

    r0 = _cairo_uint64_sub (r0, t);

    qr.quo = _cairo_uint32s_to_uint64 (q1, q0);
    qr.rem = r0;
    return qr;
}
#if HAVE_UINT64_T
/* count leading zero bits of q (returns 64 for q == 0): binary-search
 * the containing byte, then finish with the top_bit table */
static const int
_cairo_leading_zeros64 (cairo_uint64_t q)
{
    int top = 0;

    if (q >= (uint64_t) 0x10000 << 16)	/* q >= 2^32 */
    {
	top += 32;
	q >>= 32;
    }
    if (q >= (uint64_t) 0x10000)
    {
	top += 16;
	q >>= 16;
    }
    if (q >= (uint64_t) 0x100)
    {
	top += 8;
	q >>= 8;
    }
    top += top_bit [q];
    return 64 - top;
}
#else
/* struct form: delegate to the 32-bit counter on the first non-zero word */
static const int
_cairo_leading_zeros64 (cairo_uint64_t d)
{
    if (d.hi)
	return _cairo_leading_zeros32 (d.hi);
    else
	return 32 + _cairo_leading_zeros32 (d.lo);
}
#endif
/*
 * Full 128/128 unsigned divide: the same normalize-and-reduce scheme
 * as the struct _cairo_uint64_divrem, one word size up.
 * Case labels: n/d = zero word, N/D = non-zero word, q = quotient word.
 */
const cairo_uquorem128_t
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
{
    cairo_uquorem64_t qr64;
    cairo_uquorem128_t qr;
    int norm;
    cairo_uint64_t q1, q0, r1, r0;

    if (_cairo_uint64_eq (den.hi, _cairo_uint32_to_uint64 (0)))
    {
	if (_cairo_uint64_gt (den.lo, num.hi))
	{
	    /* 0q = nn / 0d */
	    norm = _cairo_leading_zeros64 (den.lo);
	    if (norm)
	    {
		den.lo = _cairo_uint64_lsl (den.lo, norm);
		num = _cairo_uint128_lsl (num, norm);
	    }
	    q1 = _cairo_uint32_to_uint64 (0);
	}
	else
	{
	    /* qq = NN / 0d */

	    /* deliberate 1/0 divide to raise a trap, as in libgcc */
	    if (_cairo_uint64_eq (den.lo, _cairo_uint32_to_uint64 (0)))
		den.lo = _cairo_uint64_divrem (_cairo_uint32_to_uint64 (1),
					       den.lo).quo;

	    norm = _cairo_leading_zeros64 (den.lo);
	    if (norm)
	    {
		cairo_uint128_t num1;

		den.lo = _cairo_uint64_lsl (den.lo, norm);
		/* divide the top 128 normalized bits first */
		num1 = _cairo_uint128_rsl (num, 64 - norm);
		qr64 = _cairo_uint128x64_normalized_divrem (num1, den.lo);
		q1 = qr64.quo;
		num.hi = qr64.rem;
		num.lo = _cairo_uint64_lsl (num.lo, norm);
	    }
	    else
	    {
		/* den.lo already normalized; high quotient word is 1 */
		num.hi = _cairo_uint64_sub (num.hi, den.lo);
		q1 = _cairo_uint32_to_uint64 (1);
	    }
	}
	/* low quotient word; un-normalize the remainder */
	qr64 = _cairo_uint128x64_normalized_divrem (num, den.lo);
	q0 = qr64.quo;
	r1 = _cairo_uint32_to_uint64 (0);
	r0 = _cairo_uint64_rsl (qr64.rem, norm);
    }
    else
    {
	if (_cairo_uint64_gt (den.hi, num.hi))
	{
	    /* 00 = nn / DD */
	    q0 = q1 = _cairo_uint32_to_uint64 (0);
	    r0 = num.lo;
	    r1 = num.hi;
	}
	else
	{
	    /* 0q = NN / dd */
	    norm = _cairo_leading_zeros64 (den.hi);
	    if (norm == 0)
	    {
		/* den already normalized: quotient is 0 or 1 */
		if (_cairo_uint64_gt (num.hi, den.hi) ||
		    _cairo_uint64_ge (num.lo, den.lo))
		{
		    q0 = _cairo_uint32_to_uint64 (1);
		    num = _cairo_uint128_sub (num, den);
		}
		else
		    q0 = _cairo_uint32_to_uint64 (0);

		q1 = _cairo_uint32_to_uint64 (0);
		r0 = num.lo;
		r1 = num.hi;
	    }
	    else
	    {
		cairo_uint128_t num1;
		cairo_uint128_t part;

		num1 = _cairo_uint128_rsl (num, 64 - norm);
		den = _cairo_uint128_lsl (den, norm);

		/* trial quotient from the top words, then correct by
		 * comparing the full partial product against num */
		qr64 = _cairo_uint128x64_normalized_divrem (num1, den.hi);
		part = _cairo_uint64x64_128_mul (qr64.quo, den.lo);

		q0 = qr64.quo;

		num.lo = _cairo_uint64_lsl (num.lo, norm);
		num.hi = qr64.rem;

		if (_cairo_uint128_gt (part, num))
		{
		    q0 = _cairo_uint64_sub (q0, _cairo_uint32_to_uint64 (1));
		    part = _cairo_uint128_sub (part, den);
		}

		q1 = _cairo_uint32_to_uint64 (0);

		num = _cairo_uint128_sub (num, part);
		num = _cairo_uint128_rsl (num, norm);
		r0 = num.lo;
		r1 = num.hi;
	    }
	}
    }
    qr.quo.lo = q0;
    qr.quo.hi = q1;
    qr.rem.lo = r0;
    qr.rem.hi = r1;
    return qr;
}
/* Two's-complement negation: ~a + 1 (the add propagates the carry). */
const cairo_int128_t
_cairo_int128_negate (cairo_int128_t a)
{
    a.lo = _cairo_uint64_not (a.lo);
    a.hi = _cairo_uint64_not (a.hi);
    return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
}
/* Bitwise complement of both halves. */
const cairo_int128_t
_cairo_int128_not (cairo_int128_t a)
{
    cairo_int128_t flipped;

    flipped.lo = _cairo_uint64_not (a.lo);
    flipped.hi = _cairo_uint64_not (a.hi);
    return flipped;
}
#endif /* !HAVE_UINT128_T */
/*
 * Signed 128-bit divide via the unsigned one: divide the magnitudes,
 * then re-apply the signs.  C semantics: quotient truncates toward
 * zero, remainder takes the sign of the dividend.
 */
const cairo_quorem128_t
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
{
    cairo_quorem128_t result;
    cairo_uquorem128_t uresult;
    int num_neg = _cairo_int128_negative (num);
    int den_neg = _cairo_int128_negative (den);

    if (num_neg)
	num = _cairo_int128_negate (num);
    if (den_neg)
	den = _cairo_int128_negate (den);

    uresult = _cairo_uint128_divrem (num, den);

    if (num_neg)
	result.rem = _cairo_int128_negate (uresult.rem);
    else
	result.rem = uresult.rem;
    if (num_neg != den_neg)
	result.quo = _cairo_int128_negate (uresult.quo);
    else
	result.quo = uresult.quo;
    return result;
}

272
src/cairo_wideint.h Normal file
View file

@ -0,0 +1,272 @@
/*
* $Id: cairo_wideint.h,v 1.1 2004-05-28 19:37:15 keithp Exp $
*
* Copyright © 2004 Keith Packard
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of Keith Packard not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. Keith Packard makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef CAIRO_WIDEINT_H
#define CAIRO_WIDEINT_H
#include <stdint.h>
/*
* 64-bit datatypes. Two separate implementations, one using
* built-in 64-bit signed/unsigned types another implemented
* as a pair of 32-bit ints
*/
#define I __internal_linkage
#if !HAVE_UINT64_T
typedef struct _cairo_uint64 {
uint32_t lo, hi;
} cairo_uint64_t, cairo_int64_t;
const cairo_uint64_t I _cairo_uint32_to_uint64 (uint32_t i);
#define _cairo_uint64_to_uint32(a) ((a).lo)
const cairo_uint64_t I _cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint32x32_64_mul (uint32_t a, uint32_t b);
const cairo_uint64_t I _cairo_uint64_lsl (cairo_uint64_t a, int shift);
const cairo_uint64_t I _cairo_uint64_rsl (cairo_uint64_t a, int shift);
const cairo_uint64_t I _cairo_uint64_rsa (cairo_uint64_t a, int shift);
const int _cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b);
const int _cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint64_t I _cairo_uint64_negate (cairo_uint64_t a);
#define _cairo_uint64_negative(a) (((int32_t) ((a).hi)) < 0)
const cairo_uint64_t I _cairo_uint64_not (cairo_uint64_t a);
#define _cairo_uint64_to_int64(i) (i)
#define _cairo_int64_to_uint64(i) (i)
const cairo_int64_t I _cairo_int32_to_int64(int32_t i);
#define _cairo_int64_to_int32(a) ((int32_t) _cairo_uint64_to_uint32(a))
#define _cairo_int64_add(a,b) _cairo_uint64_add (a,b)
#define _cairo_int64_sub(a,b) _cairo_uint64_sub (a,b)
#define _cairo_int64_mul(a,b) _cairo_uint64_mul (a,b)
#define _cairo_int32x32_64_mul(a,b) _cairo_uint32x32_64_mul ((uint32_t) (a), (uint32_t) (b)))
const int _cairo_int64_lt (cairo_uint64_t a, cairo_uint64_t b);
#define _cairo_int64_eq(a,b) _cairo_uint64_eq (a,b)
#define _cairo_int64_lsl(a,b) _cairo_uint64_lsl (a,b)
#define _cairo_int64_rsl(a,b) _cairo_uint64_rsl (a,b)
#define _cairo_int64_rsa(a,b) _cairo_uint64_rsa (a,b)
#define _cairo_int64_negate(a) _cairo_uint64_negate(a)
#define _cairo_int64_negative(a) (((int32_t) ((a).hi)) < 0)
#define _cairo_int64_not(a) _cairo_uint64_not(a)
#else
typedef uint64_t cairo_uint64_t;
typedef int64_t cairo_int64_t;
#define _cairo_uint32_to_uint64(i) ((uint64_t) (i))
#define _cairo_uint64_to_uint32(i) ((uint32_t) (i))
#define _cairo_uint64_add(a,b) ((a) + (b))
#define _cairo_uint64_sub(a,b) ((a) - (b))
#define _cairo_uint64_mul(a,b) ((a) * (b))
#define _cairo_uint32x32_64_mul(a,b) ((uint64_t) (a) * (b))
#define _cairo_uint64_lsl(a,b) ((a) << (b))
#define _cairo_uint64_rsl(a,b) ((uint64_t) (a) >> (b))
#define _cairo_uint64_rsa(a,b) ((uint64_t) ((int64_t) (a) >> (b)))
#define _cairo_uint64_lt(a,b) ((a) < (b))
#define _cairo_uint64_eq(a,b) ((a) == (b))
#define _cairo_uint64_negate(a) ((uint64_t) -((int64_t) (a)))
#define _cairo_uint64_negative(a) ((int64_t) (a) < 0)
#define _cairo_uint64_not(a) (~(a))
#define _cairo_uint64_to_int64(i) ((int64_t) (i))
#define _cairo_int64_to_uint64(i) ((uint64_t) (i))
#define _cairo_int32_to_int64(i) ((int64_t) (i))
#define _cairo_int64_to_int32(i) ((int32_t) (i))
#define _cairo_int64_add(a,b) ((a) + (b))
#define _cairo_int64_sub(a,b) ((a) - (b))
#define _cairo_int64_mul(a,b) ((a) * (b))
#define _cairo_int32x32_64_mul(a,b) ((int64_t) (a) * (b))
#define _cairo_int64_lt(a,b) ((a) < (b))
#define _cairo_int64_eq(a,b) ((a) == (b))
#define _cairo_int64_lsl(a,b) ((a) << (b))
#define _cairo_int64_rsl(a,b) ((int64_t) ((uint64_t) (a) >> (b)))
#define _cairo_int64_rsa(a,b) ((int64_t) (a) >> (b))
#define _cairo_int64_negate(a) (-(a))
#define _cairo_int64_negative(a) ((a) < 0)
#define _cairo_int64_not(a) (~(a))
#endif
/*
 * 64-bit comparisons derived from lt or eq
 */
#define _cairo_uint64_le(a,b) (!_cairo_uint64_gt(a,b))
#define _cairo_uint64_ne(a,b) (!_cairo_uint64_eq(a,b))
#define _cairo_uint64_ge(a,b) (!_cairo_uint64_lt(a,b))
#define _cairo_uint64_gt(a,b) _cairo_uint64_lt(b,a)
#define _cairo_int64_le(a,b) (!_cairo_int64_gt(a,b))
#define _cairo_int64_ne(a,b) (!_cairo_int64_eq(a,b))
#define _cairo_int64_ge(a,b) (!_cairo_int64_lt(a,b))
#define _cairo_int64_gt(a,b) _cairo_int64_lt(b,a)
/*
 * As the C implementation always computes both quotient and
 * remainder at once, provide a divrem interface that returns both
 * for the 'native' type as well.
 */
typedef struct _cairo_uquorem64 {
    cairo_uint64_t quo;
    cairo_uint64_t rem;
} cairo_uquorem64_t;
typedef struct _cairo_quorem64 {
    cairo_int64_t quo;
    cairo_int64_t rem;
} cairo_quorem64_t;
/* num/den -> { quotient, remainder }.  NOTE(review): rounding and
 * remainder-sign conventions for the signed variant are fixed by the
 * implementation in cairo_wideint.c -- confirm there. */
const cairo_uquorem64_t I
_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den);
const cairo_quorem64_t I
_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den);
/*
* 128-bit datatypes. Again, provide two implementations in
* case the machine has a native 128-bit datatype. GCC supports int128_t
* on ia64
*/
#if !HAVE_UINT128_T
typedef struct cairo_uint128 {
cairo_uint64_t lo, hi;
} cairo_uint128_t, cairo_int128_t;
const cairo_uint128_t I _cairo_uint32_to_uint128 (uint32_t i);
const cairo_uint128_t I _cairo_uint64_to_uint128 (cairo_uint64_t i);
#define _cairo_uint128_to_uint64(a) ((a).lo)
#define _cairo_uint128_to_uint32(a) _cairo_uint64_to_uint32(_cairo_uint128_to_uint64(a))
const cairo_uint128_t I _cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b);
const cairo_uint128_t I _cairo_uint128_lsl (cairo_uint128_t a, int shift);
const cairo_uint128_t I _cairo_uint128_rsl (cairo_uint128_t a, int shift);
const cairo_uint128_t I _cairo_uint128_rsa (cairo_uint128_t a, int shift);
const int _cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b);
const int _cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b);
const cairo_uint128_t I _cairo_uint128_negate (cairo_uint128_t a);
#define _cairo_uint128_negative(a) (_cairo_uint64_negative(a.hi))
const cairo_uint128_t I _cairo_uint128_not (cairo_uint128_t a);
#define _cairo_uint128_to_int128_(i) (i)
#define _cairo_int128_to_uint128(i) (i)
const cairo_int128_t I _cairo_int32_to_int128 (int32_t i);
const cairo_int128_t I _cairo_int64_to_int128 (cairo_int64_t i);
#define _cairo_int128_to_int64(a) ((cairo_int64_t) (a).lo);
#define _cairo_int128_to_int32(a) _cairo_int64_to_int32(_cairo_int128_to_int64(a))
#define _cairo_int128_add(a,b) _cairo_uint128_add(a,b)
#define _cairo_int128_sub(a,b) _cairo_uint128_sub(a,b)
#define _cairo_int128_mul(a,b) _cairo_uint128_mul(a,b)
#define _cairo_int64x64_128_mul(a,b) _cairo_uint64x64_128_mul ((cairo_uint64_t) (a), (cairo_uint64_t) (b))
#define _cairo_int128_lsl(a,b) _cairo_uint128_lsl(a,b)
#define _cairo_int128_rsl(a,b) _cairo_uint128_rsl(a,b)
#define _cairo_int128_rsa(a,b) _cairo_uint128_rsa(a,b)
const int _cairo_int128_lt (cairo_int128_t a, cairo_int128_t b);
#define _cairo_int128_eq(a,b) _cairo_uint128_eq (a,b)
#define _cairo_int128_negate(a) _cairo_uint128_negate(a)
#define _cairo_int128_negative(a) (_cairo_uint128_negative(a))
#define _cairo_int128_not(a) _cairo_uint128_not(a)
#else /* !HAVE_UINT128_T */
typedef uint128_t cairo_uint128_t;
typedef int128_t cairo_int128_t;
#define _cairo_uint32_to_uint128(i) ((uint128_t) (i))
#define _cairo_uint64_to_uint128(i) ((uint128_t) (i))
#define _cairo_uint128_to_uint64(i) ((uint64_t) (i))
#define _cairo_uint128_to_uint32(i) ((uint32_t) (i))
#define _cairo_uint128_add(a,b) ((a) + (b))
#define _cairo_uint128_sub(a,b) ((a) - (b))
#define _cairo_uint128_mul(a,b) ((a) * (b))
#define _cairo_uint64x64_128_mul(a,b) ((uint128_t) (a) * (b))
#define _cairo_uint128_lsl(a,b) ((a) << (b))
#define _cairo_uint128_rsl(a,b) ((uint128_t) (a) >> (b))
#define _cairo_uint128_rsa(a,b) ((uint128_t) ((int128_t) (a) >> (b)))
#define _cairo_uint128_lt(a,b) ((a) < (b))
#define _cairo_uint128_eq(a,b) ((a) == (b))
#define _cairo_uint128_negate(a) ((uint128_t) -((int128_t) (a)))
#define _cairo_uint128_negative(a) ((int128_t) (a) < 0)
#define _cairo_uint128_not(a) (~(a))
#define _cairo_uint128_to_int128(i) ((int128_t) (i))
#define _cairo_int128_to_uint128(i) ((uint128_t) (i))
#define _cairo_int32_to_int128(i) ((int128_t) (i))
#define _cairo_int64_to_int128(i) ((int128_t) (i))
#define _cairo_int128_to_int64(i) ((int64_t) (i))
#define _cairo_int128_to_int32(i) ((int32_t) (i))
#define _cairo_int128_add(a,b) ((a) + (b))
#define _cairo_int128_sub(a,b) ((a) - (b))
#define _cairo_int128_mul(a,b) ((a) * (b))
#define _cairo_int64x64_128_mul(a,b) ((int128_t) (a) * (b))
#define _cairo_int128_lt(a,b) ((a) < (b))
#define _cairo_int128_eq(a,b) ((a) == (b))
#define _cairo_int128_lsl(a,b) ((a) << (b))
#define _cairo_int128_rsl(a,b) ((int128_t) ((uint128_t) (a) >> (b)))
#define _cairo_int128_rsa(a,b) ((int128_t) (a) >> (b))
#define _cairo_int128_negate(a) (-(a))
#define _cairo_int128_negative(a) ((a) < 0)
#define _cairo_int128_not(a) (~(a))
#endif /* HAVE_UINT128_T */
/* Quotient/remainder pairs for 128-bit division; mirrors the
 * 64-bit cairo_uquorem64_t/cairo_quorem64_t interface. */
typedef struct _cairo_uquorem128 {
    cairo_uint128_t quo;
    cairo_uint128_t rem;
} cairo_uquorem128_t;
typedef struct _cairo_quorem128 {
    cairo_int128_t quo;
    cairo_int128_t rem;
} cairo_quorem128_t;
/* num/den -> { quotient, remainder }.  NOTE(review): rounding and
 * remainder-sign conventions for the signed variant are fixed by the
 * implementation in cairo_wideint.c -- confirm there. */
const cairo_uquorem128_t I
_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den);
const cairo_quorem128_t I
_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den);
/* 128-bit comparisons derived from lt or eq, mirroring the 64-bit set. */
#define _cairo_uint128_le(a,b) (!_cairo_uint128_gt(a,b))
#define _cairo_uint128_ne(a,b) (!_cairo_uint128_eq(a,b))
#define _cairo_uint128_ge(a,b) (!_cairo_uint128_lt(a,b))
#define _cairo_uint128_gt(a,b) _cairo_uint128_lt(b,a)
#define _cairo_int128_le(a,b) (!_cairo_int128_gt(a,b))
#define _cairo_int128_ne(a,b) (!_cairo_int128_eq(a,b))
#define _cairo_int128_ge(a,b) (!_cairo_int128_lt(a,b))
#define _cairo_int128_gt(a,b) _cairo_int128_lt(b,a)
/* Drop the local linkage shorthand so it doesn't leak to includers. */
#undef I
#endif /* CAIRO_WIDEINT_H */

View file

@ -36,11 +36,16 @@
#ifndef _CAIROINT_H_
#define _CAIROINT_H_
#if HAVE_CONFIG_H
#include "config.h"
#endif
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include <stdint.h>
#include "cairo.h"
@ -95,27 +100,13 @@
#define __attribute__(x)
#endif
#ifdef WIN32
typedef __int64 cairo_fixed_32_32_t;
#else
# if defined(__alpha__) || defined(__alpha) || \
defined(ia64) || defined(__ia64__) || \
defined(__sparc64__) || \
defined(__s390x__) || \
defined(x86_64) || defined (__x86_64__)
typedef long cairo_fixed_32_32_t;
# else
# if defined(__GNUC__) && \
((__GNUC__ > 2) || \
((__GNUC__ == 2) && defined(__GNUC_MINOR__) && (__GNUC_MINOR__ > 7)))
__extension__
# endif
typedef long long int cairo_fixed_32_32_t;
# endif
#endif
#include "cairo_wideint.h"
typedef cairo_fixed_32_32_t cairo_fixed_48_16_t;
typedef int32_t cairo_fixed_16_16_t;
typedef int32_t cairo_fixed_16_16_t;
typedef cairo_int64_t cairo_fixed_32_32_t;
typedef cairo_int64_t cairo_fixed_48_16_t;
typedef cairo_int128_t cairo_fixed_64_64_t;
typedef cairo_int128_t cairo_fixed_96_32_t;
/* The common 16.16 format gets a shorter name */
typedef cairo_fixed_16_16_t cairo_fixed_t;