author     toma <toma@283d02a7-25f6-0310-bc7c-ecb5cbfe19da>   2009-11-25 17:56:58 +0000
committer  toma <toma@283d02a7-25f6-0310-bc7c-ecb5cbfe19da>   2009-11-25 17:56:58 +0000
commit     47d455dd55be855e4cc691c32f687f723d9247ee (patch)
tree       52e236aaa2576bdb3840ebede26619692fed6d7d /ksvg/impl/libs/art_support
Copy the KDE 3.5 branch to branches/trinity for new KDE 3.5 features.
BUG:215923
git-svn-id: svn://anonsvn.kde.org/home/kde/branches/trinity/kdegraphics@1054174 283d02a7-25f6-0310-bc7c-ecb5cbfe19da
Diffstat (limited to 'ksvg/impl/libs/art_support')
-rw-r--r--  ksvg/impl/libs/art_support/Makefile.am         |    4
-rw-r--r--  ksvg/impl/libs/art_support/art_misc.c          | 1841
-rw-r--r--  ksvg/impl/libs/art_support/art_misc.h          |   79
-rw-r--r--  ksvg/impl/libs/art_support/art_render_misc.c   |  774
-rw-r--r--  ksvg/impl/libs/art_support/art_render_misc.h   |   90
-rw-r--r--  ksvg/impl/libs/art_support/art_rgba_svp.c      |  659
-rw-r--r--  ksvg/impl/libs/art_support/art_rgba_svp.h      |   57
7 files changed, 3504 insertions(+), 0 deletions(-)
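
A note on the arithmetic that recurs throughout the compositing loops in art_misc.c below: the pattern "tmp = a * b + 0x80; tmp = (tmp + (tmp >> 8)) >> 8;" is the usual divide-free way of computing round(a * b / 255) for 8-bit channel and alpha values, and every *_run helper in the patch open-codes it. The short standalone check below is not part of the patch; the helper name mul_div_255 is only illustrative.

#include <assert.h>
#include <stdio.h>

/* Divide-free round(a * b / 255) for a, b in 0..255 -- the same
 * "+ 0x80, then (tmp + (tmp >> 8)) >> 8" idiom used by the
 * compositing runs in art_misc.c. mul_div_255 is an illustrative
 * name; the patch inlines the pattern instead of naming it. */
static unsigned mul_div_255(unsigned a, unsigned b)
{
	unsigned tmp = a * b + 0x80;
	return (tmp + (tmp >> 8)) >> 8;
}

int main(void)
{
	unsigned a, b;

	/* Exhaustive check against exact rounded division by 255. */
	for (a = 0; a <= 255; a++)
		for (b = 0; b <= 255; b++)
			assert(mul_div_255(a, b) == (a * b + 127) / 255);

	printf("mul_div_255 matches rounded division by 255\n");
	return 0;
}
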
diff --git a/ksvg/impl/libs/art_support/Makefile.am b/ksvg/impl/libs/art_support/Makefile.am new file mode 100644 index 00000000..4bf00dd7 --- /dev/null +++ b/ksvg/impl/libs/art_support/Makefile.am @@ -0,0 +1,4 @@ +noinst_LTLIBRARIES = libksvgart.la +libksvgart_la_SOURCES = art_render_misc.c art_rgba_svp.c art_misc.c + +INCLUDES = $(LIBART_CFLAGS) $(all_includes) diff --git a/ksvg/impl/libs/art_support/art_misc.c b/ksvg/impl/libs/art_support/art_misc.c new file mode 100644 index 00000000..69b45306 --- /dev/null +++ b/ksvg/impl/libs/art_support/art_misc.c @@ -0,0 +1,1841 @@ +#include <libart_lgpl/art_vpath.h> +#include <libart_lgpl/art_bpath.h> +#include <libart_lgpl/art_misc.h> +#include <libart_lgpl/art_affine.h> +#include <libart_lgpl/art_svp_render_aa.h> + +#include "art_misc.h" + +extern double ceil(double x); +extern double floor(double x); + +/** + * art_vpath_render_bez: Render a bezier segment into the vpath. + * @p_vpath: Where the pointer to the #ArtVpath structure is stored. + * @pn_points: Pointer to the number of points in *@p_vpath. + * @pn_points_max: Pointer to the number of points allocated. + * @x0: X coordinate of starting bezier point. + * @y0: Y coordinate of starting bezier point. + * @x1: X coordinate of first bezier control point. + * @y1: Y coordinate of first bezier control point. + * @x2: X coordinate of second bezier control point. + * @y2: Y coordinate of second bezier control point. + * @x3: X coordinate of ending bezier point. + * @y3: Y coordinate of ending bezier point. + * @flatness: Flatness control. + * + * Renders a bezier segment into the vector path, reallocating and + * updating *@p_vpath and *@pn_vpath_max as necessary. *@pn_vpath is + * incremented by the number of vector points added. + * + * This step includes (@x0, @y0) but not (@x3, @y3). + * + * The @flatness argument guides the amount of subdivision. The Adobe + * PostScript reference manual defines flatness as the maximum + * deviation between the any point on the vpath approximation and the + * corresponding point on the "true" curve, and we follow this + * definition here. A value of 0.25 should ensure high quality for aa + * rendering. + **/ + void +ksvg_art_vpath_render_bez (ArtVpath **p_vpath, int *pn, int *pn_max, + double x0, double y0, + double x1, double y1, + double x2, double y2, + double x3, double y3, + double flatness) +{ + double x3_0, y3_0; + double z3_0_dot; + double z1_dot, z2_dot; + double z1_perp, z2_perp; + double max_perp_sq; + + double x_m, y_m; + double xa1, ya1; + double xa2, ya2; + double xb1, yb1; + double xb2, yb2; + + /* It's possible to optimize this routine a fair amount. + + First, once the _dot conditions are met, they will also be met in + all further subdivisions. So we might recurse to a different + routine that only checks the _perp conditions. + + Second, the distance _should_ decrease according to fairly + predictable rules (a factor of 4 with each subdivision). So it might + be possible to note that the distance is within a factor of 4 of + acceptable, and subdivide once. But proving this might be hard. + + Third, at the last subdivision, x_m and y_m can be computed more + expeditiously (as in the routine above). + + Finally, if we were able to subdivide by, say 2 or 3, this would + allow considerably finer-grain control, i.e. fewer points for the + same flatness tolerance. This would speed things up downstream. + + In any case, this routine is unlikely to be the bottleneck. It's + just that I have this undying quest for more speed... 
+ +*/ + + x3_0 = x3 - x0; + y3_0 = y3 - y0; + + /* z3_0_dot is dist z0-z3 squared */ + z3_0_dot = x3_0 * x3_0 + y3_0 * y3_0; + + /* todo: this test is far from satisfactory. */ + if (z3_0_dot < 0.001) + goto nosubdivide; + + /* we can avoid subdivision if: + + z1 has distance no more than flatness from the z0-z3 line + + z1 is no more z0'ward than flatness past z0-z3 + + z1 is more z0'ward than z3'ward on the line traversing z0-z3 + + and correspondingly for z2 */ + + /* perp is distance from line, multiplied by dist z0-z3 */ + max_perp_sq = flatness * flatness * z3_0_dot; + z1_perp = (y1 - y0) * x3_0 - (x1 - x0) * y3_0; + if (z1_perp * z1_perp > max_perp_sq) + goto subdivide; + + z2_perp = (y3 - y2) * x3_0 - (x3 - x2) * y3_0; + if (z2_perp * z2_perp > max_perp_sq) + goto subdivide; + + z1_dot = (x1 - x0) * x3_0 + (y1 - y0) * y3_0; + if (z1_dot < 0 && z1_dot * z1_dot > max_perp_sq) + goto subdivide; + + z2_dot = (x3 - x2) * x3_0 + (y3 - y2) * y3_0; + if (z2_dot < 0 && z2_dot * z2_dot > max_perp_sq) + goto subdivide; + + if (z1_dot + z1_dot > z3_0_dot) + goto subdivide; + + if (z2_dot + z2_dot > z3_0_dot) + goto subdivide; + +nosubdivide: + /* don't subdivide */ + art_vpath_add_point (p_vpath, pn, pn_max, + ART_LINETO, x3, y3); + return; + +subdivide: + + xa1 = (x0 + x1) * 0.5; + ya1 = (y0 + y1) * 0.5; + xa2 = (x0 + 2 * x1 + x2) * 0.25; + ya2 = (y0 + 2 * y1 + y2) * 0.25; + xb1 = (x1 + 2 * x2 + x3) * 0.25; + yb1 = (y1 + 2 * y2 + y3) * 0.25; + xb2 = (x2 + x3) * 0.5; + yb2 = (y2 + y3) * 0.5; + x_m = (xa2 + xb1) * 0.5; + y_m = (ya2 + yb1) * 0.5; +#ifdef VERBOSE + printf ("%g,%g %g,%g %g,%g %g,%g\n", xa1, ya1, xa2, ya2, + xb1, yb1, xb2, yb2); +#endif + ksvg_art_vpath_render_bez (p_vpath, pn, pn_max, + x0, y0, xa1, ya1, xa2, ya2, x_m, y_m, flatness); + ksvg_art_vpath_render_bez (p_vpath, pn, pn_max, + x_m, y_m, xb1, yb1, xb2, yb2, x3, y3, flatness); +} + +#define RENDER_LEVEL 4 +#define RENDER_SIZE (1 << (RENDER_LEVEL)) + +/** + * ksvg_art_bez_path_to_vec: Create vpath from bezier path. + * @bez: Bezier path. + * @flatness: Flatness control. + * + * Creates a vector path closely approximating the bezier path defined by + * @bez. The @flatness argument controls the amount of subdivision. In + * general, the resulting vpath deviates by at most @flatness pixels + * from the "ideal" path described by @bez. + * + * Return value: Newly allocated vpath. + **/ + ArtVpath * +ksvg_art_bez_path_to_vec(const ArtBpath *bez, double flatness) +{ + ArtVpath *vec; + int vec_n, vec_n_max; + int bez_index; + double x, y; + + vec_n = 0; + vec_n_max = RENDER_SIZE; + vec = art_new (ArtVpath, vec_n_max); + + /* Initialization is unnecessary because of the precondition that the + bezier path does not begin with LINETO or CURVETO, but is here + to make the code warning-free. */ + x = 0; + y = 0; + + bez_index = 0; + do + { +#ifdef VERBOSE + printf ("%s %g %g\n", + bez[bez_index].code == ART_CURVETO ? "curveto" : + bez[bez_index].code == ART_LINETO ? "lineto" : + bez[bez_index].code == ART_MOVETO ? "moveto" : + bez[bez_index].code == ART_MOVETO_OPEN ? 
"moveto-open" : + "end", bez[bez_index].x3, bez[bez_index].y3); +#endif + /* make sure space for at least one more code */ + if (vec_n >= vec_n_max) + art_expand (vec, ArtVpath, vec_n_max); + switch (bez[bez_index].code) + { + case ART_MOVETO_OPEN: + case ART_MOVETO: + case ART_LINETO: + x = bez[bez_index].x3; + y = bez[bez_index].y3; + vec[vec_n].code = bez[bez_index].code; + vec[vec_n].x = x; + vec[vec_n].y = y; + vec_n++; + break; + case ART_END: + vec[vec_n].code = ART_END; + vec[vec_n].x = 0; + vec[vec_n].y = 0; + vec_n++; + break; + case ART_END2: + vec[vec_n].code = (ArtPathcode)ART_END2; + vec[vec_n].x = bez[bez_index].x3; + vec[vec_n].y = bez[bez_index].y3; + vec_n++; + break; + case ART_CURVETO: +#ifdef VERBOSE + printf ("%g,%g %g,%g %g,%g %g,%g\n", x, y, + bez[bez_index].x1, bez[bez_index].y1, + bez[bez_index].x2, bez[bez_index].y2, + bez[bez_index].x3, bez[bez_index].y3); +#endif + ksvg_art_vpath_render_bez (&vec, &vec_n, &vec_n_max, + x, y, + bez[bez_index].x1, bez[bez_index].y1, + bez[bez_index].x2, bez[bez_index].y2, + bez[bez_index].x3, bez[bez_index].y3, + flatness); + x = bez[bez_index].x3; + y = bez[bez_index].y3; + break; + } + } + while (bez[bez_index++].code != ART_END); + return vec; +} + +/* Private functions for the rgb affine image compositors - primarily, +* the determination of runs, eliminating the need for source image +* bbox calculation in the inner loop. */ + +/* Determine a "run", such that the inverse affine of all pixels from +* (x0, y) inclusive to (x1, y) exclusive fit within the bounds +* of the source image. +* +* Initial values of x0, x1, and result values stored in first two +* pointer arguments. +* */ + +#define EPSILON 1e-6 + + void ksvg_art_rgb_affine_run (int *p_x0, int *p_x1, int y, + int src_width, int src_height, + const double affine[6]) +{ + int x0, x1; + double z; + double x_intercept; + int xi; + + x0 = *p_x0; + x1 = *p_x1; + + /* do left and right edges */ + if (affine[0] > EPSILON) + { + z = affine[2] * (y + 0.5) + affine[4]; + x_intercept = -z / affine[0]; + xi = ceil (x_intercept + EPSILON - 0.5); + if (xi > x0) + x0 = xi; + x_intercept = (-z + src_width) / affine[0]; + xi = ceil (x_intercept - EPSILON - 0.5); + if (xi < x1) + x1 = xi; + } + else if (affine[0] < -EPSILON) + { + z = affine[2] * (y + 0.5) + affine[4]; + x_intercept = (-z + src_width) / affine[0]; + xi = ceil (x_intercept + EPSILON - 0.5); + if (xi > x0) + x0 = xi; + x_intercept = -z / affine[0]; + xi = ceil (x_intercept - EPSILON - 0.5); + if (xi < x1) + x1 = xi; + } + else + { + z = affine[2] * (y + 0.5) + affine[4]; + if (z < 0 || z >= src_width) + { + *p_x1 = *p_x0; + return; + } + } + /* do top and bottom edges */ + if (affine[1] > EPSILON) + { + z = affine[3] * (y + 0.5) + affine[5]; + x_intercept = -z / affine[1]; + xi = ceil (x_intercept + EPSILON - 0.5); + if (xi > x0) + x0 = xi; + x_intercept = (-z + src_height) / affine[1]; + xi = ceil (x_intercept - EPSILON - 0.5); + if (xi < x1) + x1 = xi; + } + else if (affine[1] < -EPSILON) + { + z = affine[3] * (y + 0.5) + affine[5]; + x_intercept = (-z + src_height) / affine[1]; + xi = ceil (x_intercept + EPSILON - 0.5); + if (xi > x0) + x0 = xi; + x_intercept = -z / affine[1]; + xi = ceil (x_intercept - EPSILON - 0.5); + if (xi < x1) + x1 = xi; + } + else + { + z = affine[3] * (y + 0.5) + affine[5]; + if (z < 0 || z >= src_height) + { + *p_x1 = *p_x0; + return; + } + } + + *p_x0 = x0; + *p_x1 = x1; +} + +/** + * ksvg_art_rgb_affine: Affine transform source RGB image and composite. 
+ * @dst: Destination image RGB buffer. + * @x0: Left coordinate of destination rectangle. + * @y0: Top coordinate of destination rectangle. + * @x1: Right coordinate of destination rectangle. + * @y1: Bottom coordinate of destination rectangle. + * @dst_rowstride: Rowstride of @dst buffer. + * @src: Source image RGB buffer. + * @src_width: Width of source image. + * @src_height: Height of source image. + * @src_rowstride: Rowstride of @src buffer. + * @affine: Affine transform. + * @level: Filter level. + * @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing. + * @alpha: Alpha, range 0..256. + * + * Affine transform the source image stored in @src, compositing over + * the area of destination image @dst specified by the rectangle + * (@x0, @y0) - (@x1, @y1). As usual in libart, the left and top edges + * of this rectangle are included, and the right and bottom edges are + * excluded. + * + * The @alphagamma parameter specifies that the alpha compositing be done + * in a gamma-corrected color space. Since the source image is opaque RGB, + * this argument only affects the edges. In the current implementation, + * it is ignored. + * + * The @level parameter specifies the speed/quality tradeoff of the + * image interpolation. Currently, only ART_FILTER_NEAREST is + * implemented. + * + * KSVG additions : we have changed this function to support an alpha level as well. +* also we made sure compositing an rgba image over an rgb buffer works. +**/ + void ksvg_art_rgb_affine (art_u8 *dst, int x0, int y0, int x1, int y1, int dst_rowstride, + const art_u8 *src, + int src_width, int src_height, int src_rowstride, + const double affine[6], + ArtFilterLevel level, + ArtAlphaGamma *alphagamma, + int alpha) +{ + /* Note: this is a slow implementation, and is missing all filter + levels other than NEAREST. It is here for clarity of presentation + and to establish the interface. 
*/ + int x, y; + double inv[6]; + art_u8 *dst_p, *dst_linestart; + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int run_x0, run_x1; + + dst_linestart = dst; + art_affine_invert (inv, affine); + + if(alpha == 255) + for (y = y0; y < y1; y++) + { + pt.y = y + 0.5; + run_x0 = x0; + run_x1 = x1; + ksvg_art_rgb_affine_run (&run_x0, &run_x1, y, src_width, src_height, + inv); + dst_p = dst_linestart + (run_x0 - x0) * 3; + for (x = run_x0; x < run_x1; x++) + { + pt.x = x + 0.5; + art_affine_point (&src_pt, &pt, inv); + src_x = floor (src_pt.x); + src_y = floor (src_pt.y); + src_p = src + (src_y * src_rowstride) + src_x * 4; + dst_p[0] = dst_p[0] + (((src_p[2] - dst_p[0]) * src_p[3] + 0x80) >> 8); + dst_p[1] = dst_p[1] + (((src_p[1] - dst_p[1]) * src_p[3] + 0x80) >> 8); + dst_p[2] = dst_p[2] + (((src_p[0] - dst_p[2]) * src_p[3] + 0x80) >> 8); + dst_p += 3; + } + dst_linestart += dst_rowstride; + } + else + for (y = y0; y < y1; y++) + { + pt.y = y + 0.5; + run_x0 = x0; + run_x1 = x1; + ksvg_art_rgb_affine_run (&run_x0, &run_x1, y, src_width, src_height, + inv); + dst_p = dst_linestart + (run_x0 - x0) * 3; + for (x = run_x0; x < run_x1; x++) + { + pt.x = x + 0.5; + art_affine_point (&src_pt, &pt, inv); + src_x = floor (src_pt.x); + src_y = floor (src_pt.y); + src_p = src + (src_y * src_rowstride) + src_x * 4; + dst_p[0] = dst_p[0] + (((src_p[2] - dst_p[0]) * alpha + 0x80) >> 8); + dst_p[1] = dst_p[1] + (((src_p[1] - dst_p[1]) * alpha + 0x80) >> 8); + dst_p[2] = dst_p[2] + (((src_p[0] - dst_p[2]) * alpha + 0x80) >> 8); + dst_p += 3; + } + dst_linestart += dst_rowstride; + } +} + + +typedef struct _ksvgArtRgbAffineClipAlphaData ksvgArtRgbAffineClipAlphaData; + +struct _ksvgArtRgbAffineClipAlphaData +{ + int alphatab[256]; + art_u8 alpha; + art_u8 *dst; + int dst_rowstride; + int x0, x1; + double inv[6]; + const art_u8 *src; + int src_width; + int src_height; + int src_rowstride; + const art_u8 *mask; + int y0; +}; + +static +void ksvg_art_rgb_affine_clip_run(art_u8 *dst_p, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + + if(alpha > 255) + alpha = 255; + + pt.y = y; + + for(x = x0; x < x1; x++) + { + pt.x = x; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)(src_pt.x); + src_y = (int)(src_pt.y); + + if(src_x >= 0 && src_x < src_width && src_y >= 0 && src_y < src_height) + { + int s; + int d; + int tmp; + int srcAlpha; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + srcAlpha = alpha * src_p[3] + 0x80; + srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8; + + d = *dst_p; + s = src_p[2]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[1]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[0]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + } + else + dst_p += 3; + } +} + +static void +ksvg_art_rgb_affine_clip_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + + if(n_steps > 0) + { + 
run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_run(linebuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_run(linebuf + (run_x0 - x0) * 3, run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_run(linebuf + (run_x1 - x0) * 3, run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_run(linebuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += data->dst_rowstride; +} + +static +void ksvg_art_rgb_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + + if(alpha > 255) + alpha = 255; + + pt.y = y; + + for(x = x0; x < x1; x++) + { + pt.x = x; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)(src_pt.x); + src_y = (int)(src_pt.y); + + if(src_x >= 0 && src_x < src_width && src_y >= 0 && src_y < src_height) + { + int s; + int d; + int tmp; + int srcAlpha; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + srcAlpha = alpha * src_p[3] + 0x80; + srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8; + + srcAlpha = (srcAlpha * *mask++) + 0x80; + srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8; + + d = *dst_p; + s = src_p[2]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[1]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[0]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + } + else + { + dst_p += 3; + mask++; + } + } +} + +static void +ksvg_art_rgb_affine_clip_mask_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + const art_u8 *maskbuf; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + maskbuf = data->mask + (y - data->y0) * (x1 - x0); + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_mask_run(linebuf + (run_x0 - x0) * 3, maskbuf + (run_x0 - x0), run_x0, run_x1, y, 
data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_mask_run(linebuf + (run_x1 - x0) * 3, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_affine_clip_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += data->dst_rowstride; +} + +static +void ksvg_art_rgba_affine_clip_run(art_u8 *dst_p, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + + if(alpha > 255) + alpha = 255; + + pt.y = y; + + for(x = x0; x < x1; x++) + { + pt.x = x; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)(src_pt.x); + src_y = (int)(src_pt.y); + + if(src_x >= 0 && src_x < src_width && src_y >= 0 && src_y < src_height) + { + int s; + int d; + int tmp; + int srcAlpha; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + srcAlpha = alpha * src_p[3] + 0x80; + srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8; + + d = *dst_p; + s = src_p[2]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[1]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[0]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + + tmp = srcAlpha * (255 - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + } + else + dst_p += 4; + } +} + +static void +ksvg_art_rgba_affine_clip_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_run(linebuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_run(linebuf + (run_x0 - x0) * 4, run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_run(linebuf + (run_x1 - x0) * 4, run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_run(linebuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += 
data->dst_rowstride; +} + +static +void ksvg_art_rgba_affine_clip_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + + if(alpha > 255) + alpha = 255; + + pt.y = y; + + for(x = x0; x < x1; x++) + { + pt.x = x; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)(src_pt.x); + src_y = (int)(src_pt.y); + + if(src_x >= 0 && src_x < src_width && src_y >= 0 && src_y < src_height) + { + int s; + int d; + int tmp; + int srcAlpha; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + srcAlpha = alpha * src_p[3] + 0x80; + srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8; + + srcAlpha = (srcAlpha * *mask++) + 0x80; + srcAlpha = (srcAlpha + (srcAlpha >> 8)) >> 8; + + d = *dst_p; + s = src_p[2]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[1]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + s = src_p[0]; + + tmp = srcAlpha * (s - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + + d = *dst_p; + + tmp = srcAlpha * (255 - d) + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + *dst_p++ = d + tmp; + } + else + { + dst_p += 4; + mask++; + } + } +} + +static void +ksvg_art_rgba_affine_clip_mask_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + const art_u8 *maskbuf; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + maskbuf = data->mask + (y - data->y0) * (x1 - x0); + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_mask_run(linebuf + (run_x0 - x0) * 4, maskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_mask_run(linebuf + (run_x1 - x0) * 4, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_affine_clip_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += data->dst_rowstride; +} + +/** + * ksvg_art_rgb_affine_clip: Affine transform source RGB image and composite, with clipping path. + * @svp: Clipping path. + * @dst: Destination image RGB buffer. + * @x0: Left coordinate of destination rectangle. + * @y0: Top coordinate of destination rectangle. + * @x1: Right coordinate of destination rectangle. 
+ * @y1: Bottom coordinate of destination rectangle. + * @dst_rowstride: Rowstride of @dst buffer. + * @src: Source image RGB buffer. + * @src_width: Width of source image. + * @src_height: Height of source image. + * @src_rowstride: Rowstride of @src buffer. + * @affine: Affine transform. + * @level: Filter level. + * @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing. + * @alpha: Alpha, range 0..256. + * + * Affine transform the source image stored in @src, compositing over + * the area of destination image @dst specified by the rectangle + * (@x0, @y0) - (@x1, @y1). As usual in libart, the left and top edges + * of this rectangle are included, and the right and bottom edges are + * excluded. + * + * The @alphagamma parameter specifies that the alpha compositing be done + * in a gamma-corrected color space. Since the source image is opaque RGB, + * this argument only affects the edges. In the current implementation, + * it is ignored. + * + * The @level parameter specifies the speed/quality tradeoff of the + * image interpolation. Currently, only ART_FILTER_NEAREST is + * implemented. + * + * KSVG additions : we have changed this function to support an alpha level as well. +* also we made sure compositing an rgba image over an rgb buffer works. +**/ +void ksvg_art_rgb_affine_clip(const ArtSVP *svp, art_u8 *dst, int x0, int y0, int x1, int y1, int dst_rowstride, int dst_channels, + const art_u8 *src, + int src_width, int src_height, int src_rowstride, + const double affine[6], + int alpha, const art_u8 *mask) +{ + ksvgArtRgbAffineClipAlphaData data; + int i; + int a, da; + + data.alpha = alpha; + + a = 0x8000; + da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */ + + for(i = 0; i < 256; i++) + { + data.alphatab[i] = a >> 16; + a += da; + } + + data.dst = dst; + data.dst_rowstride = dst_rowstride; + data.x0 = x0; + data.x1 = x1; + data.y0 = y0; + data.mask = mask; + + art_affine_invert(data.inv, affine); + + data.src = src; + data.src_width = src_width; + data.src_height = src_height; + data.src_rowstride = src_rowstride; + + if(dst_channels == 3) + { + if(mask) + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_affine_clip_mask_callback, &data); + else + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_affine_clip_callback, &data); + } + else + { + if(mask) + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_affine_clip_mask_callback, &data); + else + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_affine_clip_callback, &data); + } +} + + +static +void ksvg_art_rgb_texture_run(art_u8 *dst_p, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + int srcAlpha; + + if(alpha > 255) + alpha = 255; + + /* TODO: optimise and filter? */ + pt.y = y + 0.5; + + for(x = x0; x < x1; x++) + { + int s; + int d; + int tmp; + int tmp2; + + pt.x = x + 0.5; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)floor(src_pt.x); + src_y = (int)floor(src_pt.y); + + if(src_x < 0) + { + /* Can't assume % behaviour with negative values */ + src_x += ((src_x / -src_width) + 1) * src_width; + } + + if(src_y < 0) + { + src_y += ((src_y / -src_height) + 1) * src_height; + } + + src_x %= src_width; + src_y %= src_height; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + /* Pattern source is in RGBA format, premultiplied. + * alpha represents fill/stroke/group opacity. 
+ * + * Multiply source alpha by 'alpha' then composite over. + * For each channel, d = d + alpha * (s - srcAlpha * d). + */ + + srcAlpha = src_p[3]; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = alpha * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = alpha * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = alpha * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + } +} + +static void +ksvg_art_rgb_texture_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_run(linebuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_run(linebuf + (run_x0 - x0) * 3, run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_run(linebuf + (run_x1 - x0) * 3, run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_run(linebuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += data->dst_rowstride; +} + +static +void ksvg_art_rgb_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + int srcAlpha; + + if(alpha > 255) + alpha = 255; + + /* TODO: optimise and filter? */ + pt.y = y + 0.5; + + for(x = x0; x < x1; x++) + { + int s; + int d; + int am; + int tmp; + int tmp2; + + pt.x = x + 0.5; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)floor(src_pt.x); + src_y = (int)floor(src_pt.y); + + if(src_x < 0) + { + /* Can't assume % behaviour with negative values */ + src_x += ((src_x / -src_width) + 1) * src_width; + } + + if(src_y < 0) + { + src_y += ((src_y / -src_height) + 1) * src_height; + } + + src_x %= src_width; + src_y %= src_height; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + /* Pattern source is in RGBA format, premultiplied. + * alpha represents fill/stroke/group opacity. + * + * Multiply source alpha by 'alpha' and mask value then composite over. + * For each channel, d = d + alpha * mask * (s - srcAlpha * d). 
+ */ + + am = (alpha * *mask++) + 0x80; + am = (am + (am >> 8)) >> 8; + + srcAlpha = src_p[3]; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = am * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = am * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = am * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + } +} + +static void +ksvg_art_rgb_texture_mask_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + const art_u8 *maskbuf; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + + maskbuf = data->mask + (y - data->y0) * (x1 - x0); + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_mask_run(linebuf + (run_x0 - x0) * 3, maskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_mask_run(linebuf + (run_x1 - x0) * 3, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgb_texture_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += data->dst_rowstride; +} + +static +void ksvg_art_rgba_texture_run(art_u8 *dst_p, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + int srcAlpha; + + if(alpha > 255) + alpha = 255; + + /* TODO: optimise and filter? */ + pt.y = y + 0.5; + + for(x = x0; x < x1; x++) + { + int s; + int d; + int tmp; + int tmp2; + + pt.x = x + 0.5; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)floor(src_pt.x); + src_y = (int)floor(src_pt.y); + + if(src_x < 0) + { + /* Can't assume % behaviour with negative values */ + src_x += ((src_x / -src_width) + 1) * src_width; + } + + if(src_y < 0) + { + src_y += ((src_y / -src_height) + 1) * src_height; + } + + src_x %= src_width; + src_y %= src_height; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + /* Pattern source is in RGBA format, premultiplied. + * alpha represents fill/stroke/group opacity. + * + * Multiply source alpha by 'alpha' then composite over. 
+ * For each colour channel, d = d + alpha * (s - srcAlpha * d). + */ + + srcAlpha = src_p[3]; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = alpha * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = alpha * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = alpha * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + /* dstAlpha = dstAlpha + srcAlpha * alpha * (1 - dstAlpha) */ + d = *dst_p; + + tmp = srcAlpha * alpha + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = tmp * (255 - d) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + src_p++; + } +} + +static void +ksvg_art_rgba_texture_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_run(linebuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_run(linebuf + (run_x0 - x0) * 4, run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_run(linebuf + (run_x1 - x0) * 4, run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_run(linebuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += data->dst_rowstride; +} + +static +void ksvg_art_rgba_texture_mask_run(art_u8 *dst_p, const art_u8 *mask, int x0, int x1, int y, const double inv[6], + int alpha, const art_u8 *src, int src_rowstride, int src_width, int src_height) +{ + const art_u8 *src_p; + ArtPoint pt, src_pt; + int src_x, src_y; + int x; + int srcAlpha; + + if(alpha > 255) + alpha = 255; + + /* TODO: optimise and filter? */ + pt.y = y + 0.5; + + for(x = x0; x < x1; x++) + { + int s; + int d; + int am; + int tmp; + int tmp2; + + pt.x = x + 0.5; + + art_affine_point(&src_pt, &pt, inv); + + src_x = (int)floor(src_pt.x); + src_y = (int)floor(src_pt.y); + + if(src_x < 0) + { + /* Can't assume % behaviour with negative values */ + src_x += ((src_x / -src_width) + 1) * src_width; + } + + if(src_y < 0) + { + src_y += ((src_y / -src_height) + 1) * src_height; + } + + src_x %= src_width; + src_y %= src_height; + + src_p = src + (src_y * src_rowstride) + src_x * 4; + + /* Pattern source is in RGBA format, premultiplied. 
+ * alpha represents fill/stroke/group opacity. + * + * Multiply source alpha by 'alpha' and mask value then composite over. + * For each channel, d = d + alpha * mask * (s - srcAlpha * d). + */ + + am = (alpha * *mask++) + 0x80; + am = (am + (am >> 8)) >> 8; + + srcAlpha = src_p[3]; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = am * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = am * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + d = *dst_p; + s = *src_p++; + + tmp = srcAlpha * d + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = am * (s - tmp) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + + /* dstAlpha = dstAlpha + srcAlpha * alpha * mask * (1 - dstAlpha) */ + d = *dst_p; + + tmp = srcAlpha * am + 0x80; + tmp = (tmp + (tmp >> 8)) >> 8; + + tmp2 = tmp * (255 - d) + 0x80; + tmp2 = (tmp2 + (tmp2 >> 8)) >> 8; + + *dst_p++ = d + tmp2; + src_p++; + } +} + +static void +ksvg_art_rgba_texture_mask_callback (void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ksvgArtRgbAffineClipAlphaData *data = (ksvgArtRgbAffineClipAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + int *alphatab; + int alpha; + const art_u8 *maskbuf; + + linebuf = data->dst; + x0 = data->x0; + x1 = data->x1; + + alphatab = data->alphatab; + + maskbuf = data->mask + (y - data->y0) * (x1 - x0); + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_mask_run(linebuf, maskbuf, x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_mask_run(linebuf + (run_x0 - x0) * 4, maskbuf + (run_x0 - x0), run_x0, run_x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_mask_run(linebuf + (run_x1 - x0) * 4, maskbuf + (run_x1 - x0), run_x1, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + ksvg_art_rgba_texture_mask_run(linebuf, maskbuf, x0, x1, y, data->inv, alphatab[alpha], data->src, data->src_rowstride, data->src_width, data->src_height); + } + + data->dst += data->dst_rowstride; +} + +/** + * ksvg_art_rgb_texture: Affine transform source RGB image and composite, with clipping path. + * @svp: Clipping path. + * @dst: Destination image RGB buffer. + * @x0: Left coordinate of destination rectangle. + * @y0: Top coordinate of destination rectangle. + * @x1: Right coordinate of destination rectangle. + * @y1: Bottom coordinate of destination rectangle. + * @dst_rowstride: Rowstride of @dst buffer. + * @src: Source image RGB buffer. + * @src_width: Width of source image. + * @src_height: Height of source image. + * @src_rowstride: Rowstride of @src buffer. + * @affine: Affine transform. + * @level: Filter level. 
+ * @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing. + * @alpha: Alpha, range 0..256. + * + * Affine transform the source image stored in @src, compositing over + * the area of destination image @dst specified by the rectangle + * (@x0, @y0) - (@x1, @y1). As usual in libart, the left and top edges + * of this rectangle are included, and the right and bottom edges are + * excluded. + * + * The @alphagamma parameter specifies that the alpha compositing be done + * in a gamma-corrected color space. Since the source image is opaque RGB, + * this argument only affects the edges. In the current implementation, + * it is ignored. + * + * The @level parameter specifies the speed/quality tradeoff of the + * image interpolation. Currently, only ART_FILTER_NEAREST is + * implemented. + * + * KSVG additions : we have changed this function to support an alpha level as well. +* also we made sure compositing an rgba image over an rgb buffer works. +**/ +void ksvg_art_rgb_texture(const ArtSVP *svp, art_u8 *dst, int x0, int y0, int x1, int y1, int dst_rowstride, + int dst_channels, + const art_u8 *src, + int src_width, int src_height, int src_rowstride, + const double affine[6], + ArtFilterLevel level, + ArtAlphaGamma *alphaGamma, + int alpha, + const art_u8 *mask) +{ + ksvgArtRgbAffineClipAlphaData data; + int i; + int a, da; + + data.alpha = alpha; + + a = 0x8000; + da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */ + + for(i = 0; i < 256; i++) + { + data.alphatab[i] = a >> 16; + a += da; + } + + data.dst = dst; + data.dst_rowstride = dst_rowstride; + data.x0 = x0; + data.x1 = x1; + + data.inv[0] = affine[0]; + data.inv[1] = affine[1]; + data.inv[2] = affine[2]; + data.inv[3] = affine[3]; + data.inv[4] = affine[4]; + data.inv[5] = affine[5]; + + data.src = src; + data.src_width = src_width; + data.src_height = src_height; + data.src_rowstride = src_rowstride; + + data.mask = mask; + data.y0 = y0; + + if(mask) + { + if(dst_channels == 3) + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_texture_mask_callback, &data); + else + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_texture_mask_callback, &data); + } + else + { + if(dst_channels == 3) + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgb_texture_callback, &data); + else + art_svp_render_aa(svp, x0, y0, x1, y1, ksvg_art_rgba_texture_callback, &data); + } +} + +/** + * ksvg_art_svp_move: moves an svp relatively to the current position. + * @svp: SVP to move. + * @dx: relative amount to move horizontally. + * @dy: relative amount to move vertically. + * + * Note : this function always moves the svp, not taking into account render buffer + * boundaries. 
+ **/ +void ksvg_art_svp_move(ArtSVP *svp, int dx, int dy) +{ + int i, j; + ArtSVPSeg *seg; + + if(dx == 0 && dy == 0) return; + for(i = 0;i < svp->n_segs;i++) + { + seg = &svp->segs[i]; + for(j = 0;j < seg->n_points;j++) + { + seg->points[j].x += dx; + seg->points[j].y += dy; + } + seg->bbox.x0 += dx; + seg->bbox.y0 += dy; + seg->bbox.x1 += dx; + seg->bbox.y1 += dy; + } +} + diff --git a/ksvg/impl/libs/art_support/art_misc.h b/ksvg/impl/libs/art_support/art_misc.h new file mode 100644 index 00000000..52f09a63 --- /dev/null +++ b/ksvg/impl/libs/art_support/art_misc.h @@ -0,0 +1,79 @@ +/* Libart_LGPL - library of basic graphic primitives + * Copyright (C) 1998 Raph Levien + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ + +#ifndef __KSVG_ART_H__ +#define __KSVG_ART_H__ + +#include <libart_lgpl/art_misc.h> +#include <libart_lgpl/art_bpath.h> +#include <libart_lgpl/art_vpath.h> +#include <libart_lgpl/art_alphagamma.h> +#include <libart_lgpl/art_filterlevel.h> +#include <libart_lgpl/art_svp.h> + +#define ART_END2 10 + +#ifdef __cplusplus +extern "C" { +#endif + + void ksvg_art_vpath_render_bez (ArtVpath **p_vpath, int *pn, int *pn_max, + double x0, double y0, + double x1, double y1, + double x2, double y2, + double x3, double y3, + double flatness); + + ArtVpath *ksvg_art_bez_path_to_vec(const ArtBpath *bez, double flatness); + + void ksvg_art_rgb_affine_run (int *p_x0, int *p_x1, int y, + int src_width, int src_height, + const double affine[6]); + + void ksvg_art_rgb_affine (art_u8 *dst, int x0, int y0, int x1, int y1, int dst_rowstride, + const art_u8 *src, + int src_width, int src_height, int src_rowstride, + const double affine[6], + ArtFilterLevel level, + ArtAlphaGamma *alphagamma, + int alpha); + + void ksvg_art_rgb_affine_clip(const ArtSVP *svp, art_u8 *dst, int x0, int y0, int x1, int y1, int dst_rowstride, int dst_channels, + const art_u8 *src, + int src_width, int src_height, int src_rowstride, + const double affine[6], + int alpha, const art_u8 *mask); + + void ksvg_art_rgb_texture(const ArtSVP *svp, art_u8 *dst, int x0, int y0, int x1, int y1, int dst_rowstride, + int dst_channels, + const art_u8 *src, + int src_width, int src_height, int src_rowstride, + const double affine[6], + ArtFilterLevel level, + ArtAlphaGamma *alphaGamma, + int alpha, + const art_u8 *mask); + + void ksvg_art_svp_move(ArtSVP *svp, int dx, int dy); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ksvg/impl/libs/art_support/art_render_misc.c b/ksvg/impl/libs/art_support/art_render_misc.c new file mode 100644 index 00000000..1603da1e --- /dev/null +++ b/ksvg/impl/libs/art_support/art_render_misc.c @@ -0,0 +1,774 @@ +/* This file is part of the KDE project. 
+ * art_render_misc.c: Here I store some routines I feel should be in libart :) + * + * Copyright (C) 2001-2003 KSVG Team + * + * This code is adapted from : + * + * art_render_gradient.c: Gradient image source for modular rendering. + * + * Libart_LGPL - library of basic graphic primitives + * Copyright (C) 2000 Raph Levien + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + * Authors: Raph Levien <[email protected]> + * Alexander Larsson <[email protected]> + */ + +#include "config.h" +#include "art_render_misc.h" + +#include <math.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> + +/* These are in KSVGHelper.cpp */ +int linearRGBFromsRGB(int sRGB8bit); +int sRGBFromLinearRGB(int linearRGB8bit); + +typedef struct _ArtImageSourceGradRad ArtImageSourceGradRad; + +struct _ArtImageSourceGradRad { + ArtImageSource super; + const ArtKSVGGradientRadial *gradient; + double a; +}; + +#define EPSILON 1e-6 + +/** + * art_ksvg_render_gradient_setpix: Set a gradient pixel. + * @render: The render object. + * @dst: Pointer to destination (where to store pixel). + * @n_stops: Number of stops in @stops. + * @stops: The stops for the gradient. + * @offset: The offset. + * + * @n_stops must be > 0. + * + * Sets a gradient pixel, storing it at @dst. + **/ +static void +art_ksvg_render_gradient_setpix (ArtRender *render, + art_u8 *dst, + int n_stops, ArtGradientStop *stops, + double offset, ArtKSVGGradientInterpolation interpolation) +{ + int ix; + int j; + double off0, off1; + int n_ch = render->n_chan + 1; + + for (ix = 0; ix < n_stops; ix++) + if (stops[ix].offset > offset) + break; + /* stops[ix - 1].offset < offset < stops[ix].offset */ + if (ix > 0 && ix < n_stops) + { + off0 = stops[ix - 1].offset; + off1 = stops[ix].offset; + if (fabs (off1 - off0) > EPSILON) + { + double interp; + + interp = (offset - off0) / (off1 - off0); + if(interpolation == ART_KSVG_LINEARRGB_INTERPOLATION) + { + for (j = 0; j < n_ch; j++) + { + int z0, z1; + int z; + int z0_8bit, z0_linearRGB_8bit; + int z1_8bit, z1_linearRGB_8bit; + int z_8bit, z_sRGB_8bit; + + /* Note: Using explicit variables for intermediate steps since */ + /* the ART_PIX macros reference the argument more than once. 
*/ + z0 = stops[ix - 1].color[j]; + z0_8bit = ART_PIX_8_FROM_MAX(z0); + z0_linearRGB_8bit = linearRGBFromsRGB(z0_8bit); + z0 = ART_PIX_MAX_FROM_8(z0_linearRGB_8bit); + + z1 = stops[ix].color[j]; + z1_8bit = ART_PIX_8_FROM_MAX(z1); + z1_linearRGB_8bit = linearRGBFromsRGB(z1_8bit); + z1 = ART_PIX_MAX_FROM_8(z1_linearRGB_8bit); + + z = floor (z0 + (z1 - z0) * interp + 0.5); + z_8bit = ART_PIX_8_FROM_MAX(z); + z_sRGB_8bit = sRGBFromLinearRGB(z_8bit); + + if (render->buf_depth == 8) + dst[j] = z_sRGB_8bit; + else /* (render->buf_depth == 16) */ + ((art_u16 *)dst)[j] = ART_PIX_MAX_FROM_8(z_sRGB_8bit); + } + } + else + { + /* sRGB interpolation */ + for (j = 0; j < n_ch; j++) + { + int z0, z1; + int z; + z0 = stops[ix - 1].color[j]; + z1 = stops[ix].color[j]; + z = floor (z0 + (z1 - z0) * interp + 0.5); + if (render->buf_depth == 8) + dst[j] = ART_PIX_8_FROM_MAX (z); + else /* (render->buf_depth == 16) */ + ((art_u16 *)dst)[j] = z; + } + } + return; + } + } + else if (ix == n_stops) + ix--; + + for (j = 0; j < n_ch; j++) + { + int z; + z = stops[ix].color[j]; + if (render->buf_depth == 8) + dst[j] = ART_PIX_8_FROM_MAX (z); + else /* (render->buf_depth == 16) */ + ((art_u16 *)dst)[j] = z; + } +} + +static void +art_ksvg_render_gradient_radial_done (ArtRenderCallback *self, ArtRender *render) +{ + art_free (self); +} + +static void +art_ksvg_render_gradient_radial_render (ArtRenderCallback *self, ArtRender *render, + art_u8 *dest, int y) +{ + ArtImageSourceGradRad *z = (ArtImageSourceGradRad *)self; + const ArtKSVGGradientRadial *gradient = z->gradient; + int pixstride = (render->n_chan + 1) * (render->depth >> 3); + int x; + int x0 = render->x0; + int width = render->x1 - x0; + int n_stops = gradient->n_stops; + ArtGradientStop *stops = gradient->stops; + art_u8 *bufp = render->image_buf; + double fx = gradient->fx; + double fy = gradient->fy; + double dx, dy; + double *affine = gradient->affine; + double aff0 = affine[0]; + double aff1 = affine[1]; + const double a = z->a; + const double arecip = 1.0 / a; + double b, db; + double c, dc, ddc; + double b_a, db_a; + double rad, drad, ddrad; + ArtGradientSpread spread = gradient->spread; + + dx = x0 * aff0 + y * affine[2] + affine[4] - fx; + dy = x0 * aff1 + y * affine[3] + affine[5] - fy; + b = dx * fx + dy * fy; + db = aff0 * fx + aff1 * fy; + c = dx * dx + dy * dy; + dc = 2 * aff0 * dx + aff0 * aff0 + 2 * aff1 * dy + aff1 * aff1; + ddc = 2 * aff0 * aff0 + 2 * aff1 * aff1; + + b_a = b * arecip; + db_a = db * arecip; + + rad = b_a * b_a + c * arecip; + drad = 2 * b_a * db_a + db_a * db_a + dc * arecip; + ddrad = 2 * db_a * db_a + ddc * arecip; + + for (x = 0; x < width; x++) + { + double z; + + if (rad > 0) + z = b_a + sqrt (rad); + else + z = b_a; + + if (spread == ART_GRADIENT_REPEAT) + z = z - floor (z); + else if (spread == ART_GRADIENT_REFLECT) + { + double tmp; + + tmp = z - 2 * floor (0.5 * z); + z = tmp > 1 ? 2 - tmp : tmp; + } + + art_ksvg_render_gradient_setpix (render, bufp, n_stops, stops, z, gradient->interpolation); + bufp += pixstride; + b_a += db_a; + rad += drad; + drad += ddrad; + } +} + +static void +art_ksvg_render_gradient_radial_negotiate (ArtImageSource *self, ArtRender *render, + ArtImageSourceFlags *p_flags, + int *p_buf_depth, ArtAlphaType *p_alpha) +{ + self->super.render = art_ksvg_render_gradient_radial_render; + *p_flags = 0; + *p_buf_depth = render->depth; + *p_alpha = ART_ALPHA_SEPARATE; +} + +/** + * art_ksvg_render_gradient_radial: Add a radial gradient image source. + * @render: The render object. 
+ * @gradient: The radial gradient. + * + * Adds the radial gradient @gradient as the image source for rendering + * in the render object @render. + **/ +void +art_ksvg_render_gradient_radial (ArtRender *render, + const ArtKSVGGradientRadial *gradient, + ArtFilterLevel level) +{ + ArtImageSourceGradRad *image_source = art_new (ArtImageSourceGradRad, 1); + double fx = gradient->fx; + double fy = gradient->fy; + + image_source->super.super.render = NULL; + image_source->super.super.done = art_ksvg_render_gradient_radial_done; + image_source->super.negotiate = art_ksvg_render_gradient_radial_negotiate; + + image_source->gradient = gradient; + /* todo: sanitycheck fx, fy? */ + image_source->a = 1 - fx * fx - fy * fy; + + art_render_add_image_source (render, &image_source->super); +} + + +/* Hack to find out how to define alloca on different platforms. + * Modified version of glib/galloca.h. + */ + +#ifdef __GNUC__ +/* GCC does the right thing */ + #undef alloca + #define alloca(size) __builtin_alloca (size) +#elif defined (HAVE_ALLOCA_H) +/* a native and working alloca.h is there */ + #include <alloca.h> +#else /* !__GNUC__ && !HAVE_ALLOCA_H */ + #ifdef _MSC_VER + #include <malloc.h> + #define alloca _alloca + #else /* !_MSC_VER */ + #ifdef _AIX + #pragma alloca + #else /* !_AIX */ + #ifndef alloca /* predefined by HP cc +Olibcalls */ +char *alloca (); + #endif /* !alloca */ + #endif /* !_AIX */ + #endif /* !_MSC_VER */ +#endif /* !__GNUC__ && !HAVE_ALLOCA_H */ + +#undef DEBUG_SPEW + +typedef struct _ArtImageSourceGradLin ArtImageSourceGradLin; + +/* The stops will be copied right after this structure */ +struct _ArtImageSourceGradLin +{ + ArtImageSource super; + ArtKSVGGradientLinear gradient; + ArtGradientStop stops[1]; +}; + +#ifndef MAX + #define MAX(a, b) (((a) > (b)) ? (a) : (b)) +#endif /* MAX */ + +#ifndef MIN + #define MIN(a, b) (((a) < (b)) ? 
(a) : (b)) +#endif /* MIN */ + +static void +art_ksvg_rgba_gradient_run (art_u8 *buf, + art_u8 *color1, + art_u8 *color2, + int len) +{ + int i; + int r, g, b, a; + int dr, dg, db, da; + +#ifdef DEBUG_SPEW + printf ("gradient run from %3d %3d %3d %3d to %3d %3d %3d %3d in %d pixels\n", + color1[0], color1[1], color1[2], color1[3], + color2[0], color2[1], color2[2], color2[3], + len); +#endif + + r = (color1[0] << 16) + 0x8000; + g = (color1[1] << 16) + 0x8000; + b = (color1[2] << 16) + 0x8000; + a = (color1[3] << 16) + 0x8000; + dr = ((color2[0] - color1[0]) << 16) / len; + dg = ((color2[1] - color1[1]) << 16) / len; + db = ((color2[2] - color1[2]) << 16) / len; + da = ((color2[3] - color1[3]) << 16) / len; + + for(i = 0; i < len; i++) + { + *buf++ = (r>>16); + *buf++ = (g>>16); + *buf++ = (b>>16); + *buf++ = (a>>16); + + r += dr; + g += dg; + b += db; + a += da; + } +} + +static void +ksvg_calc_color_at (ArtGradientStop *stops, + int n_stops, + ArtGradientSpread spread, + double offset, + double offset_fraction, + int favor_start, + int ix, + art_u8 *color) +{ + double off0, off1; + int j; + + if(spread == ART_GRADIENT_PAD) + { + if(offset < EPSILON) + { + color[0] = ART_PIX_8_FROM_MAX (stops[0].color[0]); + color[1] = ART_PIX_8_FROM_MAX (stops[0].color[1]); + color[2] = ART_PIX_8_FROM_MAX (stops[0].color[2]); + color[3] = ART_PIX_8_FROM_MAX (stops[0].color[3]); + return; + } + if(offset >= 1.0 - EPSILON) + { + color[0] = ART_PIX_8_FROM_MAX (stops[n_stops-1].color[0]); + color[1] = ART_PIX_8_FROM_MAX (stops[n_stops-1].color[1]); + color[2] = ART_PIX_8_FROM_MAX (stops[n_stops-1].color[2]); + color[3] = ART_PIX_8_FROM_MAX (stops[n_stops-1].color[3]); + return; + } + } + + if(ix > 0 && ix < n_stops) + { + off0 = stops[ix - 1].offset; + off1 = stops[ix].offset; + if(fabs (off1 - off0) > EPSILON) + { + double interp; + double o; + o = offset_fraction; + + if((fabs (o) < EPSILON) && (!favor_start)) + o = 1.0; + else if((fabs (o-1.0) < EPSILON) && (favor_start)) + o = 0.0; + + /* + if (offset_fraction == 0.0 && !favor_start) + offset_fraction = 1.0; + */ + + interp = (o - off0) / (off1 - off0); + for(j = 0; j < 4; j++) + { + int z0, z1; + int z; + z0 = stops[ix - 1].color[j]; + z1 = stops[ix].color[j]; + z = floor (z0 + (z1 - z0) * interp + 0.5); + color[j] = ART_PIX_8_FROM_MAX (z); + } + return; + } + /* If offsets are too close to safely do the division, just + pick the ix color. */ + color[0] = ART_PIX_8_FROM_MAX (stops[ix].color[0]); + color[1] = ART_PIX_8_FROM_MAX (stops[ix].color[1]); + color[2] = ART_PIX_8_FROM_MAX (stops[ix].color[2]); + color[3] = ART_PIX_8_FROM_MAX (stops[ix].color[3]); + return; + } + + /*printf ("WARNING! 
bad ix %d in calc_color_at() [internal error]\n", ix); + assert (0);*/ +} + +static void +art_ksvg_render_gradient_linear_render_8 (ArtRenderCallback *self, + ArtRender *render, + art_u8 *dest, int y) +{ + ArtImageSourceGradLin *z = (ArtImageSourceGradLin *)self; + const ArtKSVGGradientLinear *gradient = &(z->gradient); + int i; + int width = render->x1 - render->x0; + int len; + double offset, d_offset; + double offset_fraction; + int next_stop; + int ix; + art_u8 color1[4], color2[4]; + int n_stops = gradient->n_stops; + int extra_stops; + ArtGradientStop *stops = gradient->stops; + ArtGradientStop *tmp_stops; + art_u8 *bufp = render->image_buf; + ArtGradientSpread spread = gradient->spread; + +#ifdef DEBUG_SPEW + printf ("x1: %d, x2: %d, y: %d\n", render->x0, render->x1, y); + printf ("spread: %d, stops:", gradient->spread); + for(i=0;i<n_stops;i++) + { + printf ("%f, ", gradient->stops[i].offset); + } + printf ("\n"); + printf ("a: %f, b: %f, c: %f\n", gradient->a, gradient->b, gradient->c); +#endif + + offset = render->x0 * gradient->affine[0] + y * gradient->affine[2] + gradient->affine[4]; + d_offset = gradient->affine[0]; + + /* We need to force the gradient to extend the whole 0..1 segment, + because the rest of the code doesn't handle partial gradients + correctly */ + if((gradient->stops[0].offset > EPSILON /* == 0.0 */) || + (gradient->stops[n_stops-1].offset < (1.0 - EPSILON))) + { + extra_stops = 0; + tmp_stops = stops = alloca (sizeof (ArtGradientStop) * (n_stops + 2)); + if(gradient->stops[0].offset > EPSILON /* 0.0 */) + { + memcpy (tmp_stops, gradient->stops, sizeof (ArtGradientStop)); + tmp_stops[0].offset = 0.0; + tmp_stops += 1; + extra_stops++; + } + memcpy (tmp_stops, gradient->stops, sizeof (ArtGradientStop) * n_stops); + if(gradient->stops[n_stops-1].offset < (1.0 - EPSILON)) + { + tmp_stops += n_stops; + memcpy (tmp_stops, &gradient->stops[n_stops-1], sizeof (ArtGradientStop)); + tmp_stops[0].offset = 1.0; + extra_stops++; + } + n_stops += extra_stops; + + +#ifdef DEBUG_SPEW + printf ("start/stop modified stops:"); + for(i=0;i<n_stops;i++) + { + printf ("%f, ", stops[i].offset); + } + printf ("\n"); +#endif + + } + + if(spread == ART_GRADIENT_REFLECT) + { + tmp_stops = stops; + stops = alloca (sizeof (ArtGradientStop) * n_stops * 2); + memcpy (stops, tmp_stops, sizeof (ArtGradientStop) * n_stops); + + for(i = 0; i< n_stops; i++) + { + stops[n_stops * 2 - 1 - i].offset = (1.0 - stops[i].offset / 2.0); + memcpy (stops[n_stops * 2 - 1 - i].color, stops[i].color, sizeof (stops[i].color)); + stops[i].offset = stops[i].offset / 2.0; + } + + spread = ART_GRADIENT_REPEAT; + offset = offset / 2.0; + d_offset = d_offset / 2.0; + + n_stops = 2 * n_stops; + +#ifdef DEBUG_SPEW + printf ("reflect modified stops:"); + for(i=0;i<n_stops;i++) + { + printf ("%f, ", stops[i].offset); + } + printf ("\n"); +#endif + } + + offset_fraction = offset - floor (offset); +#ifdef DEBUG_SPEW + printf ("inital offset: %f, fraction: %f d_offset: %f\n", offset, offset_fraction, d_offset); +#endif + /* ix is selected so that offset_fraction is + stops[ix-1] <= offset_fraction <= stops[ix] + If offset_fraction is equal to one of the edges, ix + is selected so the the section of the line extending + in the same direction as d_offset is between ix-1 and ix. 
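+     For example, with stops at 0.0, 0.5 and 1.0 and offset_fraction exactly
+     0.5, the loop below picks ix = 2 when d_offset > 0 (the 0.5..1.0 span
+     lies ahead), while the extra fabs() test makes it pick ix = 1 when
+     d_offset < 0 (the 0.0..0.5 span lies ahead).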
+ */ + for(ix = 0; ix < n_stops; ix++) + if(stops[ix].offset > offset_fraction || + (d_offset < 0.0 && fabs (stops[ix].offset - offset_fraction) < EPSILON)) + break; + if(ix == 0) + ix = n_stops - 1; + else if(ix == n_stops) + ix = n_stops - 1; + +#ifdef DEBUG_SPEW + printf ("Initial ix: %d\n", ix); +#endif +#if 0 + assert (ix > 0); + assert (ix < n_stops); + assert ((stops[ix-1].offset <= offset_fraction + EPSILON) || + ((stops[ix].offset > (1.0 - EPSILON)) && (offset_fraction < EPSILON /* == 0.0*/))); + /*assert (offset_fraction <= stops[ix].offset);*/ + /* FIXME: These asserts may be broken, it is for now + safer to not use them. Should be fixed! + See bug #121850 + assert ((offset_fraction != stops[ix-1].offset) || + (d_offset >= 0.0)); + assert ((offset_fraction != stops[ix].offset) || + (d_offset <= 0.0)); + */ +#endif + while(width > 0) + { +#ifdef DEBUG_SPEW + printf ("ix: %d\n", ix); + printf ("start offset: %f\n", offset); +#endif + ksvg_calc_color_at (stops, n_stops, + spread, + offset, + offset_fraction, + (d_offset > -EPSILON), + ix, + color1); + + if(d_offset > 0) + next_stop = ix; + else + next_stop = ix-1; + +#ifdef DEBUG_SPEW + printf ("next_stop: %d\n", next_stop); +#endif + if(fabs (d_offset) > EPSILON) + { + double o; + o = offset_fraction; + + if((fabs (o) <= EPSILON) && (ix == n_stops - 1)) + o = 1.0; + else if((fabs (o-1.0) <= EPSILON) && (ix == 1)) + o = 0.0; + +#ifdef DEBUG_SPEW + printf ("o: %f\n", o); +#endif + len = (int)floor (fabs ((stops[next_stop].offset - o) / d_offset)) + 1; + len = MAX (len, 0); + len = MIN (len, width); + } + else + { + len = width; + } +#ifdef DEBUG_SPEW + printf ("len: %d\n", len); +#endif + if(len > 0) + { + offset = offset + (len-1) * d_offset; + offset_fraction = offset - floor (offset); +#ifdef DEBUG_SPEW + printf ("end offset: %f, fraction: %f\n", offset, offset_fraction); +#endif + ksvg_calc_color_at (stops, n_stops, + spread, + offset, + offset_fraction, + (d_offset < EPSILON), + ix, + color2); + + art_ksvg_rgba_gradient_run (bufp, + color1, + color2, + len); + offset += d_offset; + offset_fraction = offset - floor (offset); + } + + if(d_offset > 0) + { + do + { + ix++; + if(ix == n_stops) + ix = 1; + /* Note: offset_fraction can actually be one here on x86 machines that + does calculations with extended precision, but later rounds to 64bit. + This happens if the 80bit offset_fraction is larger than the + largest 64bit double that is less than one. 
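+       Hence the extra (ix == 1 && offset_fraction > (1.0 - EPSILON)) clause
+       in the termination test below, which keeps the search from spinning
+       forever when offset_fraction comes out as exactly 1.0.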
+ */ + } + while(!((stops[ix-1].offset <= offset_fraction && + offset_fraction < stops[ix].offset) || + (ix == 1 && offset_fraction > (1.0 - EPSILON)))); + } + else + { + do + { + ix--; + if(ix == 0) + ix = n_stops - 1; + } + while(!((stops[ix-1].offset < offset_fraction && + offset_fraction <= stops[ix].offset) || + (ix == n_stops - 1 && offset_fraction < EPSILON /* == 0.0*/))); + } + + bufp += 4*len; + width -= len; + } +} + +static void +art_ksvg_render_gradient_linear_done (ArtRenderCallback *self, ArtRender *render) +{ + art_free (self); +} + +static void +art_ksvg_render_gradient_linear_render (ArtRenderCallback *self, ArtRender *render, + art_u8 *dest, int y) +{ + ArtImageSourceGradLin *z = (ArtImageSourceGradLin *)self; + const ArtKSVGGradientLinear *gradient = &(z->gradient); + int pixstride = (render->n_chan + 1) * (render->depth >> 3); + int x; + int width = render->x1 - render->x0; + double offset, d_offset; + double actual_offset; + int n_stops = gradient->n_stops; + ArtGradientStop *stops = gradient->stops; + art_u8 *bufp = render->image_buf; + ArtGradientSpread spread = gradient->spread; + + offset = render->x0 * gradient->affine[0] + y * gradient->affine[2] + gradient->affine[4]; + d_offset = gradient->affine[0]; + + for(x = 0; x < width; x++) + { + if(spread == ART_GRADIENT_PAD) + actual_offset = offset; + else if(spread == ART_GRADIENT_REPEAT) + actual_offset = offset - floor (offset); + else /* (spread == ART_GRADIENT_REFLECT) */ + { + double tmp; + + tmp = offset - 2 * floor (0.5 * offset); + actual_offset = tmp > 1 ? 2 - tmp : tmp; + } + art_ksvg_render_gradient_setpix (render, bufp, n_stops, stops, actual_offset, gradient->interpolation); + offset += d_offset; + bufp += pixstride; + } +} + +static void +art_ksvg_render_gradient_linear_negotiate (ArtImageSource *self, ArtRender *render, + ArtImageSourceFlags *p_flags, + int *p_buf_depth, ArtAlphaType *p_alpha) +{ + ArtImageSourceGradLin *z = (ArtImageSourceGradLin *)self; + + if(render->depth == 8 && + render->n_chan == 3 && + z->gradient.interpolation == ART_KSVG_SRGB_INTERPOLATION) + { + /* The optimised renderer doesn't support linearRGB interpolation at the moment */ + /* so we only use it for sRGB, which is more common anyway. */ + self->super.render = art_ksvg_render_gradient_linear_render_8; + *p_flags = 0; + *p_buf_depth = 8; + *p_alpha = ART_ALPHA_SEPARATE; + return; + } + + self->super.render = art_ksvg_render_gradient_linear_render; + *p_flags = 0; + *p_buf_depth = render->depth; + *p_alpha = ART_ALPHA_SEPARATE; +} + +/** + * art_render_gradient_linear: Add a linear gradient image source. + * @render: The render object. + * @gradient: The linear gradient. + * + * Adds the linear gradient @gradient as the image source for rendering + * in the render object @render. 
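+ *
+ * The gradient, including its stop array, is copied into the new image
+ * source, so @gradient does not have to stay valid after this call returns.
+ * Note that the optimised run-based renderer is only selected for 8 bit RGB
+ * buffers using ART_KSVG_SRGB_INTERPOLATION; linearRGB interpolation always
+ * goes through the generic per-pixel path.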
+ **/ +void +art_ksvg_render_gradient_linear (ArtRender *render, + const ArtKSVGGradientLinear *gradient, + ArtFilterLevel level) +{ + ArtImageSourceGradLin *image_source = art_alloc (sizeof (ArtImageSourceGradLin) + + sizeof (ArtGradientStop) * (gradient->n_stops - 1)); + + image_source->super.super.render = NULL; + image_source->super.super.done = art_ksvg_render_gradient_linear_done; + image_source->super.negotiate = art_ksvg_render_gradient_linear_negotiate; + + /* copy the gradient into the structure */ + image_source->gradient = *gradient; + image_source->gradient.stops = image_source->stops; + memcpy (image_source->gradient.stops, gradient->stops, sizeof (ArtGradientStop) * gradient->n_stops); + + art_render_add_image_source (render, &image_source->super); +} + diff --git a/ksvg/impl/libs/art_support/art_render_misc.h b/ksvg/impl/libs/art_support/art_render_misc.h new file mode 100644 index 00000000..d1f3690b --- /dev/null +++ b/ksvg/impl/libs/art_support/art_render_misc.h @@ -0,0 +1,90 @@ +/* This file is part of the KDE project. + * art_render_misc.c: Here I store some routines I feel should be in libart :) + * + * Copyright (C) 2001-2002 KSVG Team + * + * This code is adapted from : + * + * art_render_gradient.h: Gradient image source for modular rendering. + * + * Libart_LGPL - library of basic graphic primitives + * Copyright (C) 2000 Raph Levien + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. 
+ * + * Authors: Raph Levien <[email protected]> + * Alexander Larsson <[email protected]> + */ + +#ifndef __ART_RENDER_MISC_H__ +#define __ART_RENDER_MISC_H__ + +#ifdef LIBART_COMPILATION +#include "art_filterlevel.h" +#include "art_render.h" +#include "art_render_gradient.h" +#else +#include <libart_lgpl/art_filterlevel.h> +#include <libart_lgpl/art_render.h> +#include <libart_lgpl/art_render_gradient.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef enum +{ + ART_KSVG_SRGB_INTERPOLATION, + ART_KSVG_LINEARRGB_INTERPOLATION +} ArtKSVGGradientInterpolation; + +typedef struct _ArtKSVGGradientRadial ArtKSVGGradientRadial; + +struct _ArtKSVGGradientRadial { + double affine[6]; /* transforms user coordinates to unit circle */ + double fx, fy; /* focal point in unit circle coords */ + int n_stops; + ArtGradientSpread spread; + ArtGradientStop *stops; + ArtKSVGGradientInterpolation interpolation; +}; + +void +art_ksvg_render_gradient_radial (ArtRender *render, + const ArtKSVGGradientRadial *gradient, + ArtFilterLevel level); + +typedef struct _ArtKSVGGradientLinear ArtKSVGGradientLinear; + +struct _ArtKSVGGradientLinear { + double affine[6]; /* transforms screen gradient vector to unit vector (1, 0) */ + ArtGradientSpread spread; + int n_stops; + ArtGradientStop *stops; + ArtKSVGGradientInterpolation interpolation; +}; + +void +art_ksvg_render_gradient_linear (ArtRender *render, + const ArtKSVGGradientLinear *gradient, + ArtFilterLevel level); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __ART_RENDER_MISC_H__ */ diff --git a/ksvg/impl/libs/art_support/art_rgba_svp.c b/ksvg/impl/libs/art_support/art_rgba_svp.c new file mode 100644 index 00000000..47c7d924 --- /dev/null +++ b/ksvg/impl/libs/art_support/art_rgba_svp.c @@ -0,0 +1,659 @@ +/* Libart_LGPL - library of basic graphic primitives + * Copyright (C) 1998 Raph Levien + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ + +/* Render a sorted vector path into an RGB buffer. */ + +#include <X11/Xos.h> + +#include "art_rgba_svp.h" + +#include <libart_lgpl/art_svp.h> +#include <libart_lgpl/art_svp_render_aa.h> +#include <libart_lgpl/art_rgba.h> +#include <libart_lgpl/art_rgb.h> + +/* RGBA renderers */ + +typedef struct _ArtKSVGRgbaSVPAlphaData ArtKSVGRgbaSVPAlphaData; + +struct _ArtKSVGRgbaSVPAlphaData { + int alphatab[256]; + art_u8 r, g, b, alpha; + art_u32 rgba; + art_u8 *buf; + art_u8 *mask; + int rowstride; + int x0, x1; + int y0; +}; + +/** + * art_rgba_fill_run: fill an RGBA buffer a solid RGB color. + * @buf: Buffer to fill. + * @r: Red, range 0..255. + * @g: Green, range 0..255. + * @b: Blue, range 0..255. + * @n: Number of RGB triples to fill. + * + * Fills a buffer with @n copies of the (@r, @g, @b) triple, solid + * alpha. 
Thus, locations @buf (inclusive) through @buf + 4 * @n + * (exclusive) are written. + **/ +static void +art_ksvg_rgba_fill_run (art_u8 *buf, art_u8 r, art_u8 g, art_u8 b, int n) +{ + int i; +#if X_BYTE_ORDER == X_BIG_ENDIAN + art_u32 src_rgba; +#else + art_u32 src_abgr; +#endif + +#if X_BYTE_ORDER == X_BIG_ENDIAN + src_rgba = (r << 24) | (g << 16) | (b << 8) | 255; +#else + src_abgr = (255 << 24) | (b << 16) | (g << 8) | r; +#endif + for(i = 0; i < n; i++) + { +#if X_BYTE_ORDER == X_BIG_ENDIAN + ((art_u32 *)buf)[i] = src_rgba; +#else + ((art_u32 *)buf)[i] = src_abgr; +#endif + } +} + +/** + * art_rgba_run_alpha: Render semitransparent color over RGBA buffer. + * @buf: Buffer for rendering. + * @r: Red, range 0..255. + * @g: Green, range 0..255. + * @b: Blue, range 0..255. + * @alpha: Alpha, range 0..255. + * @n: Number of RGB triples to render. + * + * Renders a sequential run of solid (@r, @g, @b) color over @buf with + * opacity @alpha. Note that the range of @alpha is 0..255, in contrast + * to art_rgb_run_alpha, which has a range of 0..256. + **/ +static void +art_ksvg_rgba_run_alpha (art_u8 *buf, art_u8 r, art_u8 g, art_u8 b, int alpha, int n) +{ + int i; + int v; + int tmp; + + if(alpha > 255) + alpha = 255; + + for(i = 0; i < n; i++) + { + v = *buf; + tmp = (r - v) * alpha + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (g - v) * alpha + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (b - v) * alpha + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (255 - alpha) * v + 0x80; + *buf++ = alpha + ((tmp + (tmp >> 8)) >> 8); + } +} + +static void +art_ksvg_rgba_mask_run_alpha (art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n) +{ + int i; + int v; + int am; + int tmp; + + if(alpha > 255) + alpha = 255; + + for(i = 0; i < n; i++) + { + am = (alpha * *mask++) + 0x80; + am = (am + (am >> 8)) >> 8; + + v = *buf; + tmp = (r - v) * am + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (g - v) * am + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (b - v) * am + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (255 - am) * v + 0x80; + *buf++ = am + ((tmp + (tmp >> 8)) >> 8); + } +} + +static void +art_ksvg_rgba_svp_alpha_callback(void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + art_u8 r, g, b; + int *alphatab; + int alpha; + + linebuf = data->buf; + x0 = data->x0; + x1 = data->x1; + + r = data->r; + g = data->g; + b = data->b; + alphatab = data->alphatab; + + if (n_steps > 0) + { + run_x1 = steps[0].x; + if (run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if (alpha) + art_ksvg_rgba_run_alpha (linebuf, + r, g, b, alphatab[alpha], + run_x1 - x0); + } + + for (k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if (run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if (alpha) + art_ksvg_rgba_run_alpha (linebuf + (run_x0 - x0) * 4, + r, g, b, alphatab[alpha], + run_x1 - run_x0); + } + } + running_sum += steps[k].delta; + if (x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if (alpha) + art_ksvg_rgba_run_alpha (linebuf + (run_x1 - x0) * 4, + r, g, b, alphatab[alpha], + x1 - run_x1); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if (alpha) + 
art_ksvg_rgba_run_alpha (linebuf, + r, g, b, alphatab[alpha], + x1 - x0); + } + + data->buf += data->rowstride; +} + +static void +art_ksvg_rgba_svp_alpha_opaque_callback(void *callback_data, int y, + int start, + ArtSVPRenderAAStep *steps, int n_steps) +{ + ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + art_u8 r, g, b; + art_u32 rgba; + int *alphatab; + int alpha; + + linebuf = data->buf; + x0 = data->x0; + x1 = data->x1; + + r = data->r; + g = data->g; + b = data->b; + rgba = data->rgba; + alphatab = data->alphatab; + + if (n_steps > 0) + { + run_x1 = steps[0].x; + if (run_x1 > x0) + { + alpha = running_sum >> 16; + if (alpha) + { + if (alpha >= 255) + art_ksvg_rgba_fill_run (linebuf, + r, g, b, + run_x1 - x0); + else + art_ksvg_rgba_run_alpha (linebuf, + r, g, b, alphatab[alpha], + run_x1 - x0); + } + } + + for (k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if (run_x1 > run_x0) + { + alpha = running_sum >> 16; + if (alpha) + { + if (alpha >= 255) + art_ksvg_rgba_fill_run (linebuf + (run_x0 - x0) * 4, + r, g, b, + run_x1 - run_x0); + else + art_ksvg_rgba_run_alpha (linebuf + (run_x0 - x0) * 4, + r, g, b, alphatab[alpha], + run_x1 - run_x0); + } + } + } + running_sum += steps[k].delta; + if (x1 > run_x1) + { + alpha = running_sum >> 16; + if (alpha) + { + if (alpha >= 255) + art_ksvg_rgba_fill_run (linebuf + (run_x1 - x0) * 4, + r, g, b, + x1 - run_x1); + else + art_ksvg_rgba_run_alpha (linebuf + (run_x1 - x0) * 4, + r, g, b, alphatab[alpha], + x1 - run_x1); + } + } + } + else + { + alpha = running_sum >> 16; + if (alpha) + { + if (alpha >= 255) + art_ksvg_rgba_fill_run (linebuf, + r, g, b, + x1 - x0); + else + art_ksvg_rgba_run_alpha (linebuf, + r, g, b, alphatab[alpha], + x1 - x0); + } + } + data->buf += data->rowstride; +} + +static void +art_ksvg_rgba_svp_alpha_mask_callback(void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + art_u8 r, g, b; + int *alphatab; + int alpha; + art_u8 *maskbuf; + + linebuf = data->buf; + x0 = data->x0; + x1 = data->x1; + + r = data->r; + g = data->g; + b = data->b; + alphatab = data->alphatab; + + maskbuf = data->mask + (y - data->y0) * (data->x1 - data->x0); + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgba_mask_run_alpha (linebuf, maskbuf, + r, g, b, alphatab[alpha], + run_x1 - x0); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgba_mask_run_alpha (linebuf + (run_x0 - x0) * 4, maskbuf + (run_x0 - x0), + r, g, b, alphatab[alpha], + run_x1 - run_x0); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgba_mask_run_alpha (linebuf + (run_x1 - x0) * 4, maskbuf + (run_x1 - x0) , + r, g, b, alphatab[alpha], + x1 - run_x1); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgba_mask_run_alpha (linebuf, maskbuf, + r, g, b, alphatab[alpha], + x1 - x0); + } + + data->buf += data->rowstride; +} + +/** + * art_rgb_svp_alpha: 
Alpha-composite sorted vector path over RGB buffer. + * @svp: The source sorted vector path. + * @x0: Left coordinate of destination rectangle. + * @y0: Top coordinate of destination rectangle. + * @x1: Right coordinate of destination rectangle. + * @y1: Bottom coordinate of destination rectangle. + * @rgba: Color in 0xRRGGBBAA format. + * @buf: Destination RGB buffer. + * @rowstride: Rowstride of @buf buffer. + * @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing. + * + * Renders the shape specified with @svp over the @buf RGB buffer. + * @x1 - @x0 specifies the width, and @y1 - @y0 specifies the height, + * of the rectangle rendered. The new pixels are stored starting at + * the first byte of @buf. Thus, the @x0 and @y0 parameters specify + * an offset within @svp, and may be tweaked as a way of doing + * integer-pixel translations without fiddling with @svp itself. + * + * The @rgba argument specifies the color for the rendering. Pixels of + * entirely 0 winding number are left untouched. Pixels of entirely + * 1 winding number have the color @rgba composited over them (ie, + * are replaced by the red, green, blue components of @rgba if the alpha + * component is 0xff). Pixels of intermediate coverage are interpolated + * according to the rule in @alphagamma, or default to linear if + * @alphagamma is NULL. + **/ +void +art_ksvg_rgba_svp_alpha(const ArtSVP *svp, + int x0, int y0, int x1, int y1, + art_u32 rgba, + art_u8 *buf, int rowstride, + ArtAlphaGamma *alphagamma, + art_u8 *mask) +{ + ArtKSVGRgbaSVPAlphaData data; + int r, g, b; + int i; + int a, da; + int alpha; + + r = (rgba >> 24) & 0xff; + g = (rgba >> 16) & 0xff; + b = (rgba >> 8) & 0xff; + alpha = rgba & 0xff; + + data.r = r; + data.g = g; + data.b = b; + data.alpha = alpha; + data.rgba = rgba; + data.mask = mask; + + a = 0x8000; + da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */ + + for (i = 0; i < 256; i++) + { + data.alphatab[i] = a >> 16; + a += da; + } + + data.buf = buf; + data.rowstride = rowstride; + data.x0 = x0; + data.x1 = x1; + data.y0 = y0; + + if(mask) + art_svp_render_aa (svp, x0, y0, x1, y1, art_ksvg_rgba_svp_alpha_mask_callback, &data); + else + { + if (alpha == 255) + art_svp_render_aa (svp, x0, y0, x1, y1, art_ksvg_rgba_svp_alpha_opaque_callback, &data); + else + art_svp_render_aa (svp, x0, y0, x1, y1, art_ksvg_rgba_svp_alpha_callback, &data); + } +} + +/* RGB renderers */ + +static void +art_ksvg_rgb_mask_run_alpha(art_u8 *buf, art_u8 *mask, art_u8 r, art_u8 g, art_u8 b, int alpha, int n) +{ + int i; + int v; + int am; + int tmp; + + if(alpha > 255) + alpha = 255; + + for(i = 0; i < n; i++) + { + am = (alpha * *mask++) + 0x80; + am = (am + (am >> 8)) >> 8; + + v = *buf; + tmp = (r - v) * am + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (g - v) * am + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + + v = *buf; + tmp = (b - v) * am + 0x80; + *buf++ = v + ((tmp + (tmp >> 8)) >> 8); + } +} + +static void +art_ksvg_rgb_svp_alpha_mask_callback(void *callback_data, int y, + int start, ArtSVPRenderAAStep *steps, int n_steps) +{ + ArtKSVGRgbaSVPAlphaData *data = (ArtKSVGRgbaSVPAlphaData *)callback_data; + art_u8 *linebuf; + int run_x0, run_x1; + art_u32 running_sum = start; + int x0, x1; + int k; + art_u8 r, g, b; + int *alphatab; + int alpha; + art_u8 *maskbuf; + + linebuf = data->buf; + x0 = data->x0; + x1 = data->x1; + + r = data->r; + g = data->g; + b = data->b; + alphatab = data->alphatab; + + maskbuf = data->mask + (y - data->y0) * (data->x1 - 
data->x0); + + if(n_steps > 0) + { + run_x1 = steps[0].x; + if(run_x1 > x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgb_mask_run_alpha (linebuf, maskbuf, + r, g, b, alphatab[alpha], + run_x1 - x0); + } + + for(k = 0; k < n_steps - 1; k++) + { + running_sum += steps[k].delta; + run_x0 = run_x1; + run_x1 = steps[k + 1].x; + if(run_x1 > run_x0) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgb_mask_run_alpha (linebuf + (run_x0 - x0) * 3, maskbuf + (run_x0 - x0), + r, g, b, alphatab[alpha], + run_x1 - run_x0); + } + } + running_sum += steps[k].delta; + if(x1 > run_x1) + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgb_mask_run_alpha (linebuf + (run_x1 - x0) * 3, maskbuf + (run_x1 - x0) , + r, g, b, alphatab[alpha], + x1 - run_x1); + } + } + else + { + alpha = (running_sum >> 16) & 0xff; + if(alpha) + art_ksvg_rgb_mask_run_alpha (linebuf, maskbuf, + r, g, b, alphatab[alpha], + x1 - x0); + } + + data->buf += data->rowstride; +} + +/** + * art_rgb_svp_alpha: Alpha-composite sorted vector path over RGB buffer. + * @svp: The source sorted vector path. + * @x0: Left coordinate of destination rectangle. + * @y0: Top coordinate of destination rectangle. + * @x1: Right coordinate of destination rectangle. + * @y1: Bottom coordinate of destination rectangle. + * @rgba: Color in 0xRRGGBBAA format. + * @buf: Destination RGB buffer. + * @rowstride: Rowstride of @buf buffer. + * @alphagamma: #ArtAlphaGamma for gamma-correcting the compositing. + * + * Renders the shape specified with @svp over the @buf RGB buffer. + * @x1 - @x0 specifies the width, and @y1 - @y0 specifies the height, + * of the rectangle rendered. The new pixels are stored starting at + * the first byte of @buf. Thus, the @x0 and @y0 parameters specify + * an offset within @svp, and may be tweaked as a way of doing + * integer-pixel translations without fiddling with @svp itself. + * + * The @rgba argument specifies the color for the rendering. Pixels of + * entirely 0 winding number are left untouched. Pixels of entirely + * 1 winding number have the color @rgba composited over them (ie, + * are replaced by the red, green, blue components of @rgba if the alpha + * component is 0xff). Pixels of intermediate coverage are interpolated + * according to the rule in @alphagamma, or default to linear if + * @alphagamma is NULL. 
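+ *
+ * In this masked variant, @mask points to an 8 bit per pixel coverage
+ * buffer laid out in rows of (@x1 - @x0) bytes covering the destination
+ * rectangle; each byte further scales the coverage derived from @svp and
+ * the alpha component of @rgba before the color is composited into the
+ * 3 byte per pixel @buf.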
+ **/ +void +art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp, + int x0, int y0, int x1, int y1, + art_u32 rgba, + art_u8 *buf, int rowstride, + ArtAlphaGamma *alphagamma, + art_u8 *mask) +{ + ArtKSVGRgbaSVPAlphaData data; + int r, g, b, alpha; + int i; + int a, da; + + r = rgba >> 24; + g = (rgba >> 16) & 0xff; + b = (rgba >> 8) & 0xff; + alpha = rgba & 0xff; + + data.r = r; + data.g = g; + data.b = b; + data.alpha = alpha; + data.mask = mask; + + a = 0x8000; + da = (alpha * 66051 + 0x80) >> 8; /* 66051 equals 2 ^ 32 / (255 * 255) */ + + for(i = 0; i < 256; i++) + { + data.alphatab[i] = a >> 16; + a += da; + } + + data.buf = buf; + data.rowstride = rowstride; + data.x0 = x0; + data.x1 = x1; + data.y0 = y0; + + art_svp_render_aa(svp, x0, y0, x1, y1, art_ksvg_rgb_svp_alpha_mask_callback, &data); +} + diff --git a/ksvg/impl/libs/art_support/art_rgba_svp.h b/ksvg/impl/libs/art_support/art_rgba_svp.h new file mode 100644 index 00000000..b59096d4 --- /dev/null +++ b/ksvg/impl/libs/art_support/art_rgba_svp.h @@ -0,0 +1,57 @@ +/* Libart_LGPL - library of basic graphic primitives + * Copyright (C) 1998 Raph Levien + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + */ + +#ifndef __ART_RGBA_SVP_H__ +#define __ART_RGBA_SVP_H__ + +/* Render a sorted vector path into an RGB buffer. */ + +#ifdef LIBART_COMPILATION +#include "art_alphagamma.h" +#include "art_svp.h" +#else +#include <libart_lgpl/art_alphagamma.h> +#include <libart_lgpl/art_svp.h> +#endif + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +void +art_ksvg_rgba_svp_alpha(const ArtSVP *svp, + int x0, int y0, int x1, int y1, + art_u32 rgba, + art_u8 *buf, int rowstride, + ArtAlphaGamma *alphagamma, + art_u8 *mask); + +void +art_ksvg_rgb_svp_alpha_mask(const ArtSVP *svp, + int x0, int y0, int x1, int y1, + art_u32 rgba, + art_u8 *buf, int rowstride, + ArtAlphaGamma *alphagamma, + art_u8 *mask); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __ART_RGB_SVP_H__ */ |
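
A minimal, hypothetical usage sketch (not part of the code above): it shows one plausible way to drive art_ksvg_rgba_svp_alpha() as declared in art_rgba_svp.h, assuming only stock libart_lgpl entry points (art_svp_from_vpath, art_svp_free) and the standard ArtVpath path codes. The buffer size, path coordinates and 0xRRGGBBAA color are arbitrary; passing mask == NULL exercises the unmasked code path.

#include <string.h>

#include <libart_lgpl/art_vpath.h>
#include <libart_lgpl/art_svp.h>
#include <libart_lgpl/art_svp_vpath.h>

#include "art_rgba_svp.h"

#define WIDTH  64
#define HEIGHT 64

int
main (void)
{
  static art_u8 buf[WIDTH * HEIGHT * 4];   /* RGBA, 4 bytes per pixel */
  ArtVpath vpath[5];
  ArtSVP *svp;

  memset (buf, 0xff, sizeof (buf));        /* start from an opaque white canvas */

  /* A closed triangle in pixel coordinates. */
  vpath[0].code = ART_MOVETO; vpath[0].x = 8.0;  vpath[0].y = 8.0;
  vpath[1].code = ART_LINETO; vpath[1].x = 56.0; vpath[1].y = 16.0;
  vpath[2].code = ART_LINETO; vpath[2].x = 24.0; vpath[2].y = 56.0;
  vpath[3].code = ART_LINETO; vpath[3].x = 8.0;  vpath[3].y = 8.0;
  vpath[4].code = ART_END;    vpath[4].x = 0.0;  vpath[4].y = 0.0;

  svp = art_svp_from_vpath (vpath);        /* plain libart: vpath -> sorted vector path */

  /* Composite a half-transparent red fill (0xRRGGBBAA) over the buffer;
     mask == NULL selects the unmasked callbacks. */
  art_ksvg_rgba_svp_alpha (svp, 0, 0, WIDTH, HEIGHT,
                           0xff000080, buf, WIDTH * 4, NULL, NULL);

  art_svp_free (svp);
  return 0;
}

With a non-NULL mask of (x1 - x0) * (y1 - y0) bytes, the same call would instead go through art_ksvg_rgba_svp_alpha_mask_callback and modulate the coverage per pixel.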