pixman: Branch 'master' - 38 commits

Søren Sandmann Pedersen sandmann at kemper.freedesktop.org
Wed Jul 8 16:21:48 PDT 2009


 pixman/make-combine.pl           |    5 
 pixman/pixman-access.c           |  436 ++--
 pixman/pixman-arm-neon.c         |  918 +++++-----
 pixman/pixman-arm-simd.c         |  182 -
 pixman/pixman-bits-image.c       |   42 
 pixman/pixman-combine.c.template | 1118 ++++++------
 pixman/pixman-combine.h.template |   31 
 pixman/pixman-conical-gradient.c |    8 
 pixman/pixman-cpu.c              |   23 
 pixman/pixman-edge-imp.h         |   58 
 pixman/pixman-edge.c             |   74 
 pixman/pixman-fast-path.c        |  914 ++++-----
 pixman/pixman-general.c          |   74 
 pixman/pixman-gradient-walker.c  |    2 
 pixman/pixman-image.c            |   11 
 pixman/pixman-linear-gradient.c  |    8 
 pixman/pixman-matrix.c           |   70 
 pixman/pixman-mmx.c              | 1112 ++++++------
 pixman/pixman-private.h          |   32 
 pixman/pixman-radial-gradient.c  |    8 
 pixman/pixman-region.c           |  961 +++++-----
 pixman/pixman-region16.c         |    6 
 pixman/pixman-solid-fill.c       |    4 
 pixman/pixman-sse2.c             | 3572 +++++++++++++++++++--------------------
 pixman/pixman-utils.c            |  296 +--
 pixman/pixman-vmx.c              |  344 +--
 pixman/pixman.c                  |   22 
 pixman/pixman.h                  |   48 
 28 files changed, 5187 insertions(+), 5192 deletions(-)

New commits:
commit 0db0430d1d410855863e669f0de9e8b5d26db7fd
Merge: 31a40a1... b3cf3f0...
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Wed Jul 8 18:59:15 2009 -0400

    Merge branch 'naming'

commit b3cf3f0c2be462cd61e63e07655d1b45e55f4a7b
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jul 6 20:33:05 2009 -0400

    Fix up some overeager search-and-replace renamings
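
A hedged reading of the diff below (reconstructed from the changed names, not stated in the commit): the whole-tree renaming done on the 'naming' branch appears to have also rewritten the tails of the MMX temporaries, turning names like mmx_src into mmsrc_x. This commit restores the intended declarations:

    /* what the earlier rename pass left behind (removed below) */
    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmdest_x;

    /* the intended names (restored below) */
    __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;

Only the src, mask and dest temporaries were mangled; mmx_alpha came through intact.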

diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index abb6f46..334990d 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -2698,7 +2698,7 @@ sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
 
-    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmdest_x;
+    __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -2710,7 +2710,7 @@ sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 
     xmm_src = _mm_unpacklo_epi8 (create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
     xmm_alpha = expand_alpha_1x128 (xmm_src);
-    mmsrc_x   = _mm_movepi64_pi64 (xmm_src);
+    mmx_src   = _mm_movepi64_pi64 (xmm_src);
     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
@@ -2733,13 +2733,13 @@ sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *pd;
-                mmmask_x = unpack_32_1x64 (m);
-                mmdest_x = unpack_32_1x64 (d);
+                mmx_mask = unpack_32_1x64 (m);
+                mmx_dest = unpack_32_1x64 (d);
 
-                *pd = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *pd = pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                  &mmx_alpha,
-                                                 &mmmask_x,
-                                                 &mmdest_x));
+                                                 &mmx_mask,
+                                                 &mmx_dest));
             }
 
             pd++;
@@ -2785,13 +2785,13 @@ sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *pd;
-                mmmask_x = unpack_32_1x64 (m);
-                mmdest_x = unpack_32_1x64 (d);
+                mmx_mask = unpack_32_1x64 (m);
+                mmx_dest = unpack_32_1x64 (d);
 
-                *pd = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *pd = pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                  &mmx_alpha,
-                                                 &mmmask_x,
-                                                 &mmdest_x));
+                                                 &mmx_mask,
+                                                 &mmx_dest));
             }
 
             pd++;
@@ -3222,7 +3222,7 @@ sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
 
-    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
+    __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -3236,7 +3236,7 @@ sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
     xmm_def = create_mask_2x32_128 (src, src);
     xmm_src = expand_pixel_32_1x128 (src);
     xmm_alpha = expand_alpha_1x128 (xmm_src);
-    mmsrc_x   = _mm_movepi64_pi64 (xmm_src);
+    mmx_src   = _mm_movepi64_pi64 (xmm_src);
     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
@@ -3258,12 +3258,12 @@ sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expand_pixel_8_1x64 (m);
+                mmx_mask = expand_pixel_8_1x64 (m);
                 mmx_dest = unpack_32_1x64 (d);
 
-                *dst = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                   &mmx_alpha,
-                                                  &mmmask_x,
+                                                  &mmx_mask,
                                                   &mmx_dest));
             }
 
@@ -3316,12 +3316,12 @@ sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expand_pixel_8_1x64 (m);
+                mmx_mask = expand_pixel_8_1x64 (m);
                 mmx_dest = unpack_32_1x64 (d);
 
-                *dst = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *dst = pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                   &mmx_alpha,
-                                                  &mmmask_x,
+                                                  &mmx_mask,
                                                   &mmx_dest));
             }
 
@@ -3631,7 +3631,7 @@ sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
     int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t m;
-    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
+    __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
 
     __m128i xmm_src, xmm_alpha;
     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
@@ -3648,7 +3648,7 @@ sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
 
     xmm_src = expand_pixel_32_1x128 (src);
     xmm_alpha = expand_alpha_1x128 (xmm_src);
-    mmsrc_x = _mm_movepi64_pi64 (xmm_src);
+    mmx_src = _mm_movepi64_pi64 (xmm_src);
     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
@@ -3670,12 +3670,12 @@ sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+                mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
                 mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                                  &mmx_alpha,
-                                                                 &mmmask_x,
+                                                                 &mmx_mask,
                                                                  &mmx_dest)));
             }
 
@@ -3739,12 +3739,12 @@ sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+                mmx_mask = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
                 mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                                  &mmx_alpha,
-                                                                 &mmmask_x,
+                                                                 &mmx_mask,
                                                                  &mmx_dest)));
             }
 
@@ -4034,7 +4034,7 @@ sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
     __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
 
-    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
+    __m64 mmx_src, mmx_alpha, mmx_mask, mmx_dest;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -4046,7 +4046,7 @@ sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
 
     xmm_src = expand_pixel_32_1x128 (src);
     xmm_alpha = expand_alpha_1x128 (xmm_src);
-    mmsrc_x = _mm_movepi64_pi64 (xmm_src);
+    mmx_src = _mm_movepi64_pi64 (xmm_src);
     mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
@@ -4068,12 +4068,12 @@ sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = unpack_32_1x64 (m);
+                mmx_mask = unpack_32_1x64 (m);
                 mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                                  &mmx_alpha,
-                                                                 &mmmask_x,
+                                                                 &mmx_mask,
                                                                  &mmx_dest)));
             }
 
@@ -4134,12 +4134,12 @@ sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = unpack_32_1x64 (m);
+                mmx_mask = unpack_32_1x64 (m);
                 mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmx_src,
                                                                  &mmx_alpha,
-                                                                 &mmmask_x,
+                                                                 &mmx_mask,
                                                                  &mmx_dest)));
             }
 
commit c2e331693d858c01b69135342c139546780b7021
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jul 6 12:48:59 2009 -0400

    Rename num_rects back to numRects.
    
    The name numRects is public API and can't be changed.
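
For context, a minimal sketch of hypothetical client code (not part of this commit): struct pixman_region16_data is defined in the installed pixman.h, so applications can reach into it by field name, and that spelling is therefore frozen:

    #include <pixman.h>

    /* Hypothetical caller that reads the public field directly; it
     * computes the same value that pixman_region_n_rects() returns. */
    static long
    region_rect_count (const pixman_region16_t *region)
    {
        /* a NULL data pointer means the region is just its extents box */
        return region->data ? region->data->numRects : 1;
    }

Renaming the field in pixman.h, as the 'naming' branch briefly did, would break any such code at compile time, hence the revert below.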

diff --git a/pixman/pixman-region.c b/pixman/pixman-region.c
index c061d17..8439bae 100644
--- a/pixman/pixman-region.c
+++ b/pixman/pixman-region.c
@@ -51,17 +51,17 @@ SOFTWARE.
 #include <stdio.h>
 #include "pixman-private.h"
 
-#define PIXREGION_NIL(reg) ((reg)->data && !(reg)->data->num_rects)
+#define PIXREGION_NIL(reg) ((reg)->data && !(reg)->data->numRects)
 /* not a region */
 #define PIXREGION_NAR(reg)	((reg)->data == pixman_broken_data)
-#define PIXREGION_NUM_RECTS(reg) ((reg)->data ? (reg)->data->num_rects : 1)
+#define PIXREGION_NUMRECTS(reg) ((reg)->data ? (reg)->data->numRects : 1)
 #define PIXREGION_SIZE(reg) ((reg)->data ? (reg)->data->size : 0)
 #define PIXREGION_RECTS(reg) ((reg)->data ? (box_type_t *)((reg)->data + 1) \
 			               : &(reg)->extents)
 #define PIXREGION_BOXPTR(reg) ((box_type_t *)((reg)->data + 1))
 #define PIXREGION_BOX(reg,i) (&PIXREGION_BOXPTR(reg)[i])
-#define PIXREGION_TOP(reg) PIXREGION_BOX(reg, (reg)->data->num_rects)
-#define PIXREGION_END(reg) PIXREGION_BOX(reg, (reg)->data->num_rects - 1)
+#define PIXREGION_TOP(reg) PIXREGION_BOX(reg, (reg)->data->numRects)
+#define PIXREGION_END(reg) PIXREGION_BOX(reg, (reg)->data->numRects - 1)
 
 #define GOOD(reg) assert(PREFIX(_selfcheck) (reg))
 
@@ -171,11 +171,11 @@ alloc_data(size_t n)
 #define FREE_DATA(reg) if ((reg)->data && (reg)->data->size) free((reg)->data)
 
 #define RECTALLOC_BAIL(region,n,bail) \
-if (!(region)->data || (((region)->data->num_rects + (n)) > (region)->data->size)) \
+if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)) \
     if (!pixman_rect_alloc(region, n)) { goto bail; }
 
 #define RECTALLOC(region,n) \
-if (!(region)->data || (((region)->data->num_rects + (n)) > (region)->data->size)) \
+if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)) \
     if (!pixman_rect_alloc(region, n)) { return FALSE; }
 
 #define ADDRECT(next_rect,nx1,ny1,nx2,ny2)	\
@@ -189,29 +189,29 @@ if (!(region)->data || (((region)->data->num_rects + (n)) > (region)->data->size
 
 #define NEWRECT(region,next_rect,nx1,ny1,nx2,ny2)			\
 {									\
-    if (!(region)->data || ((region)->data->num_rects == (region)->data->size))\
+    if (!(region)->data || ((region)->data->numRects == (region)->data->size))\
     {									\
 	if (!pixman_rect_alloc(region, 1))					\
 	    return FALSE;						\
 	next_rect = PIXREGION_TOP(region);					\
     }									\
     ADDRECT(next_rect,nx1,ny1,nx2,ny2);					\
-    region->data->num_rects++;						\
-    assert(region->data->num_rects<=region->data->size);			\
+    region->data->numRects++;						\
+    assert(region->data->numRects<=region->data->size);			\
 }
 
-#define DOWNSIZE(reg,num_rects)						\
-    if (((num_rects) < ((reg)->data->size >> 1)) && ((reg)->data->size > 50)) \
+#define DOWNSIZE(reg,numRects)						\
+    if (((numRects) < ((reg)->data->size >> 1)) && ((reg)->data->size > 50)) \
     {									\
 	region_data_type_t * new_data;				\
-	size_t data_size = PIXREGION_SZOF(num_rects);			\
+	size_t data_size = PIXREGION_SZOF(numRects);			\
 	if (!data_size)							\
 	    new_data = NULL;						\
 	else								\
 	    new_data = (region_data_type_t *)realloc((reg)->data, data_size); \
 	if (new_data)							\
 	{								\
-	    new_data->size = (num_rects);					\
+	    new_data->size = (numRects);					\
 	    (reg)->data = new_data;					\
 	}								\
     }
@@ -229,11 +229,11 @@ PREFIX(_equal) (reg1, reg2)
     if (reg1->extents.x2 != reg2->extents.x2) return FALSE;
     if (reg1->extents.y1 != reg2->extents.y1) return FALSE;
     if (reg1->extents.y2 != reg2->extents.y2) return FALSE;
-    if (PIXREGION_NUM_RECTS(reg1) != PIXREGION_NUM_RECTS(reg2)) return FALSE;
+    if (PIXREGION_NUMRECTS(reg1) != PIXREGION_NUMRECTS(reg2)) return FALSE;
 
     rects1 = PIXREGION_RECTS(reg1);
     rects2 = PIXREGION_RECTS(reg2);
-    for (i = 0; i != PIXREGION_NUM_RECTS(reg1); i++) {
+    for (i = 0; i != PIXREGION_NUMRECTS(reg1); i++) {
 	if (rects1[i].x1 != rects2[i].x1) return FALSE;
 	if (rects1[i].x2 != rects2[i].x2) return FALSE;
 	if (rects1[i].y1 != rects2[i].y1) return FALSE;
@@ -250,7 +250,7 @@ PREFIX(_print) (rgn)
     int i;
     box_type_t * rects;
 
-    num = PIXREGION_NUM_RECTS(rgn);
+    num = PIXREGION_NUMRECTS(rgn);
     size = PIXREGION_SIZE(rgn);
     rects = PIXREGION_RECTS(rgn);
     fprintf(stderr, "num: %d size: %d\n", num, size);
@@ -299,7 +299,7 @@ PREFIX(_fini) (region_type_t *region)
 PIXMAN_EXPORT int
 PREFIX(_n_rects) (region_type_t *region)
 {
-    return PIXREGION_NUM_RECTS (region);
+    return PIXREGION_NUMRECTS (region);
 }
 
 PIXMAN_EXPORT box_type_t *
@@ -307,7 +307,7 @@ PREFIX(_rectangles) (region_type_t *region,
 				  int		    *n_rects)
 {
     if (n_rects)
-	*n_rects = PIXREGION_NUM_RECTS (region);
+	*n_rects = PIXREGION_NUMRECTS (region);
 
     return PIXREGION_RECTS (region);
 }
@@ -332,7 +332,7 @@ pixman_rect_alloc (region_type_t * region, int n)
 	region->data = alloc_data(n);
 	if (!region->data)
 	    return pixman_break (region);
-	region->data->num_rects = 1;
+	region->data->numRects = 1;
 	*PIXREGION_BOXPTR(region) = region->extents;
     }
     else if (!region->data->size)
@@ -340,18 +340,18 @@ pixman_rect_alloc (region_type_t * region, int n)
 	region->data = alloc_data(n);
 	if (!region->data)
 	    return pixman_break (region);
-	region->data->num_rects = 0;
+	region->data->numRects = 0;
     }
     else
     {
 	size_t data_size;
 	if (n == 1)
 	{
-	    n = region->data->num_rects;
+	    n = region->data->numRects;
 	    if (n > 500) /* XXX pick numbers out of a hat */
 		n = 250;
 	}
-	n += region->data->num_rects;
+	n += region->data->numRects;
 	data_size = PIXREGION_SZOF(n);
 	if (!data_size)
 	    data = NULL;
@@ -379,17 +379,17 @@ PREFIX(_copy) (region_type_t *dst, region_type_t *src)
 	dst->data = src->data;
 	return TRUE;
     }
-    if (!dst->data || (dst->data->size < src->data->num_rects))
+    if (!dst->data || (dst->data->size < src->data->numRects))
     {
 	FREE_DATA(dst);
-	dst->data = alloc_data(src->data->num_rects);
+	dst->data = alloc_data(src->data->numRects);
 	if (!dst->data)
 	    return pixman_break (dst);
-	dst->data->size = src->data->num_rects;
+	dst->data->size = src->data->numRects;
     }
-    dst->data->num_rects = src->data->num_rects;
+    dst->data->numRects = src->data->numRects;
     memmove((char *)PIXREGION_BOXPTR(dst),(char *)PIXREGION_BOXPTR(src),
-	  dst->data->num_rects * sizeof(box_type_t));
+	  dst->data->numRects * sizeof(box_type_t));
     return TRUE;
 }
 
@@ -411,7 +411,7 @@ PREFIX(_copy) (region_type_t *dst, region_type_t *src)
  *	If coalescing takes place:
  *	    - rectangles in the previous band will have their y2 fields
  *	      altered.
- *	    - region->data->num_rects will be decreased.
+ *	    - region->data->numRects will be decreased.
  *
  *-----------------------------------------------------------------------
  */
@@ -423,15 +423,15 @@ pixman_coalesce (
 {
     box_type_t *	prev_box;   	/* Current box in previous band	     */
     box_type_t *	cur_box;    	/* Current box in current band       */
-    int  	num_rects;	/* Number rectangles in both bands   */
+    int  	numRects;	/* Number rectangles in both bands   */
     int	y2;		/* Bottom of current band	     */
     /*
      * Figure out how many rectangles are in the band.
      */
-    num_rects = cur_start - prev_start;
-    assert(num_rects == region->data->num_rects - cur_start);
+    numRects = cur_start - prev_start;
+    assert(numRects == region->data->numRects - cur_start);
 
-    if (!num_rects) return cur_start;
+    if (!numRects) return cur_start;
 
     /*
      * The bands may only be coalesced if the bottom of the previous
@@ -455,27 +455,27 @@ pixman_coalesce (
 	}
 	prev_box++;
 	cur_box++;
-	num_rects--;
-    } while (num_rects);
+	numRects--;
+    } while (numRects);
 
     /*
      * The bands may be merged, so set the bottom y of each box
      * in the previous band to the bottom y of the current band.
      */
-    num_rects = cur_start - prev_start;
-    region->data->num_rects -= num_rects;
+    numRects = cur_start - prev_start;
+    region->data->numRects -= numRects;
     do {
 	prev_box--;
 	prev_box->y2 = y2;
-	num_rects--;
-    } while (num_rects);
+	numRects--;
+    } while (numRects);
     return prev_start;
 }
 
 /* Quicky macro to avoid trivial reject procedure calls to pixman_coalesce */
 
 #define COALESCE(new_reg, prev_band, cur_band)				\
-    if (cur_band - prev_band == new_reg->data->num_rects - cur_band) {	\
+    if (cur_band - prev_band == new_reg->data->numRects - cur_band) {	\
 	prev_band = pixman_coalesce(new_reg, prev_band, cur_band);		\
     } else {								\
 	prev_band = cur_band;						\
@@ -492,7 +492,7 @@ pixman_coalesce (
  *	None.
  *
  * Side Effects:
- *	region->data->num_rects is incremented and the rectangles overwritten
+ *	region->data->numRects is incremented and the rectangles overwritten
  *	with the rectangles we're passed.
  *
  *-----------------------------------------------------------------------
@@ -517,7 +517,7 @@ pixman_region_append_non_o (
     /* Make sure we have enough space for all rectangles to be added */
     RECTALLOC(region, new_rects);
     next_rect = PIXREGION_TOP(region);
-    region->data->num_rects += new_rects;
+    region->data->numRects += new_rects;
     do {
 	assert(r->x1 < r->x2);
 	ADDRECT(next_rect, r->x1, y1, r->x2, y2);
@@ -543,7 +543,7 @@ pixman_region_append_non_o (
 	RECTALLOC_BAIL(new_reg, new_rects, bail);					\
 	memmove((char *)PIXREGION_TOP(new_reg),(char *)r, 			\
               new_rects * sizeof(box_type_t));				\
-	new_reg->data->num_rects += new_rects;				\
+	new_reg->data->numRects += new_rects;				\
     }									\
 }
 
@@ -617,7 +617,7 @@ pixman_op(
     int    r1y1;		    /* Temps for r1->y1 and r2->y1   */
     int    r2y1;
     int		    new_size;
-    int		    num_rects;
+    int		    numRects;
 
     /*
      * Break any region computed from a broken region
@@ -634,29 +634,29 @@ pixman_op(
      */
 
     r1 = PIXREGION_RECTS(reg1);
-    new_size = PIXREGION_NUM_RECTS(reg1);
+    new_size = PIXREGION_NUMRECTS(reg1);
     r1_end = r1 + new_size;
-    num_rects = PIXREGION_NUM_RECTS(reg2);
+    numRects = PIXREGION_NUMRECTS(reg2);
     r2 = PIXREGION_RECTS(reg2);
-    r2_end = r2 + num_rects;
+    r2_end = r2 + numRects;
     assert(r1 != r1_end);
     assert(r2 != r2_end);
 
     old_data = (region_data_type_t *)NULL;
     if (((new_reg == reg1) && (new_size > 1)) ||
-	((new_reg == reg2) && (num_rects > 1)))
+	((new_reg == reg2) && (numRects > 1)))
     {
 	old_data = new_reg->data;
 	new_reg->data = pixman_region_empty_data;
     }
     /* guess at new size */
-    if (num_rects > new_size)
-	new_size = num_rects;
+    if (numRects > new_size)
+	new_size = numRects;
     new_size <<= 1;
     if (!new_reg->data)
 	new_reg->data = pixman_region_empty_data;
     else if (new_reg->data->size)
-	new_reg->data->num_rects = 0;
+	new_reg->data->numRects = 0;
     if (new_size > new_reg->data->size) {
 	if (!pixman_rect_alloc(new_reg, new_size)) {
 	    if (old_data)
@@ -719,7 +719,7 @@ pixman_op(
 		top = MAX(r1y1, ybot);
 		bot = MIN(r1->y2, r2y1);
 		if (top != bot)	{
-		    cur_band = new_reg->data->num_rects;
+		    cur_band = new_reg->data->numRects;
 		    if (!pixman_region_append_non_o(new_reg, r1, r1_band_end, top, bot))
 			goto bail;
 		    COALESCE(new_reg, prev_band, cur_band);
@@ -731,7 +731,7 @@ pixman_op(
 		top = MAX(r2y1, ybot);
 		bot = MIN(r2->y2, r1y1);
 		if (top != bot) {
-		    cur_band = new_reg->data->num_rects;
+		    cur_band = new_reg->data->numRects;
 		    if (!pixman_region_append_non_o(new_reg, r2, r2_band_end, top, bot))
 			goto bail;
 		    COALESCE(new_reg, prev_band, cur_band);
@@ -748,7 +748,7 @@ pixman_op(
 	 */
 	ybot = MIN(r1->y2, r2->y2);
 	if (ybot > ytop) {
-	    cur_band = new_reg->data->num_rects;
+	    cur_band = new_reg->data->numRects;
 	    if (!(* overlap_func)(new_reg,
 				 r1, r1_band_end,
 				 r2, r2_band_end,
@@ -778,7 +778,7 @@ pixman_op(
     if ((r1 != r1_end) && append_non1) {
 	/* Do first non_overlap1Func call, which may be able to coalesce */
 	FIND_BAND(r1, r1_band_end, r1_end, r1y1);
-	cur_band = new_reg->data->num_rects;
+	cur_band = new_reg->data->numRects;
 	if (!pixman_region_append_non_o(new_reg,
 				      r1, r1_band_end,
 				      MAX(r1y1, ybot), r1->y2))
@@ -790,7 +790,7 @@ pixman_op(
     } else if ((r2 != r2_end) && append_non2) {
 	/* Do first non_overlap2Func call, which may be able to coalesce */
 	FIND_BAND(r2, r2_band_end, r2_end, r2y1);
-	cur_band = new_reg->data->num_rects;
+	cur_band = new_reg->data->numRects;
 	if (!pixman_region_append_non_o(new_reg,
 				      r2, r2_band_end,
 				      MAX(r2y1, ybot), r2->y2))
@@ -803,12 +803,12 @@ pixman_op(
     if (old_data)
 	free(old_data);
 
-    if (!(num_rects = new_reg->data->num_rects))
+    if (!(numRects = new_reg->data->numRects))
     {
 	FREE_DATA(new_reg);
 	new_reg->data = pixman_region_empty_data;
     }
-    else if (num_rects == 1)
+    else if (numRects == 1)
     {
 	new_reg->extents = *PIXREGION_BOXPTR(new_reg);
 	FREE_DATA(new_reg);
@@ -816,7 +816,7 @@ pixman_op(
     }
     else
     {
-	DOWNSIZE(new_reg, num_rects);
+	DOWNSIZE(new_reg, numRects);
     }
 
     return TRUE;
@@ -1215,18 +1215,18 @@ PREFIX(_union) (region_type_t *new_reg,
 static void
 quick_sort_rects(
     box_type_t     rects[],
-    int        num_rects)
+    int        numRects)
 {
     int	y1;
     int	x1;
     int        i, j;
     box_type_t *r;
 
-    /* Always called with num_rects > 1 */
+    /* Always called with numRects > 1 */
 
     do
     {
-	if (num_rects == 2)
+	if (numRects == 2)
 	{
 	    if (rects[0].y1 > rects[1].y1 ||
 		    (rects[0].y1 == rects[1].y1 && rects[0].x1 > rects[1].x1))
@@ -1235,13 +1235,13 @@ quick_sort_rects(
 	}
 
 	/* Choose partition element, stick in location 0 */
-        EXCHANGE_RECTS(0, num_rects >> 1);
+        EXCHANGE_RECTS(0, numRects >> 1);
 	y1 = rects[0].y1;
 	x1 = rects[0].x1;
 
         /* Partition array */
         i = 0;
-        j = num_rects;
+        j = numRects;
         do
 	{
 	    r = &(rects[i]);
@@ -1249,7 +1249,7 @@ quick_sort_rects(
 	    {
 		r++;
 		i++;
-            } while (i != num_rects &&
+            } while (i != numRects &&
 		     (r->y1 < y1 || (r->y1 == y1 && r->x1 < x1)));
 	    r = &(rects[j]);
 	    do
@@ -1265,10 +1265,10 @@ quick_sort_rects(
         EXCHANGE_RECTS(0, j);
 
 	/* Recurse */
-        if (num_rects-j-1 > 1)
-	    quick_sort_rects(&rects[j+1], num_rects-j-1);
-        num_rects = j;
-    } while (num_rects > 1);
+        if (numRects-j-1 > 1)
+	    quick_sort_rects(&rects[j+1], numRects-j-1);
+        numRects = j;
+    } while (numRects > 1);
 }
 
 /*-
@@ -1317,7 +1317,7 @@ validate (region_type_t * badreg,
 
     region_info_t stack_regions[64];
 
-	     int	num_rects;   /* Original num_rects for badreg	    */
+	     int	numRects;   /* Original numRects for badreg	    */
 	     region_info_t *ri;	    /* Array of current regions		    */
     	     int	num_ri;      /* Number of entries used in ri	    */
 	     int	size_ri;	    /* Number of entries available in ri    */
@@ -1336,8 +1336,8 @@ validate (region_type_t * badreg,
 	GOOD(badreg);
 	return TRUE;
     }
-    num_rects = badreg->data->num_rects;
-    if (!num_rects)
+    numRects = badreg->data->numRects;
+    if (!numRects)
     {
 	if (PIXREGION_NAR(badreg))
 	    return FALSE;
@@ -1346,21 +1346,21 @@ validate (region_type_t * badreg,
     }
     if (badreg->extents.x1 < badreg->extents.x2)
     {
-	if ((num_rects) == 1)
+	if ((numRects) == 1)
 	{
 	    FREE_DATA(badreg);
 	    badreg->data = (region_data_type_t *) NULL;
 	}
 	else
 	{
-	    DOWNSIZE(badreg, num_rects);
+	    DOWNSIZE(badreg, numRects);
 	}
 	GOOD(badreg);
 	return TRUE;
     }
 
     /* Step 1: Sort the rects array into ascending (y1, x1) order */
-    quick_sort_rects(PIXREGION_BOXPTR(badreg), num_rects);
+    quick_sort_rects(PIXREGION_BOXPTR(badreg), numRects);
 
     /* Step 2: Scatter the sorted array into the minimum number of regions */
 
@@ -1374,7 +1374,7 @@ validate (region_type_t * badreg,
     ri[0].reg = *badreg;
     box = PIXREGION_BOXPTR(&ri[0].reg);
     ri[0].reg.extents = *box;
-    ri[0].reg.data->num_rects = 1;
+    ri[0].reg.data->numRects = 1;
     badreg->extents = *pixman_region_empty_box;
     badreg->data = pixman_region_empty_data;
 
@@ -1384,7 +1384,7 @@ validate (region_type_t * badreg,
        forget it.  Try the next region.  If it doesn't fit cleanly into any
        region, make a new one. */
 
-    for (i = num_rects; --i > 0;)
+    for (i = numRects; --i > 0;)
     {
 	box++;
 	/* Look for a region to append box to */
@@ -1406,7 +1406,7 @@ validate (region_type_t * badreg,
 		{
 		    RECTALLOC_BAIL(reg, 1, bail);
 		    *PIXREGION_TOP(reg) = *box;
-		    reg->data->num_rects++;
+		    reg->data->numRects++;
 		}
 		goto next_rect;   /* So sue me */
 	    }
@@ -1416,10 +1416,10 @@ validate (region_type_t * badreg,
 		if (reg->extents.x2 < ri_box->x2) reg->extents.x2 = ri_box->x2;
 		if (reg->extents.x1 > box->x1)   reg->extents.x1 = box->x1;
 		COALESCE(reg, rit->prev_band, rit->cur_band);
-		rit->cur_band = reg->data->num_rects;
+		rit->cur_band = reg->data->numRects;
 		RECTALLOC_BAIL(reg, 1, bail);
 		*PIXREGION_TOP(reg) = *box;
-		reg->data->num_rects++;
+		reg->data->numRects++;
 		goto next_rect;
 	    }
 	    /* Well, this region was inappropriate.  Try the next one. */
@@ -1469,7 +1469,7 @@ next_rect: ;
 	reg->extents.y2 = ri_box->y2;
 	if (reg->extents.x2 < ri_box->x2) reg->extents.x2 = ri_box->x2;
 	COALESCE(reg, rit->prev_band, rit->cur_band);
-	if (reg->data->num_rects == 1) /* keep unions happy below */
+	if (reg->data->numRects == 1) /* keep unions happy below */
 	{
 	    FREE_DATA(reg);
 	    reg->data = (region_data_type_t *)NULL;
@@ -1788,15 +1788,15 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
     box_type_t *     pbox;
     box_type_t *     pbox_end;
     int			part_in, part_out;
-    int			num_rects;
+    int			numRects;
 
     GOOD(region);
-    num_rects = PIXREGION_NUM_RECTS(region);
+    numRects = PIXREGION_NUMRECTS(region);
     /* useful optimization */
-    if (!num_rects || !EXTENTCHECK(&region->extents, prect))
+    if (!numRects || !EXTENTCHECK(&region->extents, prect))
         return(PIXMAN_REGION_OUT);
 
-    if (num_rects == 1)
+    if (numRects == 1)
     {
 	/* We know that it must be PIXMAN_REGION_IN or PIXMAN_REGION_PART */
 	if (SUBSUMES(&region->extents, prect))
@@ -1813,7 +1813,7 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
     y = prect->y1;
 
     /* can stop when both part_out and part_in are TRUE, or we reach prect->y2 */
-    for (pbox = PIXREGION_BOXPTR(region), pbox_end = pbox + num_rects;
+    for (pbox = PIXREGION_BOXPTR(region), pbox_end = pbox + numRects;
          pbox != pbox_end;
          pbox++)
     {
@@ -1898,7 +1898,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
     region->extents.y2 = y2 = region->extents.y2 + y;
     if (((x1 - SHRT_MIN)|(y1 - SHRT_MIN)|(SHRT_MAX - x2)|(SHRT_MAX - y2)) >= 0)
     {
-	if (region->data && (nbox = region->data->num_rects))
+	if (region->data && (nbox = region->data->numRects))
 	{
 	    for (pbox = PIXREGION_BOXPTR(region); nbox--; pbox++)
 	    {
@@ -1926,7 +1926,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
 	region->extents.y1 = SHRT_MIN;
     else if (y2 > SHRT_MAX)
 	region->extents.y2 = SHRT_MAX;
-    if (region->data && (nbox = region->data->num_rects))
+    if (region->data && (nbox = region->data->numRects))
     {
 	box_type_t * pbox_out;
 
@@ -1939,7 +1939,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
 	    if (((x2 - SHRT_MIN)|(y2 - SHRT_MIN)|
 		 (SHRT_MAX - x1)|(SHRT_MAX - y1)) <= 0)
 	    {
-		region->data->num_rects--;
+		region->data->numRects--;
 		continue;
 	    }
 	    if (x1 < SHRT_MIN)
@@ -1954,7 +1954,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
 	}
 	if (pbox_out != pbox)
 	{
-	    if (region->data->num_rects == 1)
+	    if (region->data->numRects == 1)
 	    {
 		region->extents = *PIXREGION_BOXPTR(region);
 		FREE_DATA(region);
@@ -1984,20 +1984,20 @@ PREFIX(_contains_point) (region_type_t * region,
 			     box_type_t * box)
 {
     box_type_t *pbox, *pbox_end;
-    int num_rects;
+    int numRects;
 
     GOOD(region);
-    num_rects = PIXREGION_NUM_RECTS(region);
-    if (!num_rects || !INBOX(&region->extents, x, y))
+    numRects = PIXREGION_NUMRECTS(region);
+    if (!numRects || !INBOX(&region->extents, x, y))
         return(FALSE);
-    if (num_rects == 1)
+    if (numRects == 1)
     {
         if (box)
 	    *box = region->extents;
 
 	return(TRUE);
     }
-    for (pbox = PIXREGION_BOXPTR(region), pbox_end = pbox + num_rects;
+    for (pbox = PIXREGION_BOXPTR(region), pbox_end = pbox + numRects;
 	 pbox != pbox_end;
 	 pbox++)
     {
@@ -2041,17 +2041,17 @@ PIXMAN_EXPORT pixman_bool_t
 PREFIX(_selfcheck) (reg)
     region_type_t * reg;
 {
-    int i, num_rects;
+    int i, numRects;
 
     if ((reg->extents.x1 > reg->extents.x2) ||
 	(reg->extents.y1 > reg->extents.y2))
 	return FALSE;
-    num_rects = PIXREGION_NUM_RECTS(reg);
-    if (!num_rects)
+    numRects = PIXREGION_NUMRECTS(reg);
+    if (!numRects)
 	return ((reg->extents.x1 == reg->extents.x2) &&
 		(reg->extents.y1 == reg->extents.y2) &&
 		(reg->data->size || (reg->data == pixman_region_empty_data)));
-    else if (num_rects == 1)
+    else if (numRects == 1)
 	return (!reg->data);
     else
     {
@@ -2060,9 +2060,9 @@ PREFIX(_selfcheck) (reg)
 
 	pbox_p = PIXREGION_RECTS(reg);
 	box = *pbox_p;
-	box.y2 = pbox_p[num_rects-1].y2;
+	box.y2 = pbox_p[numRects-1].y2;
 	pbox_n = pbox_p + 1;
-	for (i = num_rects; --i > 0; pbox_p++, pbox_n++)
+	for (i = numRects; --i > 0; pbox_p++, pbox_n++)
 	{
 	    if ((pbox_n->x1 >= pbox_n->x2) ||
 		(pbox_n->y1 >= pbox_n->y2))
@@ -2141,10 +2141,10 @@ PREFIX(_init_rects) (region_type_t *region,
      */
     if (region->data->numRects == 0)
     {
-	freeData (region);
+	FREE_DATA (region);
 	region->data = NULL;
 
-	good (region);
+	GOOD (region);
 	
 	return TRUE;
     }
@@ -2153,10 +2153,10 @@ PREFIX(_init_rects) (region_type_t *region,
     {
 	region->extents = rects[0];
 
-	freeData (region);
+	FREE_DATA (region);
 	region->data = NULL;
 
-	good (region);
+	GOOD (region);
 	
 	return TRUE;
     }
diff --git a/pixman/pixman.h b/pixman/pixman.h
index 92ed696..df69511 100644
--- a/pixman/pixman.h
+++ b/pixman/pixman.h
@@ -398,7 +398,7 @@ typedef struct pixman_region16		pixman_region16_t;
 
 struct pixman_region16_data {
     long		size;
-    long		num_rects;
+    long		numRects;
 /*  pixman_box16_t	rects[size];   in memory but not explicitly declared */
 };
 
@@ -500,7 +500,7 @@ typedef struct pixman_region32		pixman_region32_t;
 
 struct pixman_region32_data {
     long		size;
-    long		num_rects;
+    long		numRects;
 /*  pixman_box32_t	rects[size];   in memory but not explicitly declared */
 };
 
commit 8261b4d57cfdf77d7fdd4e4c0fc805ba48f7e0a0
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 02:12:21 2009 -0400

    Rename combine_*_c to combine_*_ca
    
    s/combine_(.+)_c([^a-z0-9A-Z])/combine_$1_ca$2/g;
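
For illustration, a sketch of the pattern's effect (grounded in the diff below, not an extra change): the trailing [^a-z0-9A-Z] requires the _c to be followed by something other than a letter or digit, so only identifiers that actually end in _c (before a space, parenthesis and so on) are renamed; names where _c is followed by further letters, such as the pre-existing combine_width_ca table, are left untouched, as are the *_u combiners. For example:

    /* before */
    static void
    combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
                    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width);

    /* after: _ca marks the component-alpha variant; the unified-alpha
     * combiners such as combine_multiply_u keep their names */
    static void
    combine_over_ca (pixman_implementation_t *imp, pixman_op_t op,
                     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width);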

diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index b794000..44a09ad 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -12,7 +12,7 @@
 /*** per channel helper functions ***/
 
 static void
-combine_mask_c (comp4_t *src, comp4_t *mask)
+combine_mask_ca (comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *mask;
 
@@ -43,7 +43,7 @@ combine_mask_c (comp4_t *src, comp4_t *mask)
 }
 
 static void
-combine_mask_value_c (comp4_t *src, const comp4_t *mask)
+combine_mask_value_ca (comp4_t *src, const comp4_t *mask)
 {
     comp4_t a = *mask;
     comp4_t	x;
@@ -63,7 +63,7 @@ combine_mask_value_c (comp4_t *src, const comp4_t *mask)
 }
 
 static void
-combine_mask_alpha_c (const comp4_t *src, comp4_t *mask)
+combine_mask_alpha_ca (const comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *(mask);
     comp4_t	x;
@@ -380,7 +380,7 @@ combine_multiply_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_multiply_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_multiply_ca (pixman_implementation_t *imp, pixman_op_t op,
                     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -391,7 +391,7 @@ combine_multiply_c (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t r = d;
 	comp4_t dest_ia = ALPHA_c (~d);
 
-	combine_mask_value_c (&s, &m);
+	combine_mask_value_ca (&s, &m);
 
 	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (r, ~m, s, dest_ia);
 	UNcx4_MUL_UNcx4 (d, s);
@@ -428,7 +428,7 @@ combine_ ## name ## _u (pixman_implementation_t *imp, pixman_op_t op, \
 }						    \
 						    \
 static void				    \
-combine_ ## name ## _c (pixman_implementation_t *imp, pixman_op_t op, \
+combine_ ## name ## _ca (pixman_implementation_t *imp, pixman_op_t op, \
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
 {						    \
     int i;					    \
@@ -440,7 +440,7 @@ combine_ ## name ## _c (pixman_implementation_t *imp, pixman_op_t op, \
 	comp1_t ida = ~da;			    \
 	comp4_t result;				    \
 						    \
-	combine_mask_value_c (&s, &m);		    \
+	combine_mask_value_ca (&s, &m);		    \
 						    \
 	result = d;				    \
 	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (result, ~m, s, ida);	    \
@@ -1305,14 +1305,14 @@ combine_conjoint_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 /********************************************************************************/
 
 static void
-combine_clear_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_clear_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     memset(dest, 0, width*sizeof(comp4_t));
 }
 
 static void
-combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_src_ca (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1321,14 +1321,14 @@ combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t s = *(src + i);
 	comp4_t m = *(mask + i);
 
-	combine_mask_value_c (&s, &m);
+	combine_mask_value_ca (&s, &m);
 
 	*(dest) = s;
     }
 }
 
 static void
-combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_over_ca (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1338,7 +1338,7 @@ combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	combine_mask_c (&s, &m);
+	combine_mask_ca (&s, &m);
 
 	a = ~m;
         if (a != ~0)
@@ -1355,7 +1355,7 @@ combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1369,7 +1369,7 @@ combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
             comp4_t s = *(src + i);
 	    comp4_t m = *(mask + i);
 
-	    combine_mask_value_c (&s, &m);
+	    combine_mask_value_ca (&s, &m);
 
             if (a != MASK)
             {
@@ -1381,7 +1381,7 @@ combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_in_ca (pixman_implementation_t *imp, pixman_op_t op,
 	      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1395,7 +1395,7 @@ combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 	    comp4_t m = *(mask + i);
 
 	    s = *(src + i);
-	    combine_mask_value_c (&s, &m);
+	    combine_mask_value_ca (&s, &m);
             if (a != MASK)
             {
                 UNcx4_MUL_UNc(s, a);
@@ -1406,7 +1406,7 @@ combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1416,7 +1416,7 @@ combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t a;
 
-	combine_mask_alpha_c (&s, &m);
+	combine_mask_alpha_ca (&s, &m);
 
 	a = m;
         if (a != ~0)
@@ -1433,7 +1433,7 @@ combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_out_ca (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1447,7 +1447,7 @@ combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 	    comp4_t m = *(mask + i);
 
 	    s = *(src + i);
-	    combine_mask_value_c (&s, &m);
+	    combine_mask_value_ca (&s, &m);
 
             if (a != MASK)
             {
@@ -1459,7 +1459,7 @@ combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1469,7 +1469,7 @@ combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	combine_mask_alpha_c (&s, &m);
+	combine_mask_alpha_ca (&s, &m);
 
         a = ~m;
         if (a != ~0)
@@ -1486,7 +1486,7 @@ combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_atop_ca (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1498,7 +1498,7 @@ combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = d >> A_SHIFT;
 
-	combine_mask_c (&s, &m);
+	combine_mask_ca (&s, &m);
 
         ad = ~m;
 
@@ -1508,7 +1508,7 @@ combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1521,7 +1521,7 @@ combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = ~d >> A_SHIFT;
 
-	combine_mask_c (&s, &m);
+	combine_mask_ca (&s, &m);
 
 	ad = m;
 
@@ -1531,7 +1531,7 @@ combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_xor_ca (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1543,7 +1543,7 @@ combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = ~d >> A_SHIFT;
 
-	combine_mask_c (&s, &m);
+	combine_mask_ca (&s, &m);
 
 	ad = ~m;
 
@@ -1553,7 +1553,7 @@ combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_add_ca (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1563,7 +1563,7 @@ combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t d = *(dest + i);
 
-	combine_mask_value_c (&s, &m);
+	combine_mask_value_ca (&s, &m);
 
         UNcx4_ADD_UNcx4(d, s);
 	*(dest + i) = d;
@@ -1571,7 +1571,7 @@ combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_saturate_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_saturate_ca (pixman_implementation_t *imp, pixman_op_t op,
 		    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1586,7 +1586,7 @@ combine_saturate_c (pixman_implementation_t *imp, pixman_op_t op,
         s = *(src + i);
 	m = *(mask + i);
 
-	combine_mask_c (&s, &m);
+	combine_mask_ca (&s, &m);
 
         sa = (m >> A_SHIFT);
         sr = (m >> R_SHIFT) & MASK;
@@ -1619,7 +1619,7 @@ combine_saturate_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combine_disjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combine_disjoint_general_ca (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
 
@@ -1636,7 +1636,7 @@ combine_disjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *ma
         d = *(dest + i);
         da = d >> A_SHIFT;
 
-	combine_mask_c (&s, &m);
+	combine_mask_ca (&s, &m);
 
 	sa = m;
 
@@ -1695,63 +1695,63 @@ combine_disjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *ma
 }
 
 static void
-combine_disjoint_over_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_over_ca (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_OVER);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 static void
-combine_disjoint_in_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_in_ca (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_IN);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
-combine_disjoint_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_B_IN);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
-combine_disjoint_out_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_out_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_OUT);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
-combine_disjoint_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_B_OUT);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
-combine_disjoint_atop_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_atop_ca (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_ATOP);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
-combine_disjoint_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_B_ATOP);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
-combine_disjoint_xor_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_xor_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_disjoint_general_c (dest, src, mask, width, COMBINE_XOR);
+    combine_disjoint_general_ca (dest, src, mask, width, COMBINE_XOR);
 }
 
 static void
-combine_conjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combine_conjoint_general_ca (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
 
@@ -1768,7 +1768,7 @@ combine_conjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *ma
         d = *(dest + i);
         da = d >> A_SHIFT;
 
-	combine_mask_c (&s, &m);
+	combine_mask_ca (&s, &m);
 
         sa = m;
 
@@ -1827,66 +1827,66 @@ combine_conjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *ma
 }
 
 static void
-combine_conjoint_over_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_over_ca (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_OVER);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 static void
-combine_conjoint_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_OVER);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_OVER);
 }
 
 static void
-combine_conjoint_in_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_in_ca (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_IN);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
-combine_conjoint_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_IN);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
-combine_conjoint_out_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_out_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_OUT);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
-combine_conjoint_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_OUT);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
-combine_conjoint_atop_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_atop_ca (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_ATOP);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
-combine_conjoint_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_ATOP);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
-combine_conjoint_xor_c (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_xor_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combine_conjoint_general_c (dest, src, mask, width, COMBINE_XOR);
+    combine_conjoint_general_ca (dest, src, mask, width, COMBINE_XOR);
 }
 
 void
@@ -1953,60 +1953,60 @@ _pixman_setup_combiner_functions_width (pixman_implementation_t *imp)
     imp->combine_width[PIXMAN_OP_HSL_LUMINOSITY] = combine_hsl_luminosity_u;
 
     /* Component alpha combiners */
-    imp->combine_width_ca[PIXMAN_OP_CLEAR] = combine_clear_c;
-    imp->combine_width_ca[PIXMAN_OP_SRC] = combine_src_c;
+    imp->combine_width_ca[PIXMAN_OP_CLEAR] = combine_clear_ca;
+    imp->combine_width_ca[PIXMAN_OP_SRC] = combine_src_ca;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_OVER] = combine_over_c;
-    imp->combine_width_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_IN] = combine_in_c;
-    imp->combine_width_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_OUT] = combine_out_c;
-    imp->combine_width_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_ATOP] = combine_atop_c;
-    imp->combine_width_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_XOR] = combine_xor_c;
-    imp->combine_width_ca[PIXMAN_OP_ADD] = combine_add_c;
-    imp->combine_width_ca[PIXMAN_OP_SATURATE] = combine_saturate_c;
+    imp->combine_width_ca[PIXMAN_OP_OVER] = combine_over_ca;
+    imp->combine_width_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_IN] = combine_in_ca;
+    imp->combine_width_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_OUT] = combine_out_ca;
+    imp->combine_width_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_ATOP] = combine_atop_ca;
+    imp->combine_width_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_XOR] = combine_xor_ca;
+    imp->combine_width_ca[PIXMAN_OP_ADD] = combine_add_ca;
+    imp->combine_width_ca[PIXMAN_OP_SATURATE] = combine_saturate_ca;
 
     /* Disjoint CA */
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_SRC] = combine_src_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_SRC] = combine_src_ca;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_ca;
 
     /* Conjoint CA */
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_SRC] = combine_src_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_SRC] = combine_src_ca;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_c;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_c;
-
-    imp->combine_width_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_c;
-    imp->combine_width_ca[PIXMAN_OP_SCREEN] = combine_screen_c;
-    imp->combine_width_ca[PIXMAN_OP_OVERLAY] = combine_overlay_c;
-    imp->combine_width_ca[PIXMAN_OP_DARKEN] = combine_darken_c;
-    imp->combine_width_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_c;
-    imp->combine_width_ca[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_c;
-    imp->combine_width_ca[PIXMAN_OP_COLOR_BURN] = combine_color_burn_c;
-    imp->combine_width_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_c;
-    imp->combine_width_ca[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_c;
-    imp->combine_width_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_c;
-    imp->combine_width_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_ca;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_ca;
+
+    imp->combine_width_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_ca;
+    imp->combine_width_ca[PIXMAN_OP_SCREEN] = combine_screen_ca;
+    imp->combine_width_ca[PIXMAN_OP_OVERLAY] = combine_overlay_ca;
+    imp->combine_width_ca[PIXMAN_OP_DARKEN] = combine_darken_ca;
+    imp->combine_width_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_ca;
+    imp->combine_width_ca[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_ca;
+    imp->combine_width_ca[PIXMAN_OP_COLOR_BURN] = combine_color_burn_ca;
+    imp->combine_width_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_ca;
+    imp->combine_width_ca[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_ca;
+    imp->combine_width_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_ca;
+    imp->combine_width_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_ca;
     /* It is not clear that these make sense, so leave them out for now */
     imp->combine_width_ca[PIXMAN_OP_HSL_HUE] = NULL;
     imp->combine_width_ca[PIXMAN_OP_HSL_SATURATION] = NULL;
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index 6a62b44..fbc7322 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -688,7 +688,7 @@ mmx_combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
 
 
 static void
-mmx_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_src_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -705,7 +705,7 @@ mmx_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_over_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -725,7 +725,7 @@ mmx_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -746,7 +746,7 @@ mmx_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 
 
 static void
-mmx_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_in_ca (pixman_implementation_t *imp, pixman_op_t op,
 	       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -766,7 +766,7 @@ mmx_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -786,7 +786,7 @@ mmx_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_out_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -807,7 +807,7 @@ mmx_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -828,7 +828,7 @@ mmx_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_atop_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -851,7 +851,7 @@ mmx_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -874,7 +874,7 @@ mmx_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_xor_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -898,7 +898,7 @@ mmx_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmx_combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_add_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -3187,17 +3187,17 @@ _pixman_implementation_create_mmx (void)
     imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u;
     imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u;
     
-    imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_c;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_c;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_c;
+    imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_ca;
+    imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_ca;
+    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_ca;
+    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_ca;
+    imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_ca;
 
     imp->composite = mmx_composite;
     imp->blt = mmx_blt;
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index dd068ab..abb6f46 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -1431,7 +1431,7 @@ core_combine_saturate_u_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *
 }
 
 static force_inline void
-core_combine_src_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
+core_combine_src_ca_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
 {
     uint32_t s, m;
 
@@ -1490,7 +1490,7 @@ core_combine_src_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, i
 }
 
 static force_inline uint32_t
-core_combine_over_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_over_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 s = unpack_32_1x64 (src);
     __m64 expAlpha = expand_alpha_1x64 (s);
@@ -1501,7 +1501,7 @@ core_combine_over_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 }
 
 static force_inline void
-core_combine_over_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
+core_combine_over_ca_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -1521,7 +1521,7 @@ core_combine_over_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm,
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_over_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
         w--;
     }
 
@@ -1563,13 +1563,13 @@ core_combine_over_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm,
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_over_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_over_ca_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline uint32_t
-core_combine_over_reverse_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_over_reverse_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 d = unpack_32_1x64 (dst);
 
@@ -1577,7 +1577,7 @@ core_combine_over_reverse_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t ds
 }
 
 static force_inline void
-core_combine_over_reverse_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
+core_combine_over_reverse_ca_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -1597,7 +1597,7 @@ core_combine_over_reverse_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_over_reverse_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
         w--;
     }
 
@@ -1640,13 +1640,13 @@ core_combine_over_reverse_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_over_reverse_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_over_reverse_ca_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline void
-core_combine_in_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_in_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -1717,7 +1717,7 @@ core_combine_in_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, in
 }
 
 static force_inline void
-core_combine_in_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_in_reverse_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -1790,7 +1790,7 @@ core_combine_in_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t
 }
 
 static force_inline void
-core_combine_out_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_out_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -1862,7 +1862,7 @@ core_combine_out_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, i
 }
 
 static force_inline void
-core_combine_out_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_out_reverse_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -1938,7 +1938,7 @@ core_combine_out_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_
 }
 
 static force_inline uint32_t
-core_combine_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_atop_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 m = unpack_32_1x64 (mask);
     __m64 s = unpack_32_1x64 (src);
@@ -1953,7 +1953,7 @@ core_combine_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 }
 
 static force_inline void
-core_combine_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_atop_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -1974,7 +1974,7 @@ core_combine_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm,
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_atop_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
         w--;
     }
 
@@ -2024,13 +2024,13 @@ core_combine_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm,
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_atop_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_atop_ca_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline uint32_t
-core_combine_reverse_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_reverse_atop_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 m = unpack_32_1x64 (mask);
     __m64 s = unpack_32_1x64 (src);
@@ -2046,7 +2046,7 @@ core_combine_reverse_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t ds
 }
 
 static force_inline void
-core_combine_reverse_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_reverse_atop_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -2067,7 +2067,7 @@ core_combine_reverse_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_reverse_atop_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
         w--;
     }
 
@@ -2117,13 +2117,13 @@ core_combine_reverse_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_reverse_atop_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_reverse_atop_ca_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline uint32_t
-core_combine_xor_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_xor_ca_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 a = unpack_32_1x64 (mask);
     __m64 s = unpack_32_1x64 (src);
@@ -2140,7 +2140,7 @@ core_combine_xor_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 }
 
 static force_inline void
-core_combine_xor_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_xor_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -2161,7 +2161,7 @@ core_combine_xor_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, i
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_xor_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
         w--;
     }
 
@@ -2212,13 +2212,13 @@ core_combine_xor_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, i
         m = *pm++;
         d = *pd;
 
-        *pd++ = core_combine_xor_c_pixel_sse2 (s, m, d);
+        *pd++ = core_combine_xor_ca_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline void
-core_combine_add_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_add_ca_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
@@ -2405,90 +2405,90 @@ sse2_combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_src_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_src_c_sse2 (dst, src, mask, width);
+    core_combine_src_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_over_ca (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_over_c_sse2 (dst, src, mask, width);
+    core_combine_over_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_over_reverse_c_sse2 (dst, src, mask, width);
+    core_combine_over_reverse_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_in_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_in_c_sse2 (dst, src, mask, width);
+    core_combine_in_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_in_reverse_c_sse2 (dst, src, mask, width);
+    core_combine_in_reverse_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_out_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_out_c_sse2 (dst, src, mask, width);
+    core_combine_out_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_out_reverse_c_sse2 (dst, src, mask, width);
+    core_combine_out_reverse_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_atop_ca (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_atop_c_sse2 (dst, src, mask, width);
+    core_combine_atop_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_reverse_atop_c_sse2 (dst, src, mask, width);
+    core_combine_reverse_atop_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_xor_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_xor_c_sse2 (dst, src, mask, width);
+    core_combine_xor_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2_combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_add_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    core_combine_add_c_sse2 (dst, src, mask, width);
+    core_combine_add_ca_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
@@ -5099,17 +5099,17 @@ _pixman_implementation_create_sse2 (void)
     
     imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u;
     
-    imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_c;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_c;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_c;
+    imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_ca;
+    imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_ca;
+    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_ca;
+    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_ca;
+    imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_ca;
     
     imp->composite = sse2_composite;
     imp->blt = sse2_blt;
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index bc0c0b8..359afbe 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -1074,7 +1074,7 @@ vmx_combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_src_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1106,7 +1106,7 @@ vmx_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_over_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1140,7 +1140,7 @@ vmx_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_over_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1175,7 +1175,7 @@ vmx_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_in_ca (pixman_implementation_t *imp, pixman_op_t op,
 	       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1210,7 +1210,7 @@ vmx_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_in_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1245,7 +1245,7 @@ vmx_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_out_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1281,7 +1281,7 @@ vmx_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_out_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1318,7 +1318,7 @@ vmx_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_atop_ca (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1360,7 +1360,7 @@ vmx_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_atop_reverse_ca (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1402,7 +1402,7 @@ vmx_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_xor_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1444,7 +1444,7 @@ vmx_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmx_combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_add_ca (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1578,17 +1578,17 @@ _pixman_implementation_create_vmx (void)
 
     imp->combine_32[PIXMAN_OP_ADD] = vmx_combine_add_u;
 
-    imp->combine_32_ca[PIXMAN_OP_SRC] = vmx_combine_src_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = vmx_combine_over_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_IN] = vmx_combine_in_c;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = vmx_combine_out_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = vmx_combine_atop_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = vmx_combine_xor_c;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = vmx_combine_add_c;
+    imp->combine_32_ca[PIXMAN_OP_SRC] = vmx_combine_src_ca;
+    imp->combine_32_ca[PIXMAN_OP_OVER] = vmx_combine_over_ca;
+    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN] = vmx_combine_in_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT] = vmx_combine_out_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_ATOP] = vmx_combine_atop_ca;
+    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_XOR] = vmx_combine_xor_ca;
+    imp->combine_32_ca[PIXMAN_OP_ADD] = vmx_combine_add_ca;
     
     return imp;
 }
commit 3c03990ba214bff000d3494587353b94f9432453
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 01:42:28 2009 -0400

    Various sse2 renamings

diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index 6949bc9..dd068ab 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -84,7 +84,7 @@ unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi)
 }
 
 static force_inline __m128i
-unpack_565to8888 (__m128i lo)
+unpack_565_to_8888 (__m128i lo)
 {
     __m128i r, g, b, rb, t;
     
@@ -112,8 +112,8 @@ unpack_565_128_4x128 (__m128i data, __m128i* data0, __m128i* data1, __m128i* dat
     lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
     hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());
 
-    lo = unpack_565to8888 (lo);
-    hi = unpack_565to8888 (hi);
+    lo = unpack_565_to_8888 (lo);
+    hi = unpack_565_to_8888 (hi);
 
     unpack_128_2x128 (lo, data0, data1);
     unpack_128_2x128 (hi, data2, data3);
@@ -329,7 +329,7 @@ load_128_unaligned (const __m128i* src)
 
 /* save 4 pixels using Write Combining memory on a 16-byte boundary aligned address */
 static force_inline void
-save128write_combining (__m128i* dst, __m128i data)
+save_128_write_combining (__m128i* dst, __m128i data)
 {
     _mm_stream_si128 (dst, data);
 }
commit 9d0be1d4c81153ef2407518f605bc55380485955
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 01:38:10 2009 -0400

    s/sse2combine/sse2_combine/g
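
The commit message above is itself the Perl substitution that performs the rename. As a hypothetical sketch only (the exact invocation is not recorded in this log), the same rename could be applied in place with a one-liner along these lines:

    # Sketch: in-place rename over the file touched by this commit.
    perl -pi -e 's/sse2combine/sse2_combine/g' pixman/pixman-sse2.c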

diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index c739cd8..6949bc9 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -2317,7 +2317,7 @@ create_mask_2x32_128 (uint32_t mask0, uint32_t mask1)
 /* SSE2 code patch for fbcompose.c */
 
 static void
-sse2combine_over_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_over_u (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_over_u_sse2 (dst, src, mask, width);
@@ -2325,7 +2325,7 @@ sse2combine_over_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_over_reverse_u_sse2 (dst, src, mask, width);
@@ -2333,7 +2333,7 @@ sse2combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_in_u_sse2 (dst, src, mask, width);
@@ -2341,7 +2341,7 @@ sse2combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_reverse_in_u_sse2 (dst, src, mask, width);
@@ -2349,7 +2349,7 @@ sse2combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_out_u_sse2 (dst, src, mask, width);
@@ -2357,7 +2357,7 @@ sse2combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_reverse_out_u_sse2 (dst, src, mask, width);
@@ -2365,7 +2365,7 @@ sse2combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_atop_u_sse2 (dst, src, mask, width);
@@ -2373,7 +2373,7 @@ sse2combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_reverse_atop_u_sse2 (dst, src, mask, width);
@@ -2381,7 +2381,7 @@ sse2combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_xor_u_sse2 (dst, src, mask, width);
@@ -2389,7 +2389,7 @@ sse2combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_add_u_sse2 (dst, src, mask, width);
@@ -2397,7 +2397,7 @@ sse2combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_saturate_u_sse2 (dst, src, mask, width);
@@ -2405,7 +2405,7 @@ sse2combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_src_c_sse2 (dst, src, mask, width);
@@ -2413,7 +2413,7 @@ sse2combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_over_c_sse2 (dst, src, mask, width);
@@ -2421,7 +2421,7 @@ sse2combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_over_reverse_c_sse2 (dst, src, mask, width);
@@ -2429,7 +2429,7 @@ sse2combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_in_c_sse2 (dst, src, mask, width);
@@ -2437,7 +2437,7 @@ sse2combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_in_reverse_c_sse2 (dst, src, mask, width);
@@ -2445,7 +2445,7 @@ sse2combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_out_c_sse2 (dst, src, mask, width);
@@ -2453,7 +2453,7 @@ sse2combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_out_reverse_c_sse2 (dst, src, mask, width);
@@ -2461,7 +2461,7 @@ sse2combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_atop_c_sse2 (dst, src, mask, width);
@@ -2469,7 +2469,7 @@ sse2combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_reverse_atop_c_sse2 (dst, src, mask, width);
@@ -2477,7 +2477,7 @@ sse2combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_xor_c_sse2 (dst, src, mask, width);
@@ -2485,7 +2485,7 @@ sse2combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-sse2combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
+sse2_combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
     core_combine_add_c_sse2 (dst, src, mask, width);
@@ -5086,30 +5086,30 @@ _pixman_implementation_create_sse2 (void)
     /* Set up function pointers */
     
     /* SSE code patch for fbcompose.c */
-    imp->combine_32[PIXMAN_OP_OVER] = sse2combine_over_u;
-    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2combine_over_reverse_u;
-    imp->combine_32[PIXMAN_OP_IN] = sse2combine_in_u;
-    imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2combine_in_reverse_u;
-    imp->combine_32[PIXMAN_OP_OUT] = sse2combine_out_u;
-    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2combine_out_reverse_u;
-    imp->combine_32[PIXMAN_OP_ATOP] = sse2combine_atop_u;
-    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2combine_atop_reverse_u;
-    imp->combine_32[PIXMAN_OP_XOR] = sse2combine_xor_u;
-    imp->combine_32[PIXMAN_OP_ADD] = sse2combine_add_u;
+    imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u;
+    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_u;
+    imp->combine_32[PIXMAN_OP_IN] = sse2_combine_in_u;
+    imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_u;
+    imp->combine_32[PIXMAN_OP_OUT] = sse2_combine_out_u;
+    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_u;
+    imp->combine_32[PIXMAN_OP_ATOP] = sse2_combine_atop_u;
+    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_u;
+    imp->combine_32[PIXMAN_OP_XOR] = sse2_combine_xor_u;
+    imp->combine_32[PIXMAN_OP_ADD] = sse2_combine_add_u;
     
-    imp->combine_32[PIXMAN_OP_SATURATE] = sse2combine_saturate_u;
+    imp->combine_32[PIXMAN_OP_SATURATE] = sse2_combine_saturate_u;
     
-    imp->combine_32_ca[PIXMAN_OP_SRC] = sse2combine_src_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = sse2combine_over_c;
-    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2combine_over_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_IN] = sse2combine_in_c;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2combine_in_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = sse2combine_out_c;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2combine_out_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2combine_atop_c;
-    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2combine_atop_reverse_c;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = sse2combine_xor_c;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = sse2combine_add_c;
+    imp->combine_32_ca[PIXMAN_OP_SRC] = sse2_combine_src_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER] = sse2_combine_over_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2_combine_over_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_IN] = sse2_combine_in_c;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2_combine_in_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT] = sse2_combine_out_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2_combine_out_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2_combine_atop_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2_combine_atop_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_XOR] = sse2_combine_xor_c;
+    imp->combine_32_ca[PIXMAN_OP_ADD] = sse2_combine_add_c;
     
     imp->composite = sse2_composite;
     imp->blt = sse2_blt;
commit a98b71eff4041df58c9dcc2b1e25cefa38f364ff
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 01:35:14 2009 -0400

    Convert CamelCase names to underscore_names.
    
    s/sizeRI/size_ri/g;
    s/numRI/num_ri/g;
    s/RepeatNone/REPEAT_NONE/g;
    s/fbOver/over/g;
    s/fbIn/in/g;
    s/iSrc/src_image/g;
    s/iMask/mask_image/g;
    s/iDst/dest_image/g;
    s/SaDa/Sa.Da/g;
    s/FbMaskBits/MASK_BITS/g;
    s/RenderSamplesX/RENDER_SAMPLES_X/g;
    s/MMXData/mmx_data_t/g;
    s/RegionInfo/region_info_t/g;
    
    s/([^0x])([a-z])([A-Z])/$1$2_\l$3/g;
    s/([^0x])([A-Z])([A-Z])([a-z])/$1$2_\l$3$4/g;
    s/([^0x])([A-Z])([a-z]+)_([a-z])/$1\l$2$3_$4/g;
    s/([a-z])_([A-Z])/$1_\l$2/g;
    
    s/su_sE/SuSE/g;
    s/X_Free86/XFree86/g;
    s/X_free86/XFree86/g;
    
    s/_ULL/ULL/g;
    s/_uLL/ULL/g;
    
    s/U_nc/UNc/g;
    s/combine ##/combine_ ##/g;
    s/## U/## _u/g;
    s/## C/## _c/g;
    s/UNc_aDD/UNc_ADD/g;
    
    s/BLEND_MODE \((.+)\)/BLEND_MODE (\l$1)/g;
    s/blend_(.+)/blend_\l$1/g;
    
    s/AN_ds/ANDs/g;
    s/O_rs/ORs/g;
    s/over565/over_565/g;
    s/8pix/8_pix/g;
    s/Over565/over_565/g;
    s/inU/in_u/g;
    s/inPart/in_part/g;
    s/inC/in_c/g;
    s/inreverse/in_reverse/g;
    s/get_exception_code/GetExceptionCode/g; # GetExceptionCode is WinCE API
    s/CP_us/CPUs/g;
    s/authentic_aMD/AuthenticAMD/g;
    s/op_sR_cx_mAS_kx_dST/op_src_mask_dest/g;
    s/no_VERBOSE/noVERBOSE/g;
    s/mc_cormack/McCormack/g;
    s/r1band/r1_band/g;
    s/r2band/r2_band/g;
    s/as GOOD things/as good things/g;
    s/brokendata/broken_data/g;
    s/X_render/XRender/g;
    s/__open_bSD__/__OpenBSD__/g;
    s/^Quick/quick/g;
    s/NextRect/next_rect/g;
    s/RectIn/rect_in/g;
    s/pboxout/pbox_out/g;
    s/F_sorted/FSorted/g;
    s/usse2/u_sse2/g;
    s/csse2/c_sse2/g;
    s/cPixelsse2/c_pixel_sse2/g;
    s/Mask565/mask_565/g;
    s/565fix_rB/565_fix_rb/g;
    s/565fix_g/565_fix_g/g;
    s/565r/565_r/g;
    s/565g/565_g/g;
    s/565b/565_b/g;
    s/uPixelsse2/u_pixel_sse2/g;
    s/Mask00ff/mask_00ff/g;
    s/Mask0080/mask_0080/g;
    s/Mask0101/mask_0101/g;
    s/Maskffff/mask_ffff/g;
    s/Maskff000000/mask_ff000000/g;
    s/load128Aligned/load_128_aligned/g;
    s/load128Unaligned/load_128_unaligned/g;
    s/save128Aligned/save_128_aligned/g;
    s/save128Unaligned/save_128_unaligned/g;
    s/fillsse2/fill_sse2/g;
    s/unpack565/unpack_565/g;
    s/pack565/pack_565/g;
    s/bltsse2/blt_sse2/g;
    s/x565Unpack/x565_unpack/g;
    s/r1End/r1_end/g;
    s/r2End/r2_end/g;
    s/argb8Pixels/argb8_pixels/g;
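
Read together, the rules above form a Perl substitution script: a handful of explicit renames, generic patterns that insert an underscore before an uppercase letter and lowercase it (\l), and fix-ups for names the generic patterns get wrong (SuSE, XFree86, and so on). As a hypothetical sketch only (the actual command and file list are not recorded in this log; the file globs below are an assumption, and the diffs that follow show which files were actually touched), the rules could be applied in place along these lines:

    # Sketch: apply the CamelCase -> underscore_names rules in place.
    # The file globs are illustrative only.
    perl -pi -e '
        s/sizeRI/size_ri/g;
        s/numRI/num_ri/g;
        s/([^0x])([a-z])([A-Z])/$1$2_\l$3/g;
        s/([^0x])([A-Z])([A-Z])([a-z])/$1$2_\l$3$4/g;
        # ... followed by the remaining fix-up rules listed above ...
    ' pixman/*.c pixman/*.h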

diff --git a/pixman/pixman-access.c b/pixman/pixman-access.c
index 47f4c52..4a4df07 100644
--- a/pixman/pixman-access.c
+++ b/pixman/pixman-access.c
@@ -2638,22 +2638,22 @@ store_scanline_g1 (bits_image_t *image,
 static void
 store_scanline_generic_64 (bits_image_t *image, int x, int y, int width, const uint32_t *values)
 {
-    uint32_t *argb8Pixels;
+    uint32_t *argb8_pixels;
 
     assert(image->common.type == BITS);
 
-    argb8Pixels = pixman_malloc_ab (width, sizeof(uint32_t));
-    if (!argb8Pixels)
+    argb8_pixels = pixman_malloc_ab (width, sizeof(uint32_t));
+    if (!argb8_pixels)
 	return;
 
     /* Contract the scanline.  We could do this in place if values weren't
      * const.
      */
-    pixman_contract(argb8Pixels, (uint64_t *)values, width);
+    pixman_contract(argb8_pixels, (uint64_t *)values, width);
     
-    image->store_scanline_raw_32 (image, x, y, width, argb8Pixels);
+    image->store_scanline_raw_32 (image, x, y, width, argb8_pixels);
 
-    free(argb8Pixels);
+    free(argb8_pixels);
 }
 
 /* Despite the type, this function expects both buffer and mask to be uint64_t */
diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index f88785c..eeecede 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -124,7 +124,7 @@ static force_inline uint8x8x4_t neon8qadd(uint8x8x4_t x, uint8x8x4_t y)
 
 
 static void
-neon_CompositeAdd_8000_8000 (
+neon_composite_add_8000_8000 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
                                 pixman_image_t * src_image,
@@ -139,23 +139,23 @@ neon_CompositeAdd_8000_8000 (
                                 int32_t      width,
                                 int32_t      height)
 {
-    uint8_t     *dstLine, *dst;
-    uint8_t     *srcLine, *src;
-    int dstStride, srcStride;
+    uint8_t     *dst_line, *dst;
+    uint8_t     *src_line, *src;
+    int dst_stride, src_stride;
     uint16_t    w;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
 
     if (width>=8)
     {
         // Use overlapping 8-pixel method
         while (height--)
         {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            src = src_line;
+            src_line += src_stride;
             w = width;
 
             uint8_t *keep_dst=0;
@@ -230,10 +230,10 @@ neon_CompositeAdd_8000_8000 (
 
         while (height--)
         {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            src = src_line;
+            src_line += src_stride;
             w = width;
             uint8x8_t sval=vnil, dval=vnil;
             uint8_t *dst4=0, *dst2=0;
@@ -289,23 +289,23 @@ neon_composite_over_8888_8888 (
 			 int32_t      width,
 			 int32_t      height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint32_t	w;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     if (width>=8)
     {
         // Use overlapping 8-pixel method  
         while (height--)
         {
-	    dst = dstLine;
-	    dstLine += dstStride;
-	    src = srcLine;
-	    srcLine += srcStride;
+	    dst = dst_line;
+	    dst_line += dst_stride;
+	    src = src_line;
+	    src_line += src_stride;
 	    w = width;
 
             uint32_t *keep_dst=0;
@@ -399,10 +399,10 @@ neon_composite_over_8888_8888 (
         // Handle width<8
         while (height--)
         {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            src = src_line;
+            src_line += src_stride;
             w = width;
 
             while (w>=2)
@@ -450,15 +450,15 @@ neon_composite_over_8888_n_8888 (
 			       int32_t      width,
 			       int32_t      height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
     uint32_t	mask;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint32_t	w;
     uint8x8_t mask_alpha;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask_alpha = vdup_n_u8((mask) >> 24);
@@ -468,10 +468,10 @@ neon_composite_over_8888_n_8888 (
         // Use overlapping 8-pixel method
         while (height--)
         {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            src = src_line;
+            src_line += src_stride;
             w = width;
 
             uint32_t *keep_dst=0;
@@ -585,10 +585,10 @@ neon_composite_over_8888_n_8888 (
         // Handle width<8
         while (height--)
         {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            src = src_line;
+            src_line += src_stride;
             w = width;
 
             while (w>=2)
@@ -632,7 +632,7 @@ neon_composite_over_8888_n_8888 (
 
 
 static void
-neon_CompositeOver_n_8_8888 (
+neon_composite_over_n_8_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t      op,
 			       pixman_image_t * src_image,
@@ -648,9 +648,9 @@ neon_CompositeOver_n_8_8888 (
 			       int32_t      height)
 {
     uint32_t	 src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int		 dstStride, maskStride;
+    uint32_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int		 dst_stride, mask_stride;
     uint32_t	 w;
     uint8x8_t    sval2;
     uint8x8x4_t  sval8;
@@ -670,8 +670,8 @@ neon_CompositeOver_n_8_8888 (
     sval8.val[2]=vdup_lane_u8(sval2,2);
     sval8.val[3]=vdup_lane_u8(sval2,3);
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     if (width>=8)
     {
@@ -680,10 +680,10 @@ neon_CompositeOver_n_8_8888 (
         {
             uint32_t *keep_dst=0;
 
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            mask = mask_line;
+            mask_line += mask_stride;
             w = width;
 
 #ifndef USE_GCC_INLINE_ASM
@@ -799,10 +799,10 @@ neon_CompositeOver_n_8_8888 (
         {
             uint8x8_t alpha;
 
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            mask = mask_line;
+            mask_line += mask_stride;
             w = width;
 
             while (w>=2)
@@ -839,7 +839,7 @@ neon_CompositeOver_n_8_8888 (
 
 
 static void
-neon_CompositeAdd_8888_8_8 (
+neon_composite_add_8888_8_8 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
                             pixman_image_t * src_image,
@@ -854,15 +854,15 @@ neon_CompositeAdd_8888_8_8 (
                             int32_t      width,
                             int32_t      height)
 {
-    uint8_t     *dstLine, *dst;
-    uint8_t     *maskLine, *mask;
-    int dstStride, maskStride;
+    uint8_t     *dst_line, *dst;
+    uint8_t     *mask_line, *mask;
+    int dst_stride, mask_stride;
     uint32_t    w;
     uint32_t    src;
     uint8x8_t   sa;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = vdup_n_u8((src) >> 24);
 
@@ -871,10 +871,10 @@ neon_CompositeAdd_8888_8_8 (
         // Use overlapping 8-pixel method, modified to avoid rewritten dest being reused
         while (height--)
         {
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            mask = mask_line;
+            mask_line += mask_stride;
             w = width;
 
             uint8x8_t mval, dval, res;
@@ -911,10 +911,10 @@ neon_CompositeAdd_8888_8_8 (
         // Use 4/2/1 load/store method to handle 1-7 pixels
         while (height--)
         {
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
+            dst = dst_line;
+            dst_line += dst_stride;
+            mask = mask_line;
+            mask_line += mask_stride;
             w = width;
 
             uint8x8_t mval=sa, dval=sa, res;
@@ -958,7 +958,7 @@ neon_CompositeAdd_8888_8_8 (
 #ifdef USE_GCC_INLINE_ASM
 
 static void
-neon_CompositeSrc_16_16 (
+neon_composite_src_16_16 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * src_image,
@@ -973,19 +973,19 @@ neon_CompositeSrc_16_16 (
 	int32_t      width,
 	int32_t      height)
 {
-	uint16_t    *dstLine, *srcLine;
-	uint32_t     dstStride, srcStride;
+	uint16_t    *dst_line, *src_line;
+	uint32_t     dst_stride, src_stride;
 
 	if(!height || !width)
 		return;
 
 	/* We simply copy 16-bit-aligned pixels from one place to another. */
-	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, srcStride, srcLine, 1);
-	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
 	/* Preload the first input scanline */
 	{
-		uint16_t *srcPtr = srcLine;
+		uint16_t *src_ptr = src_line;
 		uint32_t count = width;
 
 		asm volatile (
@@ -996,15 +996,15 @@ neon_CompositeSrc_16_16 (
 		"	bgt 0b							\n"
 
 		// Clobbered input registers marked as input/outputs
-		: [src] "+r" (srcPtr), [count] "+r" (count)
+		: [src] "+r" (src_ptr), [count] "+r" (count)
 		: // no unclobbered inputs
 		: "cc"
 		);
 	}
 
 	while(height--) {
-		uint16_t *dstPtr = dstLine;
-		uint16_t *srcPtr = srcLine;
+		uint16_t *dst_ptr = dst_line;
+		uint16_t *src_ptr = src_line;
 		uint32_t count = width;
 		uint32_t tmp = 0;
 
@@ -1015,11 +1015,11 @@ neon_CompositeSrc_16_16 (
 		"	cmp       %[count], #64				\n"
 		"	blt 1f    @ skip oversized fragments		\n"
 		"0: @ start with eight quadwords at a time		\n"
-		"	pld       [%[src], %[srcStride], LSL #1]	\n" // preload from next scanline
+		"	pld       [%[src], %[src_stride], LSL #1]	\n" // preload from next scanline
 		"	sub       %[count], %[count], #64		\n"
 		"	vld1.16   {d16,d17,d18,d19}, [%[src]]!		\n"
 		"	vld1.16   {d20,d21,d22,d23}, [%[src]]!		\n"
-		"	pld       [%[src], %[srcStride], LSL #1]	\n" // preload from next scanline
+		"	pld       [%[src], %[src_stride], LSL #1]	\n" // preload from next scanline
 		"	vld1.16   {d24,d25,d26,d27}, [%[src]]!		\n"
 		"	vld1.16   {d28,d29,d30,d31}, [%[src]]!		\n"
 		"	cmp       %[count], #64				\n"
@@ -1033,7 +1033,7 @@ neon_CompositeSrc_16_16 (
 		"1: @ four quadwords					\n"
 		"	tst       %[count], #32				\n"
 		"	beq 2f    @ skip oversized fragment		\n"
-		"	pld       [%[src], %[srcStride], LSL #1]	\n" // preload from next scanline
+		"	pld       [%[src], %[src_stride], LSL #1]	\n" // preload from next scanline
 		"	vld1.16   {d16,d17,d18,d19}, [%[src]]!		\n"
 		"	vld1.16   {d20,d21,d22,d23}, [%[src]]!		\n"
 		"	vst1.16   {d16,d17,d18,d19}, [%[dst]]!		\n"
@@ -1041,7 +1041,7 @@ neon_CompositeSrc_16_16 (
 		"2: @ two quadwords					\n"
 		"	tst       %[count], #16				\n"
 		"	beq 3f    @ skip oversized fragment		\n"
-		"	pld       [%[src], %[srcStride], LSL #1]	\n" // preload from next scanline
+		"	pld       [%[src], %[src_stride], LSL #1]	\n" // preload from next scanline
 		"	vld1.16   {d16,d17,d18,d19}, [%[src]]!		\n"
 		"	vst1.16   {d16,d17,d18,d19}, [%[dst]]!		\n"
 		"3: @ one quadword					\n"
@@ -1067,25 +1067,25 @@ neon_CompositeSrc_16_16 (
 		"7: @ end						\n"
 
 		// Clobbered input registers marked as input/outputs
-		: [dst] "+r" (dstPtr), [src] "+r" (srcPtr), [count] "+r" (count), [tmp] "+r" (tmp)
+		: [dst] "+r" (dst_ptr), [src] "+r" (src_ptr), [count] "+r" (count), [tmp] "+r" (tmp)
 
 		// Unclobbered input
-		: [srcStride] "r" (srcStride)
+		: [src_stride] "r" (src_stride)
 
 		// Clobbered vector registers
 		// NB: these are the quad aliases of the double registers used in the asm
 		: "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", "cc", "memory"
 		);
 
-		srcLine += srcStride;
-		dstLine += dstStride;
+		src_line += src_stride;
+		dst_line += dst_stride;
 	}
 }
 
 #endif /* USE_GCC_INLINE_ASM */
 
 static void
-neon_CompositeSrc_24_16 (
+neon_composite_src_24_16 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * src_image,
@@ -1100,20 +1100,20 @@ neon_CompositeSrc_24_16 (
 	int32_t      width,
 	int32_t      height)
 {
-	uint16_t    *dstLine;
-	uint32_t    *srcLine;
-	uint32_t     dstStride, srcStride;
+	uint16_t    *dst_line;
+	uint32_t    *src_line;
+	uint32_t     dst_stride, src_stride;
 
 	if(!width || !height)
 		return;
 
 	/* We simply copy pixels from one place to another, assuming that the source's alpha is opaque. */
-	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
 	/* Preload the first input scanline */
 	{
-		uint8_t *srcPtr = (uint8_t*) srcLine;
+		uint8_t *src_ptr = (uint8_t*) src_line;
 		uint32_t count = (width + 15) / 16;
 
 #ifdef USE_GCC_INLINE_ASM
@@ -1125,21 +1125,21 @@ neon_CompositeSrc_24_16 (
 		"	bgt 0b						\n"
 
 		// Clobbered input registers marked as input/outputs
-		: [src] "+r" (srcPtr), [count] "+r" (count)
+		: [src] "+r" (src_ptr), [count] "+r" (count)
 		: // no unclobbered inputs
 		: "cc"
 		);
 #else
 		do {
-			__pld(srcPtr);
-			srcPtr += 64;
+			__pld(src_ptr);
+			src_ptr += 64;
 		} while(--count);
 #endif
 	}
 
 	while(height--) {
-		uint16_t *dstPtr = dstLine;
-		uint32_t *srcPtr = srcLine;
+		uint16_t *dst_ptr = dst_line;
+		uint32_t *src_ptr = src_line;
 		uint32_t count = width;
 		const uint32_t rb_mask = 0x1F;
 		const uint32_t g_mask = 0x3F;
@@ -1147,7 +1147,7 @@ neon_CompositeSrc_24_16 (
 		// If you're going to complain about a goto, take a long hard look
 		// at the massive blocks of assembler this skips over.  ;-)
 		if(count < 8)
-			goto smallStuff;
+			goto small_stuff;
 
 #ifdef USE_GCC_INLINE_ASM
 
@@ -1159,7 +1159,7 @@ neon_CompositeSrc_24_16 (
 		"	blt 1f    @ skip oversized fragments								\n"
 		"0: @ start with sixteen pixels at a time								\n"
 		"	sub       %[count], %[count], #16								\n"
-		"	pld      [%[src], %[srcStride], lsl #2]         @ preload from next scanline			\n"
+		"	pld      [%[src], %[src_stride], lsl #2]         @ preload from next scanline			\n"
 		"	vld4.8    {d0,d1,d2,d3}, [%[src]]!		@ d3 is alpha and ignored, d2-0 are rgb.	\n"
 		"	vld4.8    {d4,d5,d6,d7}, [%[src]]!		@ d7 is alpha and ignored, d6-4 are rgb.	\n"
 		"	vshll.u8  q8, d2, #8				@ expand first red for repacking		\n"
@@ -1179,7 +1179,7 @@ neon_CompositeSrc_24_16 (
 		"	cmp       %[count], #8				@ can we still do an 8-pixel block?		\n"
 		"	blt 2f												\n"
 		"	sub       %[count], %[count], #8	\n"
-		"	pld      [%[src], %[srcStride], lsl #2]         @ preload from next scanline			\n"
+		"	pld      [%[src], %[src_stride], lsl #2]         @ preload from next scanline			\n"
 		"	vld4.8    {d0,d1,d2,d3}, [%[src]]!		@ d3 is alpha and ignored, d2-0 are rgb.	\n"
 		"	vshll.u8  q8, d2, #8				@ expand first red for repacking		\n"
 		"	vshll.u8  q10, d1, #8				@ expand first green for repacking		\n"
@@ -1190,10 +1190,10 @@ neon_CompositeSrc_24_16 (
 		"2: @ end												\n"
 
 		// Clobbered input and working registers marked as input/outputs
-		: [dst] "+r" (dstPtr), [src] "+r" (srcPtr), [count] "+r" (count)
+		: [dst] "+r" (dst_ptr), [src] "+r" (src_ptr), [count] "+r" (count)
 
 		// Unclobbered input
-		: [srcStride] "r" (srcStride)
+		: [src_stride] "r" (src_stride)
 
 		// Clobbered vector registers
 		// NB: these are the quad aliases of the double registers used in the asm
@@ -1203,101 +1203,101 @@ neon_CompositeSrc_24_16 (
 		// A copy of the above code, in intrinsics-form.
 		// This should be pretty self-documenting...
 		while(count >= 16) {
-			uint8x8x4_t pixelSetA, pixelSetB;
-			uint16x8_t redA, greenA, blueA;
-			uint16x8_t redB, greenB, blueB;
-			uint16x8_t destPixelsA, destPixelsB;
+			uint8x8x4_t pixel_set_a, pixel_set_b;
+			uint16x8_t red_a, green_a, blue_a;
+			uint16x8_t red_b, green_b, blue_b;
+			uint16x8_t dest_pixels_a, dest_pixels_b;
 
 			count -= 16;
-			__pld(srcPtr + srcStride);
-			pixelSetA = vld4_u8((uint8_t*)(srcPtr));
-			pixelSetB = vld4_u8((uint8_t*)(srcPtr+8));
-			srcPtr += 16;
-
-			redA   = vshll_n_u8(pixelSetA.val[2], 8);
-			greenA = vshll_n_u8(pixelSetA.val[1], 8);
-			blueA  = vshll_n_u8(pixelSetA.val[0], 8);
-			redB   = vshll_n_u8(pixelSetB.val[2], 8);
-			greenB = vshll_n_u8(pixelSetB.val[1], 8);
-			blueB  = vshll_n_u8(pixelSetB.val[0], 8);
-			destPixelsA = vsriq_n_u16(redA, greenA, 5);
-			destPixelsB = vsriq_n_u16(redB, greenB, 5);
-			destPixelsA = vsriq_n_u16(destPixelsA, blueA, 11);
-			destPixelsB = vsriq_n_u16(destPixelsB, blueB, 11);
+			__pld(src_ptr + src_stride);
+			pixel_set_a = vld4_u8((uint8_t*)(src_ptr));
+			pixel_set_b = vld4_u8((uint8_t*)(src_ptr+8));
+			src_ptr += 16;
+
+			red_a   = vshll_n_u8(pixel_set_a.val[2], 8);
+			green_a = vshll_n_u8(pixel_set_a.val[1], 8);
+			blue_a  = vshll_n_u8(pixel_set_a.val[0], 8);
+			red_b   = vshll_n_u8(pixel_set_b.val[2], 8);
+			green_b = vshll_n_u8(pixel_set_b.val[1], 8);
+			blue_b  = vshll_n_u8(pixel_set_b.val[0], 8);
+			dest_pixels_a = vsriq_n_u16(red_a, green_a, 5);
+			dest_pixels_b = vsriq_n_u16(red_b, green_b, 5);
+			dest_pixels_a = vsriq_n_u16(dest_pixels_a, blue_a, 11);
+			dest_pixels_b = vsriq_n_u16(dest_pixels_b, blue_b, 11);
 
 			// There doesn't seem to be an intrinsic for the double-quadword variant
-			vst1q_u16(dstPtr  , destPixelsA);
-			vst1q_u16(dstPtr+8, destPixelsB);
-			dstPtr += 16;
+			vst1q_u16(dst_ptr  , dest_pixels_a);
+			vst1q_u16(dst_ptr+8, dest_pixels_b);
+			dst_ptr += 16;
 		}
 
 		// 8-pixel loop
 		if(count >= 8) {
-			uint8x8x4_t pixelSetA;
-			uint16x8_t redA, greenA, blueA;
-			uint16x8_t destPixelsA;
+			uint8x8x4_t pixel_set_a;
+			uint16x8_t red_a, green_a, blue_a;
+			uint16x8_t dest_pixels_a;
 
-			__pld(srcPtr + srcStride);
+			__pld(src_ptr + src_stride);
 			count -= 8;
-			pixelSetA = vld4_u8((uint8_t*)(srcPtr));
-			srcPtr += 8;
+			pixel_set_a = vld4_u8((uint8_t*)(src_ptr));
+			src_ptr += 8;
 
-			redA   = vshll_n_u8(pixelSetA.val[2], 8);
-			greenA = vshll_n_u8(pixelSetA.val[1], 8);
-			blueA  = vshll_n_u8(pixelSetA.val[0], 8);
-			destPixelsA = vsriq_n_u16(redA, greenA, 5);
-			destPixelsA = vsriq_n_u16(destPixelsA, blueA, 11);
+			red_a   = vshll_n_u8(pixel_set_a.val[2], 8);
+			green_a = vshll_n_u8(pixel_set_a.val[1], 8);
+			blue_a  = vshll_n_u8(pixel_set_a.val[0], 8);
+			dest_pixels_a = vsriq_n_u16(red_a, green_a, 5);
+			dest_pixels_a = vsriq_n_u16(dest_pixels_a, blue_a, 11);
 
-			vst1q_u16(dstPtr  , destPixelsA);
-			dstPtr += 8;
+			vst1q_u16(dst_ptr  , dest_pixels_a);
+			dst_ptr += 8;
 		}
 
 #endif	// USE_GCC_INLINE_ASM
 
-	smallStuff:
+	small_stuff:
 
 		if(count)
-			__pld(srcPtr + srcStride);
+			__pld(src_ptr + src_stride);
 
 		while(count >= 2) {
-			uint32_t srcPixelA = *srcPtr++;
-			uint32_t srcPixelB = *srcPtr++;
+			uint32_t src_pixel_a = *src_ptr++;
+			uint32_t src_pixel_b = *src_ptr++;
 
 			// ARM is really good at shift-then-ALU ops.
 			// This should be a total of six shift-ANDs and five shift-ORs.
-			uint32_t dstPixelsA;
-			uint32_t dstPixelsB;
+			uint32_t dst_pixels_a;
+			uint32_t dst_pixels_b;
 
-			dstPixelsA  = ((srcPixelA >>  3) & rb_mask);
-			dstPixelsA |= ((srcPixelA >> 10) &  g_mask) << 5;
-			dstPixelsA |= ((srcPixelA >> 19) & rb_mask) << 11;
+			dst_pixels_a  = ((src_pixel_a >>  3) & rb_mask);
+			dst_pixels_a |= ((src_pixel_a >> 10) &  g_mask) << 5;
+			dst_pixels_a |= ((src_pixel_a >> 19) & rb_mask) << 11;
 
-			dstPixelsB  = ((srcPixelB >>  3) & rb_mask);
-			dstPixelsB |= ((srcPixelB >> 10) &  g_mask) << 5;
-			dstPixelsB |= ((srcPixelB >> 19) & rb_mask) << 11;
+			dst_pixels_b  = ((src_pixel_b >>  3) & rb_mask);
+			dst_pixels_b |= ((src_pixel_b >> 10) &  g_mask) << 5;
+			dst_pixels_b |= ((src_pixel_b >> 19) & rb_mask) << 11;
 
 			// little-endian mode only
-			*((uint32_t*) dstPtr) = dstPixelsA | (dstPixelsB << 16);
-			dstPtr += 2;
+			*((uint32_t*) dst_ptr) = dst_pixels_a | (dst_pixels_b << 16);
+			dst_ptr += 2;
 			count -= 2;
 		}
 
 		if(count) {
-			uint32_t srcPixel = *srcPtr++;
+			uint32_t src_pixel = *src_ptr++;
 
 			// ARM is really good at shift-then-ALU ops.
 			// This block should end up as three shift-ANDs and two shift-ORs.
-			uint32_t tmpBlue  = (srcPixel >>  3) & rb_mask;
-			uint32_t tmpGreen = (srcPixel >> 10) & g_mask;
-			uint32_t tmpRed   = (srcPixel >> 19) & rb_mask;
-			uint16_t dstPixel = (tmpRed << 11) | (tmpGreen << 5) | tmpBlue;
+			uint32_t tmp_blue  = (src_pixel >>  3) & rb_mask;
+			uint32_t tmp_green = (src_pixel >> 10) & g_mask;
+			uint32_t tmp_red   = (src_pixel >> 19) & rb_mask;
+			uint16_t dst_pixel = (tmp_red << 11) | (tmp_green << 5) | tmp_blue;
 
-			*dstPtr++ = dstPixel;
+			*dst_ptr++ = dst_pixel;
 			count--;
 		}
 
-		srcLine += srcStride;
-		dstLine += dstStride;
+		src_line += src_stride;
+		dst_line += dst_stride;
 	}
 }
 
@@ -1508,10 +1508,10 @@ static inline void neon_quadword_copy(
 	void* dst,
 	void* src,
 	uint32_t count,       // of quadwords
-	uint32_t trailerCount // of bytes
+	uint32_t trailer_count // of bytes
 )
 {
-	uint8_t *tDst = dst, *tSrc = src;
+	uint8_t *t_dst = dst, *t_src = src;
 
 	// Uses aligned multi-register loads to maximise read bandwidth
 	// on uncached memory such as framebuffers
@@ -1556,7 +1556,7 @@ static inline void neon_quadword_copy(
 	"4: @ end										\n"
 
 	// Clobbered input registers marked as input/outputs
-	: [dst] "+r" (tDst), [src] "+r" (tSrc), [count] "+r" (count)
+	: [dst] "+r" (t_dst), [src] "+r" (t_src), [count] "+r" (count)
 
 	// No unclobbered inputs
 	:
@@ -1569,70 +1569,70 @@ static inline void neon_quadword_copy(
 #else
 
 	while(count >= 8) {
-		uint8x16x4_t t1 = vld4q_u8(tSrc);
-		uint8x16x4_t t2 = vld4q_u8(tSrc + sizeof(uint8x16x4_t));
-		tSrc += sizeof(uint8x16x4_t) * 2;
-		vst4q_u8(tDst, t1);
-		vst4q_u8(tDst + sizeof(uint8x16x4_t), t2);
-		tDst += sizeof(uint8x16x4_t) * 2;
+		uint8x16x4_t t1 = vld4q_u8(t_src);
+		uint8x16x4_t t2 = vld4q_u8(t_src + sizeof(uint8x16x4_t));
+		t_src += sizeof(uint8x16x4_t) * 2;
+		vst4q_u8(t_dst, t1);
+		vst4q_u8(t_dst + sizeof(uint8x16x4_t), t2);
+		t_dst += sizeof(uint8x16x4_t) * 2;
 		count -= 8;
 	}
 
 	if(count & 4) {
-		uint8x16x4_t t1 = vld4q_u8(tSrc);
-		tSrc += sizeof(uint8x16x4_t);
-		vst4q_u8(tDst, t1);
-		tDst += sizeof(uint8x16x4_t);
+		uint8x16x4_t t1 = vld4q_u8(t_src);
+		t_src += sizeof(uint8x16x4_t);
+		vst4q_u8(t_dst, t1);
+		t_dst += sizeof(uint8x16x4_t);
 	}
 
 	if(count & 2) {
-		uint8x8x4_t t1 = vld4_u8(tSrc);
-		tSrc += sizeof(uint8x8x4_t);
-		vst4_u8(tDst, t1);
-		tDst += sizeof(uint8x8x4_t);
+		uint8x8x4_t t1 = vld4_u8(t_src);
+		t_src += sizeof(uint8x8x4_t);
+		vst4_u8(t_dst, t1);
+		t_dst += sizeof(uint8x8x4_t);
 	}
 
 	if(count & 1) {
-		uint8x16_t t1 = vld1q_u8(tSrc);
-		tSrc += sizeof(uint8x16_t);
-		vst1q_u8(tDst, t1);
-		tDst += sizeof(uint8x16_t);
+		uint8x16_t t1 = vld1q_u8(t_src);
+		t_src += sizeof(uint8x16_t);
+		vst1q_u8(t_dst, t1);
+		t_dst += sizeof(uint8x16_t);
 	}
 
 #endif  // !USE_GCC_INLINE_ASM
 
-	if(trailerCount) {
-		if(trailerCount & 8) {
-			uint8x8_t t1 = vld1_u8(tSrc);
-			tSrc += sizeof(uint8x8_t);
-			vst1_u8(tDst, t1);
-			tDst += sizeof(uint8x8_t);
+	if(trailer_count) {
+		if(trailer_count & 8) {
+			uint8x8_t t1 = vld1_u8(t_src);
+			t_src += sizeof(uint8x8_t);
+			vst1_u8(t_dst, t1);
+			t_dst += sizeof(uint8x8_t);
 		}
 
-		if(trailerCount & 4) {
-			*((uint32_t*) tDst) = *((uint32_t*) tSrc);
-			tDst += 4;
-			tSrc += 4;
+		if(trailer_count & 4) {
+			*((uint32_t*) t_dst) = *((uint32_t*) t_src);
+			t_dst += 4;
+			t_src += 4;
 		}
 
-		if(trailerCount & 2) {
-			*((uint16_t*) tDst) = *((uint16_t*) tSrc);
-			tDst += 2;
-			tSrc += 2;
+		if(trailer_count & 2) {
+			*((uint16_t*) t_dst) = *((uint16_t*) t_src);
+			t_dst += 2;
+			t_src += 2;
 		}
 
-		if(trailerCount & 1) {
-			*tDst++ = *tSrc++;
+		if(trailer_count & 1) {
+			*t_dst++ = *t_src++;
 		}
 	}
 }
 
-static inline void SolidOver565_8pix_neon(
-	uint32_t  glyphColour,
+static inline void solid_over_565_8_pix_neon(
+	uint32_t  glyph_colour,
 	uint16_t *dest,
-	uint8_t  *inMask,
-	uint32_t  destStride,  // bytes, not elements
-	uint32_t  maskStride,
+	uint8_t  *in_mask,
+	uint32_t  dest_stride,  // bytes, not elements
+	uint32_t  mask_stride,
 	uint32_t  count        // 8-pixel groups
 )
 {
@@ -1641,10 +1641,10 @@ static inline void SolidOver565_8pix_neon(
 #ifdef USE_GCC_INLINE_ASM
 
 	asm volatile (
-	"	vld4.8 {d20[],d21[],d22[],d23[]}, [%[glyphColour]]  @ splat solid colour components	\n"
+	"	vld4.8 {d20[],d21[],d22[],d23[]}, [%[glyph_colour]]  @ splat solid colour components	\n"
 	"0:	@ loop																				\n"
 	"	vld1.16   {d0,d1}, [%[dest]]         @ load first pixels from framebuffer			\n"
-	"	vld1.8    {d17}, [%[inMask]]         @ load alpha mask of glyph						\n"
+	"	vld1.8    {d17}, [%[in_mask]]         @ load alpha mask of glyph						\n"
 	"	vmull.u8  q9, d17, d23               @ apply glyph colour alpha to mask				\n"
 	"	vshrn.u16 d17, q9, #8                @ reformat it to match original mask			\n"
 	"	vmvn      d18, d17                   @ we need the inverse mask for the background	\n"
@@ -1661,18 +1661,18 @@ static inline void SolidOver565_8pix_neon(
 	"	vmlal.u8  q1, d17, d22               @ add masked foreground red...					\n"
 	"	vmlal.u8  q2, d17, d21               @ ...green...									\n"
 	"	vmlal.u8  q3, d17, d20               @ ...blue										\n"
-	"	add %[inMask], %[inMask], %[maskStride] @ advance mask pointer, while we wait		\n"
+	"	add %[in_mask], %[in_mask], %[mask_stride] @ advance mask pointer, while we wait		\n"
 	"	vsri.16   q1, q2, #5                 @ pack green behind red						\n"
 	"	vsri.16   q1, q3, #11                @ pack blue into pixels						\n"
 	"	vst1.16   {d2,d3}, [%[dest]]         @ store composited pixels						\n"
-	"	add %[dest], %[dest], %[destStride]  @ advance framebuffer pointer					\n"
+	"	add %[dest], %[dest], %[dest_stride]  @ advance framebuffer pointer					\n"
 	"	bne 0b                               @ next please									\n"
 
 	// Clobbered registers marked as input/outputs
-	: [dest] "+r" (dest), [inMask] "+r" (inMask), [count] "+r" (count)
+	: [dest] "+r" (dest), [in_mask] "+r" (in_mask), [count] "+r" (count)
 
 	// Inputs
-	: [destStride] "r" (destStride), [maskStride] "r" (maskStride), [glyphColour] "r" (&glyphColour)
+	: [dest_stride] "r" (dest_stride), [mask_stride] "r" (mask_stride), [glyph_colour] "r" (&glyph_colour)
 
 	// Clobbers, including the inputs we modify, and potentially lots of memory
 	: "q0", "q1", "q2", "q3", "d17", "q9", "q10", "q11", "q12", "cc", "memory"
@@ -1680,39 +1680,39 @@ static inline void SolidOver565_8pix_neon(
 
 #else
 
-	uint8x8x4_t solidColour = vld4_dup_u8((uint8_t*) &glyphColour);
+	uint8x8x4_t solid_colour = vld4_dup_u8((uint8_t*) &glyph_colour);
 
 	while(count--)
 	{
 		uint16x8_t  pixels = vld1q_u16(dest);
-		uint8x8_t   mask = vshrn_n_u16(vmull_u8(solidColour.val[3], vld1_u8(inMask)), 8);
-		uint8x8_t  iMask = vmvn_u8(mask);
+		uint8x8_t   mask = vshrn_n_u16(vmull_u8(solid_colour.val[3], vld1_u8(in_mask)), 8);
+		uint8x8_t  mask_image = vmvn_u8(mask);
 
-		uint8x8_t  tRed   = vshrn_n_u16(pixels, 8);
-		uint8x8_t  tGreen = vshrn_n_u16(pixels, 3);
-		uint8x8_t  tBlue  = vshrn_n_u16(vsli_n_u8(pixels, pixels, 5), 2);
+		uint8x8_t  t_red   = vshrn_n_u16(pixels, 8);
+		uint8x8_t  t_green = vshrn_n_u16(pixels, 3);
+		uint8x8_t  t_blue  = vshrn_n_u16(vsli_n_u8(pixels, pixels, 5), 2);
 
-		uint16x8_t sRed   = vmull_u8(vsri_n_u8(tRed  , tRed  , 5), iMask);
-		uint16x8_t sGreen = vmull_u8(vsri_n_u8(tGreen, tGreen, 6), iMask);
-		uint16x8_t sBlue  = vmull_u8(          tBlue             , iMask);
+		uint16x8_t s_red   = vmull_u8(vsri_n_u8(t_red  , t_red  , 5), mask_image);
+		uint16x8_t s_green = vmull_u8(vsri_n_u8(t_green, t_green, 6), mask_image);
+		uint16x8_t s_blue  = vmull_u8(          t_blue             , mask_image);
 
-		sRed   = vmlal(sRed  , mask, solidColour.val[2]);
-		sGreen = vmlal(sGreen, mask, solidColour.val[1]);
-		sBlue  = vmlal(sBlue , mask, solidColour.val[0]);
+		s_red   = vmlal(s_red  , mask, solid_colour.val[2]);
+		s_green = vmlal(s_green, mask, solid_colour.val[1]);
+		s_blue  = vmlal(s_blue , mask, solid_colour.val[0]);
 
-		pixels = vsri_n_u16(sRed, sGreen, 5);
-		pixels = vsri_n_u16(pixels, sBlue, 11);
+		pixels = vsri_n_u16(s_red, s_green, 5);
+		pixels = vsri_n_u16(pixels, s_blue, 11);
 		vst1q_u16(dest, pixels);
 
-		dest += destStride;
-		mask += maskStride;
+		dest += dest_stride;
+		mask += mask_stride;
 	}
 
 #endif
 }
 
 static void
-neon_CompositeOver_n_8_0565 (
+neon_composite_over_n_8_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * src_image,
@@ -1728,11 +1728,11 @@ neon_CompositeOver_n_8_0565 (
 	int32_t      height)
 {
 	uint32_t     src, srca;
-	uint16_t    *dstLine, *alignedLine;
-	uint8_t     *maskLine;
-	uint32_t     dstStride, maskStride;
-	uint32_t     kernelCount, copyCount, copyTail;
-	uint8_t      kernelOffset, copyOffset;
+	uint16_t    *dst_line, *aligned_line;
+	uint8_t     *mask_line;
+	uint32_t     dst_stride, mask_stride;
+	uint32_t     kernel_count, copy_count, copy_tail;
+	uint8_t      kernel_offset, copy_offset;
 
 	src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -1748,82 +1748,82 @@ neon_CompositeOver_n_8_0565 (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_n_8_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
+			neon_composite_over_n_8_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
 											  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-	PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+	PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
 	{
-		unsigned long alignedLeft = (unsigned long)(dstLine) & ~0xF;
-		unsigned long alignedRight = (((unsigned long)(dstLine + width)) + 0xF) & ~0xF;
-		unsigned long ceilingLength = (((unsigned long) width) * sizeof(*dstLine) + 0xF) & ~0xF;
+		unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+		unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+		unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
 
 		// the fast copy should be quadword aligned
-		copyOffset = dstLine - ((uint16_t*) alignedLeft);
-		alignedLine = dstLine - copyOffset;
-		copyCount = (uint32_t) ((alignedRight - alignedLeft) >> 4);
-		copyTail = 0;
+		copy_offset = dst_line - ((uint16_t*) aligned_left);
+		aligned_line = dst_line - copy_offset;
+		copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+		copy_tail = 0;
 
-		if(alignedRight - alignedLeft > ceilingLength) {
+		if(aligned_right - aligned_left > ceiling_length) {
 			// unaligned routine is tightest
-			kernelCount = (uint32_t) (ceilingLength >> 4);
-			kernelOffset = copyOffset;
+			kernel_count = (uint32_t) (ceiling_length >> 4);
+			kernel_offset = copy_offset;
 		} else {
 			// aligned routine is equally tight, so it is safer to align
-			kernelCount = copyCount;
-			kernelOffset = 0;
+			kernel_count = copy_count;
+			kernel_offset = 0;
 		}
 
 		// We should avoid reading beyond scanline ends for safety
-		if(alignedLine < (dstLine - xDst) ||
-			(alignedLine + (copyCount * 16 / sizeof(*dstLine))) > ((dstLine - xDst) + pDst->bits.width))
+		if(aligned_line < (dst_line - x_dst) ||
+			(aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - x_dst) + p_dst->bits.width))
 		{
 			// switch to precise read
-			copyOffset = kernelOffset = 0;
-			alignedLine = dstLine;
-			kernelCount = (uint32_t) (ceilingLength >> 4);
-			copyCount = (width * sizeof(*dstLine)) >> 4;
-			copyTail = (width * sizeof(*dstLine)) & 0xF;
+			copy_offset = kernel_offset = 0;
+			aligned_line = dst_line;
+			kernel_count = (uint32_t) (ceiling_length >> 4);
+			copy_count = (width * sizeof(*dst_line)) >> 4;
+			copy_tail = (width * sizeof(*dst_line)) & 0xF;
 		}
 	}
 
 	{
-		uint16_t scanLine[NEON_SCANLINE_BUFFER_PIXELS + 8]; // deliberately not initialised
-		uint8_t glyphLine[NEON_SCANLINE_BUFFER_PIXELS + 8];
+		uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; // deliberately not initialised
+		uint8_t glyph_line[NEON_SCANLINE_BUFFER_PIXELS + 8];
 		int y = height;
 
 		// row-major order
 		// left edge, middle block, right edge
-		for( ; y--; maskLine += maskStride, alignedLine += dstStride, dstLine += dstStride) {
+		for( ; y--; mask_line += mask_stride, aligned_line += dst_stride, dst_line += dst_stride) {
 			// We don't want to overrun the edges of the glyph, so realign the edge data into known buffers
-			neon_quadword_copy(glyphLine + copyOffset, maskLine, width >> 4, width & 0xF);
+			neon_quadword_copy(glyph_line + copy_offset, mask_line, width >> 4, width & 0xF);
 
 			// Uncached framebuffer access is really, really slow if we do it piecemeal.
 			// It should be much faster if we grab it all at once.
 			// One scanline should easily fit in L1 cache, so this should not waste RAM bandwidth.
-			neon_quadword_copy(scanLine, alignedLine, copyCount, copyTail);
+			neon_quadword_copy(scan_line, aligned_line, copy_count, copy_tail);
 
 			// Apply the actual filter
-			SolidOver565_8pix_neon(src, scanLine + kernelOffset, glyphLine + kernelOffset, 8 * sizeof(*dstLine), 8, kernelCount);
+			solid_over_565_8_pix_neon(src, scan_line + kernel_offset, glyph_line + kernel_offset, 8 * sizeof(*dst_line), 8, kernel_count);
 
 			// Copy the modified scanline back
-			neon_quadword_copy(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
+			neon_quadword_copy(dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2);
 		}
 	}
 }
 
 #ifdef USE_GCC_INLINE_ASM
 
-static inline void PlainOver565_8pix_neon(
+static inline void plain_over_565_8_pix_neon(
 	uint32_t  colour,
 	uint16_t *dest,
-	uint32_t  destStride,  // bytes, not elements
+	uint32_t  dest_stride,  // bytes, not elements
 	uint32_t  count        // 8-pixel groups
 )
 {
@@ -1852,14 +1852,14 @@ static inline void PlainOver565_8pix_neon(
 	"	vsri.16   q0, q1, #5                 @ pack green behind red			\n"
 	"	vsri.16   q0, q2, #11                @ pack blue into pixels			\n"
 	"	vst1.16   {d0,d1}, [%[dest]]         @ store composited pixels			\n"
-	"	add %[dest], %[dest], %[destStride]  @ advance framebuffer pointer		\n"
+	"	add %[dest], %[dest], %[dest_stride]  @ advance framebuffer pointer		\n"
 	"	bne 0b                               @ next please				\n"
 
 	// Clobbered registers marked as input/outputs
 	: [dest] "+r" (dest), [count] "+r" (count)
 
 	// Inputs
-	: [destStride] "r" (destStride), [colour] "r" (&colour)
+	: [dest_stride] "r" (dest_stride), [colour] "r" (&colour)
 
 	// Clobbers, including the inputs we modify, and potentially lots of memory
 	: "q0", "q1", "q2", "q3", "q9", "q10", "q11", "q12", "q13", "q14", "cc", "memory"
@@ -1867,7 +1867,7 @@ static inline void PlainOver565_8pix_neon(
 }
 
 static void
-neon_CompositeOver_n_0565 (
+neon_composite_over_n_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * src_image,
@@ -1883,10 +1883,10 @@ neon_CompositeOver_n_0565 (
 	int32_t      height)
 {
 	uint32_t     src, srca;
-	uint16_t    *dstLine, *alignedLine;
-	uint32_t     dstStride;
-	uint32_t     kernelCount, copyCount, copyTail;
-	uint8_t      kernelOffset, copyOffset;
+	uint16_t    *dst_line, *aligned_line;
+	uint32_t     dst_stride;
+	uint32_t     kernel_count, copy_count, copy_tail;
+	uint8_t      kernel_offset, copy_offset;
 
 	src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -1902,81 +1902,81 @@ neon_CompositeOver_n_0565 (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_n_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
+			neon_composite_over_n_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
 										(x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
 	{
-		unsigned long alignedLeft = (unsigned long)(dstLine) & ~0xF;
-		unsigned long alignedRight = (((unsigned long)(dstLine + width)) + 0xF) & ~0xF;
-		unsigned long ceilingLength = (((unsigned long) width) * sizeof(*dstLine) + 0xF) & ~0xF;
+		unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+		unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+		unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
 
 		// the fast copy should be quadword aligned
-		copyOffset = dstLine - ((uint16_t*) alignedLeft);
-		alignedLine = dstLine - copyOffset;
-		copyCount = (uint32_t) ((alignedRight - alignedLeft) >> 4);
-		copyTail = 0;
+		copy_offset = dst_line - ((uint16_t*) aligned_left);
+		aligned_line = dst_line - copy_offset;
+		copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+		copy_tail = 0;
 
-		if(alignedRight - alignedLeft > ceilingLength) {
+		if(aligned_right - aligned_left > ceiling_length) {
 			// unaligned routine is tightest
-			kernelCount = (uint32_t) (ceilingLength >> 4);
-			kernelOffset = copyOffset;
+			kernel_count = (uint32_t) (ceiling_length >> 4);
+			kernel_offset = copy_offset;
 		} else {
 			// aligned routine is equally tight, so it is safer to align
-			kernelCount = copyCount;
-			kernelOffset = 0;
+			kernel_count = copy_count;
+			kernel_offset = 0;
 		}
 
 		// We should avoid reading beyond scanline ends for safety
-		if(alignedLine < (dstLine - xDst) ||
-			(alignedLine + (copyCount * 16 / sizeof(*dstLine))) > ((dstLine - xDst) + pDst->bits.width))
+		if(aligned_line < (dst_line - x_dst) ||
+			(aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - x_dst) + p_dst->bits.width))
 		{
 			// switch to precise read
-			copyOffset = kernelOffset = 0;
-			alignedLine = dstLine;
-			kernelCount = (uint32_t) (ceilingLength >> 4);
-			copyCount = (width * sizeof(*dstLine)) >> 4;
-			copyTail = (width * sizeof(*dstLine)) & 0xF;
+			copy_offset = kernel_offset = 0;
+			aligned_line = dst_line;
+			kernel_count = (uint32_t) (ceiling_length >> 4);
+			copy_count = (width * sizeof(*dst_line)) >> 4;
+			copy_tail = (width * sizeof(*dst_line)) & 0xF;
 		}
 	}
 
 	{
-		uint16_t scanLine[NEON_SCANLINE_BUFFER_PIXELS + 8]; // deliberately not initialised
+		uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; // deliberately not initialised
 
 		// row-major order
 		// left edge, middle block, right edge
-		for( ; height--; alignedLine += dstStride, dstLine += dstStride) {
+		for( ; height--; aligned_line += dst_stride, dst_line += dst_stride) {
 
 			// Uncached framebuffer access is really, really slow if we do it piecemeal.
 			// It should be much faster if we grab it all at once.
 			// One scanline should easily fit in L1 cache, so this should not waste RAM bandwidth.
-			neon_quadword_copy(scanLine, alignedLine, copyCount, copyTail);
+			neon_quadword_copy(scan_line, aligned_line, copy_count, copy_tail);
 
 			// Apply the actual filter
-			PlainOver565_8pix_neon(src, scanLine + kernelOffset, 8 * sizeof(*dstLine), kernelCount);
+			plain_over_565_8_pix_neon(src, scan_line + kernel_offset, 8 * sizeof(*dst_line), kernel_count);
 
 			// Copy the modified scanline back
-			neon_quadword_copy(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
+			neon_quadword_copy(dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2);
 		}
 	}
 }
 
-static inline void ARGB8_Over565_8pix_neon(
+static inline void ARGB8_over_565_8_pix_neon(
 	uint32_t *src,
 	uint16_t *dest,
-	uint32_t  srcStride,  // bytes, not elements
+	uint32_t  src_stride,  // bytes, not elements
 	uint32_t  count        // 8-pixel groups
 )
 {
 	asm volatile (
 	"0:	@ loop\n"
-	"	pld   [%[src], %[srcStride]]         @ preload from next scanline	\n"
+	"	pld   [%[src], %[src_stride]]         @ preload from next scanline	\n"
 	"	vld1.16   {d0,d1}, [%[dest]]         @ load pixels from framebuffer	\n"
 	"	vld4.8   {d20,d21,d22,d23},[%[src]]! @ load source image pixels		\n"
 	"	vsli.u16  q3, q0, #5                 @ duplicate framebuffer blue bits		\n"
@@ -2002,7 +2002,7 @@ static inline void ARGB8_Over565_8pix_neon(
 	: [dest] "+r" (dest), [src] "+r" (src), [count] "+r" (count)
 
 	// Inputs
-	: [srcStride] "r" (srcStride)
+	: [src_stride] "r" (src_stride)
 
 	// Clobbers, including the inputs we modify, and potentially lots of memory
 	: "q0", "q1", "q2", "q3", "d17", "d18", "q10", "q11", "cc", "memory"
@@ -2010,7 +2010,7 @@ static inline void ARGB8_Over565_8pix_neon(
 }
 
 static void
-neon_CompositeOver_8888_0565 (
+neon_composite_over_8888_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * src_image,
@@ -2025,11 +2025,11 @@ neon_CompositeOver_8888_0565 (
 	int32_t      width,
 	int32_t      height)
 {
-	uint32_t    *srcLine;
-	uint16_t    *dstLine, *alignedLine;
-	uint32_t     dstStride, srcStride;
-	uint32_t     kernelCount, copyCount, copyTail;
-	uint8_t      kernelOffset, copyOffset;
+	uint32_t    *src_line;
+	uint16_t    *dst_line, *aligned_line;
+	uint32_t     dst_stride, src_stride;
+	uint32_t     kernel_count, copy_count, copy_tail;
+	uint8_t      kernel_offset, copy_offset;
 
 	// we assume mask is opaque
 	// so the only alpha to deal with is embedded in src
@@ -2038,54 +2038,54 @@ neon_CompositeOver_8888_0565 (
 		// split the blit, so we can use a fixed-size scanline buffer
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_8888_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
+			neon_composite_over_8888_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
 										  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
 	{
-		unsigned long alignedLeft = (unsigned long)(dstLine) & ~0xF;
-		unsigned long alignedRight = (((unsigned long)(dstLine + width)) + 0xF) & ~0xF;
-		unsigned long ceilingLength = (((unsigned long) width) * sizeof(*dstLine) + 0xF) & ~0xF;
+		unsigned long aligned_left = (unsigned long)(dst_line) & ~0xF;
+		unsigned long aligned_right = (((unsigned long)(dst_line + width)) + 0xF) & ~0xF;
+		unsigned long ceiling_length = (((unsigned long) width) * sizeof(*dst_line) + 0xF) & ~0xF;
 
 		// the fast copy should be quadword aligned
-		copyOffset = dstLine - ((uint16_t*) alignedLeft);
-		alignedLine = dstLine - copyOffset;
-		copyCount = (uint32_t) ((alignedRight - alignedLeft) >> 4);
-		copyTail = 0;
+		copy_offset = dst_line - ((uint16_t*) aligned_left);
+		aligned_line = dst_line - copy_offset;
+		copy_count = (uint32_t) ((aligned_right - aligned_left) >> 4);
+		copy_tail = 0;
 
-		if(alignedRight - alignedLeft > ceilingLength) {
+		if(aligned_right - aligned_left > ceiling_length) {
 			// unaligned routine is tightest
-			kernelCount = (uint32_t) (ceilingLength >> 4);
-			kernelOffset = copyOffset;
+			kernel_count = (uint32_t) (ceiling_length >> 4);
+			kernel_offset = copy_offset;
 		} else {
 			// aligned routine is equally tight, so it is safer to align
-			kernelCount = copyCount;
-			kernelOffset = 0;
+			kernel_count = copy_count;
+			kernel_offset = 0;
 		}
 
 		// We should avoid reading beyond scanline ends for safety
-		if(alignedLine < (dstLine - xDst) ||
-			(alignedLine + (copyCount * 16 / sizeof(*dstLine))) > ((dstLine - xDst) + pDst->bits.width))
+		if(aligned_line < (dst_line - x_dst) ||
+			(aligned_line + (copy_count * 16 / sizeof(*dst_line))) > ((dst_line - x_dst) + p_dst->bits.width))
 		{
 			// switch to precise read
-			copyOffset = kernelOffset = 0;
-			alignedLine = dstLine;
-			kernelCount = (uint32_t) (ceilingLength >> 4);
-			copyCount = (width * sizeof(*dstLine)) >> 4;
-			copyTail = (width * sizeof(*dstLine)) & 0xF;
+			copy_offset = kernel_offset = 0;
+			aligned_line = dst_line;
+			kernel_count = (uint32_t) (ceiling_length >> 4);
+			copy_count = (width * sizeof(*dst_line)) >> 4;
+			copy_tail = (width * sizeof(*dst_line)) & 0xF;
 		}
 	}
 
 	/* Preload the first input scanline */
 	{
-		uint8_t *srcPtr = (uint8_t*) srcLine;
+		uint8_t *src_ptr = (uint8_t*) src_line;
 		uint32_t count = (width + 15) / 16;
 
 #ifdef USE_GCC_INLINE_ASM
@@ -2097,34 +2097,34 @@ neon_CompositeOver_8888_0565 (
 		"	bgt 0b						\n"
 
 		// Clobbered input registers marked as input/outputs
-		: [src] "+r" (srcPtr), [count] "+r" (count)
+		: [src] "+r" (src_ptr), [count] "+r" (count)
 		: // no unclobbered inputs
 		: "cc"
 		);
 #else
 		do {
-			__pld(srcPtr);
-			srcPtr += 64;
+			__pld(src_ptr);
+			src_ptr += 64;
 		} while(--count);
 #endif
 	}
 
 	{
-		uint16_t scanLine[NEON_SCANLINE_BUFFER_PIXELS + 8]; // deliberately not initialised
+		uint16_t scan_line[NEON_SCANLINE_BUFFER_PIXELS + 8]; // deliberately not initialised
 
 		// row-major order
 		// left edge, middle block, right edge
-		for( ; height--; srcLine += srcStride, alignedLine += dstStride) {
+		for( ; height--; src_line += src_stride, aligned_line += dst_stride) {
 			// Uncached framebuffer access is really, really slow if we do it piecemeal.
 			// It should be much faster if we grab it all at once.
 			// One scanline should easily fit in L1 cache, so this should not waste RAM bandwidth.
-			neon_quadword_copy(scanLine, alignedLine, copyCount, copyTail);
+			neon_quadword_copy(scan_line, aligned_line, copy_count, copy_tail);
 
 			// Apply the actual filter
-			ARGB8_Over565_8pix_neon(srcLine, scanLine + kernelOffset, srcStride * sizeof(*srcLine), kernelCount);
+			ARGB8_over_565_8_pix_neon(src_line, scan_line + kernel_offset, src_stride * sizeof(*src_line), kernel_count);
 
 			// Copy the modified scanline back
-			neon_quadword_copy(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
+			neon_quadword_copy(dst_line, scan_line + copy_offset, width >> 3, (width & 7) * 2);
 		}
 	}
 }
@@ -2133,21 +2133,21 @@ neon_CompositeOver_8888_0565 (
 
 static const pixman_fast_path_t arm_neon_fast_path_array[] = 
 {
-    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_CompositeAdd_8888_8_8,        0 },
-    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_CompositeAdd_8000_8000,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_CompositeOver_n_8_0565,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_CompositeOver_n_8_0565,     0 },
-    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeSrc_24_16,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeSrc_24_16,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeSrc_24_16,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeSrc_24_16,              0 },
+    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_8888_8_8,        0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_composite_add_8000_8000,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_composite_over_n_8_0565,     0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_24_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_24_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_24_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_24_16,              0 },
 #ifdef USE_GCC_INLINE_ASM
-    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeSrc_16_16,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeSrc_16_16,              0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeOver_n_0565,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeOver_n_0565,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeOver_8888_0565,         0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeOver_8888_0565,         0 },
+    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_16_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_16_16,              0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_n_0565,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_n_0565,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_8888_0565,         0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_8888_0565,         0 },
 #endif
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_8888_8888,          0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888,          0 },
@@ -2155,10 +2155,10 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,          0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888,        NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888,     0 },
     { PIXMAN_OP_NONE },
 };
 
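For reference, the x8r8g8b8 -> r5g6b5 conversion that neon_composite_src_24_16 performs in its scalar tail (the small_stuff path above) boils down to keeping the top 5/6/5 bits of each 8-bit channel and repacking them. A minimal standalone restatement in plain C, not part of the patch itself:

    #include <stdint.h>

    /* Keep bits 7:3 of blue, 15:10 of green and 23:19 of red from an
     * x8r8g8b8 pixel and pack them into a 16-bit r5g6b5 pixel, exactly
     * as the scalar fallback loop above does with rb_mask/g_mask. */
    static inline uint16_t
    convert_8888_to_0565 (uint32_t src_pixel)
    {
        uint32_t blue  = (src_pixel >>  3) & 0x1F;
        uint32_t green = (src_pixel >> 10) & 0x3F;
        uint32_t red   = (src_pixel >> 19) & 0x1F;

        return (uint16_t) ((red << 11) | (green << 5) | blue);
    }

The NEON paths reach the same result with vshll.u8/vsri.u16, widening each channel and shift-inserting green and blue behind red.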
diff --git a/pixman/pixman-arm-simd.c b/pixman/pixman-arm-simd.c
index 71689fe..d16ca18 100644
--- a/pixman/pixman-arm-simd.c
+++ b/pixman/pixman-arm-simd.c
@@ -30,7 +30,7 @@
 #include "pixman-private.h"
 
 static void
-arm_CompositeAdd_8000_8000 (
+arm_composite_add_8000_8000 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 				pixman_image_t * src_image,
@@ -45,21 +45,21 @@ arm_CompositeAdd_8000_8000 (
 				int32_t      width,
 				int32_t      height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint8_t	s, d;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
         /* ensure both src and dst are properly aligned before doing 32 bit reads
@@ -115,23 +115,23 @@ arm_composite_over_8888_8888 (
 			 int32_t      width,
 			 int32_t      height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint32_t component_half = 0x800080;
     uint32_t upper_component_mask = 0xff00ff00;
     uint32_t alpha_mask = 0xff;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 //#define inner_branch
@@ -208,26 +208,26 @@ arm_composite_over_8888_n_8888 (
 			       int32_t      width,
 			       int32_t      height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
     uint32_t	mask;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint32_t component_half = 0x800080;
     uint32_t alpha_mask = 0xff;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = (mask) >> 24;
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 //#define inner_branch
@@ -302,7 +302,7 @@ arm_composite_over_8888_n_8888 (
 }
 
 static void
-arm_CompositeOver_n_8_8888 (
+arm_composite_over_n_8_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t      op,
 			       pixman_image_t * src_image,
@@ -318,9 +318,9 @@ arm_CompositeOver_n_8_8888 (
 			       int32_t      height)
 {
     uint32_t	 src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int		 dstStride, maskStride;
+    uint32_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int		 dst_stride, mask_stride;
     uint16_t	 w;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
@@ -336,15 +336,15 @@ arm_CompositeOver_n_8_8888 (
     uint32_t src_hi = (src >> 8) & component_mask;
     uint32_t src_lo = src & component_mask;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 //#define inner_branch
@@ -426,12 +426,12 @@ static const pixman_fast_path_t arm_simd_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888,    NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
 
-    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       arm_CompositeAdd_8000_8000,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       arm_composite_add_8000_8000,   0 },
 
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, arm_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, arm_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888,     0 },
 
     { PIXMAN_OP_NONE },
 };
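The inner loops of arm_composite_over_8888_8888 and friends are elided from the hunks above, but the constants they declare (component_half = 0x800080, alpha_mask = 0xff) point at the usual packed-channel OVER idiom: multiply two 8-bit channels by the inverse source alpha in a single 32-bit operation, with correct rounding for the division by 255. The following is only an illustrative sketch of that idiom, with made-up helper names, not the code from the patch:

    #include <stdint.h>

    /* Multiply the two even channels of 'pixel' (packed as 0x00rr00bb)
     * by 'alpha' and divide by 255 with rounding; 0x800080 is the
     * per-channel rounding bias, i.e. the component_half constant. */
    static inline uint32_t
    mul_packed_channels (uint32_t pixel, uint32_t alpha)
    {
        uint32_t t = (pixel & 0xff00ff) * alpha + 0x800080;
        t = (t + ((t >> 8) & 0xff00ff)) >> 8;
        return t & 0xff00ff;
    }

    /* Premultiplied OVER: dest = src + dest * (255 - alpha(src)) / 255,
     * applied once to the even channels and once to the odd ones. */
    static inline uint32_t
    over_8888 (uint32_t src, uint32_t dest)
    {
        uint32_t ia = 255 - (src >> 24);
        uint32_t lo = mul_packed_channels (dest, ia);
        uint32_t hi = mul_packed_channels (dest >> 8, ia) << 8;

        return src + (lo | hi);
    }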
diff --git a/pixman/pixman-bits-image.c b/pixman/pixman-bits-image.c
index f22765a..8036705 100644
--- a/pixman/pixman-bits-image.c
+++ b/pixman/pixman-bits-image.c
@@ -490,7 +490,7 @@ bits_image_fetch_filtered (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 static void
 bits_image_fetch_transformed (pixman_image_t * pict, int x, int y,
 			      int width, uint32_t *buffer,
-			      const uint32_t *mask, uint32_t maskBits)
+			      const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t     *bits;
     int32_t    stride;
@@ -585,7 +585,7 @@ bits_image_fetch_transformed (pixman_image_t * pict, int x, int y,
 static void
 bits_image_fetch_solid_32 (pixman_image_t * image, int x, int y,
 			   int width, uint32_t *buffer,
-			   const uint32_t *mask, uint32_t maskBits)
+			   const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t color[2];
     uint32_t *end;
@@ -696,7 +696,7 @@ bits_image_fetch_untransformed_repeat_normal (bits_image_t *image, pixman_bool_t
 static void
 bits_image_fetch_untransformed_32 (pixman_image_t * image, int x, int y,
 				   int width, uint32_t *buffer,
-				   const uint32_t *mask, uint32_t maskBits)
+				   const uint32_t *mask, uint32_t mask_bits)
 {
     if (image->common.repeat == PIXMAN_REPEAT_NONE)
     {
diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index 030bbc9..b794000 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -12,7 +12,7 @@
 /*** per channel helper functions ***/
 
 static void
-combineMaskC (comp4_t *src, comp4_t *mask)
+combine_mask_c (comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *mask;
 
@@ -43,7 +43,7 @@ combineMaskC (comp4_t *src, comp4_t *mask)
 }
 
 static void
-combineMaskValueC (comp4_t *src, const comp4_t *mask)
+combine_mask_value_c (comp4_t *src, const comp4_t *mask)
 {
     comp4_t a = *mask;
     comp4_t	x;
@@ -63,7 +63,7 @@ combineMaskValueC (comp4_t *src, const comp4_t *mask)
 }
 
 static void
-combineMaskAlphaC (const comp4_t *src, comp4_t *mask)
+combine_mask_alpha_c (const comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *(mask);
     comp4_t	x;
@@ -102,7 +102,7 @@ combineMaskAlphaC (const comp4_t *src, comp4_t *mask)
  */
 
 static force_inline comp4_t
-combineMask (const comp4_t *src, const comp4_t *mask, int i)
+combine_mask (const comp4_t *src, const comp4_t *mask, int i)
 {
     comp4_t s, m;
 
@@ -123,14 +123,14 @@ combineMask (const comp4_t *src, const comp4_t *mask, int i)
 }
 
 static void
-combineClear (pixman_implementation_t *imp, pixman_op_t op,
+combine_clear (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     memset(dest, 0, width*sizeof(comp4_t));
 }
 
 static void
-combineSrcU (pixman_implementation_t *imp, pixman_op_t op,
+combine_src_u (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -141,21 +141,21 @@ combineSrcU (pixman_implementation_t *imp, pixman_op_t op,
     {
 	for (i = 0; i < width; ++i)
 	{
-	    comp4_t s = combineMask (src, mask, i);
+	    comp4_t s = combine_mask (src, mask, i);
 	    
 	    *(dest + i) = s;
 	}
     }
 }
 
-/* if the Src is opaque, call combineSrcU */
+/* if the Src is opaque, call combine_src_u */
 static void
-combineOverU (pixman_implementation_t *imp, pixman_op_t op,
+combine_over_u (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t ia = ALPHA_c(~s);
 
@@ -166,12 +166,12 @@ combineOverU (pixman_implementation_t *imp, pixman_op_t op,
 
 /* if the Dst is opaque, this is a noop */
 static void
-combineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t ia = ALPHA_c(~*(dest + i));
         UNcx4_MUL_UNc_ADD_UNcx4(s, ia, d);
@@ -179,14 +179,14 @@ combineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Dst is opaque, call combineSrcU */
+/* if the Dst is opaque, call combine_src_u */
 static void
-combineInU (pixman_implementation_t *imp, pixman_op_t op,
+combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
 	      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t a = ALPHA_c(*(dest + i));
         UNcx4_MUL_UNc(s, a);
 	*(dest + i) = s;
@@ -195,12 +195,12 @@ combineInU (pixman_implementation_t *imp, pixman_op_t op,
 
 /* if the Src is opaque, this is a noop */
 static void
-combineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-	comp4_t s = combineMask (src, mask, i);
+	comp4_t s = combine_mask (src, mask, i);
 	comp4_t d = *(dest + i);
         comp4_t a = ALPHA_c(s);
         UNcx4_MUL_UNc(d, a);
@@ -208,28 +208,28 @@ combineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Dst is opaque, call combineClear */
+/* if the Dst is opaque, call combine_clear */
 static void
-combineOutU (pixman_implementation_t *imp, pixman_op_t op,
+combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t a = ALPHA_c(~*(dest + i));
         UNcx4_MUL_UNc(s, a);
 	*(dest + i) = s;
     }
 }
 
-/* if the Src is opaque, call combineClear */
+/* if the Src is opaque, call combine_clear */
 static void
-combineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-	comp4_t s = combineMask (src, mask, i);
+	comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t a = ALPHA_c(~s);
         UNcx4_MUL_UNc(d, a);
@@ -237,16 +237,16 @@ combineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call combineInU */
-/* if the Dst is opaque, call combineOverU */
-/* if both the Src and Dst are opaque, call combineSrcU */
+/* if the Src is opaque, call combine_in_u */
+/* if the Dst is opaque, call combine_over_u */
+/* if both the Src and Dst are opaque, call combine_src_u */
 static void
-combineAtopU (pixman_implementation_t *imp, pixman_op_t op,
+combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t dest_a = ALPHA_c(d);
         comp4_t src_ia = ALPHA_c(~s);
@@ -256,16 +256,16 @@ combineAtopU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call combineOverReverseU */
-/* if the Dst is opaque, call combineInReverseU */
-/* if both the Src and Dst are opaque, call combineDstU */
+/* if the Src is opaque, call combine_over_reverse_u */
+/* if the Dst is opaque, call combine_in_reverse_u */
+/* if both the Src and Dst are opaque, call combine_dst_u */
 static void
-combineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t src_a = ALPHA_c(s);
         comp4_t dest_ia = ALPHA_c(~d);
@@ -275,16 +275,16 @@ combineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call combineOverU */
-/* if the Dst is opaque, call combineOverReverseU */
-/* if both the Src and Dst are opaque, call combineClear */
+/* if the Src is opaque, call combine_over_u */
+/* if the Dst is opaque, call combine_over_reverse_u */
+/* if both the Src and Dst are opaque, call combine_clear */
 static void
-combineXorU (pixman_implementation_t *imp, pixman_op_t op,
+combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t src_ia = ALPHA_c(~s);
         comp4_t dest_ia = ALPHA_c(~d);
@@ -295,28 +295,28 @@ combineXorU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineAddU (pixman_implementation_t *imp, pixman_op_t op,
+combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         UNcx4_ADD_UNcx4(d, s);
 	*(dest + i) = d;
     }
 }
 
-/* if the Src is opaque, call combineAddU */
-/* if the Dst is opaque, call combineAddU */
-/* if both the Src and Dst are opaque, call combineAddU */
+/* if the Src is opaque, call combine_add_u */
+/* if the Dst is opaque, call combine_add_u */
+/* if both the Src and Dst are opaque, call combine_add_u */
 static void
-combineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
+combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
 		    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp2_t sa, da;
 
@@ -361,12 +361,12 @@ combineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
  */
 
 static void
-combineMultiplyU (pixman_implementation_t *imp, pixman_op_t op,
+combine_multiply_u (pixman_implementation_t *imp, pixman_op_t op,
 		    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
 	comp4_t ss = s;
         comp4_t src_ia = ALPHA_c (~s);
@@ -380,7 +380,7 @@ combineMultiplyU (pixman_implementation_t *imp, pixman_op_t op,
 }
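
Per premultiplied channel, multiply composites as Dca' = Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa), which is what the fixed-point loop above builds with the UNcx4_* macros. A float sketch of the same formula (channels in [0,1]; names are illustrative):

#include <stdio.h>

/* multiply for one premultiplied channel:
 *   Dca' = Sca.Dca + Sca.(1 - Da) + Dca.(1 - Sa) */
static double
multiply_f (double dca, double da, double sca, double sa)
{
    return sca * dca + sca * (1.0 - da) + dca * (1.0 - sa);
}

int
main (void)
{
    /* over an opaque destination this reduces to Sca.Dca + Dca.(1 - Sa) */
    printf ("multiply = %f\n", multiply_f (0.5, 1.0, 0.25, 0.5));
    return 0;
}
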
 
 static void
-combineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
+combine_multiply_c (pixman_implementation_t *imp, pixman_op_t op,
                     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -391,7 +391,7 @@ combineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t r = d;
 	comp4_t dest_ia = ALPHA_c (~d);
 
-	combineMaskValueC (&s, &m);
+	combine_mask_value_c (&s, &m);
 
 	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (r, ~m, s, dest_ia);
 	UNcx4_MUL_UNcx4 (d, s);
@@ -403,12 +403,12 @@ combineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
 
 #define PDF_SEPARABLE_BLEND_MODE(name)		    \
 static void					    \
-combine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
+combine_ ## name ## _u (pixman_implementation_t *imp, pixman_op_t op, \
                         comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
 {						    \
     int i;					    \
     for (i = 0; i < width; ++i) {		    \
-        comp4_t s = combineMask (src, mask, i);     \
+        comp4_t s = combine_mask (src, mask, i);     \
         comp4_t d = *(dest + i);		    \
         comp1_t sa = ALPHA_c(s);			    \
         comp1_t isa = ~sa;			    \
@@ -428,7 +428,7 @@ combine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
 }						    \
 						    \
 static void				    \
-combine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
+combine_ ## name ## _c (pixman_implementation_t *imp, pixman_op_t op, \
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
 {						    \
     int i;					    \
@@ -440,7 +440,7 @@ combine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 	comp1_t ida = ~da;			    \
 	comp4_t result;				    \
 						    \
-	combineMaskValueC (&s, &m);		    \
+	combine_mask_value_c (&s, &m);		    \
 						    \
 	result = d;				    \
 	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (result, ~m, s, ida);	    \
@@ -461,12 +461,12 @@ combine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
  */
 
 static inline comp4_t
-blend_Screen (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_screen (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
   return DIV_ONE_UNc (sca * da + dca * sa - sca * dca);
 }
 
-PDF_SEPARABLE_BLEND_MODE (Screen)
+PDF_SEPARABLE_BLEND_MODE (screen)
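
Each separable mode only supplies the B() term; PDF_SEPARABLE_BLEND_MODE then adds the usual Porter-Duff parts, giving Dca' = B(Dca, Da, Sca, Sa) + Sca.(1 - Da) + Dca.(1 - Sa) per channel, with DIV_ONE_UNc doing the fixed-point normalization. A float sketch of that composition for screen; the helper names here are illustrative:

#include <stdio.h>

/* Screen:  B(Dca, Da, Sca, Sa) = Sca.Da + Dca.Sa - Sca.Dca
 * (the fixed-point version divides by MASK via DIV_ONE_UNc) */
static double
blend_screen_f (double dca, double da, double sca, double sa)
{
    return sca * da + dca * sa - sca * dca;
}

/* what the macro wraps around every blend function:
 *   Dca' = B(Dca, Da, Sca, Sa) + Sca.(1 - Da) + Dca.(1 - Sa) */
static double
separable (double (*blend) (double, double, double, double),
	   double dca, double da, double sca, double sa)
{
    return blend (dca, da, sca, sa) + sca * (1.0 - da) + dca * (1.0 - sa);
}

int
main (void)
{
    double dca = 0.25, da = 1.0;	/* premultiplied destination channel */
    double sca = 0.50, sa = 0.5;	/* premultiplied source channel      */

    printf ("screen = %f\n", separable (blend_screen_f, dca, da, sca, sa));
    return 0;
}
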
 
 /*
  * Overlay
@@ -478,7 +478,7 @@ PDF_SEPARABLE_BLEND_MODE (Screen)
  */
 
 static inline comp4_t
-blend_Overlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_overlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t rca;
 
@@ -489,7 +489,7 @@ blend_Overlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     return DIV_ONE_UNc (rca);
 }
 
-PDF_SEPARABLE_BLEND_MODE (Overlay)
+PDF_SEPARABLE_BLEND_MODE (overlay)
 
 /*
  * Darken
@@ -497,7 +497,7 @@ PDF_SEPARABLE_BLEND_MODE (Overlay)
  */
 
 static inline comp4_t
-blend_Darken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_darken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t s, d;
     
@@ -506,7 +506,7 @@ blend_Darken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     return DIV_ONE_UNc (s > d ? d : s);
 }
 
-PDF_SEPARABLE_BLEND_MODE (Darken)
+PDF_SEPARABLE_BLEND_MODE (darken)
 
 /*
  * Lighten
@@ -514,7 +514,7 @@ PDF_SEPARABLE_BLEND_MODE (Darken)
  */
 
 static inline comp4_t
-blend_Lighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_lighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t s, d;
     
@@ -523,7 +523,7 @@ blend_Lighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     return DIV_ONE_UNc (s > d ? s : d);
 }
 
-PDF_SEPARABLE_BLEND_MODE (Lighten)
+PDF_SEPARABLE_BLEND_MODE (lighten)
 
 /*
  * Color dodge
@@ -535,7 +535,7 @@ PDF_SEPARABLE_BLEND_MODE (Lighten)
  */ 
 
 static inline comp4_t
-blend_ColorDodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_color_dodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (sca >= sa) {
 	return DIV_ONE_UNc (sa * da);
@@ -545,19 +545,19 @@ blend_ColorDodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     }
 }
 
-PDF_SEPARABLE_BLEND_MODE (ColorDodge)
+PDF_SEPARABLE_BLEND_MODE (color_dodge)
 
 /*
  * Color burn
  * B(Dca, ab, Sca, as) = 
  *   if Sca. == 0
- *     (Da == Dca).SaDa
+ *     (Da == Dca).Sa.Da
  *   otherwise
  *     Sa.Da.(1 - min (1, (1 - Dca/Da).Sa / Sca))
  */
 
 static inline comp4_t
-blend_ColorBurn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_color_burn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (sca == 0) {
 	return 0;
@@ -568,7 +568,7 @@ blend_ColorBurn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     }
 }
 
-PDF_SEPARABLE_BLEND_MODE (ColorBurn)
+PDF_SEPARABLE_BLEND_MODE (color_burn)
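
A float rendering of the color-burn formula in the comment above (premultiplied channels in [0,1], Da > 0 assumed for the sketch; the macro still adds the Sca.(1 - Da) and Dca.(1 - Sa) terms around it):

#include <stdio.h>

/* color burn per the comment above:
 *   if Sca == 0:  (Da == Dca) ? Sa.Da : 0
 *   otherwise:    Sa.Da.(1 - min (1, (1 - Dca/Da).Sa / Sca)) */
static double
blend_color_burn_f (double dca, double da, double sca, double sa)
{
    double t;

    if (sca == 0.0)
	return (da == dca) ? sa * da : 0.0;

    t = (1.0 - dca / da) * sa / sca;
    if (t > 1.0)
	t = 1.0;

    return sa * da * (1.0 - t);
}

int
main (void)
{
    /* half-covered source over an opaque mid-grey destination channel */
    printf ("burn = %f\n", blend_color_burn_f (0.5, 1.0, 0.4, 0.5));
    return 0;
}
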
 
 /*
  * Hard light
@@ -579,7 +579,7 @@ PDF_SEPARABLE_BLEND_MODE (ColorBurn)
  *     Sa.Da - 2.(Da - Dca).(Sa - Sca)
  */
 static inline comp4_t
-blend_HardLight (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_hard_light (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (2 * sca < sa)
 	return DIV_ONE_UNc (2 * sca * dca);
@@ -587,7 +587,7 @@ blend_HardLight (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 	return DIV_ONE_UNc (sa * da - 2 * (da - dca) * (sa - sca));
 }
 
-PDF_SEPARABLE_BLEND_MODE (HardLight)
+PDF_SEPARABLE_BLEND_MODE (hard_light)
 
 /*
  * Soft light
@@ -601,7 +601,7 @@ PDF_SEPARABLE_BLEND_MODE (HardLight)
  */
 
 static inline comp4_t
-blend_SoftLight (comp4_t dca_org, comp4_t da_org, comp4_t sca_org, comp4_t sa_org)
+blend_soft_light (comp4_t dca_org, comp4_t da_org, comp4_t sca_org, comp4_t sa_org)
 {
     double dca = dca_org * (1.0 / MASK);
     double da = da_org * (1.0 / MASK);
@@ -624,7 +624,7 @@ blend_SoftLight (comp4_t dca_org, comp4_t da_org, comp4_t sca_org, comp4_t sa_or
     return rca * MASK + 0.5;
 }
 
-PDF_SEPARABLE_BLEND_MODE (SoftLight)
+PDF_SEPARABLE_BLEND_MODE (soft_light)
 
 /*
  * Difference
@@ -632,7 +632,7 @@ PDF_SEPARABLE_BLEND_MODE (SoftLight)
  */
 
 static inline comp4_t
-blend_Difference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_difference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t dcasa = dca * sa;
     comp4_t scada = sca * da;
@@ -643,7 +643,7 @@ blend_Difference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 	return DIV_ONE_UNc (scada - dcasa);
 }
 
-PDF_SEPARABLE_BLEND_MODE (Difference)
+PDF_SEPARABLE_BLEND_MODE (difference)
 
 /*
  * Exclusion
@@ -654,12 +654,12 @@ PDF_SEPARABLE_BLEND_MODE (Difference)
  * PDF_SEPARABLE_BLEND_MODE, but that's a performance optimization */
 
 static inline comp4_t
-blend_Exclusion (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_exclusion (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     return DIV_ONE_UNc (sca * da + dca * sa - 2 * dca * sca);
 }
 
-PDF_SEPARABLE_BLEND_MODE (Exclusion)
+PDF_SEPARABLE_BLEND_MODE (exclusion)
 
 #undef PDF_SEPARABLE_BLEND_MODE
 
@@ -670,7 +670,7 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  * 
  * LUM (C) = 0.3 × Cred + 0.59 × Cgreen + 0.11 × Cblue
  *
- * ClipColor (C):
+ * clip_color (C):
  *   l = LUM (C)
  *   min = Cmin
  *   max = Cmax
@@ -683,7 +683,7 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  * set_lum (C, l):
  *   d = l – LUM (C)
  *   C += d
- *   return ClipColor (C)
+ *   return clip_color (C)
  *
  * SAT (C) = CH_MAX (C) - CH_MIN (C)
  *
@@ -702,7 +702,7 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  *
  *    LUM (r × C) = r × LUM (C)		SAT (r * C) = r * SAT (C)
  *
- * If we extend ClipColor with an extra argument a and change
+ * If we extend clip_color with an extra argument a and change
  *
  *        if x >= 1.0
  *
@@ -710,20 +710,20 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  *
  *        if x >= a
  *
- * then ClipColor is also linear:
+ * then clip_color is also linear:
  *
- *    r * ClipColor (C, a) = ClipColor (rC, ra);
+ *    r * clip_color (C, a) = clip_color (r * C, r * a);
  *
  * for positive r.
  *
  * Similarly, we can extend set_lum with an extra argument that is just passed
- * on to ClipColor:
+ * on to clip_color:
  *
  *   r * set_lum ( C, l, a)
  *
- *   = r × ClipColor ( C + l - LUM (C), a)
+ *   = r × clip_color ( C + l - LUM (C), a)
  *
- *   = ClipColor ( r * C + r × l - r * LUM (C), r * a)
+ *   = clip_color ( r * C + r × l - r * LUM (C), r * a)
  *
  *   = set_lum ( r * C, r * l, r * a)
  *
@@ -734,7 +734,7 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
 * The above holds for all non-zero x, because the x's in the fraction for
  * C_mid cancel out. Specifically, it holds for x = r:
  *
- *    r * set_sat (C, s) = set_sat (rC, rs)
+ *    r * set_sat (C, s) = set_sat (r * C, r * s)
  *  
  */
 
@@ -777,12 +777,12 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
 
 #define PDF_NON_SEPARABLE_BLEND_MODE(name)					\
 static void								\
-combine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
+combine_ ## name ## _u (pixman_implementation_t *imp, pixman_op_t op,	\
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
 {									\
     int i;								\
     for (i = 0; i < width; ++i) {					\
-        comp4_t s = combineMask (src, mask, i);				\
+        comp4_t s = combine_mask (src, mask, i);				\
         comp4_t d = *(dest + i);					\
         comp1_t sa = ALPHA_c(s);						\
         comp1_t isa = ~sa;						\
@@ -825,7 +825,7 @@ set_lum (comp4_t dest[3], comp4_t src[3], comp4_t sa, comp4_t lum)
   tmp[1] += l;
   tmp[2] += l;
 
-  /* ClipColor */
+  /* clip_color */
   l = LUM (tmp);
   min = CH_MIN (tmp);
   max = CH_MAX (tmp);
@@ -898,7 +898,7 @@ set_sat (comp4_t dest[3], comp4_t src[3], comp4_t sat)
  * B(Cb, Cs) = set_lum (set_sat (Cs, SAT (Cb)), LUM (Cb))
  */
 static inline void
-blend_HslHue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_hsl_hue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = sc[0] * da;
     c[1] = sc[1] * da;
@@ -907,14 +907,14 @@ blend_HslHue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa
     set_lum (c, c, sa * da, LUM (dc) * sa);
 }
 
-PDF_NON_SEPARABLE_BLEND_MODE (HslHue)
+PDF_NON_SEPARABLE_BLEND_MODE (hsl_hue)
 
 /*
  * Saturation:
  * B(Cb, Cs) = set_lum (set_sat (Cb, SAT (Cs)), LUM (Cb))
  */
 static inline void
-blend_HslSaturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_hsl_saturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = dc[0] * sa;
     c[1] = dc[1] * sa;
@@ -923,14 +923,14 @@ blend_HslSaturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], com
     set_lum (c, c, sa * da, LUM (dc) * sa);
 }
 
-PDF_NON_SEPARABLE_BLEND_MODE (HslSaturation)
+PDF_NON_SEPARABLE_BLEND_MODE (hsl_saturation)
 
 /*
  * Color:
  * B(Cb, Cs) = set_lum (Cs, LUM (Cb))
  */
 static inline void
-blend_HslColor (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_hsl_color (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = sc[0] * da;
     c[1] = sc[1] * da;
@@ -938,14 +938,14 @@ blend_HslColor (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t
     set_lum (c, c, sa * da, LUM (dc) * sa);
 }
 
-PDF_NON_SEPARABLE_BLEND_MODE (HslColor)
+PDF_NON_SEPARABLE_BLEND_MODE (hsl_color)
 
 /*
  * Luminosity:
  * B(Cb, Cs) = set_lum (Cb, LUM (Cs))
  */
 static inline void
-blend_HslLuminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_hsl_luminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = dc[0] * sa;
     c[1] = dc[1] * sa;
@@ -953,7 +953,7 @@ blend_HslLuminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], com
     set_lum (c, c, sa * da, LUM (sc) * da);
 }
 
-PDF_NON_SEPARABLE_BLEND_MODE (HslLuminosity)
+PDF_NON_SEPARABLE_BLEND_MODE (hsl_luminosity)
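
The non-separable modes all reduce to set_lum/clip_color from the pseudo-code above; blend_hsl_color, for example, is just set_lum (Cs, LUM (Cb)) scaled by the alphas. A plain-float sketch of clip_color and set_lum, without the extra alpha argument that the fixed-point code threads through for linearity (illustrative names):

#include <stdio.h>

#define LUM_F(c)  (0.3 * (c)[0] + 0.59 * (c)[1] + 0.11 * (c)[2])

/* clip_color from the pseudo-code above, non-premultiplied floats */
static void
clip_color_f (double c[3])
{
    double l = LUM_F (c);
    double cmin = c[0], cmax = c[0];
    int i;

    for (i = 1; i < 3; i++)
    {
	if (c[i] < cmin) cmin = c[i];
	if (c[i] > cmax) cmax = c[i];
    }

    for (i = 0; i < 3; i++)
    {
	if (cmin < 0.0)
	    c[i] = l + (c[i] - l) * l / (l - cmin);
	if (cmax > 1.0)
	    c[i] = l + (c[i] - l) * (1.0 - l) / (cmax - l);
    }
}

/* set_lum: shift C until its luminosity is l, then clip */
static void
set_lum_f (double c[3], double l)
{
    double d = l - LUM_F (c);
    int i;

    for (i = 0; i < 3; i++)
	c[i] += d;

    clip_color_f (c);
}

int
main (void)
{
    /* HSL color mode: B(Cb, Cs) = set_lum (Cs, LUM (Cb)) */
    double cb[3] = { 0.2, 0.6, 0.3 };	/* backdrop */
    double r[3]  = { 0.8, 0.1, 0.1 };	/* source   */

    set_lum_f (r, LUM_F (cb));
    printf ("hsl_color -> %.3f %.3f %.3f\n", r[0], r[1], r[2]);
    return 0;
}
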
 
 #undef SAT
 #undef LUM
@@ -1003,7 +1003,7 @@ PDF_NON_SEPARABLE_BLEND_MODE (HslLuminosity)
 
 /* portion covered by a but not b */
 static comp1_t
-combineDisjointOutPart (comp1_t a, comp1_t b)
+combine_disjoint_out_part (comp1_t a, comp1_t b)
 {
     /* min (1, (1-b) / a) */
 
@@ -1015,7 +1015,7 @@ combineDisjointOutPart (comp1_t a, comp1_t b)
 
 /* portion covered by both a and b */
 static comp1_t
-combineDisjointInPart (comp1_t a, comp1_t b)
+combine_disjoint_in_part (comp1_t a, comp1_t b)
 {
     /* max (1-(1-b)/a,0) */
     /*  = - min ((1-b)/a - 1, 0) */
@@ -1029,7 +1029,7 @@ combineDisjointInPart (comp1_t a, comp1_t b)
 
 /* portion covered by a but not b */
 static comp1_t
-combineConjointOutPart (comp1_t a, comp1_t b)
+combine_conjoint_out_part (comp1_t a, comp1_t b)
 {
     /* max (1-b/a,0) */
     /* = 1-min(b/a,1) */
@@ -1043,7 +1043,7 @@ combineConjointOutPart (comp1_t a, comp1_t b)
 
 /* portion covered by both a and b */
 static comp1_t
-combineConjointInPart (comp1_t a, comp1_t b)
+combine_conjoint_in_part (comp1_t a, comp1_t b)
 {
     /* min (1,b/a) */
 
@@ -1063,11 +1063,11 @@ combineConjointInPart (comp1_t a, comp1_t b)
 					 (0 - ((t) >> G_SHIFT)))) << (i))
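
The four coverage fractions above, written out in floats for reference (a is the alpha being divided by, b the other one; the a == 0 limit values below are one reasonable choice, the fixed-point helpers do their own clamping):

#include <stdio.h>

static double
mind (double a, double b) { return a < b ? a : b; }

static double
maxd (double a, double b) { return a > b ? a : b; }

/* disjoint: the two shapes overlap as little as possible */
static double
disjoint_out_f (double a, double b)	/* covered by a but not b */
{
    return a > 0.0 ? mind (1.0, (1.0 - b) / a) : 1.0;
}

static double
disjoint_in_f (double a, double b)	/* covered by both a and b */
{
    return a > 0.0 ? maxd (1.0 - (1.0 - b) / a, 0.0) : 0.0;
}

/* conjoint: the two shapes overlap as much as possible */
static double
conjoint_out_f (double a, double b)
{
    return a > 0.0 ? maxd (1.0 - b / a, 0.0) : 0.0;
}

static double
conjoint_in_f (double a, double b)
{
    return a > 0.0 ? mind (1.0, b / a) : 1.0;
}

int
main (void)
{
    double sa = 0.6, da = 0.7;

    printf ("disjoint: out=%.3f in=%.3f\n",
	    disjoint_out_f (sa, da), disjoint_in_f (sa, da));
    printf ("conjoint: out=%.3f in=%.3f\n",
	    conjoint_out_f (sa, da), conjoint_in_f (sa, da));
    return 0;
}
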
 
 static void
-combineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combine_disjoint_general_u (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t m,n,o,p;
         comp2_t Fa, Fb, t, u, v;
@@ -1079,10 +1079,10 @@ combineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fa = 0;
             break;
         case COMBINE_A_OUT:
-            Fa = combineDisjointOutPart (sa, da);
+            Fa = combine_disjoint_out_part (sa, da);
             break;
         case COMBINE_A_IN:
-            Fa = combineDisjointInPart (sa, da);
+            Fa = combine_disjoint_in_part (sa, da);
             break;
         case COMBINE_A:
             Fa = MASK;
@@ -1094,10 +1094,10 @@ combineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = 0;
             break;
         case COMBINE_B_OUT:
-            Fb = combineDisjointOutPart (da, sa);
+            Fb = combine_disjoint_out_part (da, sa);
             break;
         case COMBINE_B_IN:
-            Fb = combineDisjointInPart (da, sa);
+            Fb = combine_disjoint_in_part (da, sa);
             break;
         case COMBINE_B:
             Fb = MASK;
@@ -1113,12 +1113,12 @@ combineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
 }
 
 static void
-combineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_over_u (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp2_t a = s >> A_SHIFT;
 
         if (a != 0x00)
@@ -1126,7 +1126,7 @@ combineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
             if (a != MASK)
             {
                 comp4_t d = *(dest + i);
-                a = combineDisjointOutPart (d >> A_SHIFT, a);
+                a = combine_disjoint_out_part (d >> A_SHIFT, a);
                 UNcx4_MUL_UNc_ADD_UNcx4(d, a, s);
                 s = d;
             }
@@ -1136,60 +1136,60 @@ combineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
 }
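
combine_disjoint_over_u above is the Fa = 1, Fb = combine_disjoint_out_part (Da, Sa) case of the general scheme, i.e. result = s + d.min (1, (1 - Sa)/Da). A one-channel float sketch under that reading (illustrative name):

#include <stdio.h>

/* disjoint OVER for one premultiplied channel pair:
 *   result = s . Fa + d . Fb  with  Fa = 1,  Fb = min (1, (1 - Sa) / Da) */
static double
disjoint_over_f (double s, double sa, double d, double da)
{
    double fb = 1.0;

    if (da > 0.0)
    {
	fb = (1.0 - sa) / da;
	if (fb > 1.0)
	    fb = 1.0;
    }

    return s + d * fb;
}

int
main (void)
{
    /* 60%-covered source channel over a 70%-covered destination channel */
    printf ("%.3f\n", disjoint_over_f (0.30, 0.6, 0.35, 0.7));
    return 0;
}
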
 
 static void
-combineDisjointInU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_in_u (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, COMBINE_A_IN);
+    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
-combineDisjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, COMBINE_B_IN);
+    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
-combineDisjointOutU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_out_u (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, COMBINE_A_OUT);
+    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
-combineDisjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, COMBINE_B_OUT);
+    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
-combineDisjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, COMBINE_A_ATOP);
+    combine_disjoint_general_u (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
-combineDisjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, COMBINE_B_ATOP);
+    combine_disjoint_general_u (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
-combineDisjointXorU (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, COMBINE_XOR);
+    combine_disjoint_general_u (dest, src, mask, width, COMBINE_XOR);
 }
 
 static void
-combineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combine_conjoint_general_u (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
     for (i = 0; i < width; ++i) {
-        comp4_t s = combineMask (src, mask, i);
+        comp4_t s = combine_mask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t m,n,o,p;
         comp2_t Fa, Fb, t, u, v;
@@ -1201,10 +1201,10 @@ combineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fa = 0;
             break;
         case COMBINE_A_OUT:
-            Fa = combineConjointOutPart (sa, da);
+            Fa = combine_conjoint_out_part (sa, da);
             break;
         case COMBINE_A_IN:
-            Fa = combineConjointInPart (sa, da);
+            Fa = combine_conjoint_in_part (sa, da);
             break;
         case COMBINE_A:
             Fa = MASK;
@@ -1216,10 +1216,10 @@ combineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = 0;
             break;
         case COMBINE_B_OUT:
-            Fb = combineConjointOutPart (da, sa);
+            Fb = combine_conjoint_out_part (da, sa);
             break;
         case COMBINE_B_IN:
-            Fb = combineConjointInPart (da, sa);
+            Fb = combine_conjoint_in_part (da, sa);
             break;
         case COMBINE_B:
             Fb = MASK;
@@ -1235,69 +1235,69 @@ combineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
 }
 
 static void
-combineConjointOverU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_over_u (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_OVER);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 
 static void
-combineConjointOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_OVER);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_OVER);
 }
 
 
 static void
-combineConjointInU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_in_u (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_IN);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_IN);
 }
 
 
 static void
-combineConjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_IN);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
-combineConjointOutU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_out_u (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_OUT);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
-combineConjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_OUT);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
-combineConjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_ATOP);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
-combineConjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_ATOP);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
-combineConjointXorU (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, COMBINE_XOR);
+    combine_conjoint_general_u (dest, src, mask, width, COMBINE_XOR);
 }
 
 /********************************************************************************/
@@ -1305,14 +1305,14 @@ combineConjointXorU (pixman_implementation_t *imp, pixman_op_t op,
 /********************************************************************************/
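
In the component-alpha variants the mask carries a separate alpha per channel, so combine_mask_value_c scales each source channel by the matching mask channel instead of by a single alpha value. A rough sketch of that per-channel scaling, again assuming the 32-bit instantiation; mul_un8 and mask_value_ca are illustrative, not pixman API:

#include <stdint.h>
#include <stdio.h>

/* one 8-bit channel times an 8-bit factor, rounded as x * a / 255 */
static uint8_t
mul_un8 (uint8_t x, uint8_t a)
{
    uint16_t t = (uint16_t)x * a + 0x80;

    return (uint8_t) ((t + (t >> 8)) >> 8);
}

/* component alpha: each channel of the source is scaled by the
 * matching channel of the mask */
static uint32_t
mask_value_ca (uint32_t s, uint32_t m)
{
    uint32_t r = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
	r |= (uint32_t) mul_un8 ((s >> shift) & 0xff,
				 (m >> shift) & 0xff) << shift;

    return r;
}

int
main (void)
{
    uint32_t src  = 0xffc08040;		/* opaque source             */
    uint32_t mask = 0xff20ff80;		/* per-channel coverage mask */

    printf ("masked src = %08x\n", mask_value_ca (src, mask));
    return 0;
}
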
 
 static void
-combineClearC (pixman_implementation_t *imp, pixman_op_t op,
+combine_clear_c (pixman_implementation_t *imp, pixman_op_t op,
 		 comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     memset(dest, 0, width*sizeof(comp4_t));
 }
 
 static void
-combineSrcC (pixman_implementation_t *imp, pixman_op_t op,
+combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1321,14 +1321,14 @@ combineSrcC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t s = *(src + i);
 	comp4_t m = *(mask + i);
 
-	combineMaskValueC (&s, &m);
+	combine_mask_value_c (&s, &m);
 
 	*(dest + i) = s;
     }
 }
 
 static void
-combineOverC (pixman_implementation_t *imp, pixman_op_t op,
+combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1338,7 +1338,7 @@ combineOverC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	combineMaskC (&s, &m);
+	combine_mask_c (&s, &m);
 
 	a = ~m;
         if (a != ~0)
@@ -1355,7 +1355,7 @@ combineOverC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1369,7 +1369,7 @@ combineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
             comp4_t s = *(src + i);
 	    comp4_t m = *(mask + i);
 
-	    combineMaskValueC (&s, &m);
+	    combine_mask_value_c (&s, &m);
 
             if (a != MASK)
             {
@@ -1381,7 +1381,7 @@ combineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineInC (pixman_implementation_t *imp, pixman_op_t op,
+combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 	      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1395,7 +1395,7 @@ combineInC (pixman_implementation_t *imp, pixman_op_t op,
 	    comp4_t m = *(mask + i);
 
 	    s = *(src + i);
-	    combineMaskValueC (&s, &m);
+	    combine_mask_value_c (&s, &m);
             if (a != MASK)
             {
                 UNcx4_MUL_UNc(s, a);
@@ -1406,7 +1406,7 @@ combineInC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1416,7 +1416,7 @@ combineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t a;
 
-	combineMaskAlphaC (&s, &m);
+	combine_mask_alpha_c (&s, &m);
 
 	a = m;
         if (a != ~0)
@@ -1433,7 +1433,7 @@ combineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineOutC (pixman_implementation_t *imp, pixman_op_t op,
+combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1447,7 +1447,7 @@ combineOutC (pixman_implementation_t *imp, pixman_op_t op,
 	    comp4_t m = *(mask + i);
 
 	    s = *(src + i);
-	    combineMaskValueC (&s, &m);
+	    combine_mask_value_c (&s, &m);
 
             if (a != MASK)
             {
@@ -1459,7 +1459,7 @@ combineOutC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1469,7 +1469,7 @@ combineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	combineMaskAlphaC (&s, &m);
+	combine_mask_alpha_c (&s, &m);
 
         a = ~m;
         if (a != ~0)
@@ -1486,7 +1486,7 @@ combineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineAtopC (pixman_implementation_t *imp, pixman_op_t op,
+combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1498,7 +1498,7 @@ combineAtopC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = d >> A_SHIFT;
 
-	combineMaskC (&s, &m);
+	combine_mask_c (&s, &m);
 
         ad = ~m;
 
@@ -1508,7 +1508,7 @@ combineAtopC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1521,7 +1521,7 @@ combineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = ~d >> A_SHIFT;
 
-	combineMaskC (&s, &m);
+	combine_mask_c (&s, &m);
 
 	ad = m;
 
@@ -1531,7 +1531,7 @@ combineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineXorC (pixman_implementation_t *imp, pixman_op_t op,
+combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1543,7 +1543,7 @@ combineXorC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = ~d >> A_SHIFT;
 
-	combineMaskC (&s, &m);
+	combine_mask_c (&s, &m);
 
 	ad = ~m;
 
@@ -1553,7 +1553,7 @@ combineXorC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineAddC (pixman_implementation_t *imp, pixman_op_t op,
+combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1563,7 +1563,7 @@ combineAddC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t d = *(dest + i);
 
-	combineMaskValueC (&s, &m);
+	combine_mask_value_c (&s, &m);
 
         UNcx4_ADD_UNcx4(d, s);
 	*(dest + i) = d;
@@ -1571,7 +1571,7 @@ combineAddC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
+combine_saturate_c (pixman_implementation_t *imp, pixman_op_t op,
 		    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1586,7 +1586,7 @@ combineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
         s = *(src + i);
 	m = *(mask + i);
 
-	combineMaskC (&s, &m);
+	combine_mask_c (&s, &m);
 
         sa = (m >> A_SHIFT);
         sr = (m >> R_SHIFT) & MASK;
@@ -1619,7 +1619,7 @@ combineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combine_disjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
 
@@ -1636,7 +1636,7 @@ combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
         d = *(dest + i);
         da = d >> A_SHIFT;
 
-	combineMaskC (&s, &m);
+	combine_mask_c (&s, &m);
 
 	sa = m;
 
@@ -1645,17 +1645,17 @@ combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fa = 0;
             break;
         case COMBINE_A_OUT:
-            m = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combine_disjoint_out_part ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combine_disjoint_out_part ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combine_disjoint_out_part ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combine_disjoint_out_part ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case COMBINE_A_IN:
-            m = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combine_disjoint_in_part ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combine_disjoint_in_part ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combine_disjoint_in_part ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combine_disjoint_in_part ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case COMBINE_A:
@@ -1668,17 +1668,17 @@ combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = 0;
             break;
         case COMBINE_B_OUT:
-            m = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combine_disjoint_out_part (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combine_disjoint_out_part (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combine_disjoint_out_part (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combine_disjoint_out_part (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case COMBINE_B_IN:
-            m = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combine_disjoint_in_part (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combine_disjoint_in_part (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combine_disjoint_in_part (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combine_disjoint_in_part (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case COMBINE_B:
@@ -1695,63 +1695,63 @@ combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
 }
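
For the component-alpha general combiners, Fa and Fb are themselves packed per channel: each 8-bit factor is computed from the corresponding channel of the mask-derived sa and or'ed back into its position, which is what the m|n|o|p lines in the switch above do. A sketch of that packing, with out_part_f as an illustrative stand-in for combine_disjoint_out_part and friends:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t (*part_fn) (uint8_t a, uint8_t b);

/* min (1, (1 - b) / a) in 8-bit fixed point; an illustrative stand-in,
 * not the exact pixman helper */
static uint8_t
out_part_f (uint8_t a, uint8_t b)
{
    uint16_t t;

    if (a == 0 || (uint16_t) (0xff - b) >= a)
	return 0xff;

    t = ((uint16_t) (0xff - b) * 0xff + a / 2) / a;
    return (uint8_t) t;
}

/* build a packed per-channel factor the way the m|n|o|p lines do:
 * one 8-bit factor per channel, or'ed back into place */
static uint32_t
pack_factors (uint32_t sa, uint8_t da, part_fn part)
{
    uint32_t fa = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
	fa |= (uint32_t) part ((sa >> shift) & 0xff, da) << shift;

    return fa;
}

int
main (void)
{
    uint32_t sa = 0x80ff4020;	/* per-channel source alpha from the mask */
    uint8_t  da = 0xc0;		/* destination alpha                      */

    printf ("Fa = %08x\n", pack_factors (sa, da, out_part_f));
    return 0;
}
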
 
 static void
-combineDisjointOverC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_over_c (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_OVER);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 static void
-combineDisjointInC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_in_c (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_IN);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
-combineDisjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_B_IN);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
-combineDisjointOutC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_out_c (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_OUT);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
-combineDisjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_B_OUT);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
-combineDisjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_ATOP);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
-combineDisjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_B_ATOP);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
-combineDisjointXorC (pixman_implementation_t *imp, pixman_op_t op,
+combine_disjoint_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, COMBINE_XOR);
+    combine_disjoint_general_c (dest, src, mask, width, COMBINE_XOR);
 }
 
 static void
-combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combine_conjoint_general_c (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
 
@@ -1768,7 +1768,7 @@ combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
         d = *(dest + i);
         da = d >> A_SHIFT;
 
-	combineMaskC (&s, &m);
+	combine_mask_c (&s, &m);
 
         sa = m;
 
@@ -1777,17 +1777,17 @@ combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fa = 0;
             break;
         case COMBINE_A_OUT:
-            m = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combine_conjoint_out_part ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combine_conjoint_out_part ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combine_conjoint_out_part ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combine_conjoint_out_part ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case COMBINE_A_IN:
-            m = (comp4_t)combineConjointInPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)combineConjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)combineConjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)combineConjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combine_conjoint_in_part ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combine_conjoint_in_part ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combine_conjoint_in_part ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combine_conjoint_in_part ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case COMBINE_A:
@@ -1800,17 +1800,17 @@ combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = 0;
             break;
         case COMBINE_B_OUT:
-            m = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combine_conjoint_out_part (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combine_conjoint_out_part (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combine_conjoint_out_part (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combine_conjoint_out_part (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case COMBINE_B_IN:
-            m = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combine_conjoint_in_part (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combine_conjoint_in_part (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combine_conjoint_in_part (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combine_conjoint_in_part (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case COMBINE_B:
@@ -1827,186 +1827,186 @@ combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
 }
 
 static void
-combineConjointOverC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_over_c (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_OVER);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 static void
-combineConjointOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_OVER);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_OVER);
 }
 
 static void
-combineConjointInC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_in_c (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_IN);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
-combineConjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_IN);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
-combineConjointOutC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_out_c (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_OUT);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
-combineConjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_OUT);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
-combineConjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_ATOP);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
-combineConjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_ATOP);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
-combineConjointXorC (pixman_implementation_t *imp, pixman_op_t op,
+combine_conjoint_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, COMBINE_XOR);
+    combine_conjoint_general_c (dest, src, mask, width, COMBINE_XOR);
 }
 
 void
 _pixman_setup_combiner_functions_width (pixman_implementation_t *imp)
 {
     /* Unified alpha */
-    imp->combine_width[PIXMAN_OP_CLEAR] = combineClear;
-    imp->combine_width[PIXMAN_OP_SRC] = combineSrcU;
+    imp->combine_width[PIXMAN_OP_CLEAR] = combine_clear;
+    imp->combine_width[PIXMAN_OP_SRC] = combine_src_u;
     /* dest */
-    imp->combine_width[PIXMAN_OP_OVER] = combineOverU;
-    imp->combine_width[PIXMAN_OP_OVER_REVERSE] = combineOverReverseU;
-    imp->combine_width[PIXMAN_OP_IN] = combineInU;
-    imp->combine_width[PIXMAN_OP_IN_REVERSE] = combineInReverseU;
-    imp->combine_width[PIXMAN_OP_OUT] = combineOutU;
-    imp->combine_width[PIXMAN_OP_OUT_REVERSE] = combineOutReverseU;
-    imp->combine_width[PIXMAN_OP_ATOP] = combineAtopU;
-    imp->combine_width[PIXMAN_OP_ATOP_REVERSE] = combineAtopReverseU;
-    imp->combine_width[PIXMAN_OP_XOR] = combineXorU;
-    imp->combine_width[PIXMAN_OP_ADD] = combineAddU;
-    imp->combine_width[PIXMAN_OP_SATURATE] = combineSaturateU;
+    imp->combine_width[PIXMAN_OP_OVER] = combine_over_u;
+    imp->combine_width[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_u;
+    imp->combine_width[PIXMAN_OP_IN] = combine_in_u;
+    imp->combine_width[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_u;
+    imp->combine_width[PIXMAN_OP_OUT] = combine_out_u;
+    imp->combine_width[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_u;
+    imp->combine_width[PIXMAN_OP_ATOP] = combine_atop_u;
+    imp->combine_width[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_u;
+    imp->combine_width[PIXMAN_OP_XOR] = combine_xor_u;
+    imp->combine_width[PIXMAN_OP_ADD] = combine_add_u;
+    imp->combine_width[PIXMAN_OP_SATURATE] = combine_saturate_u;
 
     /* Disjoint, unified */
-    imp->combine_width[PIXMAN_OP_DISJOINT_CLEAR] = combineClear;
-    imp->combine_width[PIXMAN_OP_DISJOINT_SRC] = combineSrcU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear;
+    imp->combine_width[PIXMAN_OP_DISJOINT_SRC] = combine_src_u;
     /* dest */
-    imp->combine_width[PIXMAN_OP_DISJOINT_OVER] = combineDisjointOverU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combineSaturateU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_IN] = combineDisjointInU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_IN_REVERSE] = combineDisjointInReverseU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_OUT] = combineDisjointOutU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combineDisjointOutReverseU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP] = combineDisjointAtopU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combineDisjointAtopReverseU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_XOR] = combineDisjointXorU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_u;
+    imp->combine_width[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_u;
 
     /* Conjoint, unified */
-    imp->combine_width[PIXMAN_OP_CONJOINT_CLEAR] = combineClear;
-    imp->combine_width[PIXMAN_OP_CONJOINT_SRC] = combineSrcU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear;
+    imp->combine_width[PIXMAN_OP_CONJOINT_SRC] = combine_src_u;
     /* dest */
-    imp->combine_width[PIXMAN_OP_CONJOINT_OVER] = combineConjointOverU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combineConjointOverReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_IN] = combineConjointInU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_IN_REVERSE] = combineConjointInReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_OUT] = combineConjointOutU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combineConjointOutReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP] = combineConjointAtopU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combineConjointAtopReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_XOR] = combineConjointXorU;
-
-    imp->combine_width[PIXMAN_OP_MULTIPLY] = combineMultiplyU;
-    imp->combine_width[PIXMAN_OP_SCREEN] = combineScreenU;
-    imp->combine_width[PIXMAN_OP_OVERLAY] = combineOverlayU;
-    imp->combine_width[PIXMAN_OP_DARKEN] = combineDarkenU;
-    imp->combine_width[PIXMAN_OP_LIGHTEN] = combineLightenU;
-    imp->combine_width[PIXMAN_OP_COLOR_DODGE] = combineColorDodgeU;
-    imp->combine_width[PIXMAN_OP_COLOR_BURN] = combineColorBurnU;
-    imp->combine_width[PIXMAN_OP_HARD_LIGHT] = combineHardLightU;
-    imp->combine_width[PIXMAN_OP_SOFT_LIGHT] = combineSoftLightU;
-    imp->combine_width[PIXMAN_OP_DIFFERENCE] = combineDifferenceU;
-    imp->combine_width[PIXMAN_OP_EXCLUSION] = combineExclusionU;
-    imp->combine_width[PIXMAN_OP_HSL_HUE] = combineHslHueU;
-    imp->combine_width[PIXMAN_OP_HSL_SATURATION] = combineHslSaturationU;
-    imp->combine_width[PIXMAN_OP_HSL_COLOR] = combineHslColorU;
-    imp->combine_width[PIXMAN_OP_HSL_LUMINOSITY] = combineHslLuminosityU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_u;
+    imp->combine_width[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_u;
+
+    imp->combine_width[PIXMAN_OP_MULTIPLY] = combine_multiply_u;
+    imp->combine_width[PIXMAN_OP_SCREEN] = combine_screen_u;
+    imp->combine_width[PIXMAN_OP_OVERLAY] = combine_overlay_u;
+    imp->combine_width[PIXMAN_OP_DARKEN] = combine_darken_u;
+    imp->combine_width[PIXMAN_OP_LIGHTEN] = combine_lighten_u;
+    imp->combine_width[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_u;
+    imp->combine_width[PIXMAN_OP_COLOR_BURN] = combine_color_burn_u;
+    imp->combine_width[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_u;
+    imp->combine_width[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_u;
+    imp->combine_width[PIXMAN_OP_DIFFERENCE] = combine_difference_u;
+    imp->combine_width[PIXMAN_OP_EXCLUSION] = combine_exclusion_u;
+    imp->combine_width[PIXMAN_OP_HSL_HUE] = combine_hsl_hue_u;
+    imp->combine_width[PIXMAN_OP_HSL_SATURATION] = combine_hsl_saturation_u;
+    imp->combine_width[PIXMAN_OP_HSL_COLOR] = combine_hsl_color_u;
+    imp->combine_width[PIXMAN_OP_HSL_LUMINOSITY] = combine_hsl_luminosity_u;
 
     /* Component alpha combiners */
-    imp->combine_width_ca[PIXMAN_OP_CLEAR] = combineClearC;
-    imp->combine_width_ca[PIXMAN_OP_SRC] = combineSrcC;
+    imp->combine_width_ca[PIXMAN_OP_CLEAR] = combine_clear_c;
+    imp->combine_width_ca[PIXMAN_OP_SRC] = combine_src_c;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_OVER] = combineOverC;
-    imp->combine_width_ca[PIXMAN_OP_OVER_REVERSE] = combineOverReverseC;
-    imp->combine_width_ca[PIXMAN_OP_IN] = combineInC;
-    imp->combine_width_ca[PIXMAN_OP_IN_REVERSE] = combineInReverseC;
-    imp->combine_width_ca[PIXMAN_OP_OUT] = combineOutC;
-    imp->combine_width_ca[PIXMAN_OP_OUT_REVERSE] = combineOutReverseC;
-    imp->combine_width_ca[PIXMAN_OP_ATOP] = combineAtopC;
-    imp->combine_width_ca[PIXMAN_OP_ATOP_REVERSE] = combineAtopReverseC;
-    imp->combine_width_ca[PIXMAN_OP_XOR] = combineXorC;
-    imp->combine_width_ca[PIXMAN_OP_ADD] = combineAddC;
-    imp->combine_width_ca[PIXMAN_OP_SATURATE] = combineSaturateC;
+    imp->combine_width_ca[PIXMAN_OP_OVER] = combine_over_c;
+    imp->combine_width_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_IN] = combine_in_c;
+    imp->combine_width_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_OUT] = combine_out_c;
+    imp->combine_width_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_ATOP] = combine_atop_c;
+    imp->combine_width_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_XOR] = combine_xor_c;
+    imp->combine_width_ca[PIXMAN_OP_ADD] = combine_add_c;
+    imp->combine_width_ca[PIXMAN_OP_SATURATE] = combine_saturate_c;
 
     /* Disjoint CA */
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_CLEAR] = combineClearC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_SRC] = combineSrcC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_CLEAR] = combine_clear_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_SRC] = combine_src_c;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER] = combineDisjointOverC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combineSaturateC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN] = combineDisjointInC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combineDisjointInReverseC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT] = combineDisjointOutC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combineDisjointOutReverseC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP] = combineDisjointAtopC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combineDisjointAtopReverseC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_XOR] = combineDisjointXorC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER] = combine_disjoint_over_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combine_saturate_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN] = combine_disjoint_in_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combine_disjoint_in_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT] = combine_disjoint_out_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combine_disjoint_out_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP] = combine_disjoint_atop_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combine_disjoint_atop_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_XOR] = combine_disjoint_xor_c;
 
     /* Conjoint CA */
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_CLEAR] = combineClearC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_SRC] = combineSrcC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_CLEAR] = combine_clear_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_SRC] = combine_src_c;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER] = combineConjointOverC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combineConjointOverReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN] = combineConjointInC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combineConjointInReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT] = combineConjointOutC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combineConjointOutReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP] = combineConjointAtopC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combineConjointAtopReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_XOR] = combineConjointXorC;
-
-    imp->combine_width_ca[PIXMAN_OP_MULTIPLY] = combineMultiplyC;
-    imp->combine_width_ca[PIXMAN_OP_SCREEN] = combineScreenC;
-    imp->combine_width_ca[PIXMAN_OP_OVERLAY] = combineOverlayC;
-    imp->combine_width_ca[PIXMAN_OP_DARKEN] = combineDarkenC;
-    imp->combine_width_ca[PIXMAN_OP_LIGHTEN] = combineLightenC;
-    imp->combine_width_ca[PIXMAN_OP_COLOR_DODGE] = combineColorDodgeC;
-    imp->combine_width_ca[PIXMAN_OP_COLOR_BURN] = combineColorBurnC;
-    imp->combine_width_ca[PIXMAN_OP_HARD_LIGHT] = combineHardLightC;
-    imp->combine_width_ca[PIXMAN_OP_SOFT_LIGHT] = combineSoftLightC;
-    imp->combine_width_ca[PIXMAN_OP_DIFFERENCE] = combineDifferenceC;
-    imp->combine_width_ca[PIXMAN_OP_EXCLUSION] = combineExclusionC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER] = combine_conjoint_over_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combine_conjoint_over_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN] = combine_conjoint_in_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combine_conjoint_in_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT] = combine_conjoint_out_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combine_conjoint_out_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP] = combine_conjoint_atop_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combine_conjoint_atop_reverse_c;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_XOR] = combine_conjoint_xor_c;
+
+    imp->combine_width_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_c;
+    imp->combine_width_ca[PIXMAN_OP_SCREEN] = combine_screen_c;
+    imp->combine_width_ca[PIXMAN_OP_OVERLAY] = combine_overlay_c;
+    imp->combine_width_ca[PIXMAN_OP_DARKEN] = combine_darken_c;
+    imp->combine_width_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_c;
+    imp->combine_width_ca[PIXMAN_OP_COLOR_DODGE] = combine_color_dodge_c;
+    imp->combine_width_ca[PIXMAN_OP_COLOR_BURN] = combine_color_burn_c;
+    imp->combine_width_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_c;
+    imp->combine_width_ca[PIXMAN_OP_SOFT_LIGHT] = combine_soft_light_c;
+    imp->combine_width_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_c;
+    imp->combine_width_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_c;
     /* It is not clear that these make sense, so leave them out for now */
     imp->combine_width_ca[PIXMAN_OP_HSL_HUE] = NULL;
     imp->combine_width_ca[PIXMAN_OP_HSL_SATURATION] = NULL;
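
The tables above map each pixman_op_t to a scanline combiner: combine_width holds the unified-alpha variants and combine_width_ca the component-alpha ones. For reference, a minimal stand-alone sketch of that dispatch pattern with a trivial unified OVER combiner for premultiplied a8r8g8b8 pixels; the names and the simplified signature are illustrative only, not pixman's real prototypes.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified combiner shape: dest and src are scanlines of
     * premultiplied a8r8g8b8; mask may be NULL for unified alpha. */
    typedef void (*demo_combine_u_t) (uint32_t *dest, const uint32_t *src,
                                      const uint32_t *mask, int width);

    enum { DEMO_OP_SRC, DEMO_OP_OVER, DEMO_N_OPS };

    static uint32_t
    mul_un8 (uint32_t x, uint32_t a)            /* x * a / 255, rounded */
    {
        uint32_t t = x * a + 0x80;
        return (t + (t >> 8)) >> 8;
    }

    static void
    demo_combine_src_u (uint32_t *dest, const uint32_t *src,
                        const uint32_t *mask, int width)
    {
        for (int i = 0; i < width; i++)
            dest[i] = src[i];
        (void) mask;
    }

    /* OVER: dest = src + dest * (255 - alpha(src)) / 255, per channel. */
    static void
    demo_combine_over_u (uint32_t *dest, const uint32_t *src,
                         const uint32_t *mask, int width)
    {
        for (int i = 0; i < width; i++)
        {
            uint32_t s = src[i], d = dest[i], ia = 255 - (s >> 24), r = 0;

            for (int shift = 0; shift < 32; shift += 8)
                r |= (((s >> shift) & 0xff) +
                      mul_un8 ((d >> shift) & 0xff, ia)) << shift;
            dest[i] = r;
        }
        (void) mask;
    }

    int
    main (void)
    {
        demo_combine_u_t combine[DEMO_N_OPS] = {
            [DEMO_OP_SRC]  = demo_combine_src_u,
            [DEMO_OP_OVER] = demo_combine_over_u,
        };
        uint32_t dest[1] = { 0xff0000ff };      /* opaque blue            */
        uint32_t src[1]  = { 0x80800000 };      /* 50% red, premultiplied */

        combine[DEMO_OP_OVER] (dest, src, NULL, 1);
        printf ("%08" PRIx32 "\n", dest[0]);    /* prints ff80007f        */
        return 0;
    }

The real combiners additionally take the implementation and operator arguments visible in the diffs below, so one generic dispatcher can route every operator through the same table.
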
diff --git a/pixman/pixman-conical-gradient.c b/pixman/pixman-conical-gradient.c
index cff58d4..8e7d1c5 100644
--- a/pixman/pixman-conical-gradient.c
+++ b/pixman/pixman-conical-gradient.c
@@ -31,7 +31,7 @@
 static void
 conical_gradient_get_scanline_32 (pixman_image_t *image, int x, int y,
 				  int width, uint32_t *buffer,
-				  const uint32_t *mask, uint32_t maskBits)
+				  const uint32_t *mask, uint32_t mask_bits)
 {
     source_image_t *source = (source_image_t *)image;
     gradient_t *gradient = (gradient_t *)source;
@@ -74,7 +74,7 @@ conical_gradient_get_scanline_32 (pixman_image_t *image, int x, int y,
 	while (buffer < end) {
 	    double angle;
 	    
-	    if (!mask || *mask++ & maskBits)
+	    if (!mask || *mask++ & mask_bits)
 	    {
 		pixman_fixed_48_16_t   t;
 		
@@ -93,7 +93,7 @@ conical_gradient_get_scanline_32 (pixman_image_t *image, int x, int y,
 	    double x, y;
 	    double angle;
 	    
-	    if (!mask || *mask++ & maskBits)
+	    if (!mask || *mask++ & mask_bits)
 	    {
 		pixman_fixed_48_16_t  t;
 		
diff --git a/pixman/pixman-edge-imp.h b/pixman/pixman-edge-imp.h
index 36fc5e3..a30f821 100644
--- a/pixman/pixman-edge-imp.h
+++ b/pixman/pixman-edge-imp.h
@@ -20,7 +20,7 @@
  * PERFORMANCE OF THIS SOFTWARE.
  */
 
-#ifndef rasterizeSpan
+#ifndef rasterize_span
 #endif
 
 static void
@@ -50,7 +50,7 @@ RASTERIZE_EDGES (pixman_image_t  *image,
 #if N_BITS == 1
 	/* For the non-antialiased case, round the coordinates up, in effect
 	 * sampling the center of the pixel. (The AA case does a similar 
-	 * adjustment in RenderSamplesX) */
+	 * adjustment in RENDER_SAMPLES_X) */
 	lx += X_FRAC_FIRST(1);
 	rx += X_FRAC_FIRST(1);
 #endif
@@ -94,7 +94,7 @@ RASTERIZE_EDGES (pixman_image_t  *image,
 		(((32 - (x)) & 0x1f) ?					\
 		 SCREEN_SHIFT_LEFT (0xffffffff, (32 - (x)) & 0x1f) : 0)
 		
-#define FbMaskBits(x,w,l,n,r) {						\
+#define MASK_BITS(x,w,l,n,r) {						\
 		    n = (w);						\
 		    r = RIGHT_MASK ((x) + n);				\
 		    l = LEFT_MASK (x);					\
@@ -119,7 +119,7 @@ RASTERIZE_EDGES (pixman_image_t  *image,
 		a += x >> 5;
 		x &= 0x1f;
 		
-		FbMaskBits (x, width, startmask, nmiddle, endmask);
+		MASK_BITS (x, width, startmask, nmiddle, endmask);
 
 		if (startmask) {
 		    WRITE(image, a, READ(image, a) | startmask);
@@ -137,8 +137,8 @@ RASTERIZE_EDGES (pixman_image_t  *image,
 		int     rxs;
 
 		/* Sample coverage for edge pixels */
-		lxs = RenderSamplesX (lx, N_BITS);
-		rxs = RenderSamplesX (rx, N_BITS);
+		lxs = RENDER_SAMPLES_X (lx, N_BITS);
+		rxs = RENDER_SAMPLES_X (rx, N_BITS);
 
 		/* Add coverage across row */
 		if (lxi == rxi)
@@ -183,4 +183,4 @@ RASTERIZE_EDGES (pixman_image_t  *image,
     }
 }
 
-#undef rasterizeSpan
+#undef rasterize_span
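
MASK_BITS (the renamed FbMaskBits) splits a run of `width` bits starting at bit offset x into a partial mask for the first 32-bit word, a count of full middle words, and a partial mask for the last word, so the 1-bpp rasterizer above can OR whole words at a time. A simplified stand-alone version of that decomposition, assuming LSB-first bit numbering; the real macro picks the shift direction via SCREEN_SHIFT_LEFT/RIGHT and folds the single-word case a little differently:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decompose the bit span [x, x + w) into edge masks and full words.
     * Assumes w > 0 and LSB-first bit numbering within each 32-bit word. */
    static void
    span_masks (int x, int w, uint32_t *startmask, int *nmiddle,
                uint32_t *endmask)
    {
        int first  = x & 31;                    /* offset into the first word */
        int last   = (x + w) & 31;              /* bits used in the last word */
        int nwords = ((x + w + 31) >> 5) - (x >> 5);

        *startmask = 0;
        *endmask   = 0;
        *nmiddle   = nwords;

        if (nwords == 1)                        /* span fits in a single word */
        {
            *startmask = ~0u << first;
            if (last)
                *startmask &= ~(~0u << last);
            *nmiddle = 0;
            return;
        }
        if (first)
        {
            *startmask = ~0u << first;          /* bits [first .. 31]   */
            (*nmiddle)--;
        }
        if (last)
        {
            *endmask = ~(~0u << last);          /* bits [0 .. last - 1] */
            (*nmiddle)--;
        }
    }

    int
    main (void)
    {
        uint32_t startmask, endmask;
        int nmiddle;

        span_masks (5, 72, &startmask, &nmiddle, &endmask);
        printf ("start %08" PRIx32 " middle %d end %08" PRIx32 "\n",
                startmask, nmiddle, endmask);
        /* prints ffffffe0, 1, 00001fff: 27 + 32 + 13 = 72 bits */
        return 0;
    }
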
diff --git a/pixman/pixman-edge.c b/pixman/pixman-edge.c
index f6a580e..82d6385 100644
--- a/pixman/pixman-edge.c
+++ b/pixman/pixman-edge.c
@@ -190,8 +190,8 @@ rasterize_edges_8 (pixman_image_t       *image,
 	    rxi = pixman_fixed_to_int (rx);
 
             /* Sample coverage for edge pixels */
-            lxs = RenderSamplesX (lx, 8);
-            rxs = RenderSamplesX (rx, 8);
+            lxs = RENDER_SAMPLES_X (lx, 8);
+            rxs = RENDER_SAMPLES_X (rx, 8);
 
             /* Add coverage across row */
 	    if (lxi == rxi)
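
rasterize_edges_8 turns a fixed-point span into per-pixel coverage: interior pixels receive the full number of samples, while the two edge pixels receive only the samples that fall inside the span (RENDER_SAMPLES_X maps a fixed-point x to a sample index). A stand-alone sketch of that accumulation with 8 samples per pixel, in plain integers rather than pixman fixed-point and without the saturating add the real code uses:

    #include <stdint.h>
    #include <stdio.h>

    #define N_SAMPLES 8   /* samples per pixel in the 8-bit AA case */

    /* Add coverage for the span [lx, rx), given in units of 1/N_SAMPLES of a
     * pixel, to a row of 8-bit coverage values. */
    static void
    add_span_coverage (uint8_t *row, int lx, int rx)
    {
        int lxi = lx / N_SAMPLES, lxs = lx % N_SAMPLES;
        int rxi = rx / N_SAMPLES, rxs = rx % N_SAMPLES;

        if (lxi == rxi)
        {
            row[lxi] += (uint8_t) (rxs - lxs);       /* span inside one pixel */
            return;
        }
        row[lxi] += (uint8_t) (N_SAMPLES - lxs);     /* right part, 1st pixel */
        for (int i = lxi + 1; i < rxi; i++)
            row[i] += N_SAMPLES;                     /* fully covered pixels  */
        if (rxs)
            row[rxi] += (uint8_t) rxs;               /* left part, last pixel */
    }

    int
    main (void)
    {
        uint8_t row[4] = { 0, 0, 0, 0 };

        add_span_coverage (row, 3, 21);
        printf ("%d %d %d %d\n", row[0], row[1], row[2], row[3]);
        /* prints 5 8 5 0: partial, full, partial, untouched */
        return 0;
    }
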
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index db8a51c..a255080 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -75,7 +75,7 @@ store_24 (uint8_t *a, uint32_t v)
 }
 
 static force_inline uint32_t
-fbOver (uint32_t src, uint32_t dest)
+over (uint32_t src, uint32_t dest)
 {
     uint32_t a = ~src >> 24; 
 
@@ -85,7 +85,7 @@ fbOver (uint32_t src, uint32_t dest)
 }
 
 static uint32_t
-fbIn (uint32_t x, uint8_t y)
+in (uint32_t x, uint8_t y)
 {
     uint16_t  a = y;
 
@@ -97,10 +97,10 @@ fbIn (uint32_t x, uint8_t y)
 /*
  * Naming convention:
  *
- *  opSRCxMASKxDST
+ *  op_src_mask_dest
  */
 static void
-fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
+fast_composite_over_x888_8_8888 (pixman_implementation_t *imp,
 			     pixman_op_t      op,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
@@ -114,26 +114,26 @@ fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 			     int32_t     width,
 			     int32_t     height)
 {
-    uint32_t	*src, *srcLine;
-    uint32_t    *dst, *dstLine;
-    uint8_t	*mask, *maskLine;
-    int		 srcStride, maskStride, dstStride;
+    uint32_t	*src, *src_line;
+    uint32_t    *dst, *dst_line;
+    uint8_t	*mask, *mask_line;
+    int		 src_stride, mask_stride, dst_stride;
     uint8_t m;
     uint32_t s, d;
     uint16_t w;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     while (height--)
     {
-	src = srcLine;
-	srcLine += srcStride;
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	src = src_line;
+	src_line += src_stride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 
 	w = width;
 	while (w--)
@@ -147,8 +147,8 @@ fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 		    *dst = s;
 		else
 		{
-		    d = fbIn (s, m);
-		    *dst = fbOver (d, *dst);
+		    d = in (s, m);
+		    *dst = over (d, *dst);
 		}
 	    }
 	    src++;
@@ -158,11 +158,11 @@ fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
+fast_composite_in_n_8_8 (pixman_implementation_t *imp,
 			      pixman_op_t      op,
-			      pixman_image_t    *iSrc,
-			      pixman_image_t    *iMask,
-			      pixman_image_t    *iDst,
+			      pixman_image_t    *src_image,
+			      pixman_image_t    *mask_image,
+			      pixman_image_t    *dest_image,
 			      int32_t      src_x,
 			      int32_t      src_y,
 			      int32_t      mask_x,
@@ -173,26 +173,26 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			      int32_t     height)
 {
     uint32_t	src, srca;
-    uint8_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask, m;
-    int	dstStride, maskStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask, m;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint16_t    t;
 
-    src = _pixman_image_get_solid(iSrc, iDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dest_image->bits.format);
 
     srca = src >> 24;
 
-    PIXMAN_IMAGE_GET_LINE (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (iMask, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     if (srca == 0xff) {
 	while (height--)
 	{
-	    dst = dstLine;
-	    dstLine += dstStride;
-	    mask = maskLine;
-	    maskLine += maskStride;
+	    dst = dst_line;
+	    dst_line += dst_stride;
+	    mask = mask_line;
+	    mask_line += mask_stride;
 	    w = width;
 
 	    while (w--)
@@ -214,10 +214,10 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     {
 	while (height--)
 	{
-	    dst = dstLine;
-	    dstLine += dstStride;
-	    mask = maskLine;
-	    maskLine += maskStride;
+	    dst = dst_line;
+	    dst_line += dst_stride;
+	    mask = mask_line;
+	    mask_line += mask_stride;
 	    w = width;
 
 	    while (w--)
@@ -240,11 +240,11 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 
 
 static void
-fast_CompositeIn_8_8 (pixman_implementation_t *imp,
+fast_composite_in_8_8 (pixman_implementation_t *imp,
 		      pixman_op_t      op,
-		      pixman_image_t  *iSrc,
-		      pixman_image_t  *iMask,
-		      pixman_image_t  *iDst,
+		      pixman_image_t  *src_image,
+		      pixman_image_t  *mask_image,
+		      pixman_image_t  *dest_image,
 		      int32_t          src_x,
 		      int32_t          src_y,
 		      int32_t          mask_x,
@@ -254,22 +254,22 @@ fast_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      int32_t         width,
 		      int32_t         height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint8_t	s;
     uint16_t	t;
 
-    PIXMAN_IMAGE_GET_LINE (iSrc, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -289,7 +289,7 @@ fast_CompositeIn_8_8 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
+fast_composite_over_n_8_8888 (pixman_implementation_t *imp,
 			       pixman_op_t      op,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
@@ -304,9 +304,9 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 			       int32_t     height)
 {
     uint32_t	 src, srca;
-    uint32_t	*dstLine, *dst, d;
-    uint8_t	*maskLine, *mask, m;
-    int		 dstStride, maskStride;
+    uint32_t	*dst_line, *dst, d;
+    uint8_t	*mask_line, *mask, m;
+    int		 dst_stride, mask_stride;
     uint16_t	 w;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
@@ -315,15 +315,15 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	while (w--)
@@ -334,12 +334,12 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 		if (srca == 0xff)
 		    *dst = src;
 		else
-		    *dst = fbOver (src, *dst);
+		    *dst = over (src, *dst);
 	    }
 	    else if (m)
 	    {
-		d = fbIn (src, m);
-		*dst = fbOver (d, *dst);
+		d = in (src, m);
+		*dst = over (d, *dst);
 	    }
 	    dst++;
 	}
@@ -347,7 +347,7 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
+fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				   pixman_image_t * src_image,
 				   pixman_image_t * mask_image,
@@ -362,9 +362,9 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				   int32_t     height)
 {
     uint32_t	src, srca;
-    uint32_t	*dstLine, *dst, d;
-    uint32_t	*maskLine, *mask, ma;
-    int	dstStride, maskStride;
+    uint32_t	*dst_line, *dst, d;
+    uint32_t	*mask_line, *mask, ma;
+    int	dst_stride, mask_stride;
     uint16_t	w;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
@@ -373,15 +373,15 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	while (w--)
@@ -392,7 +392,7 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 		if (srca == 0xff)
 		    *dst = src;
 		else
-		    *dst = fbOver (src, *dst);
+		    *dst = over (src, *dst);
 	    }
 	    else if (ma)
 	    {
@@ -412,7 +412,7 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
+fast_composite_over_n_8_0888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
@@ -427,10 +427,10 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 			       int32_t     height)
 {
     uint32_t	src, srca;
-    uint8_t	*dstLine, *dst;
+    uint8_t	*dst_line, *dst;
     uint32_t	d;
-    uint8_t	*maskLine, *mask, m;
-    int	dstStride, maskStride;
+    uint8_t	*mask_line, *mask, m;
+    int	dst_stride, mask_stride;
     uint16_t	w;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
@@ -439,15 +439,15 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	while (w--)
@@ -460,13 +460,13 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 		else
 		{
 		    d = fetch_24(dst);
-		    d = fbOver (src, d);
+		    d = over (src, d);
 		}
 		store_24(dst, d);
 	    }
 	    else if (m)
 	    {
-		d = fbOver (fbIn(src,m), fetch_24(dst));
+		d = over (in(src,m), fetch_24(dst));
 		store_24(dst, d);
 	    }
 	    dst += 3;
@@ -475,7 +475,7 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
+fast_composite_over_n_8_0565 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -490,10 +490,10 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  int32_t     height)
 {
     uint32_t	src, srca;
-    uint16_t	*dstLine, *dst;
+    uint16_t	*dst_line, *dst;
     uint32_t	d;
-    uint8_t	*maskLine, *mask, m;
-    int	dstStride, maskStride;
+    uint8_t	*mask_line, *mask, m;
+    int	dst_stride, mask_stride;
     uint16_t	w;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
@@ -502,15 +502,15 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	while (w--)
@@ -523,14 +523,14 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 		else
 		{
 		    d = *dst;
-		    d = fbOver (src, CONVERT_0565_TO_0888(d));
+		    d = over (src, CONVERT_0565_TO_0888(d));
 		}
 		*dst = CONVERT_8888_TO_0565(d);
 	    }
 	    else if (m)
 	    {
 		d = *dst;
-		d = fbOver (fbIn(src,m), CONVERT_0565_TO_0888(d));
+		d = over (in(src,m), CONVERT_0565_TO_0888(d));
 		*dst = CONVERT_8888_TO_0565(d);
 	    }
 	    dst++;
@@ -539,7 +539,7 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
+fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				   pixman_image_t * src_image,
 				   pixman_image_t * mask_image,
@@ -555,10 +555,10 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 {
     uint32_t	src, srca;
     uint16_t	src16;
-    uint16_t	*dstLine, *dst;
+    uint16_t	*dst_line, *dst;
     uint32_t	d;
-    uint32_t	*maskLine, *mask, ma;
-    int	dstStride, maskStride;
+    uint32_t	*mask_line, *mask, ma;
+    int	dst_stride, mask_stride;
     uint16_t	w;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
@@ -569,15 +569,15 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 
     src16 = CONVERT_8888_TO_0565(src);
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	while (w--)
@@ -592,7 +592,7 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 		else
 		{
 		    d = *dst;
-		    d = fbOver (src, CONVERT_0565_TO_0888(d));
+		    d = over (src, CONVERT_0565_TO_0888(d));
 		    *dst = CONVERT_8888_TO_0565(d);
 		}
 	    }
@@ -628,21 +628,21 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
 			 int32_t     width,
 			 int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src, s;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src, s;
+    int	dst_stride, src_stride;
     uint8_t	a;
     uint16_t	w;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -652,14 +652,14 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
 	    if (a == 0xff)
 		*dst = s;
 	    else if (s)
-		*dst = fbOver (s, *dst);
+		*dst = over (s, *dst);
 	    dst++;
 	}
     }
 }
 
 static void
-fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
+fast_composite_src_8888_0888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			 pixman_image_t * src_image,
 			 pixman_image_t * mask_image,
@@ -673,22 +673,22 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 			 int32_t     width,
 			 int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
+    uint8_t	*dst_line, *dst;
     uint32_t	d;
-    uint32_t	*srcLine, *src, s;
+    uint32_t	*src_line, *src, s;
     uint8_t	a;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -700,7 +700,7 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 		if (a == 0xff)
 		    d = s;
 		else
-		    d = fbOver (s, fetch_24(dst));
+		    d = over (s, fetch_24(dst));
 
 		store_24(dst, d);
 	    }
@@ -724,22 +724,22 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 			 int32_t     width,
 			 int32_t     height)
 {
-    uint16_t	*dstLine, *dst;
+    uint16_t	*dst_line, *dst;
     uint32_t	d;
-    uint32_t	*srcLine, *src, s;
+    uint32_t	*src_line, *src, s;
     uint8_t	a;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -753,7 +753,7 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 		else
 		{
 		    d = *dst;
-		    d = fbOver (s, CONVERT_0565_TO_0888(d));
+		    d = over (s, CONVERT_0565_TO_0888(d));
 		}
 		*dst = CONVERT_8888_TO_0565(d);
 	    }
@@ -763,7 +763,7 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
+fast_composite_src_x888_0565 (pixman_implementation_t *imp,
 			  pixman_op_t op,
                           pixman_image_t * src_image,
                           pixman_image_t * mask_image,
@@ -777,20 +777,20 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
                           int32_t     width,
                           int32_t     height)
 {
-    uint16_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src, s;
-    int	dstStride, srcStride;
+    uint16_t	*dst_line, *dst;
+    uint32_t	*src_line, *src, s;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -803,7 +803,7 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
+fast_composite_add_8000_8000 (pixman_implementation_t *imp,
 			     pixman_op_t	op,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
@@ -817,22 +817,22 @@ fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 			     int32_t     width,
 			     int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint8_t	s, d;
     uint16_t	t;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -854,7 +854,7 @@ fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
+fast_composite_add_8888_8888 (pixman_implementation_t *imp,
 			     pixman_op_t	op,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
@@ -868,21 +868,21 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 			     int32_t     width,
 			     int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint32_t	s, d;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -904,7 +904,7 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
+fast_composite_add_8888_8_8 (pixman_implementation_t *imp,
 			    pixman_op_t op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
@@ -918,24 +918,24 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			    int32_t     width,
 			    int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t	src;
     uint8_t	sa;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = (src >> 24);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	while (w--)
@@ -961,7 +961,7 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
  */
 
 static void
-fast_CompositeSolidFill (pixman_implementation_t *imp,
+fast_composite_solid_fill (pixman_implementation_t *imp,
 		      pixman_op_t op,
 		      pixman_image_t * src_image,
 		      pixman_image_t * mask_image,
@@ -993,7 +993,7 @@ fast_CompositeSolidFill (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
+fast_composite_src_8888_x888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			  pixman_image_t * src_image,
 			  pixman_image_t * mask_image,
@@ -1009,72 +1009,72 @@ fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
 {
     uint32_t	*dst;
     uint32_t    *src;
-    int		 dstStride, srcStride;
+    int		 dst_stride, src_stride;
     uint32_t	 n_bytes = width * sizeof (uint32_t);
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, src, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst, 1);
 
     while (height--)
     {
 	memcpy (dst, src, n_bytes);
 
-	dst += dstStride;
-	src += srcStride;
+	dst += dst_stride;
+	src += src_stride;
     }
 }
 
 static const pixman_fast_path_t c_fast_paths[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   fast_CompositeOver_n_8_0565, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   fast_CompositeOver_n_8_0565, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r8g8b8,   fast_CompositeOver_n_8_0888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b8g8r8,   fast_CompositeOver_n_8_0888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fast_CompositeOver_n_8_8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fast_CompositeOver_n_8_8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fast_CompositeOver_n_8_8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fast_CompositeOver_n_8_8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fast_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fast_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, fast_CompositeOver_x888_8_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, fast_CompositeOver_x888_8_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fast_CompositeOver_x888_8_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, fast_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   fast_composite_over_n_8_0565, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   fast_composite_over_n_8_0565, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r8g8b8,   fast_composite_over_n_8_0888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b8g8r8,   fast_composite_over_n_8_0888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fast_composite_over_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fast_composite_over_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fast_composite_over_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fast_composite_over_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fast_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fast_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fast_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, fast_composite_over_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, fast_composite_over_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fast_composite_over_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, fast_composite_over_x888_8_8888,       0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fast_composite_over_8888_8888,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, fast_composite_over_8888_8888,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 fast_composite_over_8888_0565,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, fast_composite_over_8888_8888,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, fast_composite_over_8888_8888,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fast_composite_over_8888_0565,	   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, fast_CompositeAdd_8888_8888,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, fast_CompositeAdd_8888_8888,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fast_CompositeAdd_8000_8000,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       fast_CompositeAdd_8888_8_8,    0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8r8g8b8, fast_CompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8r8g8b8, fast_CompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8b8g8r8, fast_CompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8b8g8r8, fast_CompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8,       fast_CompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_r5g6b5,   fast_CompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fast_CompositeSrc_8888_x888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fast_CompositeSrc_8888_x888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fast_CompositeSrc_8888_x888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fast_CompositeSrc_8888_x888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fast_CompositeSrc_x888_0565, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fast_CompositeSrc_x888_0565, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fast_CompositeSrc_x888_0565, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fast_CompositeSrc_x888_0565, 0 },
-    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fast_CompositeIn_8_8,   0 },
-    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 fast_CompositeIn_n_8_8, 0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, fast_composite_add_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, fast_composite_add_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fast_composite_add_8000_8000,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       fast_composite_add_8888_8_8,    0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8r8g8b8, fast_composite_solid_fill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8r8g8b8, fast_composite_solid_fill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8b8g8r8, fast_composite_solid_fill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8b8g8r8, fast_composite_solid_fill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8,       fast_composite_solid_fill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_r5g6b5,   fast_composite_solid_fill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fast_composite_src_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fast_composite_src_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fast_composite_src_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fast_composite_src_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fast_composite_src_x888_0565, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fast_composite_src_x888_0565, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fast_composite_src_x888_0565, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fast_composite_src_x888_0565, 0 },
+    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fast_composite_in_8_8,   0 },
+    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 fast_composite_in_n_8_8, 0 },
     { PIXMAN_OP_NONE },
 };
 
 static void
-fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
+fast_composite_src_scale_nearest (pixman_implementation_t *imp,
 			    pixman_op_t     op,
 			    pixman_image_t *src_image,
 			    pixman_image_t *mask_image,
@@ -1090,14 +1090,14 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
 {
     uint32_t       *dst;
     uint32_t       *src;
-    int             dstStride, srcStride;
+    int             dst_stride, src_stride;
     int             i, j;
     pixman_vector_t v;
     
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst, 1);
     /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
      * transformed from destination space to source space */
-    PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, srcStride, src, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src, 1);
     
     /* reference point is the center of the pixel */
     v.vector[0] = pixman_int_to_fixed(src_x) + pixman_fixed_1 / 2;
@@ -1153,7 +1153,7 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
 	    
             if (inside_bounds) {
                 //XXX: we should move this multiplication out of the loop
-                result = *(src + y * srcStride + x);
+                result = *(src + y * src_stride + x);
             } else {
                 result = 0;
             }
@@ -1166,7 +1166,7 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
         /* adjust the y location by a unit vector in the y direction
          * this is equivalent to transforming y+1 of the destination point to source space */
         v.vector[1] += src_image->common.transform->matrix[1][1];
-        dst += dstStride;
+        dst += dst_stride;
     }
 }
 
@@ -1209,7 +1209,7 @@ fast_path_composite (pixman_implementation_t *imp,
 					   mask_x, mask_y,
 					   dest_x, dest_y,
 					   width, height,
-					   fast_CompositeSrcScaleNearest);
+					   fast_composite_src_scale_nearest);
 	    return;
 	}
     }
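
Several of the r5g6b5 fast paths above follow the same recipe: widen the 16-bit destination pixel to 8-bit channels with CONVERT_0565_TO_0888, composite with over()/in(), then pack the result back with CONVERT_8888_TO_0565. A small stand-alone sketch of that round trip; the widening step here uses bit replication so that 0x1f and 0x3f expand exactly to 0xff, which may differ in detail from pixman's macro:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Widen r5g6b5 to x8r8g8b8 by replicating the top bits into the low bits. */
    static uint32_t
    expand_0565 (uint16_t p)
    {
        uint32_t r = (p >> 11) & 0x1f;
        uint32_t g = (p >> 5) & 0x3f;
        uint32_t b = p & 0x1f;

        r = (r << 3) | (r >> 2);
        g = (g << 2) | (g >> 4);
        b = (b << 3) | (b >> 2);

        return (r << 16) | (g << 8) | b;
    }

    /* Pack x8r8g8b8 back to r5g6b5 by keeping the top bits of each channel. */
    static uint16_t
    pack_0565 (uint32_t p)
    {
        return (uint16_t) (((p >> 8) & 0xf800) |
                           ((p >> 5) & 0x07e0) |
                           ((p >> 3) & 0x001f));
    }

    int
    main (void)
    {
        uint16_t green = 0x07e0;                   /* pure green in 0565 */
        uint32_t wide  = expand_0565 (green);      /* 0x0000ff00         */

        printf ("%08" PRIx32 " -> %04x\n", wide, pack_0565 (wide));
        return 0;
    }

fast_composite_src_x888_0565 above is essentially the pack step applied to every pixel, which is why it needs no compositing math at all.
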
diff --git a/pixman/pixman-general.c b/pixman/pixman-general.c
index 3da6afb..d9bdad7 100644
--- a/pixman/pixman-general.c
+++ b/pixman/pixman-general.c
@@ -55,20 +55,20 @@ general_composite_rect  (pixman_implementation_t *imp,
 			 int32_t                  height)
 {
     uint8_t stack_scanline_buffer[SCANLINE_BUFFER_LENGTH * 3];
-    const pixman_format_code_t srcFormat = src->type == BITS ? src->bits.format : 0;
-    const pixman_format_code_t maskFormat = mask && mask->type == BITS ? mask->bits.format : 0;
-    const pixman_format_code_t destFormat = dest->type == BITS ? dest->bits.format : 0;
-    const int srcWide = PIXMAN_FORMAT_IS_WIDE(srcFormat);
-    const int maskWide = mask && PIXMAN_FORMAT_IS_WIDE(maskFormat);
-    const int destWide = PIXMAN_FORMAT_IS_WIDE(destFormat);
-    const int wide = srcWide || maskWide || destWide;
+    const pixman_format_code_t src_format = src->type == BITS ? src->bits.format : 0;
+    const pixman_format_code_t mask_format = mask && mask->type == BITS ? mask->bits.format : 0;
+    const pixman_format_code_t dest_format = dest->type == BITS ? dest->bits.format : 0;
+    const int src_wide = PIXMAN_FORMAT_IS_WIDE(src_format);
+    const int mask_wide = mask && PIXMAN_FORMAT_IS_WIDE(mask_format);
+    const int dest_wide = PIXMAN_FORMAT_IS_WIDE(dest_format);
+    const int wide = src_wide || mask_wide || dest_wide;
     const int Bpp = wide ? 8 : 4;
     uint8_t *scanline_buffer = stack_scanline_buffer;
     uint8_t *src_buffer, *mask_buffer, *dest_buffer;
-    fetch_scanline_t fetchSrc = NULL, fetchMask = NULL, fetchDest = NULL;
+    fetch_scanline_t fetch_src = NULL, fetch_mask = NULL, fetch_dest = NULL;
     pixman_combine_32_func_t compose;
     store_scanline_t store;
-    source_pict_class_t srcClass, maskClass;
+    source_pict_class_t src_class, mask_class;
     pixman_bool_t component_alpha;
     uint32_t *bits;
     int32_t stride;
@@ -86,38 +86,38 @@ general_composite_rect  (pixman_implementation_t *imp,
     mask_buffer = src_buffer + width * Bpp;
     dest_buffer = mask_buffer + width * Bpp;
     
-    srcClass = _pixman_image_classify (src,
+    src_class = _pixman_image_classify (src,
 				       src_x, src_y,
 				       width, height);
     
-    maskClass = SOURCE_IMAGE_CLASS_UNKNOWN;
+    mask_class = SOURCE_IMAGE_CLASS_UNKNOWN;
     if (mask)
     {
-	maskClass = _pixman_image_classify (mask,
+	mask_class = _pixman_image_classify (mask,
 					    src_x, src_y,
 					    width, height);
     }
     
     if (op == PIXMAN_OP_CLEAR)
-        fetchSrc = NULL;
+        fetch_src = NULL;
     else if (wide)
-	fetchSrc = _pixman_image_get_scanline_64;
+	fetch_src = _pixman_image_get_scanline_64;
     else
-	fetchSrc = _pixman_image_get_scanline_32;
+	fetch_src = _pixman_image_get_scanline_32;
     
     if (!mask || op == PIXMAN_OP_CLEAR)
-	fetchMask = NULL;
+	fetch_mask = NULL;
     else if (wide)
-	fetchMask = _pixman_image_get_scanline_64;
+	fetch_mask = _pixman_image_get_scanline_64;
     else
-	fetchMask = _pixman_image_get_scanline_32;
+	fetch_mask = _pixman_image_get_scanline_32;
     
     if (op == PIXMAN_OP_CLEAR || op == PIXMAN_OP_SRC)
-	fetchDest = NULL;
+	fetch_dest = NULL;
     else if (wide)
-	fetchDest = _pixman_image_get_scanline_64;
+	fetch_dest = _pixman_image_get_scanline_64;
     else
-	fetchDest = _pixman_image_get_scanline_32;
+	fetch_dest = _pixman_image_get_scanline_32;
 
     if (wide)
 	store = _pixman_image_store_scanline_64;
@@ -150,8 +150,8 @@ general_composite_rect  (pixman_implementation_t *imp,
     }
     
     component_alpha =
-	fetchSrc			&&
-	fetchMask			&&
+	fetch_src			&&
+	fetch_mask			&&
 	mask				&&
 	mask->common.type == BITS	&&
 	mask->common.component_alpha	&&
@@ -175,49 +175,49 @@ general_composite_rect  (pixman_implementation_t *imp,
     if (!compose)
 	return;
     
-    if (!fetchMask)
+    if (!fetch_mask)
 	mask_buffer = NULL;
     
     for (i = 0; i < height; ++i)
     {
 	/* fill first half of scanline with source */
-	if (fetchSrc)
+	if (fetch_src)
 	{
-	    if (fetchMask)
+	    if (fetch_mask)
 	    {
 		/* fetch mask before source so that fetching of
 		   source can be optimized */
-		fetchMask (mask, mask_x, mask_y + i,
+		fetch_mask (mask, mask_x, mask_y + i,
 			   width, (void *)mask_buffer, 0, 0);
 		
-		if (maskClass == SOURCE_IMAGE_CLASS_HORIZONTAL)
-		    fetchMask = NULL;
+		if (mask_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
+		    fetch_mask = NULL;
 	    }
 	    
-	    if (srcClass == SOURCE_IMAGE_CLASS_HORIZONTAL)
+	    if (src_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
 	    {
-		fetchSrc (src, src_x, src_y + i,
+		fetch_src (src, src_x, src_y + i,
 			  width, (void *)src_buffer, 0, 0);
-		fetchSrc = NULL;
+		fetch_src = NULL;
 	    }
 	    else
 	    {
-		fetchSrc (src, src_x, src_y + i,
+		fetch_src (src, src_x, src_y + i,
 			  width, (void *)src_buffer, (void *)mask_buffer,
 			  0xffffffff);
 	    }
 	}
-	else if (fetchMask)
+	else if (fetch_mask)
 	{
-	    fetchMask (mask, mask_x, mask_y + i,
+	    fetch_mask (mask, mask_x, mask_y + i,
 		       width, (void *)mask_buffer, 0, 0);
 	}
 	
 	if (store)
 	{
 	    /* fill dest into second half of scanline */
-	    if (fetchDest)
-		fetchDest (dest, dest_x, dest_y + i,
+	    if (fetch_dest)
+		fetch_dest (dest, dest_x, dest_y + i,
 			   width, (void *)dest_buffer, 0, 0);
 	    
 	    /* blend */
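
general_composite_rect above is the generic fallback: for each scanline it fetches the source (and, when present, the mask and destination) into temporary buffers, hands them to the per-operator combiner, and stores the result back into the destination image. A compressed, runnable sketch of that shape with toy in-memory images and a plain SRC combiner; the names and callback signatures are simplified stand-ins, not the real fetch_scanline_t / store_scanline_t types:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define W 4
    #define H 2

    /* Toy "images": H x W arrays of pixels, standing in for pixman_image_t. */
    typedef struct { uint32_t pixels[H][W]; } demo_image_t;

    typedef void (*fetch_fn) (demo_image_t *im, int x, int y, int w,
                              uint32_t *buf);
    typedef void (*combine_fn) (uint32_t *dest, const uint32_t *src, int w);

    static void
    fetch_row (demo_image_t *im, int x, int y, int w, uint32_t *buf)
    {
        for (int i = 0; i < w; i++)
            buf[i] = im->pixels[y][x + i];
    }

    static void
    store_row (demo_image_t *im, int x, int y, int w, const uint32_t *buf)
    {
        for (int i = 0; i < w; i++)
            im->pixels[y][x + i] = buf[i];
    }

    /* A trivial SRC combiner; pixman would pick one from the tables above. */
    static void
    combine_src (uint32_t *dest, const uint32_t *src, int w)
    {
        for (int i = 0; i < w; i++)
            dest[i] = src[i];
    }

    int
    main (void)
    {
        demo_image_t src  = { { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } } };
        demo_image_t dest = { { { 0 } } };
        uint32_t src_buf[W], dest_buf[W];
        fetch_fn   fetch_src  = fetch_row;
        fetch_fn   fetch_dest = fetch_row;   /* NULL for SRC/CLEAR in pixman */
        combine_fn combine    = combine_src;

        for (int y = 0; y < H; y++)
        {
            /* fill first half of the scratch space with source ...  */
            fetch_src (&src, 0, y, W, src_buf);
            /* ... fetch dest only when the operator reads it ...    */
            if (fetch_dest)
                fetch_dest (&dest, 0, y, W, dest_buf);
            /* ... blend, then write the scanline back.              */
            combine (dest_buf, src_buf, W);
            store_row (&dest, 0, y, W, dest_buf);
        }
        printf ("%" PRIu32 " %" PRIu32 "\n",
                dest.pixels[0][0], dest.pixels[1][3]);    /* prints "1 8" */
        return 0;
    }

The real loop also uses the SOURCE_IMAGE_CLASS_HORIZONTAL classification seen above to fetch a horizontally constant source or mask only once instead of once per row.
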
diff --git a/pixman/pixman-gradient-walker.c b/pixman/pixman-gradient-walker.c
index bb554dc..79010a2 100644
--- a/pixman/pixman-gradient-walker.c
+++ b/pixman/pixman-gradient-walker.c
@@ -146,7 +146,7 @@ _pixman_gradient_walker_reset (pixman_gradient_walker_t       *walker,
 	right_x += (pos - x);
 	break;
 	
-    default:  /* RepeatNone */
+    default:  /* REPEAT_NONE */
 	for (n = 0; n < count; n++)
 	    if (pos < stops[n].x)
 		break;
diff --git a/pixman/pixman-image.c b/pixman/pixman-image.c
index 5c0a15f..f95bf3f 100644
--- a/pixman/pixman-image.c
+++ b/pixman/pixman-image.c
@@ -64,7 +64,7 @@ _pixman_init_gradient (gradient_t     *gradient,
 void
 _pixman_image_get_scanline_generic_64 (pixman_image_t * pict, int x, int y,
 				       int width, uint32_t *buffer,
-				       const uint32_t *mask, uint32_t maskBits)
+				       const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t *mask8 = NULL;
 
@@ -80,7 +80,7 @@ _pixman_image_get_scanline_generic_64 (pixman_image_t * pict, int x, int y,
 
     // Fetch the source image into the first half of buffer.
     _pixman_image_get_scanline_32 (pict, x, y, width, (uint32_t*)buffer, mask8,
-				   maskBits);
+				   mask_bits);
 
     // Expand from 32bpp to 64bpp in place.
     pixman_expand ((uint64_t *)buffer, buffer, PIXMAN_a8r8g8b8, width);
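
_pixman_image_get_scanline_generic_64 above fetches at 32 bpp and then widens each a8r8g8b8 pixel to 16 bits per channel; the widening is bit replication, so 0xff becomes 0xffff, and it can be done in place by walking the scanline from the end. A minimal sketch of the per-pixel step; the helper name is hypothetical, pixman does this inside pixman_expand:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Widen one a8r8g8b8 pixel to a16r16g16b16: each 8-bit channel c becomes
     * (c << 8) | c, i.e. c * 0x0101, so 0x00 -> 0x0000 and 0xff -> 0xffff. */
    static uint64_t
    expand_8888_to_16161616 (uint32_t p)
    {
        uint64_t a = (p >> 24) & 0xff;
        uint64_t r = (p >> 16) & 0xff;
        uint64_t g = (p >> 8) & 0xff;
        uint64_t b = p & 0xff;

        return (a * 0x0101ULL << 48) | (r * 0x0101ULL << 32) |
               (g * 0x0101ULL << 16) | (b * 0x0101ULL);
    }

    int
    main (void)
    {
        printf ("%016" PRIx64 "\n", expand_8888_to_16161616 (0xff8040c0));
        /* prints ffff80804040c0c0 */
        return 0;
    }
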
diff --git a/pixman/pixman-linear-gradient.c b/pixman/pixman-linear-gradient.c
index f5c9a5a..54330e7 100644
--- a/pixman/pixman-linear-gradient.c
+++ b/pixman/pixman-linear-gradient.c
@@ -89,7 +89,7 @@ linear_gradient_classify (pixman_image_t *image,
 
 static void
 linear_gradient_get_scanline_32 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
-				 const uint32_t *mask, uint32_t maskBits)
+				 const uint32_t *mask, uint32_t mask_bits)
 {
     pixman_vector_t v, unit;
     pixman_fixed_32_32_t l;
@@ -156,7 +156,7 @@ linear_gradient_get_scanline_32 (pixman_image_t *image, int x, int y, int width,
 		}
 	    } else {
 		while (buffer < end) {
-		    if (*mask++ & maskBits)
+		    if (*mask++ & mask_bits)
 		    {
 			*(buffer) = _pixman_gradient_walker_pixel (&walker, t);
 		    }
@@ -195,7 +195,7 @@ linear_gradient_get_scanline_32 (pixman_image_t *image, int x, int y, int width,
 	{
 	    while (buffer < end)
 	    {
-		if (!mask || *mask++ & maskBits)
+		if (!mask || *mask++ & mask_bits)
 		{
 		    if (v.vector[2] == 0) {
 			t = 0;
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index 60259b4..6a62b44 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -39,10 +39,10 @@
 #include "pixman-private.h"
 #include "pixman-combine32.h"
 
-#define noVERBOSE
+#define no_vERBOSE
 
 #ifdef VERBOSE
-#define CHECKPOINT() ErrorF ("at %s %d\n", __FUNCTION__, __LINE__)
+#define CHECKPOINT() error_f ("at %s %d\n", __FUNCTION__, __LINE__)
 #else
 #define CHECKPOINT()
 #endif
@@ -98,7 +98,7 @@ typedef struct
     mmxdatafield mmx_ffff0000ffff0000;
     mmxdatafield mmx_0000ffff00000000;
     mmxdatafield mmx_000000000000ffff;
-} MMXData;
+} mmx_data_t;
 
 #if defined(_MSC_VER)
 # define MMXDATA_INIT(field, val) { val##UI64 }
@@ -108,7 +108,7 @@ typedef struct
 # define MMXDATA_INIT(field, val) field =   val##ULL
 #endif
 
-static const MMXData c =
+static const mmx_data_t c =
 {
     MMXDATA_INIT(.mmx_4x00ff,			0x00ff00ff00ff00ff),
     MMXDATA_INIT(.mmx_4x0080,			0x0080008000800080),
@@ -363,7 +363,7 @@ expandx888 (__m64 in, int pos)
 }
 
 static force_inline __m64
-pack565 (__m64 pixel, __m64 target, int pos)
+pack_565 (__m64 pixel, __m64 target, int pos)
 {
     __m64 p = pixel;
     __m64 t = target;
@@ -437,7 +437,7 @@ combine (const uint32_t *src, const uint32_t *mask)
 }
 
 static void
-mmxCombineOverU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_over_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -462,7 +462,7 @@ mmxCombineOverU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -482,7 +482,7 @@ mmxCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineInU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
 	       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -503,7 +503,7 @@ mmxCombineInU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -524,7 +524,7 @@ mmxCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -546,7 +546,7 @@ mmxCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -568,7 +568,7 @@ mmxCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -591,7 +591,7 @@ mmxCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end;
@@ -616,7 +616,7 @@ mmxCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -640,7 +640,7 @@ mmxCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineAddU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -659,7 +659,7 @@ mmxCombineAddU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
 		     uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = dest + width;
@@ -688,7 +688,7 @@ mmxCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
 
 
 static void
-mmxCombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -705,7 +705,7 @@ mmxCombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -725,7 +725,7 @@ mmxCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -746,7 +746,7 @@ mmxCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 
 
 static void
-mmxCombineInC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 	       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -766,7 +766,7 @@ mmxCombineInC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -786,7 +786,7 @@ mmxCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -807,7 +807,7 @@ mmxCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -828,7 +828,7 @@ mmxCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -851,7 +851,7 @@ mmxCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -874,7 +874,7 @@ mmxCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -898,7 +898,7 @@ mmxCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-mmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
+mmx_combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     const uint32_t *end = src + width;
@@ -919,7 +919,7 @@ mmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 /* ------------------ MMX code paths called from fbpict.c ----------------------- */
 
 static void
-mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
+mmx_composite_over_n_8888 (pixman_implementation_t *imp,
 			    pixman_op_t op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
@@ -934,9 +934,9 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			    int32_t	height)
 {
     uint32_t	src;
-    uint32_t	*dstLine, *dst;
+    uint32_t	*dst_line, *dst;
     uint16_t	w;
-    int	dstStride;
+    int	dst_stride;
     __m64	vsrc, vsrca;
 
     CHECKPOINT();
@@ -946,15 +946,15 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
     if (src >> 24 == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
+	dst = dst_line;
+	dst_line += dst_stride;
 	w = width;
 
 	CHECKPOINT();
@@ -998,7 +998,7 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
+mmx_composite_over_n_0565 (pixman_implementation_t *imp,
 			    pixman_op_t op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
@@ -1013,9 +1013,9 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			    int32_t	height)
 {
     uint32_t	src;
-    uint16_t	*dstLine, *dst;
+    uint16_t	*dst_line, *dst;
     uint16_t	w;
-    int	dstStride;
+    int	dst_stride;
     __m64	vsrc, vsrca;
 
     CHECKPOINT();
@@ -1025,15 +1025,15 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
     if (src >> 24 == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
+	dst = dst_line;
+	dst_line += dst_stride;
 	w = width;
 
 	CHECKPOINT();
@@ -1042,7 +1042,7 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 	{
 	    uint64_t d = *dst;
 	    __m64 vdest = expand565 (M64(d), 0);
-	    vdest = pack565(over(vsrc, vsrca, vdest), vdest, 0);
+	    vdest = pack_565(over(vsrc, vsrca, vdest), vdest, 0);
 	    *dst = UINT64(vdest);
 
 	    w--;
@@ -1055,10 +1055,10 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 
 	    vdest = *(__m64 *)dst;
 
-	    vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 0)), vdest, 0);
-	    vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 1)), vdest, 1);
-	    vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 2)), vdest, 2);
-	    vdest = pack565 (over(vsrc, vsrca, expand565(vdest, 3)), vdest, 3);
+	    vdest = pack_565 (over(vsrc, vsrca, expand565(vdest, 0)), vdest, 0);
+	    vdest = pack_565 (over(vsrc, vsrca, expand565(vdest, 1)), vdest, 1);
+	    vdest = pack_565 (over(vsrc, vsrca, expand565(vdest, 2)), vdest, 2);
+	    vdest = pack_565 (over(vsrc, vsrca, expand565(vdest, 3)), vdest, 3);
 
 	    *(__m64 *)dst = vdest;
 
@@ -1072,7 +1072,7 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 	{
 	    uint64_t d = *dst;
 	    __m64 vdest = expand565 (M64(d), 0);
-	    vdest = pack565(over(vsrc, vsrca, vdest), vdest, 0);
+	    vdest = pack_565(over(vsrc, vsrca, vdest), vdest, 0);
 	    *dst = UINT64(vdest);
 
 	    w--;
@@ -1084,7 +1084,7 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
+mmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 				      pixman_op_t op,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
@@ -1099,9 +1099,9 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				      int32_t	height)
 {
     uint32_t	src, srca;
-    uint32_t	*dstLine;
-    uint32_t	*maskLine;
-    int	dstStride, maskStride;
+    uint32_t	*dst_line;
+    uint32_t	*mask_line;
+    int	dst_stride, mask_stride;
     __m64	vsrc, vsrca;
 
     CHECKPOINT();
@@ -1112,8 +1112,8 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
     vsrc = load8888(src);
     vsrca = expand_alpha(vsrc);
@@ -1121,8 +1121,8 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     while (height--)
     {
 	int twidth = width;
-	uint32_t *p = (uint32_t *)maskLine;
-	uint32_t *q = (uint32_t *)dstLine;
+	uint32_t *p = (uint32_t *)mask_line;
+	uint32_t *q = (uint32_t *)dst_line;
 
 	while (twidth && (unsigned long)q & 7)
 	{
@@ -1180,8 +1180,8 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 	    q++;
 	}
 
-	dstLine += dstStride;
-	maskLine += maskStride;
+	dst_line += dst_stride;
+	mask_line += mask_stride;
     }
 
     _mm_empty();
@@ -1202,18 +1202,18 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 			       int32_t     width,
 			       int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
     uint32_t	mask;
     __m64	vmask;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint16_t	w;
     __m64  srca;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
@@ -1222,10 +1222,10 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w && (unsigned long)dst & 7)
@@ -1273,7 +1273,7 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
+mmx_composite_over_x888_n_8888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
@@ -1287,18 +1287,18 @@ mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 			       int32_t     width,
 			       int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
     uint32_t	mask;
     __m64	vmask;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint16_t	w;
     __m64  srca;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
@@ -1307,10 +1307,10 @@ mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w && (unsigned long)dst & 7)
@@ -1422,24 +1422,24 @@ mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 			     int32_t     width,
 			     int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
     uint32_t    s;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint8_t     a;
     uint16_t	w;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w--)
@@ -1475,15 +1475,15 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 			     int32_t     width,
 			     int32_t     height)
 {
-    uint16_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint16_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
 #if 0
     /* FIXME */
@@ -1492,10 +1492,10 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	CHECKPOINT();
@@ -1506,7 +1506,7 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 	    uint64_t d = *dst;
 	    __m64 vdest = expand565 (M64(d), 0);
 
-	    vdest = pack565(over(vsrc, expand_alpha(vsrc), vdest), vdest, 0);
+	    vdest = pack_565(over(vsrc, expand_alpha(vsrc), vdest), vdest, 0);
 
 	    *dst = UINT64(vdest);
 
@@ -1529,10 +1529,10 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 
 	    vdest = *(__m64 *)dst;
 
-	    vdest = pack565(over(vsrc0, expand_alpha(vsrc0), expand565(vdest, 0)), vdest, 0);
-	    vdest = pack565(over(vsrc1, expand_alpha(vsrc1), expand565(vdest, 1)), vdest, 1);
-	    vdest = pack565(over(vsrc2, expand_alpha(vsrc2), expand565(vdest, 2)), vdest, 2);
-	    vdest = pack565(over(vsrc3, expand_alpha(vsrc3), expand565(vdest, 3)), vdest, 3);
+	    vdest = pack_565(over(vsrc0, expand_alpha(vsrc0), expand565(vdest, 0)), vdest, 0);
+	    vdest = pack_565(over(vsrc1, expand_alpha(vsrc1), expand565(vdest, 1)), vdest, 1);
+	    vdest = pack_565(over(vsrc2, expand_alpha(vsrc2), expand565(vdest, 2)), vdest, 2);
+	    vdest = pack_565(over(vsrc3, expand_alpha(vsrc3), expand565(vdest, 3)), vdest, 3);
 
 	    *(__m64 *)dst = vdest;
 
@@ -1549,7 +1549,7 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 	    uint64_t d = *dst;
 	    __m64 vdest = expand565 (M64(d), 0);
 
-	    vdest = pack565(over(vsrc, expand_alpha(vsrc), vdest), vdest, 0);
+	    vdest = pack_565(over(vsrc, expand_alpha(vsrc), vdest), vdest, 0);
 
 	    *dst = UINT64(vdest);
 
@@ -1563,7 +1563,7 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
+mmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -1578,9 +1578,9 @@ mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				  int32_t     height)
 {
     uint32_t	src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint32_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     __m64	vsrc, vsrca;
     uint64_t	srcsrc;
@@ -1595,18 +1595,18 @@ mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	CHECKPOINT();
@@ -1835,7 +1835,7 @@ pixman_fill_mmx (uint32_t *bits,
 }
 
 static void
-mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
+mmx_composite_src_n_8_8888 (pixman_implementation_t *imp,
 				     pixman_op_t op,
 				     pixman_image_t * src_image,
 				     pixman_image_t * mask_image,
@@ -1850,9 +1850,9 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				     int32_t     height)
 {
     uint32_t	src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint32_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     __m64	vsrc, vsrca;
     uint64_t	srcsrc;
@@ -1871,18 +1871,18 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	CHECKPOINT();
@@ -1967,7 +1967,7 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
+mmx_composite_over_n_8_0565 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -1982,9 +1982,9 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  int32_t     height)
 {
     uint32_t	src, srca;
-    uint16_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint16_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     __m64	vsrc, vsrca, tmp;
     uint64_t srcsrcsrcsrc, src16;
@@ -1997,13 +1997,13 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
 
-    tmp = pack565(vsrc, _mm_setzero_si64(), 0);
+    tmp = pack_565(vsrc, _mm_setzero_si64(), 0);
     src16 = UINT64(tmp);
 
     srcsrcsrcsrc = (uint64_t)src16 << 48 | (uint64_t)src16 << 32 |
@@ -2011,10 +2011,10 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	CHECKPOINT();
@@ -2028,7 +2028,7 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 		uint64_t d = *dst;
 		__m64 vd = M64(d);
 		__m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev (M64 (m)), expand565(vd, 0));
-		vd = pack565(vdest, _mm_setzero_si64(), 0);
+		vd = pack_565(vdest, _mm_setzero_si64(), 0);
 		*dst = UINT64(vd);
 	    }
 
@@ -2059,13 +2059,13 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 		vdest = *(__m64 *)dst;
 
 		vm0 = M64(m0);
-		vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm0), expand565(vdest, 0)), vdest, 0);
+		vdest = pack_565(in_over(vsrc, vsrca, expand_alpha_rev(vm0), expand565(vdest, 0)), vdest, 0);
 		vm1 = M64(m1);
-		vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm1), expand565(vdest, 1)), vdest, 1);
+		vdest = pack_565(in_over(vsrc, vsrca, expand_alpha_rev(vm1), expand565(vdest, 1)), vdest, 1);
 		vm2 = M64(m2);
-		vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm2), expand565(vdest, 2)), vdest, 2);
+		vdest = pack_565(in_over(vsrc, vsrca, expand_alpha_rev(vm2), expand565(vdest, 2)), vdest, 2);
 		vm3 = M64(m3);
-		vdest = pack565(in_over(vsrc, vsrca, expand_alpha_rev(vm3), expand565(vdest, 3)), vdest, 3);
+		vdest = pack_565(in_over(vsrc, vsrca, expand_alpha_rev(vm3), expand565(vdest, 3)), vdest, 3);
 
 		*(__m64 *)dst = vdest;
 	    }
@@ -2086,7 +2086,7 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 		uint64_t d = *dst;
 		__m64 vd = M64(d);
 		__m64 vdest = in_over(vsrc, vsrca, expand_alpha_rev (M64(m)), expand565(vd, 0));
-		vd = pack565(vdest, _mm_setzero_si64(), 0);
+		vd = pack_565(vdest, _mm_setzero_si64(), 0);
 		*dst = UINT64(vd);
 	    }
 
@@ -2100,7 +2100,7 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
+mmx_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -2114,15 +2114,15 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				  int32_t     width,
 				  int32_t     height)
 {
-    uint16_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint16_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
 #if 0
     /* FIXME */
@@ -2131,10 +2131,10 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	CHECKPOINT();
@@ -2145,7 +2145,7 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 	    uint64_t d = *dst;
 	    __m64 vdest = expand565 (M64(d), 0);
 
-	    vdest = pack565(over_rev_non_pre(vsrc, vdest), vdest, 0);
+	    vdest = pack_565(over_rev_non_pre(vsrc, vdest), vdest, 0);
 
 	    *dst = UINT64(vdest);
 
@@ -2174,10 +2174,10 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 	    if ((a0 & a1 & a2 & a3) == 0xFF)
 	    {
 		__m64 vdest;
-		vdest = pack565(invert_colors(load8888(s0)), _mm_setzero_si64(), 0);
-		vdest = pack565(invert_colors(load8888(s1)), vdest, 1);
-		vdest = pack565(invert_colors(load8888(s2)), vdest, 2);
-		vdest = pack565(invert_colors(load8888(s3)), vdest, 3);
+		vdest = pack_565(invert_colors(load8888(s0)), _mm_setzero_si64(), 0);
+		vdest = pack_565(invert_colors(load8888(s1)), vdest, 1);
+		vdest = pack_565(invert_colors(load8888(s2)), vdest, 2);
+		vdest = pack_565(invert_colors(load8888(s3)), vdest, 3);
 
 		*(__m64 *)dst = vdest;
 	    }
@@ -2185,10 +2185,10 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 	    {
 		__m64 vdest = *(__m64 *)dst;
 
-		vdest = pack565(over_rev_non_pre(load8888(s0), expand565(vdest, 0)), vdest, 0);
-	        vdest = pack565(over_rev_non_pre(load8888(s1), expand565(vdest, 1)), vdest, 1);
-		vdest = pack565(over_rev_non_pre(load8888(s2), expand565(vdest, 2)), vdest, 2);
-		vdest = pack565(over_rev_non_pre(load8888(s3), expand565(vdest, 3)), vdest, 3);
+		vdest = pack_565(over_rev_non_pre(load8888(s0), expand565(vdest, 0)), vdest, 0);
+	        vdest = pack_565(over_rev_non_pre(load8888(s1), expand565(vdest, 1)), vdest, 1);
+		vdest = pack_565(over_rev_non_pre(load8888(s2), expand565(vdest, 2)), vdest, 2);
+		vdest = pack_565(over_rev_non_pre(load8888(s3), expand565(vdest, 3)), vdest, 3);
 
 		*(__m64 *)dst = vdest;
 	    }
@@ -2206,7 +2206,7 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 	    uint64_t d = *dst;
 	    __m64 vdest = expand565 (M64(d), 0);
 
-	    vdest = pack565(over_rev_non_pre(vsrc, vdest), vdest, 0);
+	    vdest = pack_565(over_rev_non_pre(vsrc, vdest), vdest, 0);
 
 	    *dst = UINT64(vdest);
 
@@ -2220,7 +2220,7 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
+mmx_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -2234,15 +2234,15 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  int32_t     width,
 				  int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
 #if 0
     /* FIXME */
@@ -2251,10 +2251,10 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w && (unsigned long)dst & 7)
@@ -2320,7 +2320,7 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
+mmx_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
 				      pixman_op_t op,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
@@ -2335,9 +2335,9 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				      int32_t     height)
 {
     uint32_t	src, srca;
-    uint16_t	*dstLine;
-    uint32_t	*maskLine;
-    int	dstStride, maskStride;
+    uint16_t	*dst_line;
+    uint32_t	*mask_line;
+    int	dst_stride, mask_stride;
     __m64  vsrc, vsrca;
 
     CHECKPOINT();
@@ -2348,8 +2348,8 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -2357,8 +2357,8 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     while (height--)
     {
 	int twidth = width;
-	uint32_t *p = (uint32_t *)maskLine;
-	uint16_t *q = (uint16_t *)dstLine;
+	uint32_t *p = (uint32_t *)mask_line;
+	uint16_t *q = (uint16_t *)dst_line;
 
 	while (twidth && ((unsigned long)q & 7))
 	{
@@ -2368,7 +2368,7 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 	    {
 		uint64_t d = *q;
 		__m64 vdest = expand565 (M64(d), 0);
-		vdest = pack565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
+		vdest = pack_565 (in_over (vsrc, vsrca, load8888 (m), vdest), vdest, 0);
 		*q = UINT64(vdest);
 	    }
 
@@ -2390,10 +2390,10 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 	    {
 		__m64 vdest = *(__m64 *)q;
 
-		vdest = pack565(in_over(vsrc, vsrca, load8888(m0), expand565(vdest, 0)), vdest, 0);
-		vdest = pack565(in_over(vsrc, vsrca, load8888(m1), expand565(vdest, 1)), vdest, 1);
-		vdest = pack565(in_over(vsrc, vsrca, load8888(m2), expand565(vdest, 2)), vdest, 2);
-		vdest = pack565(in_over(vsrc, vsrca, load8888(m3), expand565(vdest, 3)), vdest, 3);
+		vdest = pack_565(in_over(vsrc, vsrca, load8888(m0), expand565(vdest, 0)), vdest, 0);
+		vdest = pack_565(in_over(vsrc, vsrca, load8888(m1), expand565(vdest, 1)), vdest, 1);
+		vdest = pack_565(in_over(vsrc, vsrca, load8888(m2), expand565(vdest, 2)), vdest, 2);
+		vdest = pack_565(in_over(vsrc, vsrca, load8888(m3), expand565(vdest, 3)), vdest, 3);
 
 		*(__m64 *)q = vdest;
 	    }
@@ -2411,7 +2411,7 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 	    {
 		uint64_t d = *q;
 		__m64 vdest = expand565(M64(d), 0);
-		vdest = pack565 (in_over(vsrc, vsrca, load8888(m), vdest), vdest, 0);
+		vdest = pack_565 (in_over(vsrc, vsrca, load8888(m), vdest), vdest, 0);
 		*q = UINT64(vdest);
 	    }
 
@@ -2420,15 +2420,15 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 	    q++;
 	}
 
-	maskLine += maskStride;
-	dstLine += dstStride;
+	mask_line += mask_stride;
+	dst_line += dst_stride;
     }
 
     _mm_empty ();
 }
 
 static void
-mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
+mmx_composite_in_n_8_8 (pixman_implementation_t *imp,
 			pixman_op_t op,
 			pixman_image_t * src_image,
 			pixman_image_t * mask_image,
@@ -2442,16 +2442,16 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			int32_t     width,
 			int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t	src;
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -2464,10 +2464,10 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	if ((((unsigned long)dst_image & 3) == 0) &&
@@ -2512,7 +2512,7 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
+mmx_composite_in_8_8 (pixman_implementation_t *imp,
 		      pixman_op_t op,
 		      pixman_image_t * src_image,
 		      pixman_image_t * mask_image,
@@ -2526,20 +2526,20 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      int32_t     width,
 		      int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	srcStride, dstStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*src_line, *src;
+    int	src_stride, dst_stride;
     uint16_t	w;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	if ((((unsigned long)dst_image & 3) == 0) &&
@@ -2577,7 +2577,7 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
+mmx_composite_add_8888_8_8 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
@@ -2591,16 +2591,16 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			       int32_t     width,
 			       int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t	src;
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -2613,10 +2613,10 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 	w = width;
 
 	if ((((unsigned long)mask_image & 3) == 0) &&
@@ -2656,7 +2656,7 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
+mmx_composite_add_8000_8000 (pixman_implementation_t *imp,
 				pixman_op_t op,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
@@ -2670,24 +2670,24 @@ mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				int32_t     width,
 				int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint8_t	s, d;
     uint16_t	t;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w && (unsigned long)dst & 7)
@@ -2729,7 +2729,7 @@ mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
+mmx_composite_add_8888_8888 (pixman_implementation_t *imp,
 				pixman_op_t 	op,
 				pixman_image_t *	src_image,
 				pixman_image_t *	mask_image,
@@ -2744,22 +2744,22 @@ mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				int32_t     height)
 {
     __m64 dst64;
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
     CHECKPOINT();
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
 	w = width;
 
 	while (w && (unsigned long)dst & 7)
@@ -2928,7 +2928,7 @@ pixman_blt_mmx (uint32_t *src_bits,
 }
 
 static void
-mmx_CompositeCopyArea (pixman_implementation_t *imp,
+mmx_composite_copy_area (pixman_implementation_t *imp,
 			pixman_op_t       op,
 			pixman_image_t *	src_image,
 			pixman_image_t *	mask_image,
@@ -2952,7 +2952,7 @@ mmx_CompositeCopyArea (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
+mmx_composite_over_x888_8_8888 (pixman_implementation_t *imp,
 				pixman_op_t      op,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
@@ -2966,24 +2966,24 @@ mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				int32_t     width,
 				int32_t     height)
 {
-    uint32_t	*src, *srcLine;
-    uint32_t    *dst, *dstLine;
-    uint8_t	*mask, *maskLine;
-    int		 srcStride, maskStride, dstStride;
+    uint32_t	*src, *src_line;
+    uint32_t    *dst, *dst_line;
+    uint8_t	*mask, *mask_line;
+    int		 src_stride, mask_stride, dst_stride;
     uint16_t w;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     while (height--)
     {
-	src = srcLine;
-	srcLine += srcStride;
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
+	src = src_line;
+	src_line += src_stride;
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
 
 	w = width;
 
@@ -3018,50 +3018,50 @@ mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 
 static const pixman_fast_path_t mmx_fast_paths[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   mmx_CompositeOver_n_8_0565,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   mmx_CompositeOver_n_8_0565,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, mmx_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, mmx_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_Composite_over_x888_n_8888,    NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_Composite_over_x888_n_8888,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_Composite_over_x888_n_8888,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_Composite_over_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   mmx_composite_over_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   mmx_composite_over_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, mmx_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, mmx_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_composite_over_x888_n_8888,    NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_composite_over_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_composite_over_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_composite_over_x888_n_8888,	   NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_composite_over_8888_n_8888,    NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
 #if 0
     /* FIXME: This code is commented out since it's apparently not actually faster than the generic code. */
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, mmx_CompositeOver_x888_8_8888,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, mmx_CompositeOver_x888_8_8888,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_CompositeOver_x888_8_8888,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_a8r8g8b8, mmx_CompositeOver_x888_8_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, mmx_composite_over_x888_8_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_composite_over_x888_8_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_a8r8g8b8, mmx_composite_over_x888_8_8888,   0 },
 #endif
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_CompositeOver_n_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, mmx_CompositeOver_n_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   mmx_CompositeOver_n_0565,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, mmx_CompositeCopyArea,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, mmx_CompositeCopyArea,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_composite_over_n_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, mmx_composite_over_n_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   mmx_composite_over_n_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, mmx_composite_copy_area,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, mmx_composite_copy_area,	   0 },
 
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, mmx_composite_over_8888_8888,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_composite_over_8888_8888,	   0 },
@@ -3070,26 +3070,26 @@ static const pixman_fast_path_t mmx_fast_paths[] =
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_composite_over_8888_8888,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   mmx_composite_over_8888_0565,	   0 },
 
-    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_CompositeAdd_8888_8888,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_CompositeAdd_8888_8888,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       mmx_CompositeAdd_8000_8000,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       mmx_CompositeAdd_8888_8_8,    0 },
-
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeSrc_n_8_8888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeSrc_n_8_8888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, mmx_CompositeSrc_n_8_8888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, mmx_CompositeSrc_n_8_8888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_CompositeCopyArea, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_CompositeCopyArea, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_CompositeCopyArea, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_CompositeCopyArea, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_CompositeCopyArea, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_CompositeCopyArea, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   mmx_CompositeCopyArea, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   mmx_CompositeCopyArea, 0 },    
-
-    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       mmx_CompositeIn_8_8,   0 },
-    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 mmx_CompositeIn_n_8_8, 0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_composite_add_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_composite_add_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       mmx_composite_add_8000_8000,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       mmx_composite_add_8888_8_8,    0 },
+
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_composite_src_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_composite_src_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, mmx_composite_src_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, mmx_composite_src_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_composite_copy_area, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_composite_copy_area, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_composite_copy_area, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_composite_copy_area, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_composite_copy_area, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_composite_copy_area, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   mmx_composite_copy_area, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   mmx_composite_copy_area, 0 },    
+
+    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       mmx_composite_in_8_8,   0 },
+    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 mmx_composite_in_n_8_8, 0 },
 
     { PIXMAN_OP_NONE },
 };
@@ -3175,29 +3175,29 @@ _pixman_implementation_create_mmx (void)
     pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
     pixman_implementation_t *imp = _pixman_implementation_create (general);
 
-    imp->combine_32[PIXMAN_OP_OVER] = mmxCombineOverU;
-    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmxCombineOverReverseU;
-    imp->combine_32[PIXMAN_OP_IN] = mmxCombineInU;
-    imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmxCombineInReverseU;
-    imp->combine_32[PIXMAN_OP_OUT] = mmxCombineOutU;
-    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmxCombineOutReverseU;
-    imp->combine_32[PIXMAN_OP_ATOP] = mmxCombineAtopU;
-    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmxCombineAtopReverseU;
-    imp->combine_32[PIXMAN_OP_XOR] = mmxCombineXorU; 
-    imp->combine_32[PIXMAN_OP_ADD] = mmxCombineAddU;
-    imp->combine_32[PIXMAN_OP_SATURATE] = mmxCombineSaturateU;
+    imp->combine_32[PIXMAN_OP_OVER] = mmx_combine_over_u;
+    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_u;
+    imp->combine_32[PIXMAN_OP_IN] = mmx_combine_in_u;
+    imp->combine_32[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_u;
+    imp->combine_32[PIXMAN_OP_OUT] = mmx_combine_out_u;
+    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_u;
+    imp->combine_32[PIXMAN_OP_ATOP] = mmx_combine_atop_u;
+    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_u;
+    imp->combine_32[PIXMAN_OP_XOR] = mmx_combine_xor_u; 
+    imp->combine_32[PIXMAN_OP_ADD] = mmx_combine_add_u;
+    imp->combine_32[PIXMAN_OP_SATURATE] = mmx_combine_saturate_u;
     
-    imp->combine_32_ca[PIXMAN_OP_SRC] = mmxCombineSrcC;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = mmxCombineOverC;
-    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmxCombineOverReverseC;
-    imp->combine_32_ca[PIXMAN_OP_IN] = mmxCombineInC;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmxCombineInReverseC;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = mmxCombineOutC;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmxCombineOutReverseC;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = mmxCombineAtopC;
-    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmxCombineAtopReverseC;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = mmxCombineXorC;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = mmxCombineAddC;
+    imp->combine_32_ca[PIXMAN_OP_SRC] = mmx_combine_src_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER] = mmx_combine_over_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = mmx_combine_over_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_IN] = mmx_combine_in_c;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = mmx_combine_in_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT] = mmx_combine_out_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = mmx_combine_out_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP] = mmx_combine_atop_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = mmx_combine_atop_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_XOR] = mmx_combine_xor_c;
+    imp->combine_32_ca[PIXMAN_OP_ADD] = mmx_combine_add_c;
 
     imp->composite = mmx_composite;
     imp->blt = mmx_blt;
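
The two arrays filled in above are operator-indexed function-pointer tables: combine_32 holds the unified-alpha combiners and combine_32_ca the component-alpha ones. Dispatch then amounts to an array lookup, roughly as below; the combiner signature is deliberately simplified here, since the real combiner type in pixman-private.h carries additional context:

    typedef void (*combine_32_func_t) (uint32_t       *dest,
                                       const uint32_t *src,
                                       int             width);

    /* One slot per operator; sized generously for this sketch. */
    static combine_32_func_t combine_32[64];
    static combine_32_func_t combine_32_ca[64];

    static void
    combine (pixman_op_t op, int component_alpha,
             uint32_t *dest, const uint32_t *src, int width)
    {
        combine_32_func_t f =
            component_alpha ? combine_32_ca[op] : combine_32[op];

        f (dest, src, width);
    }
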
diff --git a/pixman/pixman-private.h b/pixman/pixman-private.h
index 1dc68da..b0056c4 100644
--- a/pixman/pixman-private.h
+++ b/pixman/pixman-private.h
@@ -202,7 +202,7 @@ _pixman_image_get_scanline_generic_64  (pixman_image_t *pict,
 					int             width,
 					uint32_t       *buffer,
 					const uint32_t *mask,
-					uint32_t        maskBits);
+					uint32_t        mask_bits);
 
 source_pict_class_t
 _pixman_image_classify (pixman_image_t *image,
@@ -320,7 +320,7 @@ _pixman_gradient_walker_pixel (pixman_gradient_walker_t       *walker,
 #define X_FRAC_FIRST(n)	(STEP_X_SMALL(n) / 2)
 #define X_FRAC_LAST(n)	(X_FRAC_FIRST(n) + (N_X_FRAC(n) - 1) * STEP_X_SMALL(n))
 
-#define RenderSamplesX(x,n)	((n) == 1 ? 0 : (pixman_fixed_frac (x) + X_FRAC_FIRST(n)) / STEP_X_SMALL(n))
+#define RENDER_SAMPLES_X(x,n)	((n) == 1 ? 0 : (pixman_fixed_frac (x) + X_FRAC_FIRST(n)) / STEP_X_SMALL(n))
 
 void
 pixman_rasterize_edges_accessors (pixman_image_t *image,
@@ -573,7 +573,7 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
 			       int16_t dest_y,
 			       uint16_t width,
 			       uint16_t height,
-			       pixman_composite_func_t compositeRect);
+			       pixman_composite_func_t composite_rect);
 
 void
 pixman_expand (uint64_t *dst, const uint32_t *src, pixman_format_code_t, int width);
diff --git a/pixman/pixman-radial-gradient.c b/pixman/pixman-radial-gradient.c
index ced8213..870c4ce 100644
--- a/pixman/pixman-radial-gradient.c
+++ b/pixman/pixman-radial-gradient.c
@@ -33,7 +33,7 @@
 
 static void
 radial_gradient_get_scanline_32 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
-				 const uint32_t *mask, uint32_t maskBits)
+				 const uint32_t *mask, uint32_t mask_bits)
 {
     /*
      * In the radial gradient problem we are given two circles (c₁,r₁) and
@@ -185,7 +185,7 @@ radial_gradient_get_scanline_32 (pixman_image_t *image, int x, int y, int width,
     
     if (affine) {
 	while (buffer < end) {
-	    if (!mask || *mask++ & maskBits)
+	    if (!mask || *mask++ & mask_bits)
 	    {
 		double pdx, pdy;
 		double B, C;
@@ -222,7 +222,7 @@ radial_gradient_get_scanline_32 (pixman_image_t *image, int x, int y, int width,
     } else {
 	/* projective */
 	while (buffer < end) {
-	    if (!mask || *mask++ & maskBits)
+	    if (!mask || *mask++ & mask_bits)
 	    {
 		double pdx, pdy;
 		double B, C;
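
The mask_bits parameter renamed above acts as a per-pixel gate: the scanline routine skips any pixel whose mask word has none of those bits set, and computes the gradient only for the rest. The shape of the loop, with the hypothetical compute_pixel() standing in for the radial math:

    extern uint32_t compute_pixel (void);   /* stand-in for the gradient math */

    static void
    get_scanline_sketch (int             width,
                         uint32_t       *buffer,
                         const uint32_t *mask,
                         uint32_t        mask_bits)
    {
        uint32_t *end = buffer + width;

        while (buffer < end)
        {
            if (!mask || *mask++ & mask_bits)
                *buffer = compute_pixel ();   /* only selected pixels are touched */

            ++buffer;
        }
    }
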
diff --git a/pixman/pixman-region.c b/pixman/pixman-region.c
index 10db5fd..c061d17 100644
--- a/pixman/pixman-region.c
+++ b/pixman/pixman-region.c
@@ -51,27 +51,27 @@ SOFTWARE.
 #include <stdio.h>
 #include "pixman-private.h"
 
-#define PIXREGION_NIL(reg) ((reg)->data && !(reg)->data->numRects)
+#define PIXREGION_NIL(reg) ((reg)->data && !(reg)->data->num_rects)
 /* not a region */
-#define PIXREGION_NAR(reg)	((reg)->data == pixman_brokendata)
-#define PIXREGION_NUM_RECTS(reg) ((reg)->data ? (reg)->data->numRects : 1)
+#define PIXREGION_NAR(reg)	((reg)->data == pixman_broken_data)
+#define PIXREGION_NUM_RECTS(reg) ((reg)->data ? (reg)->data->num_rects : 1)
 #define PIXREGION_SIZE(reg) ((reg)->data ? (reg)->data->size : 0)
 #define PIXREGION_RECTS(reg) ((reg)->data ? (box_type_t *)((reg)->data + 1) \
 			               : &(reg)->extents)
 #define PIXREGION_BOXPTR(reg) ((box_type_t *)((reg)->data + 1))
 #define PIXREGION_BOX(reg,i) (&PIXREGION_BOXPTR(reg)[i])
-#define PIXREGION_TOP(reg) PIXREGION_BOX(reg, (reg)->data->numRects)
-#define PIXREGION_END(reg) PIXREGION_BOX(reg, (reg)->data->numRects - 1)
+#define PIXREGION_TOP(reg) PIXREGION_BOX(reg, (reg)->data->num_rects)
+#define PIXREGION_END(reg) PIXREGION_BOX(reg, (reg)->data->num_rects - 1)
 
 #define GOOD(reg) assert(PREFIX(_selfcheck) (reg))
 
-static const box_type_t PREFIX(_emptyBox_) = {0, 0, 0, 0};
-static const region_data_type_t PREFIX(_emptyData_) = {0, 0};
-static const region_data_type_t PREFIX(_brokendata_) = {0, 0};
+static const box_type_t PREFIX(_empty_box_) = {0, 0, 0, 0};
+static const region_data_type_t PREFIX(_empty_data_) = {0, 0};
+static const region_data_type_t PREFIX(_broken_data_) = {0, 0};
 
-static box_type_t *pixman_region_emptyBox = (box_type_t *)&PREFIX(_emptyBox_);
-static region_data_type_t *pixman_region_emptyData = (region_data_type_t *)&PREFIX(_emptyData_);
-static region_data_type_t *pixman_brokendata = (region_data_type_t *)&PREFIX(_brokendata_);
+static box_type_t *pixman_region_empty_box = (box_type_t *)&PREFIX(_empty_box_);
+static region_data_type_t *pixman_region_empty_data = (region_data_type_t *)&PREFIX(_empty_data_);
+static region_data_type_t *pixman_broken_data = (region_data_type_t *)&PREFIX(_broken_data_);
 
 static pixman_bool_t
 pixman_break (region_type_t *region);
@@ -118,7 +118,7 @@ pixman_break (region_type_t *region);
  * Adam de Boor wrote most of the original region code.  Joel McCormack
  * substantially modified or rewrote most of the core arithmetic routines, and
  * added pixman_region_validate in order to support several speed improvements to
- * pixman_region_validateTree.  Bob Scheifler changed the representation to be more
+ * pixman_region_validate_tree.  Bob Scheifler changed the representation to be more
  * compact when empty or a single rectangle, and did a bunch of gratuitous
  * reformatting. Carl Worth did further gratuitous reformatting while re-merging
  * the server and client region code into libpixregion.
@@ -171,11 +171,11 @@ alloc_data(size_t n)
 #define FREE_DATA(reg) if ((reg)->data && (reg)->data->size) free((reg)->data)
 
 #define RECTALLOC_BAIL(region,n,bail) \
-if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)) \
+if (!(region)->data || (((region)->data->num_rects + (n)) > (region)->data->size)) \
     if (!pixman_rect_alloc(region, n)) { goto bail; }
 
 #define RECTALLOC(region,n) \
-if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)) \
+if (!(region)->data || (((region)->data->num_rects + (n)) > (region)->data->size)) \
     if (!pixman_rect_alloc(region, n)) { return FALSE; }
 
 #define ADDRECT(next_rect,nx1,ny1,nx2,ny2)	\
@@ -189,30 +189,30 @@ if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)
 
 #define NEWRECT(region,next_rect,nx1,ny1,nx2,ny2)			\
 {									\
-    if (!(region)->data || ((region)->data->numRects == (region)->data->size))\
+    if (!(region)->data || ((region)->data->num_rects == (region)->data->size))\
     {									\
 	if (!pixman_rect_alloc(region, 1))					\
 	    return FALSE;						\
 	next_rect = PIXREGION_TOP(region);					\
     }									\
     ADDRECT(next_rect,nx1,ny1,nx2,ny2);					\
-    region->data->numRects++;						\
-    assert(region->data->numRects<=region->data->size);			\
+    region->data->num_rects++;						\
+    assert(region->data->num_rects<=region->data->size);			\
 }
 
-#define DOWNSIZE(reg,numRects)						\
-    if (((numRects) < ((reg)->data->size >> 1)) && ((reg)->data->size > 50)) \
+#define DOWNSIZE(reg,num_rects)						\
+    if (((num_rects) < ((reg)->data->size >> 1)) && ((reg)->data->size > 50)) \
     {									\
-	region_data_type_t * NewData;				\
-	size_t data_size = PIXREGION_SZOF(numRects);			\
+	region_data_type_t * new_data;				\
+	size_t data_size = PIXREGION_SZOF(num_rects);			\
 	if (!data_size)							\
-	    NewData = NULL;						\
+	    new_data = NULL;						\
 	else								\
-	    NewData = (region_data_type_t *)realloc((reg)->data, data_size); \
-	if (NewData)							\
+	    new_data = (region_data_type_t *)realloc((reg)->data, data_size); \
+	if (new_data)							\
 	{								\
-	    NewData->size = (numRects);					\
-	    (reg)->data = NewData;					\
+	    new_data->size = (num_rects);					\
+	    (reg)->data = new_data;					\
 	}								\
     }
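
RECTALLOC/RECTALLOC_BAIL and DOWNSIZE above implement a simple growth policy: allocate more space whenever num_rects + n would overflow the current size, and shrink (via realloc) only when less than half the allocation is in use and it already holds more than 50 boxes. Written out as functions rather than macros, and assuming the surrounding file's pixman_rect_alloc and PIXREGION_SZOF, the same logic looks like:

    static pixman_bool_t
    ensure_space (region_type_t *region, int n)
    {
        if (!region->data ||
            region->data->num_rects + n > region->data->size)
            return pixman_rect_alloc (region, n);

        return TRUE;
    }

    static void
    maybe_downsize (region_type_t *region, int num_rects)
    {
        if (num_rects < (region->data->size >> 1) && region->data->size > 50)
        {
            size_t data_size = PIXREGION_SZOF (num_rects);
            region_data_type_t *new_data =
                data_size ? realloc (region->data, data_size) : NULL;

            if (new_data)
            {
                new_data->size = num_rects;
                region->data = new_data;
            }
        }
    }
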
 
@@ -267,8 +267,8 @@ PREFIX(_print) (rgn)
 PIXMAN_EXPORT void
 PREFIX(_init) (region_type_t *region)
 {
-    region->extents = *pixman_region_emptyBox;
-    region->data = pixman_region_emptyData;
+    region->extents = *pixman_region_empty_box;
+    region->data = pixman_region_empty_data;
 }
 
 PIXMAN_EXPORT void
@@ -316,8 +316,8 @@ static pixman_bool_t
 pixman_break (region_type_t *region)
 {
     FREE_DATA (region);
-    region->extents = *pixman_region_emptyBox;
-    region->data = pixman_brokendata;
+    region->extents = *pixman_region_empty_box;
+    region->data = pixman_broken_data;
     return FALSE;
 }
 
@@ -332,7 +332,7 @@ pixman_rect_alloc (region_type_t * region, int n)
 	region->data = alloc_data(n);
 	if (!region->data)
 	    return pixman_break (region);
-	region->data->numRects = 1;
+	region->data->num_rects = 1;
 	*PIXREGION_BOXPTR(region) = region->extents;
     }
     else if (!region->data->size)
@@ -340,18 +340,18 @@ pixman_rect_alloc (region_type_t * region, int n)
 	region->data = alloc_data(n);
 	if (!region->data)
 	    return pixman_break (region);
-	region->data->numRects = 0;
+	region->data->num_rects = 0;
     }
     else
     {
 	size_t data_size;
 	if (n == 1)
 	{
-	    n = region->data->numRects;
+	    n = region->data->num_rects;
 	    if (n > 500) /* XXX pick numbers out of a hat */
 		n = 250;
 	}
-	n += region->data->numRects;
+	n += region->data->num_rects;
 	data_size = PIXREGION_SZOF(n);
 	if (!data_size)
 	    data = NULL;
@@ -379,17 +379,17 @@ PREFIX(_copy) (region_type_t *dst, region_type_t *src)
 	dst->data = src->data;
 	return TRUE;
     }
-    if (!dst->data || (dst->data->size < src->data->numRects))
+    if (!dst->data || (dst->data->size < src->data->num_rects))
     {
 	FREE_DATA(dst);
-	dst->data = alloc_data(src->data->numRects);
+	dst->data = alloc_data(src->data->num_rects);
 	if (!dst->data)
 	    return pixman_break (dst);
-	dst->data->size = src->data->numRects;
+	dst->data->size = src->data->num_rects;
     }
-    dst->data->numRects = src->data->numRects;
+    dst->data->num_rects = src->data->num_rects;
     memmove((char *)PIXREGION_BOXPTR(dst),(char *)PIXREGION_BOXPTR(src),
-	  dst->data->numRects * sizeof(box_type_t));
+	  dst->data->num_rects * sizeof(box_type_t));
     return TRUE;
 }
 
@@ -411,35 +411,35 @@ PREFIX(_copy) (region_type_t *dst, region_type_t *src)
  *	If coalescing takes place:
  *	    - rectangles in the previous band will have their y2 fields
  *	      altered.
- *	    - region->data->numRects will be decreased.
+ *	    - region->data->num_rects will be decreased.
  *
  *-----------------------------------------------------------------------
  */
 static inline int
 pixman_coalesce (
     region_type_t *	region,	    	/* Region to coalesce		     */
-    int	    	  	prevStart,  	/* Index of start of previous band   */
-    int	    	  	curStart)   	/* Index of start of current band    */
+    int	    	  	prev_start,  	/* Index of start of previous band   */
+    int	    	  	cur_start)   	/* Index of start of current band    */
 {
-    box_type_t *	prevBox;   	/* Current box in previous band	     */
-    box_type_t *	curBox;    	/* Current box in current band       */
-    int  	numRects;	/* Number rectangles in both bands   */
+    box_type_t *	prev_box;   	/* Current box in previous band	     */
+    box_type_t *	cur_box;    	/* Current box in current band       */
+    int  	num_rects;	/* Number rectangles in both bands   */
     int	y2;		/* Bottom of current band	     */
     /*
      * Figure out how many rectangles are in the band.
      */
-    numRects = curStart - prevStart;
-    assert(numRects == region->data->numRects - curStart);
+    num_rects = cur_start - prev_start;
+    assert(num_rects == region->data->num_rects - cur_start);
 
-    if (!numRects) return curStart;
+    if (!num_rects) return cur_start;
 
     /*
      * The bands may only be coalesced if the bottom of the previous
      * matches the top scanline of the current.
      */
-    prevBox = PIXREGION_BOX(region, prevStart);
-    curBox = PIXREGION_BOX(region, curStart);
-    if (prevBox->y2 != curBox->y1) return curStart;
+    prev_box = PIXREGION_BOX(region, prev_start);
+    cur_box = PIXREGION_BOX(region, cur_start);
+    if (prev_box->y2 != cur_box->y1) return cur_start;
 
     /*
      * Make sure the bands have boxes in the same places. This
@@ -447,43 +447,43 @@ pixman_coalesce (
      * cover the most area possible. I.e. two boxes in a band must
      * have some horizontal space between them.
      */
-    y2 = curBox->y2;
+    y2 = cur_box->y2;
 
     do {
-	if ((prevBox->x1 != curBox->x1) || (prevBox->x2 != curBox->x2)) {
-	    return (curStart);
+	if ((prev_box->x1 != cur_box->x1) || (prev_box->x2 != cur_box->x2)) {
+	    return (cur_start);
 	}
-	prevBox++;
-	curBox++;
-	numRects--;
-    } while (numRects);
+	prev_box++;
+	cur_box++;
+	num_rects--;
+    } while (num_rects);
 
     /*
      * The bands may be merged, so set the bottom y of each box
      * in the previous band to the bottom y of the current band.
      */
-    numRects = curStart - prevStart;
-    region->data->numRects -= numRects;
+    num_rects = cur_start - prev_start;
+    region->data->num_rects -= num_rects;
     do {
-	prevBox--;
-	prevBox->y2 = y2;
-	numRects--;
-    } while (numRects);
-    return prevStart;
+	prev_box--;
+	prev_box->y2 = y2;
+	num_rects--;
+    } while (num_rects);
+    return prev_start;
 }
 
 /* Quicky macro to avoid trivial reject procedure calls to pixman_coalesce */
 
-#define COALESCE(newReg, prevBand, curBand)				\
-    if (curBand - prevBand == newReg->data->numRects - curBand) {	\
-	prevBand = pixman_coalesce(newReg, prevBand, curBand);		\
+#define COALESCE(new_reg, prev_band, cur_band)				\
+    if (cur_band - prev_band == new_reg->data->num_rects - cur_band) {	\
+	prev_band = pixman_coalesce(new_reg, prev_band, cur_band);		\
     } else {								\
-	prevBand = curBand;						\
+	prev_band = cur_band;						\
     }
 
 /*-
  *-----------------------------------------------------------------------
- * pixman_region_appendNonO --
+ * pixman_region_append_non_o --
  *	Handle a non-overlapping band for the union and subtract operations.
  *      Just adds the (top/bottom-clipped) rectangles into the region.
  *      Doesn't have to check for subsumption or anything.
@@ -492,58 +492,58 @@ pixman_coalesce (
  *	None.
  *
  * Side Effects:
- *	region->data->numRects is incremented and the rectangles overwritten
+ *	region->data->num_rects is incremented and the rectangles overwritten
  *	with the rectangles we're passed.
  *
  *-----------------------------------------------------------------------
  */
 
 static inline pixman_bool_t
-pixman_region_appendNonO (
+pixman_region_append_non_o (
     region_type_t *	region,
     box_type_t *	r,
-    box_type_t *  	  	rEnd,
+    box_type_t *  	  	r_end,
     int  	y1,
     int  	y2)
 {
     box_type_t *	next_rect;
-    int	newRects;
+    int	new_rects;
 
-    newRects = rEnd - r;
+    new_rects = r_end - r;
 
     assert(y1 < y2);
-    assert(newRects != 0);
+    assert(new_rects != 0);
 
     /* Make sure we have enough space for all rectangles to be added */
-    RECTALLOC(region, newRects);
+    RECTALLOC(region, new_rects);
     next_rect = PIXREGION_TOP(region);
-    region->data->numRects += newRects;
+    region->data->num_rects += new_rects;
     do {
 	assert(r->x1 < r->x2);
 	ADDRECT(next_rect, r->x1, y1, r->x2, y2);
 	r++;
-    } while (r != rEnd);
+    } while (r != r_end);
 
     return TRUE;
 }
 
-#define FIND_BAND(r, rBandEnd, rEnd, ry1)		    \
+#define FIND_BAND(r, r_band_end, r_end, ry1)		    \
 {							    \
     ry1 = r->y1;					    \
-    rBandEnd = r+1;					    \
-    while ((rBandEnd != rEnd) && (rBandEnd->y1 == ry1)) {   \
-	rBandEnd++;					    \
+    r_band_end = r+1;					    \
+    while ((r_band_end != r_end) && (r_band_end->y1 == ry1)) {   \
+	r_band_end++;					    \
     }							    \
 }
 
-#define	APPEND_REGIONS(newReg, r, rEnd)					\
+#define	APPEND_REGIONS(new_reg, r, r_end)					\
 {									\
-    int newRects;							\
-    if ((newRects = rEnd - r)) {					\
-	RECTALLOC_BAIL(newReg, newRects, bail);					\
-	memmove((char *)PIXREGION_TOP(newReg),(char *)r, 			\
-              newRects * sizeof(box_type_t));				\
-	newReg->data->numRects += newRects;				\
+    int new_rects;							\
+    if ((new_rects = r_end - r)) {					\
+	RECTALLOC_BAIL(new_reg, new_rects, bail);					\
+	memmove((char *)PIXREGION_TOP(new_reg),(char *)r, 			\
+              new_rects * sizeof(box_type_t));				\
+	new_reg->data->num_rects += new_rects;				\
     }									\
 }
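
A band is a maximal run of boxes sharing the same top edge (y1); FIND_BAND simply advances past it. The same thing as a function instead of a macro:

    /* Returns one past the last box whose top edge matches r->y1. */
    static box_type_t *
    find_band_end (box_type_t *r, box_type_t *r_end, int *ry1)
    {
        box_type_t *band_end = r + 1;

        *ry1 = r->y1;
        while (band_end != r_end && band_end->y1 == *ry1)
            band_end++;

        return band_end;
    }
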
 
@@ -559,15 +559,15 @@ pixman_region_appendNonO (
  *
  * Side Effects:
  *	The new region is overwritten.
- *	overlap set to TRUE if overlapFunc ever returns TRUE.
+ *	overlap set to TRUE if overlap_func ever returns TRUE.
  *
  * Notes:
  *	The idea behind this function is to view the two regions as sets.
  *	Together they cover a rectangle of area that this function divides
  *	into horizontal bands where points are covered only by one region
- *	or by both. For the first case, the nonOverlapFunc is called with
+ *	or by both. For the first case, the non_overlap_func is called with
  *	each the band and the band's upper and lower extents. For the
- *	second, the overlapFunc is called to process the entire band. It
+ *	second, the overlap_func is called to process the entire band. It
  *	is responsible for clipping the rectangles in the band, though
  *	this function provides the boundaries.
  *	At the end of each band, the new region is coalesced, if possible,
@@ -576,91 +576,91 @@ pixman_region_appendNonO (
  *-----------------------------------------------------------------------
  */
 
-typedef pixman_bool_t (*OverlapProcPtr)(
+typedef pixman_bool_t (*overlap_proc_ptr)(
     region_type_t	 *region,
     box_type_t *r1,
-    box_type_t *r1End,
+    box_type_t *r1_end,
     box_type_t *r2,
-    box_type_t *r2End,
+    box_type_t *r2_end,
     int    	 y1,
     int    	 y2,
     int		 *overlap);
 
 static pixman_bool_t
 pixman_op(
-    region_type_t *newReg,		    /* Place to store result	     */
+    region_type_t *new_reg,		    /* Place to store result	     */
     region_type_t *       reg1,		    /* First region in operation     */
     region_type_t *       reg2,		    /* 2d region in operation        */
-    OverlapProcPtr  overlapFunc,            /* Function to call for over-
+    overlap_proc_ptr  overlap_func,            /* Function to call for over-
 					     * lapping bands		     */
-    int	    appendNon1,		    /* Append non-overlapping bands  */
+    int	    append_non1,		    /* Append non-overlapping bands  */
 					    /* in region 1 ? */
-    int	    appendNon2,		    /* Append non-overlapping bands  */
+    int	    append_non2,		    /* Append non-overlapping bands  */
 					    /* in region 2 ? */
     int	    *overlap)
 {
     box_type_t * r1;			    /* Pointer into first region     */
     box_type_t * r2;			    /* Pointer into 2d region	     */
-    box_type_t *	    r1End;		    /* End of 1st region	     */
-    box_type_t *	    r2End;		    /* End of 2d region		     */
+    box_type_t *	    r1_end;		    /* End of 1st region	     */
+    box_type_t *	    r2_end;		    /* End of 2d region		     */
     int	    ybot;		    /* Bottom of intersection	     */
     int	    ytop;		    /* Top of intersection	     */
-    region_data_type_t *	    oldData;		    /* Old data for newReg	     */
-    int		    prevBand;		    /* Index of start of
-					     * previous band in newReg       */
-    int		    curBand;		    /* Index of start of current
-					     * band in newReg		     */
-    box_type_t * r1BandEnd;		    /* End of current band in r1     */
-    box_type_t * r2BandEnd;		    /* End of current band in r2     */
+    region_data_type_t *	    old_data;		    /* Old data for new_reg	     */
+    int		    prev_band;		    /* Index of start of
+					     * previous band in new_reg       */
+    int		    cur_band;		    /* Index of start of current
+					     * band in new_reg		     */
+    box_type_t * r1_band_end;		    /* End of current band in r1     */
+    box_type_t * r2_band_end;		    /* End of current band in r2     */
     int	    top;		    /* Top of non-overlapping band   */
     int	    bot;		    /* Bottom of non-overlapping band*/
     int    r1y1;		    /* Temps for r1->y1 and r2->y1   */
     int    r2y1;
-    int		    newSize;
-    int		    numRects;
+    int		    new_size;
+    int		    num_rects;
 
     /*
      * Break any region computed from a broken region
      */
     if (PIXREGION_NAR (reg1) || PIXREGION_NAR(reg2))
-	return pixman_break (newReg);
+	return pixman_break (new_reg);
 
     /*
      * Initialization:
-     *	set r1, r2, r1End and r2End appropriately, save the rectangles
+     *	set r1, r2, r1_end and r2_end appropriately, save the rectangles
      * of the destination region until the end in case it's one of
      * the two source regions, then mark the "new" region empty, allocating
      * another array of rectangles for it to use.
      */
 
     r1 = PIXREGION_RECTS(reg1);
-    newSize = PIXREGION_NUM_RECTS(reg1);
-    r1End = r1 + newSize;
-    numRects = PIXREGION_NUM_RECTS(reg2);
+    new_size = PIXREGION_NUM_RECTS(reg1);
+    r1_end = r1 + new_size;
+    num_rects = PIXREGION_NUM_RECTS(reg2);
     r2 = PIXREGION_RECTS(reg2);
-    r2End = r2 + numRects;
-    assert(r1 != r1End);
-    assert(r2 != r2End);
+    r2_end = r2 + num_rects;
+    assert(r1 != r1_end);
+    assert(r2 != r2_end);
 
-    oldData = (region_data_type_t *)NULL;
-    if (((newReg == reg1) && (newSize > 1)) ||
-	((newReg == reg2) && (numRects > 1)))
+    old_data = (region_data_type_t *)NULL;
+    if (((new_reg == reg1) && (new_size > 1)) ||
+	((new_reg == reg2) && (num_rects > 1)))
     {
-	oldData = newReg->data;
-	newReg->data = pixman_region_emptyData;
+	old_data = new_reg->data;
+	new_reg->data = pixman_region_empty_data;
     }
     /* guess at new size */
-    if (numRects > newSize)
-	newSize = numRects;
-    newSize <<= 1;
-    if (!newReg->data)
-	newReg->data = pixman_region_emptyData;
-    else if (newReg->data->size)
-	newReg->data->numRects = 0;
-    if (newSize > newReg->data->size) {
-	if (!pixman_rect_alloc(newReg, newSize)) {
-	    if (oldData)
-		free (oldData);
+    if (num_rects > new_size)
+	new_size = num_rects;
+    new_size <<= 1;
+    if (!new_reg->data)
+	new_reg->data = pixman_region_empty_data;
+    else if (new_reg->data->size)
+	new_reg->data->num_rects = 0;
+    if (new_size > new_reg->data->size) {
+	if (!pixman_rect_alloc(new_reg, new_size)) {
+	    if (old_data)
+		free (old_data);
 	    return FALSE;
 	}
     }
@@ -682,29 +682,29 @@ pixman_op(
     ybot = MIN(r1->y1, r2->y1);
 
     /*
-     * prevBand serves to mark the start of the previous band so rectangles
+     * prev_band serves to mark the start of the previous band so rectangles
      * can be coalesced into larger rectangles. qv. pixman_coalesce, above.
-     * In the beginning, there is no previous band, so prevBand == curBand
-     * (curBand is set later on, of course, but the first band will always
-     * start at index 0). prevBand and curBand must be indices because of
+     * In the beginning, there is no previous band, so prev_band == cur_band
+     * (cur_band is set later on, of course, but the first band will always
+     * start at index 0). prev_band and cur_band must be indices because of
      * the possible expansion, and resultant moving, of the new region's
      * array of rectangles.
      */
-    prevBand = 0;
+    prev_band = 0;
 
     do {
 	/*
 	 * This algorithm proceeds one source-band (as opposed to a
 	 * destination band, which is determined by where the two regions
-	 * intersect) at a time. r1BandEnd and r2BandEnd serve to mark the
+	 * intersect) at a time. r1_band_end and r2_band_end serve to mark the
 	 * rectangle after the last one in the current band for their
 	 * respective regions.
 	 */
-	assert(r1 != r1End);
-	assert(r2 != r2End);
+	assert(r1 != r1_end);
+	assert(r2 != r2_end);
 
-	FIND_BAND(r1, r1BandEnd, r1End, r1y1);
-	FIND_BAND(r2, r2BandEnd, r2End, r2y1);
+	FIND_BAND(r1, r1_band_end, r1_end, r1y1);
+	FIND_BAND(r2, r2_band_end, r2_end, r2y1);
 
 	/*
 	 * First handle the band that doesn't intersect, if any.
@@ -715,26 +715,26 @@ pixman_op(
 	 * the other, this entire loop will be passed through n times.
 	 */
 	if (r1y1 < r2y1) {
-	    if (appendNon1) {
+	    if (append_non1) {
 		top = MAX(r1y1, ybot);
 		bot = MIN(r1->y2, r2y1);
 		if (top != bot)	{
-		    curBand = newReg->data->numRects;
-		    if (!pixman_region_appendNonO(newReg, r1, r1BandEnd, top, bot))
+		    cur_band = new_reg->data->num_rects;
+		    if (!pixman_region_append_non_o(new_reg, r1, r1_band_end, top, bot))
 			goto bail;
-		    COALESCE(newReg, prevBand, curBand);
+		    COALESCE(new_reg, prev_band, cur_band);
 		}
 	    }
 	    ytop = r2y1;
 	} else if (r2y1 < r1y1) {
-	    if (appendNon2) {
+	    if (append_non2) {
 		top = MAX(r2y1, ybot);
 		bot = MIN(r2->y2, r1y1);
 		if (top != bot) {
-		    curBand = newReg->data->numRects;
-		    if (!pixman_region_appendNonO(newReg, r2, r2BandEnd, top, bot))
+		    cur_band = new_reg->data->num_rects;
+		    if (!pixman_region_append_non_o(new_reg, r2, r2_band_end, top, bot))
 			goto bail;
-		    COALESCE(newReg, prevBand, curBand);
+		    COALESCE(new_reg, prev_band, cur_band);
 		}
 	    }
 	    ytop = r1y1;
@@ -748,24 +748,24 @@ pixman_op(
 	 */
 	ybot = MIN(r1->y2, r2->y2);
 	if (ybot > ytop) {
-	    curBand = newReg->data->numRects;
-	    if (!(* overlapFunc)(newReg,
-				 r1, r1BandEnd,
-				 r2, r2BandEnd,
+	    cur_band = new_reg->data->num_rects;
+	    if (!(* overlap_func)(new_reg,
+				 r1, r1_band_end,
+				 r2, r2_band_end,
 				 ytop, ybot,
 				 overlap))
 		goto bail;
-	    COALESCE(newReg, prevBand, curBand);
+	    COALESCE(new_reg, prev_band, cur_band);
 	}
 
 	/*
 	 * If we've finished with a band (y2 == ybot) we skip forward
 	 * in the region to the next band.
 	 */
-	if (r1->y2 == ybot) r1 = r1BandEnd;
-	if (r2->y2 == ybot) r2 = r2BandEnd;
+	if (r1->y2 == ybot) r1 = r1_band_end;
+	if (r2->y2 == ybot) r2 = r2_band_end;
 
-    } while (r1 != r1End && r2 != r2End);
+    } while (r1 != r1_end && r2 != r2_end);
 
     /*
      * Deal with whichever region (if any) still has rectangles left.
@@ -775,56 +775,56 @@ pixman_op(
      * regardless of how many bands, into one final append to the list.
      */
 
-    if ((r1 != r1End) && appendNon1) {
-	/* Do first nonOverlap1Func call, which may be able to coalesce */
-	FIND_BAND(r1, r1BandEnd, r1End, r1y1);
-	curBand = newReg->data->numRects;
-	if (!pixman_region_appendNonO(newReg,
-				      r1, r1BandEnd,
+    if ((r1 != r1_end) && append_non1) {
+	/* Do first non_overlap1Func call, which may be able to coalesce */
+	FIND_BAND(r1, r1_band_end, r1_end, r1y1);
+	cur_band = new_reg->data->num_rects;
+	if (!pixman_region_append_non_o(new_reg,
+				      r1, r1_band_end,
 				      MAX(r1y1, ybot), r1->y2))
 	    goto bail;
-	COALESCE(newReg, prevBand, curBand);
+	COALESCE(new_reg, prev_band, cur_band);
 	/* Just append the rest of the boxes  */
-	APPEND_REGIONS(newReg, r1BandEnd, r1End);
-
-    } else if ((r2 != r2End) && appendNon2) {
-	/* Do first nonOverlap2Func call, which may be able to coalesce */
-	FIND_BAND(r2, r2BandEnd, r2End, r2y1);
-	curBand = newReg->data->numRects;
-	if (!pixman_region_appendNonO(newReg,
-				      r2, r2BandEnd,
+	APPEND_REGIONS(new_reg, r1_band_end, r1_end);
+
+    } else if ((r2 != r2_end) && append_non2) {
+	/* Do first non_overlap2Func call, which may be able to coalesce */
+	FIND_BAND(r2, r2_band_end, r2_end, r2y1);
+	cur_band = new_reg->data->num_rects;
+	if (!pixman_region_append_non_o(new_reg,
+				      r2, r2_band_end,
 				      MAX(r2y1, ybot), r2->y2))
 	    goto bail;
-	COALESCE(newReg, prevBand, curBand);
+	COALESCE(new_reg, prev_band, cur_band);
 	/* Append rest of boxes */
-	APPEND_REGIONS(newReg, r2BandEnd, r2End);
+	APPEND_REGIONS(new_reg, r2_band_end, r2_end);
     }
 
-    if (oldData)
-	free(oldData);
+    if (old_data)
+	free(old_data);
 
-    if (!(numRects = newReg->data->numRects))
+    if (!(num_rects = new_reg->data->num_rects))
     {
-	FREE_DATA(newReg);
-	newReg->data = pixman_region_emptyData;
+	FREE_DATA(new_reg);
+	new_reg->data = pixman_region_empty_data;
     }
-    else if (numRects == 1)
+    else if (num_rects == 1)
     {
-	newReg->extents = *PIXREGION_BOXPTR(newReg);
-	FREE_DATA(newReg);
-	newReg->data = (region_data_type_t *)NULL;
+	new_reg->extents = *PIXREGION_BOXPTR(new_reg);
+	FREE_DATA(new_reg);
+	new_reg->data = (region_data_type_t *)NULL;
     }
     else
     {
-	DOWNSIZE(newReg, numRects);
+	DOWNSIZE(new_reg, num_rects);
     }
 
     return TRUE;
 
 bail:
-    if (oldData)
-	free(oldData);
-    return pixman_break (newReg);
+    if (old_data)
+	free(old_data);
+    return pixman_break (new_reg);
 }
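
The COALESCE macro used throughout pixman_op() guards the call with a cheap count check: coalescing is only attempted when the band just emitted has exactly as many boxes as the previous one; pixman_coalesce() then verifies that the x-extents actually line up. The same guard written as a function:

    static int
    try_coalesce (region_type_t *new_reg, int prev_band, int cur_band)
    {
        if (cur_band - prev_band == new_reg->data->num_rects - cur_band)
            return pixman_coalesce (new_reg, prev_band, cur_band);

        return cur_band;
    }
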
 
 /*-
@@ -845,7 +845,7 @@ bail:
 static void
 pixman_set_extents (region_type_t *region)
 {
-    box_type_t *box, *boxEnd;
+    box_type_t *box, *box_end;
 
     if (!region->data)
 	return;
@@ -857,22 +857,22 @@ pixman_set_extents (region_type_t *region)
     }
 
     box = PIXREGION_BOXPTR(region);
-    boxEnd = PIXREGION_END(region);
+    box_end = PIXREGION_END(region);
 
     /*
      * Since box is the first rectangle in the region, it must have the
-     * smallest y1 and since boxEnd is the last rectangle in the region,
+     * smallest y1 and since box_end is the last rectangle in the region,
      * it must have the largest y2, because of banding. Initialize x1 and
-     * x2 from  box and boxEnd, resp., as GOOD things to initialize them
+     * x2 from  box and box_end, resp., as good things to initialize them
      * to...
      */
     region->extents.x1 = box->x1;
     region->extents.y1 = box->y1;
-    region->extents.x2 = boxEnd->x2;
-    region->extents.y2 = boxEnd->y2;
+    region->extents.x2 = box_end->x2;
+    region->extents.y2 = box_end->y2;
 
     assert(region->extents.y1 < region->extents.y2);
-    while (box <= boxEnd) {
+    while (box <= box_end) {
 	if (box->x1 < region->extents.x1)
 	    region->extents.x1 = box->x1;
 	if (box->x2 > region->extents.x2)
@@ -888,7 +888,7 @@ pixman_set_extents (region_type_t *region)
  *====================================================================*/
 /*-
  *-----------------------------------------------------------------------
- * pixman_region_intersectO --
+ * pixman_region_intersect_o --
  *	Handle an overlapping band for pixman_region_intersect.
  *
  * Results:
@@ -901,11 +901,11 @@ pixman_set_extents (region_type_t *region)
  */
 /*ARGSUSED*/
 static pixman_bool_t
-pixman_region_intersectO (region_type_t *region,
+pixman_region_intersect_o (region_type_t *region,
 			  box_type_t    *r1,
-			  box_type_t    *r1End,
+			  box_type_t    *r1_end,
 			  box_type_t    *r2,
-			  box_type_t    *r2End,
+			  box_type_t    *r2_end,
 			  int    	     y1,
 			  int    	     y2,
 			  int		    *overlap)
@@ -917,7 +917,7 @@ pixman_region_intersectO (region_type_t *region,
     next_rect = PIXREGION_TOP(region);
 
     assert(y1 < y2);
-    assert(r1 != r1End && r2 != r2End);
+    assert(r1 != r1_end && r2 != r2_end);
 
     do {
 	x1 = MAX(r1->x1, r2->x1);
@@ -941,68 +941,68 @@ pixman_region_intersectO (region_type_t *region,
 	if (r2->x2 == x2) {
 	    r2++;
 	}
-    } while ((r1 != r1End) && (r2 != r2End));
+    } while ((r1 != r1_end) && (r2 != r2_end));
 
     return TRUE;
 }
 
 PIXMAN_EXPORT pixman_bool_t
-PREFIX(_intersect) (region_type_t * 	newReg,
+PREFIX(_intersect) (region_type_t * 	new_reg,
 			 region_type_t * 	reg1,
 			 region_type_t *	reg2)
 {
     GOOD(reg1);
     GOOD(reg2);
-    GOOD(newReg);
+    GOOD(new_reg);
    /* check for trivial reject */
     if (PIXREGION_NIL(reg1)  || PIXREGION_NIL(reg2) ||
 	!EXTENTCHECK(&reg1->extents, &reg2->extents))
     {
 	/* Covers about 20% of all cases */
-	FREE_DATA(newReg);
-	newReg->extents.x2 = newReg->extents.x1;
-	newReg->extents.y2 = newReg->extents.y1;
+	FREE_DATA(new_reg);
+	new_reg->extents.x2 = new_reg->extents.x1;
+	new_reg->extents.y2 = new_reg->extents.y1;
 	if (PIXREGION_NAR(reg1) || PIXREGION_NAR(reg2))
 	{
-	    newReg->data = pixman_brokendata;
+	    new_reg->data = pixman_broken_data;
 	    return FALSE;
 	}
 	else
-	    newReg->data = pixman_region_emptyData;
+	    new_reg->data = pixman_region_empty_data;
     }
     else if (!reg1->data && !reg2->data)
     {
 	/* Covers about 80% of cases that aren't trivially rejected */
-	newReg->extents.x1 = MAX(reg1->extents.x1, reg2->extents.x1);
-	newReg->extents.y1 = MAX(reg1->extents.y1, reg2->extents.y1);
-	newReg->extents.x2 = MIN(reg1->extents.x2, reg2->extents.x2);
-	newReg->extents.y2 = MIN(reg1->extents.y2, reg2->extents.y2);
-	FREE_DATA(newReg);
-	newReg->data = (region_data_type_t *)NULL;
+	new_reg->extents.x1 = MAX(reg1->extents.x1, reg2->extents.x1);
+	new_reg->extents.y1 = MAX(reg1->extents.y1, reg2->extents.y1);
+	new_reg->extents.x2 = MIN(reg1->extents.x2, reg2->extents.x2);
+	new_reg->extents.y2 = MIN(reg1->extents.y2, reg2->extents.y2);
+	FREE_DATA(new_reg);
+	new_reg->data = (region_data_type_t *)NULL;
     }
     else if (!reg2->data && SUBSUMES(&reg2->extents, &reg1->extents))
     {
-	return PREFIX(_copy) (newReg, reg1);
+	return PREFIX(_copy) (new_reg, reg1);
     }
     else if (!reg1->data && SUBSUMES(&reg1->extents, &reg2->extents))
     {
-	return PREFIX(_copy) (newReg, reg2);
+	return PREFIX(_copy) (new_reg, reg2);
     }
     else if (reg1 == reg2)
     {
-	return PREFIX(_copy) (newReg, reg1);
+	return PREFIX(_copy) (new_reg, reg1);
     }
     else
     {
 	/* General purpose intersection */
 	int overlap; /* result ignored */
-	if (!pixman_op(newReg, reg1, reg2, pixman_region_intersectO, FALSE, FALSE,
+	if (!pixman_op(new_reg, reg1, reg2, pixman_region_intersect_o, FALSE, FALSE,
 			&overlap))
 	    return FALSE;
-	pixman_set_extents(newReg);
+	pixman_set_extents(new_reg);
     }
 
-    GOOD(newReg);
+    GOOD(new_reg);
     return(TRUE);
 }
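
PREFIX(_intersect) is compiled twice, once per region width, so callers see it as pixman_region_intersect() and pixman_region32_intersect(). A small usage example through the 32-bit API, assuming the usual pixman.h entry points (init_rect, init, intersect, extents, fini):

    #include <stdio.h>
    #include <pixman.h>

    int
    main (void)
    {
        pixman_region32_t a, b, result;
        pixman_box32_t *e;

        pixman_region32_init_rect (&a,  0,  0, 100, 100);
        pixman_region32_init_rect (&b, 50, 50, 100, 100);
        pixman_region32_init (&result);

        if (pixman_region32_intersect (&result, &a, &b))
        {
            e = pixman_region32_extents (&result);
            printf ("intersection: %d,%d - %d,%d\n", e->x1, e->y1, e->x2, e->y2);
        }

        pixman_region32_fini (&a);
        pixman_region32_fini (&b);
        pixman_region32_fini (&result);

        return 0;
    }
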
 
@@ -1027,7 +1027,7 @@ PREFIX(_intersect) (region_type_t * 	newReg,
 
 /*-
  *-----------------------------------------------------------------------
- * pixman_region_unionO --
+ * pixman_region_union_o --
  *	Handle an overlapping band for the union operation. Picks the
  *	left-most rectangle each time and merges it into the region.
  *
@@ -1041,12 +1041,12 @@ PREFIX(_intersect) (region_type_t * 	newReg,
  *-----------------------------------------------------------------------
  */
 static pixman_bool_t
-pixman_region_unionO (
+pixman_region_union_o (
     region_type_t	 *region,
     box_type_t *r1,
-    box_type_t *r1End,
+    box_type_t *r1_end,
     box_type_t *r2,
-    box_type_t *r2End,
+    box_type_t *r2_end,
     int	  y1,
     int	  y2,
     int		  *overlap)
@@ -1056,7 +1056,7 @@ pixman_region_unionO (
     int        x2;
 
     assert (y1 < y2);
-    assert(r1 != r1End && r2 != r2End);
+    assert(r1 != r1_end && r2 != r2_end);
 
     next_rect = PIXREGION_TOP(region);
 
@@ -1073,25 +1073,25 @@ pixman_region_unionO (
 	x2 = r2->x2;
 	r2++;
     }
-    while (r1 != r1End && r2 != r2End)
+    while (r1 != r1_end && r2 != r2_end)
     {
 	if (r1->x1 < r2->x1) MERGERECT(r1) else MERGERECT(r2);
     }
 
     /* Finish off whoever (if any) is left */
-    if (r1 != r1End)
+    if (r1 != r1_end)
     {
 	do
 	{
 	    MERGERECT(r1);
-	} while (r1 != r1End);
+	} while (r1 != r1_end);
     }
-    else if (r2 != r2End)
+    else if (r2 != r2_end)
     {
 	do
 	{
 	    MERGERECT(r2);
-	} while (r2 != r2End);
+	} while (r2 != r2_end);
     }
 
     /* Add current rectangle */
@@ -1123,7 +1123,7 @@ PREFIX(_union_rect) (region_type_t *dest,
 }
 
 PIXMAN_EXPORT pixman_bool_t
-PREFIX(_union) (region_type_t *newReg,
+PREFIX(_union) (region_type_t *new_reg,
 		     region_type_t *reg1,
 		     region_type_t *reg2)
 {
@@ -1134,7 +1134,7 @@ PREFIX(_union) (region_type_t *newReg,
      */
     GOOD(reg1);
     GOOD(reg2);
-    GOOD(newReg);
+    GOOD(new_reg);
     /*  checks all the simple cases */
 
     /*
@@ -1142,7 +1142,7 @@ PREFIX(_union) (region_type_t *newReg,
      */
     if (reg1 == reg2)
     {
-	return PREFIX(_copy) (newReg, reg1);
+	return PREFIX(_copy) (new_reg, reg1);
     }
 
     /*
@@ -1151,9 +1151,9 @@ PREFIX(_union) (region_type_t *newReg,
     if (PIXREGION_NIL(reg1))
     {
 	if (PIXREGION_NAR(reg1))
-	    return pixman_break (newReg);
-        if (newReg != reg2)
-	    return PREFIX(_copy) (newReg, reg2);
+	    return pixman_break (new_reg);
+        if (new_reg != reg2)
+	    return PREFIX(_copy) (new_reg, reg2);
         return TRUE;
     }
 
@@ -1163,9 +1163,9 @@ PREFIX(_union) (region_type_t *newReg,
     if (PIXREGION_NIL(reg2))
     {
 	if (PIXREGION_NAR(reg2))
-	    return pixman_break (newReg);
-        if (newReg != reg1)
-	    return PREFIX(_copy) (newReg, reg1);
+	    return pixman_break (new_reg);
+        if (new_reg != reg1)
+	    return PREFIX(_copy) (new_reg, reg1);
         return TRUE;
     }
 
@@ -1174,8 +1174,8 @@ PREFIX(_union) (region_type_t *newReg,
      */
     if (!reg1->data && SUBSUMES(&reg1->extents, &reg2->extents))
     {
-        if (newReg != reg1)
-	    return PREFIX(_copy) (newReg, reg1);
+        if (new_reg != reg1)
+	    return PREFIX(_copy) (new_reg, reg1);
         return TRUE;
     }
 
@@ -1184,19 +1184,19 @@ PREFIX(_union) (region_type_t *newReg,
      */
     if (!reg2->data && SUBSUMES(&reg2->extents, &reg1->extents))
     {
-        if (newReg != reg2)
-	    return PREFIX(_copy) (newReg, reg2);
+        if (new_reg != reg2)
+	    return PREFIX(_copy) (new_reg, reg2);
         return TRUE;
     }
 
-    if (!pixman_op(newReg, reg1, reg2, pixman_region_unionO, TRUE, TRUE, &overlap))
+    if (!pixman_op(new_reg, reg1, reg2, pixman_region_union_o, TRUE, TRUE, &overlap))
 	return FALSE;
 
-    newReg->extents.x1 = MIN(reg1->extents.x1, reg2->extents.x1);
-    newReg->extents.y1 = MIN(reg1->extents.y1, reg2->extents.y1);
-    newReg->extents.x2 = MAX(reg1->extents.x2, reg2->extents.x2);
-    newReg->extents.y2 = MAX(reg1->extents.y2, reg2->extents.y2);
-    GOOD(newReg);
+    new_reg->extents.x1 = MIN(reg1->extents.x1, reg2->extents.x1);
+    new_reg->extents.y1 = MIN(reg1->extents.y1, reg2->extents.y1);
+    new_reg->extents.x2 = MAX(reg1->extents.x2, reg2->extents.x2);
+    new_reg->extents.y2 = MAX(reg1->extents.y2, reg2->extents.y2);
+    GOOD(new_reg);
     return TRUE;
 }
 
@@ -1213,20 +1213,20 @@ PREFIX(_union) (region_type_t *newReg,
 }
 
 static void
-QuickSortRects(
+quick_sort_rects(
     box_type_t     rects[],
-    int        numRects)
+    int        num_rects)
 {
     int	y1;
     int	x1;
     int        i, j;
     box_type_t *r;
 
-    /* Always called with numRects > 1 */
+    /* Always called with num_rects > 1 */
 
     do
     {
-	if (numRects == 2)
+	if (num_rects == 2)
 	{
 	    if (rects[0].y1 > rects[1].y1 ||
 		    (rects[0].y1 == rects[1].y1 && rects[0].x1 > rects[1].x1))
@@ -1235,13 +1235,13 @@ QuickSortRects(
 	}
 
 	/* Choose partition element, stick in location 0 */
-        EXCHANGE_RECTS(0, numRects >> 1);
+        EXCHANGE_RECTS(0, num_rects >> 1);
 	y1 = rects[0].y1;
 	x1 = rects[0].x1;
 
         /* Partition array */
         i = 0;
-        j = numRects;
+        j = num_rects;
         do
 	{
 	    r = &(rects[i]);
@@ -1249,7 +1249,7 @@ QuickSortRects(
 	    {
 		r++;
 		i++;
-            } while (i != numRects &&
+            } while (i != num_rects &&
 		     (r->y1 < y1 || (r->y1 == y1 && r->x1 < x1)));
 	    r = &(rects[j]);
 	    do
@@ -1265,10 +1265,10 @@ QuickSortRects(
         EXCHANGE_RECTS(0, j);
 
 	/* Recurse */
-        if (numRects-j-1 > 1)
-	    QuickSortRects(&rects[j+1], numRects-j-1);
-        numRects = j;
-    } while (numRects > 1);
+        if (num_rects-j-1 > 1)
+	    quick_sort_rects(&rects[j+1], num_rects-j-1);
+        num_rects = j;
+    } while (num_rects > 1);
 }
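
quick_sort_rects() orders boxes by ascending (y1, x1), which is exactly the ordering the band logic in validate() below relies on. The comparison it implements, pulled out for clarity:

    /* TRUE when a should sort before b: smaller y1 first, then smaller x1. */
    static int
    box_before (const box_type_t *a, const box_type_t *b)
    {
        return a->y1 < b->y1 || (a->y1 == b->y1 && a->x1 < b->x1);
    }
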
 
 /*-
@@ -1311,22 +1311,22 @@ validate (region_type_t * badreg,
     /* Descriptor for regions under construction  in Step 2. */
     typedef struct {
 	region_type_t   reg;
-	int	    prevBand;
-	int	    curBand;
-    } RegionInfo;
+	int	    prev_band;
+	int	    cur_band;
+    } region_info_t;
 
-    RegionInfo stack_regions[64];
+    region_info_t stack_regions[64];
 
-	     int	numRects;   /* Original numRects for badreg	    */
-	     RegionInfo *ri;	    /* Array of current regions		    */
-    	     int	numRI;      /* Number of entries used in ri	    */
-	     int	sizeRI;	    /* Number of entries available in ri    */
+	     int	num_rects;   /* Original num_rects for badreg	    */
+	     region_info_t *ri;	    /* Array of current regions		    */
+    	     int	num_ri;      /* Number of entries used in ri	    */
+	     int	size_ri;	    /* Number of entries available in ri    */
 	     int	i;	    /* Index into rects			    */
     int	j;	    /* Index into ri			    */
-    RegionInfo *rit;       /* &ri[j]				    */
+    region_info_t *rit;       /* &ri[j]				    */
     region_type_t *  reg;        /* ri[j].reg			    */
     box_type_t *	box;	    /* Current box in rects		    */
-    box_type_t *	riBox;      /* Last box in ri[j].reg		    */
+    box_type_t *	ri_box;      /* Last box in ri[j].reg		    */
     region_type_t *  hreg;       /* ri[j_half].reg			    */
     pixman_bool_t ret = TRUE;
 
@@ -1336,8 +1336,8 @@ validate (region_type_t * badreg,
 	GOOD(badreg);
 	return TRUE;
     }
-    numRects = badreg->data->numRects;
-    if (!numRects)
+    num_rects = badreg->data->num_rects;
+    if (!num_rects)
     {
 	if (PIXREGION_NAR(badreg))
 	    return FALSE;
@@ -1346,37 +1346,37 @@ validate (region_type_t * badreg,
     }
     if (badreg->extents.x1 < badreg->extents.x2)
     {
-	if ((numRects) == 1)
+	if ((num_rects) == 1)
 	{
 	    FREE_DATA(badreg);
 	    badreg->data = (region_data_type_t *) NULL;
 	}
 	else
 	{
-	    DOWNSIZE(badreg, numRects);
+	    DOWNSIZE(badreg, num_rects);
 	}
 	GOOD(badreg);
 	return TRUE;
     }
 
     /* Step 1: Sort the rects array into ascending (y1, x1) order */
-    QuickSortRects(PIXREGION_BOXPTR(badreg), numRects);
+    quick_sort_rects(PIXREGION_BOXPTR(badreg), num_rects);
 
     /* Step 2: Scatter the sorted array into the minimum number of regions */
 
     /* Set up the first region to be the first rectangle in badreg */
     /* Note that step 2 code will never overflow the ri[0].reg rects array */
     ri = stack_regions;
-    sizeRI = sizeof (stack_regions) / sizeof (stack_regions[0]);
-    numRI = 1;
-    ri[0].prevBand = 0;
-    ri[0].curBand = 0;
+    size_ri = sizeof (stack_regions) / sizeof (stack_regions[0]);
+    num_ri = 1;
+    ri[0].prev_band = 0;
+    ri[0].cur_band = 0;
     ri[0].reg = *badreg;
     box = PIXREGION_BOXPTR(&ri[0].reg);
     ri[0].reg.extents = *box;
-    ri[0].reg.data->numRects = 1;
-    badreg->extents = *pixman_region_emptyBox;
-    badreg->data = pixman_region_emptyData;
+    ri[0].reg.data->num_rects = 1;
+    badreg->extents = *pixman_region_empty_box;
+    badreg->data = pixman_region_empty_data;
 
     /* Now scatter rectangles into the minimum set of valid regions.  If the
        next rectangle to be added to a region would force an existing rectangle
@@ -1384,92 +1384,92 @@ validate (region_type_t * badreg,
        forget it.  Try the next region.  If it doesn't fit cleanly into any
        region, make a new one. */
 
-    for (i = numRects; --i > 0;)
+    for (i = num_rects; --i > 0;)
     {
 	box++;
 	/* Look for a region to append box to */
-	for (j = numRI, rit = ri; --j >= 0; rit++)
+	for (j = num_ri, rit = ri; --j >= 0; rit++)
 	{
 	    reg = &rit->reg;
-	    riBox = PIXREGION_END(reg);
+	    ri_box = PIXREGION_END(reg);
 
-	    if (box->y1 == riBox->y1 && box->y2 == riBox->y2)
+	    if (box->y1 == ri_box->y1 && box->y2 == ri_box->y2)
 	    {
-		/* box is in same band as riBox.  Merge or append it */
-		if (box->x1 <= riBox->x2)
+		/* box is in same band as ri_box.  Merge or append it */
+		if (box->x1 <= ri_box->x2)
 		{
-		    /* Merge it with riBox */
-		    if (box->x1 < riBox->x2) *overlap = TRUE;
-		    if (box->x2 > riBox->x2) riBox->x2 = box->x2;
+		    /* Merge it with ri_box */
+		    if (box->x1 < ri_box->x2) *overlap = TRUE;
+		    if (box->x2 > ri_box->x2) ri_box->x2 = box->x2;
 		}
 		else
 		{
 		    RECTALLOC_BAIL(reg, 1, bail);
 		    *PIXREGION_TOP(reg) = *box;
-		    reg->data->numRects++;
+		    reg->data->num_rects++;
 		}
-		goto NextRect;   /* So sue me */
+		goto next_rect;   /* So sue me */
 	    }
-	    else if (box->y1 >= riBox->y2)
+	    else if (box->y1 >= ri_box->y2)
 	    {
 		/* Put box into new band */
-		if (reg->extents.x2 < riBox->x2) reg->extents.x2 = riBox->x2;
+		if (reg->extents.x2 < ri_box->x2) reg->extents.x2 = ri_box->x2;
 		if (reg->extents.x1 > box->x1)   reg->extents.x1 = box->x1;
-		COALESCE(reg, rit->prevBand, rit->curBand);
-		rit->curBand = reg->data->numRects;
+		COALESCE(reg, rit->prev_band, rit->cur_band);
+		rit->cur_band = reg->data->num_rects;
 		RECTALLOC_BAIL(reg, 1, bail);
 		*PIXREGION_TOP(reg) = *box;
-		reg->data->numRects++;
-		goto NextRect;
+		reg->data->num_rects++;
+		goto next_rect;
 	    }
 	    /* Well, this region was inappropriate.  Try the next one. */
 	} /* for j */
 
 	/* Uh-oh.  No regions were appropriate.  Create a new one. */
-	if (sizeRI == numRI)
+	if (size_ri == num_ri)
 	{
 	    size_t data_size;
 	    
 	    /* Oops, allocate space for new region information */
-	    sizeRI <<= 1;
+	    size_ri <<= 1;
 
-            data_size = sizeRI * sizeof(RegionInfo);
-            if (data_size / sizeRI != sizeof(RegionInfo))
+            data_size = size_ri * sizeof(region_info_t);
+            if (data_size / size_ri != sizeof(region_info_t))
                 goto bail;
 	    if (ri == stack_regions) {
 		rit = malloc (data_size);
 		if (!rit)
 		    goto bail;
-		memcpy (rit, ri, numRI * sizeof (RegionInfo));
+		memcpy (rit, ri, num_ri * sizeof (region_info_t));
 	    } else {
-		rit = (RegionInfo *) realloc(ri, data_size);
+		rit = (region_info_t *) realloc(ri, data_size);
 		if (!rit)
 		    goto bail;
 	    }
 	    ri = rit;
-	    rit = &ri[numRI];
+	    rit = &ri[num_ri];
 	}
-	numRI++;
-	rit->prevBand = 0;
-	rit->curBand = 0;
+	num_ri++;
+	rit->prev_band = 0;
+	rit->cur_band = 0;
 	rit->reg.extents = *box;
 	rit->reg.data = (region_data_type_t *)NULL;
-	if (!pixman_rect_alloc(&rit->reg, (i+numRI) / numRI)) /* MUST force allocation */
+	if (!pixman_rect_alloc(&rit->reg, (i+num_ri) / num_ri)) /* MUST force allocation */
 	    goto bail;
-NextRect: ;
+next_rect: ;
     } /* for i */
 
     /* Make a final pass over each region in order to COALESCE and set
        extents.x2 and extents.y2 */
 
-    for (j = numRI, rit = ri; --j >= 0; rit++)
+    for (j = num_ri, rit = ri; --j >= 0; rit++)
     {
 	reg = &rit->reg;
-	riBox = PIXREGION_END(reg);
-	reg->extents.y2 = riBox->y2;
-	if (reg->extents.x2 < riBox->x2) reg->extents.x2 = riBox->x2;
-	COALESCE(reg, rit->prevBand, rit->curBand);
-	if (reg->data->numRects == 1) /* keep unions happy below */
+	ri_box = PIXREGION_END(reg);
+	reg->extents.y2 = ri_box->y2;
+	if (reg->extents.x2 < ri_box->x2) reg->extents.x2 = ri_box->x2;
+	COALESCE(reg, rit->prev_band, rit->cur_band);
+	if (reg->data->num_rects == 1) /* keep unions happy below */
 	{
 	    FREE_DATA(reg);
 	    reg->data = (region_data_type_t *)NULL;
@@ -1477,14 +1477,14 @@ NextRect: ;
     }
 
     /* Step 3: Union all regions into a single region */
-    while (numRI > 1)
+    while (num_ri > 1)
     {
-	int half = numRI/2;
-	for (j = numRI & 1; j < (half + (numRI & 1)); j++)
+	int half = num_ri/2;
+	for (j = num_ri & 1; j < (half + (num_ri & 1)); j++)
 	{
 	    reg = &ri[j].reg;
 	    hreg = &ri[j+half].reg;
-	    if (!pixman_op(reg, reg, hreg, pixman_region_unionO, TRUE, TRUE, overlap))
+	    if (!pixman_op(reg, reg, hreg, pixman_region_union_o, TRUE, TRUE, overlap))
 		ret = FALSE;
 	    if (hreg->extents.x1 < reg->extents.x1)
 		reg->extents.x1 = hreg->extents.x1;
@@ -1496,7 +1496,7 @@ NextRect: ;
 		reg->extents.y2 = hreg->extents.y2;
 	    FREE_DATA(hreg);
 	}
-	numRI -= half;
+	num_ri -= half;
 	if (!ret)
 	    goto bail;
     }
@@ -1506,7 +1506,7 @@ NextRect: ;
     GOOD(badreg);
     return ret;
 bail:
-    for (i = 0; i < numRI; i++)
+    for (i = 0; i < num_ri; i++)
 	FREE_DATA(&ri[i].reg);
     if (ri != stack_regions)
 	free (ri);
@@ -1520,7 +1520,7 @@ bail:
 
 /*-
  *-----------------------------------------------------------------------
- * pixman_region_subtractO --
+ * pixman_region_subtract_o --
  *	Overlapping band subtraction. x1 is the left-most point not yet
  *	checked.
  *
@@ -1534,12 +1534,12 @@ bail:
  */
 /*ARGSUSED*/
 static pixman_bool_t
-pixman_region_subtractO (
+pixman_region_subtract_o (
     region_type_t *	region,
     box_type_t *	r1,
-    box_type_t *  	  	r1End,
+    box_type_t *  	  	r1_end,
     box_type_t *	r2,
-    box_type_t *  	  	r2End,
+    box_type_t *  	  	r2_end,
     int  	y1,
     int  	y2,
     int		*overlap)
@@ -1550,7 +1550,7 @@ pixman_region_subtractO (
     x1 = r1->x1;
 
     assert(y1<y2);
-    assert(r1 != r1End && r2 != r2End);
+    assert(r1 != r1_end && r2 != r2_end);
 
     next_rect = PIXREGION_TOP(region);
 
@@ -1576,7 +1576,7 @@ pixman_region_subtractO (
 		 * reset left fence to edge of new minuend.
 		 */
 		r1++;
-		if (r1 != r1End)
+		if (r1 != r1_end)
 		    x1 = r1->x1;
 	    }
 	    else
@@ -1604,7 +1604,7 @@ pixman_region_subtractO (
 		 * Minuend used up: advance to new...
 		 */
 		r1++;
-		if (r1 != r1End)
+		if (r1 != r1_end)
 		    x1 = r1->x1;
 	    }
 	    else
@@ -1623,20 +1623,20 @@ pixman_region_subtractO (
 	    if (r1->x2 > x1)
 		NEWRECT(region, next_rect, x1, y1, r1->x2, y2);
 	    r1++;
-	    if (r1 != r1End)
+	    if (r1 != r1_end)
 		x1 = r1->x1;
 	}
-    } while ((r1 != r1End) && (r2 != r2End));
+    } while ((r1 != r1_end) && (r2 != r2_end));
 
     /*
      * Add remaining minuend rectangles to region.
      */
-    while (r1 != r1End)
+    while (r1 != r1_end)
     {
 	assert(x1<r1->x2);
 	NEWRECT(region, next_rect, x1, y1, r1->x2, y2);
 	r1++;
-	if (r1 != r1End)
+	if (r1 != r1_end)
 	    x1 = r1->x1;
     }
     return TRUE;
@@ -1645,59 +1645,59 @@ pixman_region_subtractO (
 /*-
  *-----------------------------------------------------------------------
  * pixman_region_subtract --
- *	Subtract regS from regM and leave the result in regD.
+ *	Subtract reg_s from reg_m and leave the result in reg_d.
  *	S stands for subtrahend, M for minuend and D for difference.
  *
  * Results:
  *	TRUE if successful.
  *
  * Side Effects:
- *	regD is overwritten.
+ *	reg_d is overwritten.
  *
  *-----------------------------------------------------------------------
  */
 PIXMAN_EXPORT pixman_bool_t
-PREFIX(_subtract) (region_type_t *	regD,
-		       region_type_t * 	regM,
-		       region_type_t *	regS)
+PREFIX(_subtract) (region_type_t *	reg_d,
+		       region_type_t * 	reg_m,
+		       region_type_t *	reg_s)
 {
     int overlap; /* result ignored */
 
-    GOOD(regM);
-    GOOD(regS);
-    GOOD(regD);
+    GOOD(reg_m);
+    GOOD(reg_s);
+    GOOD(reg_d);
    /* check for trivial rejects */
-    if (PIXREGION_NIL(regM) || PIXREGION_NIL(regS) ||
-	!EXTENTCHECK(&regM->extents, &regS->extents))
+    if (PIXREGION_NIL(reg_m) || PIXREGION_NIL(reg_s) ||
+	!EXTENTCHECK(&reg_m->extents, &reg_s->extents))
     {
-	if (PIXREGION_NAR (regS))
-	    return pixman_break (regD);
-	return PREFIX(_copy) (regD, regM);
+	if (PIXREGION_NAR (reg_s))
+	    return pixman_break (reg_d);
+	return PREFIX(_copy) (reg_d, reg_m);
     }
-    else if (regM == regS)
+    else if (reg_m == reg_s)
     {
-	FREE_DATA(regD);
-	regD->extents.x2 = regD->extents.x1;
-	regD->extents.y2 = regD->extents.y1;
-	regD->data = pixman_region_emptyData;
+	FREE_DATA(reg_d);
+	reg_d->extents.x2 = reg_d->extents.x1;
+	reg_d->extents.y2 = reg_d->extents.y1;
+	reg_d->data = pixman_region_empty_data;
 	return TRUE;
     }
 
     /* Add those rectangles in region 1 that aren't in region 2,
        do yucky substraction for overlaps, and
        just throw away rectangles in region 2 that aren't in region 1 */
-    if (!pixman_op(regD, regM, regS, pixman_region_subtractO, TRUE, FALSE, &overlap))
+    if (!pixman_op(reg_d, reg_m, reg_s, pixman_region_subtract_o, TRUE, FALSE, &overlap))
 	return FALSE;
 
     /*
-     * Can't alter RegD's extents before we call pixman_op because
+     * Can't alter reg_d's extents before we call pixman_op because
      * it might be one of the source regions and pixman_op depends
      * on the extents of those regions being unaltered. Besides, this
      * way there's no checking against rectangles that will be nuked
      * due to coalescing, so we have to examine fewer rectangles.
      */
-    pixman_set_extents(regD);
-    GOOD(regD);
+    pixman_set_extents(reg_d);
+    GOOD(reg_d);
     return TRUE;
 }
 
@@ -1716,67 +1716,67 @@ PREFIX(_subtract) (region_type_t *	regD,
  *	TRUE.
  *
  * Side Effects:
- *	newReg is overwritten.
+ *	new_reg is overwritten.
  *
  *-----------------------------------------------------------------------
  */
 pixman_bool_t
-PIXMAN_EXPORT PREFIX(_inverse) (region_type_t * 	  newReg,       /* Destination region */
+PIXMAN_EXPORT PREFIX(_inverse) (region_type_t * 	  new_reg,       /* Destination region */
 		      region_type_t * 	  reg1,         /* Region to invert */
-		      box_type_t *     	  invRect) 	/* Bounding box for inversion */
+		      box_type_t *     	  inv_rect) 	/* Bounding box for inversion */
 {
-    region_type_t	  invReg;   	/* Quick and dirty region made from the
+    region_type_t	  inv_reg;   	/* Quick and dirty region made from the
 				 * bounding box */
     int	  overlap;	/* result ignored */
 
     GOOD(reg1);
-    GOOD(newReg);
+    GOOD(new_reg);
    /* check for trivial rejects */
-    if (PIXREGION_NIL(reg1) || !EXTENTCHECK(invRect, &reg1->extents))
+    if (PIXREGION_NIL(reg1) || !EXTENTCHECK(inv_rect, &reg1->extents))
     {
 	if (PIXREGION_NAR(reg1))
-	    return pixman_break (newReg);
-	newReg->extents = *invRect;
-	FREE_DATA(newReg);
-	newReg->data = (region_data_type_t *)NULL;
+	    return pixman_break (new_reg);
+	new_reg->extents = *inv_rect;
+	FREE_DATA(new_reg);
+	new_reg->data = (region_data_type_t *)NULL;
         return TRUE;
     }
 
     /* Add those rectangles in region 1 that aren't in region 2,
        do yucky substraction for overlaps, and
        just throw away rectangles in region 2 that aren't in region 1 */
-    invReg.extents = *invRect;
-    invReg.data = (region_data_type_t *)NULL;
-    if (!pixman_op(newReg, &invReg, reg1, pixman_region_subtractO, TRUE, FALSE, &overlap))
+    inv_reg.extents = *inv_rect;
+    inv_reg.data = (region_data_type_t *)NULL;
+    if (!pixman_op(new_reg, &inv_reg, reg1, pixman_region_subtract_o, TRUE, FALSE, &overlap))
 	return FALSE;
 
     /*
-     * Can't alter newReg's extents before we call pixman_op because
+     * Can't alter new_reg's extents before we call pixman_op because
      * it might be one of the source regions and pixman_op depends
      * on the extents of those regions being unaltered. Besides, this
      * way there's no checking against rectangles that will be nuked
      * due to coalescing, so we have to examine fewer rectangles.
      */
-    pixman_set_extents(newReg);
-    GOOD(newReg);
+    pixman_set_extents(new_reg);
+    GOOD(new_reg);
     return TRUE;
 }
 
 /*
- *   RectIn(region, rect)
+ *   rect_in(region, rect)
  *   This routine takes a pointer to a region and a pointer to a box
  *   and determines if the box is outside/inside/partly inside the region.
  *
  *   The idea is to travel through the list of rectangles trying to cover the
  *   passed box with them. Anytime a piece of the rectangle isn't covered
- *   by a band of rectangles, partOut is set TRUE. Any time a rectangle in
- *   the region covers part of the box, partIn is set TRUE. The process ends
+ *   by a band of rectangles, part_out is set TRUE. Any time a rectangle in
+ *   the region covers part of the box, part_in is set TRUE. The process ends
  *   when either the box has been completely covered (we reached a band that
- *   doesn't overlap the box, partIn is TRUE and partOut is false), the
- *   box has been partially covered (partIn == partOut == TRUE -- because of
+ *   doesn't overlap the box, part_in is TRUE and part_out is false), the
+ *   box has been partially covered (part_in == part_out == TRUE -- because of
  *   the banding, the first time this is true we know the box is only
  *   partially in the region) or is outside the region (we reached a band
- *   that doesn't overlap the box at all and partIn is false)
+ *   that doesn't overlap the box at all and part_in is false)
  */
 
 pixman_region_overlap_t
@@ -1786,17 +1786,17 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
     int	x;
     int	y;
     box_type_t *     pbox;
-    box_type_t *     pboxEnd;
-    int			partIn, partOut;
-    int			numRects;
+    box_type_t *     pbox_end;
+    int			part_in, part_out;
+    int			num_rects;
 
     GOOD(region);
-    numRects = PIXREGION_NUM_RECTS(region);
+    num_rects = PIXREGION_NUM_RECTS(region);
     /* useful optimization */
-    if (!numRects || !EXTENTCHECK(&region->extents, prect))
+    if (!num_rects || !EXTENTCHECK(&region->extents, prect))
         return(PIXMAN_REGION_OUT);
 
-    if (numRects == 1)
+    if (num_rects == 1)
     {
 	/* We know that it must be PIXMAN_REGION_IN or PIXMAN_REGION_PART */
 	if (SUBSUMES(&region->extents, prect))
@@ -1805,16 +1805,16 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
 	    return(PIXMAN_REGION_PART);
     }
 
-    partOut = FALSE;
-    partIn = FALSE;
+    part_out = FALSE;
+    part_in = FALSE;
 
     /* (x,y) starts at upper left of rect, moving to the right and down */
     x = prect->x1;
     y = prect->y1;
 
-    /* can stop when both partOut and partIn are TRUE, or we reach prect->y2 */
-    for (pbox = PIXREGION_BOXPTR(region), pboxEnd = pbox + numRects;
-         pbox != pboxEnd;
+    /* can stop when both part_out and part_in are TRUE, or we reach prect->y2 */
+    for (pbox = PIXREGION_BOXPTR(region), pbox_end = pbox + num_rects;
+         pbox != pbox_end;
          pbox++)
     {
 
@@ -1823,8 +1823,8 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
 
         if (pbox->y1 > y)
         {
-           partOut = TRUE;      /* missed part of rectangle above */
-           if (partIn || (pbox->y1 >= prect->y2))
+           part_out = TRUE;      /* missed part of rectangle above */
+           if (part_in || (pbox->y1 >= prect->y2))
               break;
            y = pbox->y1;        /* x guaranteed to be == prect->x1 */
         }
@@ -1834,15 +1834,15 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
 
         if (pbox->x1 > x)
         {
-           partOut = TRUE;      /* missed part of rectangle to left */
-           if (partIn)
+           part_out = TRUE;      /* missed part of rectangle to left */
+           if (part_in)
               break;
         }
 
         if (pbox->x1 < prect->x2)
         {
-            partIn = TRUE;      /* definitely overlap */
-            if (partOut)
+            part_in = TRUE;      /* definitely overlap */
+            if (part_out)
                break;
         }
 
@@ -1859,15 +1859,15 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
 	     * Because boxes in a band are maximal width, if the first box
 	     * to overlap the rectangle doesn't completely cover it in that
 	     * band, the rectangle must be partially out, since some of it
-	     * will be uncovered in that band. partIn will have been set true
+	     * will be uncovered in that band. part_in will have been set true
 	     * by now...
 	     */
-	    partOut = TRUE;
+	    part_out = TRUE;
 	    break;
 	}
     }
 
-    if (partIn)
+    if (part_in)
     {
 	if (y < prect->y2)
 	    return PIXMAN_REGION_PART;
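
    The part_in/part_out scan above classifies a query box against the banded
    rectangle list without walking more boxes than necessary.  A usage sketch
    of the three possible answers via the public 32-bit API (illustration
    only, not part of this patch):

#include <assert.h>
#include <pixman.h>

static void
contains_example (void)
{
    pixman_region32_t region;
    pixman_box32_t inside  = {  10,  10,  20,  20 };
    pixman_box32_t partial = {  90,  90, 110, 110 };
    pixman_box32_t outside = { 200, 200, 210, 210 };

    pixman_region32_init_rect (&region, 0, 0, 100, 100);

    assert (pixman_region32_contains_rectangle (&region, &inside)  == PIXMAN_REGION_IN);
    assert (pixman_region32_contains_rectangle (&region, &partial) == PIXMAN_REGION_PART);
    assert (pixman_region32_contains_rectangle (&region, &outside) == PIXMAN_REGION_OUT);

    pixman_region32_fini (&region);
}
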
@@ -1898,7 +1898,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
     region->extents.y2 = y2 = region->extents.y2 + y;
     if (((x1 - SHRT_MIN)|(y1 - SHRT_MIN)|(SHRT_MAX - x2)|(SHRT_MAX - y2)) >= 0)
     {
-	if (region->data && (nbox = region->data->numRects))
+	if (region->data && (nbox = region->data->num_rects))
 	{
 	    for (pbox = PIXREGION_BOXPTR(region); nbox--; pbox++)
 	    {
@@ -1915,7 +1915,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
 	region->extents.x2 = region->extents.x1;
 	region->extents.y2 = region->extents.y1;
 	FREE_DATA(region);
-	region->data = pixman_region_emptyData;
+	region->data = pixman_region_empty_data;
 	return;
     }
     if (x1 < SHRT_MIN)
@@ -1926,35 +1926,35 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
 	region->extents.y1 = SHRT_MIN;
     else if (y2 > SHRT_MAX)
 	region->extents.y2 = SHRT_MAX;
-    if (region->data && (nbox = region->data->numRects))
+    if (region->data && (nbox = region->data->num_rects))
     {
-	box_type_t * pboxout;
+	box_type_t * pbox_out;
 
-	for (pboxout = pbox = PIXREGION_BOXPTR(region); nbox--; pbox++)
+	for (pbox_out = pbox = PIXREGION_BOXPTR(region); nbox--; pbox++)
 	{
-	    pboxout->x1 = x1 = pbox->x1 + x;
-	    pboxout->y1 = y1 = pbox->y1 + y;
-	    pboxout->x2 = x2 = pbox->x2 + x;
-	    pboxout->y2 = y2 = pbox->y2 + y;
+	    pbox_out->x1 = x1 = pbox->x1 + x;
+	    pbox_out->y1 = y1 = pbox->y1 + y;
+	    pbox_out->x2 = x2 = pbox->x2 + x;
+	    pbox_out->y2 = y2 = pbox->y2 + y;
 	    if (((x2 - SHRT_MIN)|(y2 - SHRT_MIN)|
 		 (SHRT_MAX - x1)|(SHRT_MAX - y1)) <= 0)
 	    {
-		region->data->numRects--;
+		region->data->num_rects--;
 		continue;
 	    }
 	    if (x1 < SHRT_MIN)
-		pboxout->x1 = SHRT_MIN;
+		pbox_out->x1 = SHRT_MIN;
 	    else if (x2 > SHRT_MAX)
-		pboxout->x2 = SHRT_MAX;
+		pbox_out->x2 = SHRT_MAX;
 	    if (y1 < SHRT_MIN)
-		pboxout->y1 = SHRT_MIN;
+		pbox_out->y1 = SHRT_MIN;
 	    else if (y2 > SHRT_MAX)
-		pboxout->y2 = SHRT_MAX;
-	    pboxout++;
+		pbox_out->y2 = SHRT_MAX;
+	    pbox_out++;
 	}
-	if (pboxout != pbox)
+	if (pbox_out != pbox)
 	{
-	    if (region->data->numRects == 1)
+	    if (region->data->num_rects == 1)
 	    {
 		region->extents = *PIXREGION_BOXPTR(region);
 		FREE_DATA(region);
@@ -1983,22 +1983,22 @@ PREFIX(_contains_point) (region_type_t * region,
 			     int x, int y,
 			     box_type_t * box)
 {
-    box_type_t *pbox, *pboxEnd;
-    int numRects;
+    box_type_t *pbox, *pbox_end;
+    int num_rects;
 
     GOOD(region);
-    numRects = PIXREGION_NUM_RECTS(region);
-    if (!numRects || !INBOX(&region->extents, x, y))
+    num_rects = PIXREGION_NUM_RECTS(region);
+    if (!num_rects || !INBOX(&region->extents, x, y))
         return(FALSE);
-    if (numRects == 1)
+    if (num_rects == 1)
     {
         if (box)
 	    *box = region->extents;
 
 	return(TRUE);
     }
-    for (pbox = PIXREGION_BOXPTR(region), pboxEnd = pbox + numRects;
-	 pbox != pboxEnd;
+    for (pbox = PIXREGION_BOXPTR(region), pbox_end = pbox + num_rects;
+	 pbox != pbox_end;
 	 pbox++)
     {
         if (y >= pbox->y2)
@@ -2041,39 +2041,39 @@ PIXMAN_EXPORT pixman_bool_t
 PREFIX(_selfcheck) (reg)
     region_type_t * reg;
 {
-    int i, numRects;
+    int i, num_rects;
 
     if ((reg->extents.x1 > reg->extents.x2) ||
 	(reg->extents.y1 > reg->extents.y2))
 	return FALSE;
-    numRects = PIXREGION_NUM_RECTS(reg);
-    if (!numRects)
+    num_rects = PIXREGION_NUM_RECTS(reg);
+    if (!num_rects)
 	return ((reg->extents.x1 == reg->extents.x2) &&
 		(reg->extents.y1 == reg->extents.y2) &&
-		(reg->data->size || (reg->data == pixman_region_emptyData)));
-    else if (numRects == 1)
+		(reg->data->size || (reg->data == pixman_region_empty_data)));
+    else if (num_rects == 1)
 	return (!reg->data);
     else
     {
-	box_type_t * pboxP, * pboxN;
+	box_type_t * pbox_p, * pbox_n;
 	box_type_t box;
 
-	pboxP = PIXREGION_RECTS(reg);
-	box = *pboxP;
-	box.y2 = pboxP[numRects-1].y2;
-	pboxN = pboxP + 1;
-	for (i = numRects; --i > 0; pboxP++, pboxN++)
+	pbox_p = PIXREGION_RECTS(reg);
+	box = *pbox_p;
+	box.y2 = pbox_p[num_rects-1].y2;
+	pbox_n = pbox_p + 1;
+	for (i = num_rects; --i > 0; pbox_p++, pbox_n++)
 	{
-	    if ((pboxN->x1 >= pboxN->x2) ||
-		(pboxN->y1 >= pboxN->y2))
+	    if ((pbox_n->x1 >= pbox_n->x2) ||
+		(pbox_n->y1 >= pbox_n->y2))
 		return FALSE;
-	    if (pboxN->x1 < box.x1)
-	        box.x1 = pboxN->x1;
-	    if (pboxN->x2 > box.x2)
-		box.x2 = pboxN->x2;
-	    if ((pboxN->y1 < pboxP->y1) ||
-		((pboxN->y1 == pboxP->y1) &&
-		 ((pboxN->x1 < pboxP->x2) || (pboxN->y2 != pboxP->y2))))
+	    if (pbox_n->x1 < box.x1)
+	        box.x1 = pbox_n->x1;
+	    if (pbox_n->x2 > box.x2)
+		box.x2 = pbox_n->x2;
+	    if ((pbox_n->y1 < pbox_p->y1) ||
+		((pbox_n->y1 == pbox_p->y1) &&
+		 ((pbox_n->x1 < pbox_p->x2) || (pbox_n->y2 != pbox_p->y2))))
 		return FALSE;
 	}
 	return ((box.x1 == reg->extents.x1) &&
@@ -2107,7 +2107,7 @@ PREFIX(_init_rects) (region_type_t *region,
     /* if it's 0, don't call pixman_rect_alloc -- 0 rectangles is
      * a special case, and causing pixman_rect_alloc would cause
      * us to leak memory (because the 0-rect case should be the
-     * static pixman_region_emptyData data).
+     * static pixman_region_empty_data data).
      */
     if (count == 0)
         return TRUE;
diff --git a/pixman/pixman-region16.c b/pixman/pixman-region16.c
index 13f739a..760a2b4 100644
--- a/pixman/pixman-region16.c
+++ b/pixman/pixman-region16.c
@@ -57,7 +57,7 @@ pixman_region_set_static_pointers (pixman_box16_t *empty_box,
 				   pixman_region16_data_t *empty_data,
 				   pixman_region16_data_t *broken_data)
 {
-    pixman_region_emptyBox = empty_box;
-    pixman_region_emptyData = empty_data;
-    pixman_brokendata = broken_data;
+    pixman_region_empty_box = empty_box;
+    pixman_region_empty_data = empty_data;
+    pixman_broken_data = broken_data;
 }
diff --git a/pixman/pixman-solid-fill.c b/pixman/pixman-solid-fill.c
index 1359fcd..7e50970 100644
--- a/pixman/pixman-solid-fill.c
+++ b/pixman/pixman-solid-fill.c
@@ -26,7 +26,7 @@
 
 static void
 solid_fill_get_scanline_32 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
-			    const uint32_t *mask, uint32_t maskBits)
+			    const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t *end = buffer + width;
     register uint32_t color = ((solid_fill_t *)image)->color;
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index 5813649..c739cd8 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -45,27 +45,27 @@
 static __m64 mask_x0080;
 static __m64 mask_x00ff;
 static __m64 mask_x0101;
-static __m64 mask_xAlpha;
+static __m64 mask_x_alpha;
 
-static __m64 mask_x565rgb;
-static __m64 mask_x565Unpack;
+static __m64 mask_x565_rgb;
+static __m64 mask_x565_unpack;
 
-static __m128i Mask0080;
-static __m128i Mask00ff;
-static __m128i Mask0101;
-static __m128i Maskffff;
-static __m128i Maskff000000;
-static __m128i MaskAlpha;
+static __m128i mask_0080;
+static __m128i mask_00ff;
+static __m128i mask_0101;
+static __m128i mask_ffff;
+static __m128i mask_ff000000;
+static __m128i mask_alpha;
 
-static __m128i Mask565r;
-static __m128i Mask565g1, Mask565g2;
-static __m128i Mask565b;
-static __m128i MaskRed;
-static __m128i MaskGreen;
-static __m128i MaskBlue;
+static __m128i mask_565_r;
+static __m128i mask_565_g1, mask_565_g2;
+static __m128i mask_565_b;
+static __m128i mask_red;
+static __m128i mask_green;
+static __m128i mask_blue;
 
-static __m128i Mask565FixRB;
-static __m128i Mask565FixG;
+static __m128i mask_565_fix_rb;
+static __m128i mask_565_fix_g;
 
 /* -------------------------------------------------------------------------------------------------
  * SSE2 Inlines
@@ -77,27 +77,27 @@ unpack_32_1x128 (uint32_t data)
 }
 
 static force_inline void
-unpack_128_2x128 (__m128i data, __m128i* dataLo, __m128i* dataHi)
+unpack_128_2x128 (__m128i data, __m128i* data_lo, __m128i* data_hi)
 {
-    *dataLo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ());
-    *dataHi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ());
+    *data_lo = _mm_unpacklo_epi8 (data, _mm_setzero_si128 ());
+    *data_hi = _mm_unpackhi_epi8 (data, _mm_setzero_si128 ());
 }
 
 static force_inline __m128i
-unpack565to8888 (__m128i lo)
+unpack_565to8888 (__m128i lo)
 {
     __m128i r, g, b, rb, t;
     
-    r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), MaskRed);
-    g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), MaskGreen);
-    b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), MaskBlue);
+    r = _mm_and_si128 (_mm_slli_epi32 (lo, 8), mask_red);
+    g = _mm_and_si128 (_mm_slli_epi32 (lo, 5), mask_green);
+    b = _mm_and_si128 (_mm_slli_epi32 (lo, 3), mask_blue);
 
     rb = _mm_or_si128 (r, b);
-    t  = _mm_and_si128 (rb, Mask565FixRB);
+    t  = _mm_and_si128 (rb, mask_565_fix_rb);
     t  = _mm_srli_epi32 (t, 5);
     rb = _mm_or_si128 (rb, t);
 
-    t  = _mm_and_si128 (g, Mask565FixG);
+    t  = _mm_and_si128 (g, mask_565_fix_g);
     t  = _mm_srli_epi32 (t, 6);
     g  = _mm_or_si128 (g, t);
     
@@ -105,22 +105,22 @@ unpack565to8888 (__m128i lo)
 }
 
 static force_inline void
-unpack565_128_4x128 (__m128i data, __m128i* data0, __m128i* data1, __m128i* data2, __m128i* data3)
+unpack_565_128_4x128 (__m128i data, __m128i* data0, __m128i* data1, __m128i* data2, __m128i* data3)
 {
     __m128i lo, hi;
 
     lo = _mm_unpacklo_epi16 (data, _mm_setzero_si128 ());
     hi = _mm_unpackhi_epi16 (data, _mm_setzero_si128 ());
 
-    lo = unpack565to8888 (lo);
-    hi = unpack565to8888 (hi);
+    lo = unpack_565to8888 (lo);
+    hi = unpack_565to8888 (hi);
 
     unpack_128_2x128 (lo, data0, data1);
     unpack_128_2x128 (hi, data2, data3);
 }
 
 static force_inline uint16_t
-pack565_32_16 (uint32_t pixel)
+pack_565_32_16 (uint32_t pixel)
 {
     return (uint16_t) (((pixel>>8) & 0xf800) | ((pixel>>5) & 0x07e0) | ((pixel>>3) & 0x001f));
 }
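
    The r5g6b5 widening above shifts each channel into the top of an 8-bit
    field and then replicates its high bits into the low bits (the
    mask_565_fix_rb / mask_565_fix_g step), so 0x1f expands to 0xff rather
    than 0xf8; pack_565_32_16 goes the other way by plain truncation.  A
    scalar sketch of the widening (illustration only, not part of this patch):

#include <stdint.h>

static uint32_t
expand_565_scalar (uint16_t p)
{
    uint32_t r5 = (p >> 11) & 0x1f;
    uint32_t g6 = (p >>  5) & 0x3f;
    uint32_t b5 =  p        & 0x1f;

    /* replicate the top bits into the low bits */
    uint32_t r8 = (r5 << 3) | (r5 >> 2);
    uint32_t g8 = (g6 << 2) | (g6 >> 4);
    uint32_t b8 = (b5 << 3) | (b5 >> 2);

    return (r8 << 16) | (g8 << 8) | b8;
}
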
@@ -132,218 +132,218 @@ pack_2x128_128 (__m128i lo, __m128i hi)
 }
 
 static force_inline __m128i
-pack565_2x128_128 (__m128i lo, __m128i hi)
+pack_565_2x128_128 (__m128i lo, __m128i hi)
 {
     __m128i data;
     __m128i r, g1, g2, b;
 
     data = pack_2x128_128 ( lo, hi );
 
-    r  = _mm_and_si128 (data , Mask565r);
-    g1 = _mm_and_si128 (_mm_slli_epi32 (data , 3), Mask565g1);
-    g2 = _mm_and_si128 (_mm_srli_epi32 (data , 5), Mask565g2);
-    b  = _mm_and_si128 (_mm_srli_epi32 (data , 3), Mask565b);
+    r  = _mm_and_si128 (data , mask_565_r);
+    g1 = _mm_and_si128 (_mm_slli_epi32 (data , 3), mask_565_g1);
+    g2 = _mm_and_si128 (_mm_srli_epi32 (data , 5), mask_565_g2);
+    b  = _mm_and_si128 (_mm_srli_epi32 (data , 3), mask_565_b);
 
     return _mm_or_si128 (_mm_or_si128 (_mm_or_si128 (r, g1), g2), b);
 }
 
 static force_inline __m128i
-pack565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
+pack_565_4x128_128 (__m128i* xmm0, __m128i* xmm1, __m128i* xmm2, __m128i* xmm3)
 {
-    return _mm_packus_epi16 (pack565_2x128_128 (*xmm0, *xmm1), pack565_2x128_128 (*xmm2, *xmm3));
+    return _mm_packus_epi16 (pack_565_2x128_128 (*xmm0, *xmm1), pack_565_2x128_128 (*xmm2, *xmm3));
 }
 
 static force_inline int
-isOpaque (__m128i x)
+is_opaque (__m128i x)
 {
     __m128i ffs = _mm_cmpeq_epi8 (x, x);
     return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
 }
 
 static force_inline int
-isZero (__m128i x)
+is_zero (__m128i x)
 {
     return _mm_movemask_epi8 (_mm_cmpeq_epi8 (x, _mm_setzero_si128())) == 0xffff;
 }
 
 static force_inline int
-isTransparent (__m128i x)
+is_transparent (__m128i x)
 {
     return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, _mm_setzero_si128())) & 0x8888) == 0x8888;
 }
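
    is_opaque, is_zero and is_transparent lean on _mm_movemask_epi8, which
    collects the top bit of each of the 16 byte lanes; with four packed
    a8r8g8b8 pixels the alpha bytes sit in lanes 3, 7, 11 and 15, and those
    lanes correspond to the bits in 0x8888.  A self-contained restatement of
    the opaque test (illustration only, not part of this patch):

#include <assert.h>
#include <emmintrin.h>

static int
all_alpha_ff (__m128i x)
{
    __m128i ffs = _mm_cmpeq_epi8 (x, x);	/* every byte 0xff */

    /* bit 4*i + 3 of the movemask reflects pixel i's alpha byte */
    return (_mm_movemask_epi8 (_mm_cmpeq_epi8 (x, ffs)) & 0x8888) == 0x8888;
}

static void
movemask_example (void)
{
    __m128i opaque      = _mm_set1_epi32 ((int) 0xff0000ff);	/* a = 0xff */
    __m128i translucent = _mm_set1_epi32 (0x7f0000ff);		/* a = 0x7f */

    assert (all_alpha_ff (opaque));
    assert (!all_alpha_ff (translucent));
}
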
 
 static force_inline __m128i
-expandPixel_32_1x128 (uint32_t data)
+expand_pixel_32_1x128 (uint32_t data)
 {
     return _mm_shuffle_epi32 (unpack_32_1x128 (data), _MM_SHUFFLE(1, 0, 1, 0));
 }
 
 static force_inline __m128i
-expandAlpha_1x128 (__m128i data)
+expand_alpha_1x128 (__m128i data)
 {
     return _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (data, _MM_SHUFFLE(3, 3, 3, 3)), _MM_SHUFFLE(3, 3, 3, 3));
 }
 
 static force_inline void
-expandAlpha_2x128 (__m128i dataLo, __m128i dataHi, __m128i* alphaLo, __m128i* alphaHi)
+expand_alpha_2x128 (__m128i data_lo, __m128i data_hi, __m128i* alpha_lo, __m128i* alpha_hi)
 {
     __m128i lo, hi;
 
-    lo = _mm_shufflelo_epi16 (dataLo, _MM_SHUFFLE(3, 3, 3, 3));
-    hi = _mm_shufflelo_epi16 (dataHi, _MM_SHUFFLE(3, 3, 3, 3));
-    *alphaLo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(3, 3, 3, 3));
-    *alphaHi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(3, 3, 3, 3));
+    lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE(3, 3, 3, 3));
+    hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE(3, 3, 3, 3));
+    *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(3, 3, 3, 3));
+    *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(3, 3, 3, 3));
 }
 
 static force_inline void
-expandAlphaRev_2x128 (__m128i dataLo, __m128i dataHi, __m128i* alphaLo, __m128i* alphaHi)
+expand_alpha_rev_2x128 (__m128i data_lo, __m128i data_hi, __m128i* alpha_lo, __m128i* alpha_hi)
 {
     __m128i lo, hi;
 
-    lo = _mm_shufflelo_epi16 (dataLo, _MM_SHUFFLE(0, 0, 0, 0));
-    hi = _mm_shufflelo_epi16 (dataHi, _MM_SHUFFLE(0, 0, 0, 0));
-    *alphaLo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(0, 0, 0, 0));
-    *alphaHi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(0, 0, 0, 0));
+    lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE(0, 0, 0, 0));
+    hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE(0, 0, 0, 0));
+    *alpha_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(0, 0, 0, 0));
+    *alpha_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(0, 0, 0, 0));
 }
 
 static force_inline void
-pixMultiply_2x128 (__m128i* dataLo, __m128i* dataHi, __m128i* alphaLo, __m128i* alphaHi, __m128i* retLo, __m128i* retHi)
+pix_multiply_2x128 (__m128i* data_lo, __m128i* data_hi, __m128i* alpha_lo, __m128i* alpha_hi, __m128i* ret_lo, __m128i* ret_hi)
 {
     __m128i lo, hi;
 
-    lo = _mm_mullo_epi16 (*dataLo, *alphaLo);
-    hi = _mm_mullo_epi16 (*dataHi, *alphaHi);
-    lo = _mm_adds_epu16 (lo, Mask0080);
-    hi = _mm_adds_epu16 (hi, Mask0080);
-    *retLo = _mm_mulhi_epu16 (lo, Mask0101);
-    *retHi = _mm_mulhi_epu16 (hi, Mask0101);
+    lo = _mm_mullo_epi16 (*data_lo, *alpha_lo);
+    hi = _mm_mullo_epi16 (*data_hi, *alpha_hi);
+    lo = _mm_adds_epu16 (lo, mask_0080);
+    hi = _mm_adds_epu16 (hi, mask_0080);
+    *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
+    *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
 }
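
    The 0x0080 / 0x0101 pair is the usual exact multiply-and-divide-by-255:
    per 16-bit lane the code computes ((x * a + 0x80) * 0x101) >> 16, which
    equals x * a / 255 rounded to nearest for x, a in [0, 255].  A scalar
    restatement plus an exhaustive check (illustration only, not part of this
    patch):

#include <assert.h>
#include <stdint.h>

static uint8_t
mul_div_255 (uint8_t x, uint8_t a)
{
    uint32_t t = (uint32_t) x * a + 0x80;

    return (uint8_t) ((t * 0x101) >> 16);
}

static void
mul_div_255_check (void)
{
    unsigned x, a;

    for (x = 0; x < 256; x++)
	for (a = 0; a < 256; a++)
	    assert (mul_div_255 (x, a) == (x * a + 127) / 255);	/* == round (x*a/255) */
}
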
 
 static force_inline void
-pixAddMultiply_2x128 (__m128i* srcLo, __m128i* srcHi, __m128i* alphaDstLo, __m128i* alphaDstHi,
-                      __m128i* dstLo, __m128i* dstHi, __m128i* alphaSrcLo, __m128i* alphaSrcHi,
-                      __m128i* retLo, __m128i* retHi)
+pix_add_multiply_2x128 (__m128i* src_lo, __m128i* src_hi, __m128i* alpha_dst_lo, __m128i* alpha_dst_hi,
+                      __m128i* dst_lo, __m128i* dst_hi, __m128i* alpha_src_lo, __m128i* alpha_src_hi,
+                      __m128i* ret_lo, __m128i* ret_hi)
 {
     __m128i lo, hi;
-    __m128i mulLo, mulHi;
+    __m128i mul_lo, mul_hi;
 
-    lo = _mm_mullo_epi16 (*srcLo, *alphaDstLo);
-    hi = _mm_mullo_epi16 (*srcHi, *alphaDstHi);
-    mulLo = _mm_mullo_epi16 (*dstLo, *alphaSrcLo);
-    mulHi = _mm_mullo_epi16 (*dstHi, *alphaSrcHi);
-    lo = _mm_adds_epu16 (lo, Mask0080);
-    hi = _mm_adds_epu16 (hi, Mask0080);
-    lo = _mm_adds_epu16 (lo, mulLo);
-    hi = _mm_adds_epu16 (hi, mulHi);
-    *retLo = _mm_mulhi_epu16 (lo, Mask0101);
-    *retHi = _mm_mulhi_epu16 (hi, Mask0101);
+    lo = _mm_mullo_epi16 (*src_lo, *alpha_dst_lo);
+    hi = _mm_mullo_epi16 (*src_hi, *alpha_dst_hi);
+    mul_lo = _mm_mullo_epi16 (*dst_lo, *alpha_src_lo);
+    mul_hi = _mm_mullo_epi16 (*dst_hi, *alpha_src_hi);
+    lo = _mm_adds_epu16 (lo, mask_0080);
+    hi = _mm_adds_epu16 (hi, mask_0080);
+    lo = _mm_adds_epu16 (lo, mul_lo);
+    hi = _mm_adds_epu16 (hi, mul_hi);
+    *ret_lo = _mm_mulhi_epu16 (lo, mask_0101);
+    *ret_hi = _mm_mulhi_epu16 (hi, mask_0101);
 }
 
 static force_inline void
-negate_2x128 (__m128i dataLo, __m128i dataHi, __m128i* negLo, __m128i* negHi)
+negate_2x128 (__m128i data_lo, __m128i data_hi, __m128i* neg_lo, __m128i* neg_hi)
 {
-    *negLo = _mm_xor_si128 (dataLo, Mask00ff);
-    *negHi = _mm_xor_si128 (dataHi, Mask00ff);
+    *neg_lo = _mm_xor_si128 (data_lo, mask_00ff);
+    *neg_hi = _mm_xor_si128 (data_hi, mask_00ff);
 }
 
 static force_inline void
-invertColors_2x128 (__m128i dataLo, __m128i dataHi, __m128i* invLo, __m128i* invHi)
+invert_colors_2x128 (__m128i data_lo, __m128i data_hi, __m128i* inv_lo, __m128i* inv_hi)
 {
     __m128i lo, hi;
 
-    lo = _mm_shufflelo_epi16 (dataLo, _MM_SHUFFLE(3, 0, 1, 2));
-    hi = _mm_shufflelo_epi16 (dataHi, _MM_SHUFFLE(3, 0, 1, 2));
-    *invLo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(3, 0, 1, 2));
-    *invHi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(3, 0, 1, 2));
+    lo = _mm_shufflelo_epi16 (data_lo, _MM_SHUFFLE(3, 0, 1, 2));
+    hi = _mm_shufflelo_epi16 (data_hi, _MM_SHUFFLE(3, 0, 1, 2));
+    *inv_lo = _mm_shufflehi_epi16 (lo, _MM_SHUFFLE(3, 0, 1, 2));
+    *inv_hi = _mm_shufflehi_epi16 (hi, _MM_SHUFFLE(3, 0, 1, 2));
 }
 
 static force_inline void
-over_2x128 (__m128i* srcLo, __m128i* srcHi, __m128i* alphaLo, __m128i* alphaHi, __m128i* dstLo, __m128i* dstHi)
+over_2x128 (__m128i* src_lo, __m128i* src_hi, __m128i* alpha_lo, __m128i* alpha_hi, __m128i* dst_lo, __m128i* dst_hi)
 {
     __m128i t1, t2;
 
-    negate_2x128 (*alphaLo, *alphaHi, &t1, &t2);
+    negate_2x128 (*alpha_lo, *alpha_hi, &t1, &t2);
 
-    pixMultiply_2x128 (dstLo, dstHi, &t1, &t2, dstLo, dstHi);
+    pix_multiply_2x128 (dst_lo, dst_hi, &t1, &t2, dst_lo, dst_hi);
 
-    *dstLo = _mm_adds_epu8 (*srcLo, *dstLo);
-    *dstHi = _mm_adds_epu8 (*srcHi, *dstHi);
+    *dst_lo = _mm_adds_epu8 (*src_lo, *dst_lo);
+    *dst_hi = _mm_adds_epu8 (*src_hi, *dst_hi);
 }
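
    over_2x128 is the premultiplied OVER operator: each destination channel
    becomes src + dst * (255 - alpha) / 255, with a saturating add.  A scalar
    sketch for one a8r8g8b8 pixel (illustration only; it reuses the
    hypothetical mul_div_255() helper sketched above, which is an assumption
    of these sketches and not part of the patch):

static uint32_t
over_scalar (uint32_t src, uint32_t dst)
{
    uint32_t ia = 255 - (src >> 24);		/* inverse source alpha */
    uint32_t result = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
	uint32_t s = (src >> shift) & 0xff;
	uint32_t d = (dst >> shift) & 0xff;
	uint32_t c = s + mul_div_255 (d, ia);

	if (c > 0xff)				/* saturate, like _mm_adds_epu8 */
	    c = 0xff;

	result |= c << shift;
    }

    return result;
}
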
 
 static force_inline void
-overRevNonPre_2x128 (__m128i srcLo, __m128i srcHi, __m128i* dstLo, __m128i* dstHi)
+over_rev_non_pre_2x128 (__m128i src_lo, __m128i src_hi, __m128i* dst_lo, __m128i* dst_hi)
 {
     __m128i lo, hi;
-    __m128i alphaLo, alphaHi;
+    __m128i alpha_lo, alpha_hi;
 
-    expandAlpha_2x128 (srcLo, srcHi, &alphaLo, &alphaHi);
+    expand_alpha_2x128 (src_lo, src_hi, &alpha_lo, &alpha_hi);
 
-    lo = _mm_or_si128 (alphaLo, MaskAlpha);
-    hi = _mm_or_si128 (alphaHi, MaskAlpha);
+    lo = _mm_or_si128 (alpha_lo, mask_alpha);
+    hi = _mm_or_si128 (alpha_hi, mask_alpha);
 
-    invertColors_2x128 (srcLo, srcHi, &srcLo, &srcHi);
+    invert_colors_2x128 (src_lo, src_hi, &src_lo, &src_hi);
 
-    pixMultiply_2x128 (&srcLo, &srcHi, &lo, &hi, &lo, &hi);
+    pix_multiply_2x128 (&src_lo, &src_hi, &lo, &hi, &lo, &hi);
 
-    over_2x128 (&lo, &hi, &alphaLo, &alphaHi, dstLo, dstHi);
+    over_2x128 (&lo, &hi, &alpha_lo, &alpha_hi, dst_lo, dst_hi);
 }
 
 static force_inline void
-inOver_2x128 (__m128i* srcLo,  __m128i* srcHi,  __m128i*  alphaLo, __m128i*  alphaHi,
-              __m128i* maskLo, __m128i* maskHi, __m128i* dstLo,   __m128i* dstHi)
+in_over_2x128 (__m128i* src_lo,  __m128i* src_hi,  __m128i*  alpha_lo, __m128i*  alpha_hi,
+              __m128i* mask_lo, __m128i* mask_hi, __m128i* dst_lo,   __m128i* dst_hi)
 {
-    __m128i sLo, sHi;
-    __m128i aLo, aHi;
+    __m128i s_lo, s_hi;
+    __m128i a_lo, a_hi;
 
-    pixMultiply_2x128 (  srcLo,   srcHi, maskLo, maskHi, &sLo, &sHi);
-    pixMultiply_2x128 (alphaLo, alphaHi, maskLo, maskHi, &aLo, &aHi);
+    pix_multiply_2x128 (  src_lo,   src_hi, mask_lo, mask_hi, &s_lo, &s_hi);
+    pix_multiply_2x128 (alpha_lo, alpha_hi, mask_lo, mask_hi, &a_lo, &a_hi);
 
-    over_2x128 (&sLo, &sHi, &aLo, &aHi, dstLo, dstHi);
+    over_2x128 (&s_lo, &s_hi, &a_lo, &a_hi, dst_lo, dst_hi);
 }
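
    in_over_2x128 strings the two previous steps together: both the source
    and its (expanded) alpha are first multiplied channel-wise by the mask
    (the IN step), and the result is then composited OVER the destination.
    A scalar sketch for one pixel (illustration only; mul_div_255() is the
    hypothetical helper sketched above):

static uint32_t
in_over_scalar (uint32_t src, uint32_t alpha, uint32_t mask, uint32_t dst)
{
    uint32_t result = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
	uint32_t m = (mask  >> shift) & 0xff;
	uint32_t s = mul_div_255 ((src   >> shift) & 0xff, m);
	uint32_t a = mul_div_255 ((alpha >> shift) & 0xff, m);
	uint32_t d = (dst >> shift) & 0xff;
	uint32_t c = s + mul_div_255 (d, 255 - a);

	if (c > 0xff)
	    c = 0xff;

	result |= c << shift;
    }

    return result;
}
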
 
 static force_inline void
-cachePrefetch (__m128i* addr)
+cache_prefetch (__m128i* addr)
 {
     _mm_prefetch (addr, _MM_HINT_T0);
 }
 
 static force_inline void
-cachePrefetchNext (__m128i* addr)
+cache_prefetch_next (__m128i* addr)
 {
     _mm_prefetch (addr + 4, _MM_HINT_T0); // 64 bytes ahead
 }
 
 /* load 4 pixels from a 16-byte boundary aligned address */
 static force_inline __m128i
-load128Aligned (__m128i* src)
+load_128_aligned (__m128i* src)
 {
     return _mm_load_si128 (src);
 }
 
 /* load 4 pixels from an unaligned address */
 static force_inline __m128i
-load128Unaligned (const __m128i* src)
+load_128_unaligned (const __m128i* src)
 {
     return _mm_loadu_si128 (src);
 }
 
 /* save 4 pixels using Write Combining memory on a 16-byte boundary aligned address */
 static force_inline void
-save128WriteCombining (__m128i* dst, __m128i data)
+save128write_combining (__m128i* dst, __m128i data)
 {
     _mm_stream_si128 (dst, data);
 }
 
 /* save 4 pixels on a 16-byte boundary aligned address */
 static force_inline void
-save128Aligned (__m128i* dst, __m128i data)
+save_128_aligned (__m128i* dst, __m128i data)
 {
     _mm_store_si128 (dst, data);
 }
 
 /* save 4 pixels on an unaligned address */
 static force_inline void
-save128Unaligned (__m128i* dst, __m128i data)
+save_128_unaligned (__m128i* dst, __m128i data)
 {
     _mm_storeu_si128 (dst, data);
 }
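
    The write-combining variant wraps _mm_stream_si128, a non-temporal store
    that bypasses the cache; that pays off when a large destination is
    written once and not read back soon, and it needs 16-byte alignment just
    like the ordinary aligned store.  A minimal sketch (illustration only;
    stream_fill is a hypothetical helper, and the trailing _mm_sfence is the
    usual way to order the streamed stores):

#include <emmintrin.h>

static void
stream_fill (__m128i *dst, __m128i value, int n)	/* dst must be 16-byte aligned */
{
    int i;

    for (i = 0; i < n; i++)
	_mm_stream_si128 (dst + i, value);

    _mm_sfence ();
}
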
@@ -359,25 +359,25 @@ unpack_32_1x64 (uint32_t data)
 }
 
 static force_inline __m64
-expandAlpha_1x64 (__m64 data)
+expand_alpha_1x64 (__m64 data)
 {
     return _mm_shuffle_pi16 (data, _MM_SHUFFLE(3, 3, 3, 3));
 }
 
 static force_inline __m64
-expandAlphaRev_1x64 (__m64 data)
+expand_alpha_rev_1x64 (__m64 data)
 {
     return _mm_shuffle_pi16 (data, _MM_SHUFFLE(0, 0, 0, 0));
 }
 
 static force_inline __m64
-expandPixel_8_1x64 (uint8_t data)
+expand_pixel_8_1x64 (uint8_t data)
 {
     return _mm_shuffle_pi16 (unpack_32_1x64 ((uint32_t)data), _MM_SHUFFLE(0, 0, 0, 0));
 }
 
 static force_inline __m64
-pixMultiply_1x64 (__m64 data, __m64 alpha)
+pix_multiply_1x64 (__m64 data, __m64 alpha)
 {
     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha),
                                           mask_x0080),
@@ -385,11 +385,11 @@ pixMultiply_1x64 (__m64 data, __m64 alpha)
 }
 
 static force_inline __m64
-pixAddMultiply_1x64 (__m64* src, __m64* alphaDst, __m64* dst, __m64* alphaSrc)
+pix_add_multiply_1x64 (__m64* src, __m64* alpha_dst, __m64* dst, __m64* alpha_src)
 {
-    return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alphaDst),
+    return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alpha_dst),
                                                          mask_x0080),
-                                          _mm_mullo_pi16 (*dst, *alphaSrc)),
+                                          _mm_mullo_pi16 (*dst, *alpha_src)),
                            mask_x0101);
 }
 
@@ -400,7 +400,7 @@ negate_1x64 (__m64 data)
 }
 
 static force_inline __m64
-invertColors_1x64 (__m64 data)
+invert_colors_1x64 (__m64 data)
 {
     return _mm_shuffle_pi16 (data, _MM_SHUFFLE(3, 0, 1, 2));
 }
@@ -408,24 +408,24 @@ invertColors_1x64 (__m64 data)
 static force_inline __m64
 over_1x64 (__m64 src, __m64 alpha, __m64 dst)
 {
-    return _mm_adds_pu8 (src, pixMultiply_1x64 (dst, negate_1x64 (alpha)));
+    return _mm_adds_pu8 (src, pix_multiply_1x64 (dst, negate_1x64 (alpha)));
 }
 
 static force_inline __m64
-inOver_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst)
+in_over_1x64 (__m64* src, __m64* alpha, __m64* mask, __m64* dst)
 {
-    return over_1x64 (pixMultiply_1x64 (*src, *mask),
-                      pixMultiply_1x64 (*alpha, *mask),
+    return over_1x64 (pix_multiply_1x64 (*src, *mask),
+                      pix_multiply_1x64 (*alpha, *mask),
                       *dst);
 }
 
 static force_inline __m64
-overRevNonPre_1x64 (__m64 src, __m64 dst)
+over_rev_non_pre_1x64 (__m64 src, __m64 dst)
 {
-    __m64 alpha = expandAlpha_1x64 (src);
+    __m64 alpha = expand_alpha_1x64 (src);
 
-    return over_1x64 (pixMultiply_1x64 (invertColors_1x64 (src),
-                                        _mm_or_si64 (alpha, mask_xAlpha)),
+    return over_1x64 (pix_multiply_1x64 (invert_colors_1x64 (src),
+                                        _mm_or_si64 (alpha, mask_x_alpha)),
                       alpha,
                       dst);
 }
@@ -463,8 +463,8 @@ expand565_16_1x64 (uint16_t pixel)
 
     p = _mm_or_si64 (t1, p);
     p = _mm_or_si64 (t2, p);
-    p = _mm_and_si64 (p, mask_x565rgb);
-    p = _mm_mullo_pi16 (p, mask_x565Unpack);
+    p = _mm_and_si64 (p, mask_x565_rgb);
+    p = _mm_mullo_pi16 (p, mask_x565_unpack);
 
     return _mm_srli_pi16 (p, 8);
 }
@@ -473,7 +473,7 @@ expand565_16_1x64 (uint16_t pixel)
  * Compose Core transformations
  */
 static force_inline uint32_t
-coreCombineOverUPixelsse2 (uint32_t src, uint32_t dst)
+core_combine_over_u_pixel_sse2 (uint32_t src, uint32_t dst)
 {
     uint8_t     a;
     __m64       ms;
@@ -487,7 +487,7 @@ coreCombineOverUPixelsse2 (uint32_t src, uint32_t dst)
     else if (src)
     {
         ms = unpack_32_1x64 (src);
-        return pack_1x64_32 (over_1x64 (ms, expandAlpha_1x64 (ms), unpack_32_1x64 (dst)));
+        return pack_1x64_32 (over_1x64 (ms, expand_alpha_1x64 (ms), unpack_32_1x64 (dst)));
     }
 
     return dst;
@@ -503,10 +503,10 @@ combine1 (const uint32_t *ps, const uint32_t *pm)
 	__m64 ms, mm;
 
 	mm = unpack_32_1x64 (*pm);
-	mm = expandAlpha_1x64 (mm);
+	mm = expand_alpha_1x64 (mm);
 	
 	ms = unpack_32_1x64 (s);
-	ms = pixMultiply_1x64 (ms, mm);
+	ms = pix_multiply_1x64 (ms, mm);
 
 	s = pack_1x64_32 (ms);
     }
@@ -517,48 +517,48 @@ combine1 (const uint32_t *ps, const uint32_t *pm)
 static force_inline __m128i
 combine4 (const __m128i *ps, const __m128i *pm)
 {
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmMskLo, xmmMskHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_msk_lo, xmm_msk_hi;
     __m128i s;
     
     if (pm)
     {
-	xmmMskLo = load128Unaligned (pm);
+	xmm_msk_lo = load_128_unaligned (pm);
 
-	if (isTransparent (xmmMskLo))
+	if (is_transparent (xmm_msk_lo))
 	    return _mm_setzero_si128 ();
     }
     
-    s = load128Unaligned (ps);
+    s = load_128_unaligned (ps);
 	
     if (pm)
     {
-	unpack_128_2x128 (s, &xmmSrcLo, &xmmSrcHi);
-	unpack_128_2x128 (xmmMskLo, &xmmMskLo, &xmmMskHi);
+	unpack_128_2x128 (s, &xmm_src_lo, &xmm_src_hi);
+	unpack_128_2x128 (xmm_msk_lo, &xmm_msk_lo, &xmm_msk_hi);
 	
-	expandAlpha_2x128 (xmmMskLo, xmmMskHi, &xmmMskLo, &xmmMskHi);
+	expand_alpha_2x128 (xmm_msk_lo, xmm_msk_hi, &xmm_msk_lo, &xmm_msk_hi);
 	
-	pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMskLo, &xmmMskHi, &xmmSrcLo, &xmmSrcHi);
+	pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_msk_lo, &xmm_msk_hi, &xmm_src_lo, &xmm_src_hi);
 	
-	s = pack_2x128_128 (xmmSrcLo, xmmSrcHi);
+	s = pack_2x128_128 (xmm_src_lo, xmm_src_hi);
     }
 
     return s;
 }
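
    combine1 and combine4 implement the unified-mask path: when a mask
    pointer is supplied, the source pixels are multiplied by the mask's alpha
    before the actual combiner runs, and combine4 bails out early when all
    four mask pixels are fully transparent.  A scalar counterpart of combine1
    (illustration only; mul_div_255() is the hypothetical helper sketched
    above):

static uint32_t
combine1_scalar (uint32_t s, const uint32_t *pm)
{
    if (pm)
    {
	uint32_t ma = *pm >> 24;		/* unified mask: alpha only */
	uint32_t result = 0;
	int shift;

	for (shift = 0; shift < 32; shift += 8)
	    result |= (uint32_t) mul_div_255 ((s >> shift) & 0xff, ma) << shift;

	s = result;
    }

    return s;
}
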
 
 static force_inline void
-coreCombineOverUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
+core_combine_over_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 {
     uint32_t s, d;
 
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmAlphaLo, xmmAlphaHi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     /* Align dst on a 16-byte boundary */
     while (w &&
@@ -567,7 +567,7 @@ coreCombineOverUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int
         d = *pd;
         s = combine1 (ps, pm);
 
-        *pd++ = coreCombineOverUPixelsse2 (s, d);
+        *pd++ = core_combine_over_u_pixel_sse2 (s, d);
 	ps++;
 	if (pm)
 	    pm++;
@@ -575,37 +575,37 @@ coreCombineOverUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
         /* I'm loading unaligned because I'm not sure about the address alignment. */
-        xmmSrcHi = combine4 ((__m128i*)ps, (__m128i*)pm);
+        xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
 
-        if (isOpaque (xmmSrcHi))
+        if (is_opaque (xmm_src_hi))
         {
-            save128Aligned ((__m128i*)pd, xmmSrcHi);
+            save_128_aligned ((__m128i*)pd, xmm_src_hi);
         }
-        else if (!isZero (xmmSrcHi))
+        else if (!is_zero (xmm_src_hi))
         {
-            xmmDstHi = load128Aligned ((__m128i*) pd);
+            xmm_dst_hi = load_128_aligned ((__m128i*) pd);
 
-            unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-            unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+            unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+            unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-            expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi);
+            expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-            over_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi, &xmmDstLo, &xmmDstHi);
+            over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
 
             /* rebuild the 4 pixel data and save */
-            save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+            save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
         }
 
         w -= 4;
@@ -620,7 +620,7 @@ coreCombineOverUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int
         d = *pd;
         s = combine1 (ps, pm);
 
-        *pd++ = coreCombineOverUPixelsse2 (s, d);
+        *pd++ = core_combine_over_u_pixel_sse2 (s, d);
 	ps++;
 	if (pm)
 	    pm++;
@@ -629,18 +629,18 @@ coreCombineOverUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int
 }
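
    core_combine_over_u_sse2 and the combiners that follow all share one
    skeleton: a scalar head that advances pd to a 16-byte boundary, a
    four-pixel SSE2 body (here with the is_opaque / is_zero fast paths that
    skip the blend when the four source pixels are fully opaque or fully
    transparent), and a scalar tail for the last 0-3 pixels.  A minimal
    sketch of that head/body/tail shape, using a hypothetical solid fill in
    place of the real blend (illustration only, not part of this patch):

#include <emmintrin.h>
#include <stdint.h>

static void
fill_line_sse2 (uint32_t *pd, uint32_t pixel, int w)
{
    __m128i four = _mm_set1_epi32 ((int) pixel);

    while (w && ((uintptr_t) pd & 15))		/* head: walk up to 16-byte alignment */
    {
	*pd++ = pixel;
	w--;
    }

    while (w >= 4)				/* body: 4 pixels per iteration */
    {
	_mm_store_si128 ((__m128i *) pd, four);
	pd += 4;
	w -= 4;
    }

    while (w--)					/* tail: remaining 0-3 pixels */
	*pd++ = pixel;
}
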
 
 static force_inline void
-coreCombineOverReverseUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
+core_combine_over_reverse_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 {
     uint32_t s, d;
 
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmAlphaLo, xmmAlphaHi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     /* Align dst on a 16-byte boundary */
     while (w &&
@@ -649,7 +649,7 @@ coreCombineOverReverseUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* p
         d = *pd;
         s = combine1 (ps, pm);
 
-        *pd++ = coreCombineOverUPixelsse2 (d, s);
+        *pd++ = core_combine_over_u_pixel_sse2 (d, s);
         w--;
 	ps++;
 	if (pm)
@@ -657,30 +657,30 @@ coreCombineOverReverseUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* p
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
         /* I'm loading unaligned because I'm not sure about the address alignment. */
-        xmmSrcHi = combine4 ((__m128i*)ps, (__m128i*)pm);
-        xmmDstHi = load128Aligned ((__m128i*) pd);
+        xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*) pd);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaLo, &xmmAlphaHi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-        over_2x128 (&xmmDstLo, &xmmDstHi, &xmmAlphaLo, &xmmAlphaHi, &xmmSrcLo, &xmmSrcHi);
+        over_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_src_lo, &xmm_src_hi);
 
         /* rebuild the 4 pixel data and save */
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmSrcLo, xmmSrcHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_src_lo, xmm_src_hi));
 
         w -= 4;
         ps += 4;
@@ -694,7 +694,7 @@ coreCombineOverReverseUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* p
         d = *pd;
         s = combine1 (ps, pm);
 
-        *pd++ = coreCombineOverUPixelsse2 (d, s);
+        *pd++ = core_combine_over_u_pixel_sse2 (d, s);
 	ps++;
         w--;
 	if (pm)
@@ -703,7 +703,7 @@ coreCombineOverReverseUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* p
 }
 
 static force_inline uint32_t
-coreCombineInUPixelsse2 (uint32_t src, uint32_t dst)
+core_combine_in_u_pixelsse2 (uint32_t src, uint32_t dst)
 {
     uint32_t maska = src >> 24;
 
@@ -713,31 +713,31 @@ coreCombineInUPixelsse2 (uint32_t src, uint32_t dst)
     }
     else if (maska != 0xff)
     {
-        return pack_1x64_32(pixMultiply_1x64 (unpack_32_1x64 (dst), expandAlpha_1x64 (unpack_32_1x64 (src))));
+        return pack_1x64_32(pix_multiply_1x64 (unpack_32_1x64 (dst), expand_alpha_1x64 (unpack_32_1x64 (src))));
     }
 
     return dst;
 }
 
 static force_inline void
-coreCombineInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
+core_combine_in_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 {
     uint32_t s, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && ((unsigned long) pd & 15))
     {
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineInUPixelsse2 (d, s);
+        *pd++ = core_combine_in_u_pixelsse2 (d, s);
         w--;
 	ps++;
 	if (pm)
@@ -745,27 +745,27 @@ coreCombineInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*) pd);
-        xmmSrcHi = combine4 ((__m128i*) ps, (__m128i*) pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+        xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*) pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmDstLo, &xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -779,7 +779,7 @@ coreCombineInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineInUPixelsse2 (d, s);
+        *pd++ = core_combine_in_u_pixelsse2 (d, s);
         w--;
 	ps++;
 	if (pm)
@@ -788,24 +788,24 @@ coreCombineInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 }
 
 static force_inline void
-coreCombineReverseInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
+core_combine_reverse_in_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
 {
     uint32_t s, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && ((unsigned long) pd & 15))
     {
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineInUPixelsse2 (s, d);
+        *pd++ = core_combine_in_u_pixelsse2 (s, d);
 	ps++;
         w--;
 	if (pm)
@@ -813,27 +813,27 @@ coreCombineReverseInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm,
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*) pd);
-        xmmSrcHi = combine4 ((__m128i*) ps, (__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*) pd);
+        xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        pixMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmSrcLo, &xmmSrcHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -847,7 +847,7 @@ coreCombineReverseInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm,
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineInUPixelsse2 (s, d);
+        *pd++ = core_combine_in_u_pixelsse2 (s, d);
         w--;
 	ps++;
 	if (pm)
@@ -856,19 +856,19 @@ coreCombineReverseInUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm,
 }
 
 static force_inline void
-coreCombineReverseOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
+core_combine_reverse_out_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 {
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && ((unsigned long) pd & 15))
     {
         uint32_t s = combine1 (ps, pm);
         uint32_t d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (d), negate_1x64 (expandAlpha_1x64 (unpack_32_1x64 (s)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (s)))));
 	if (pm)
 	    pm++;
 	ps++;
@@ -876,32 +876,32 @@ coreCombineReverseOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
-        __m128i xmmSrcLo, xmmSrcHi;
-        __m128i xmmDstLo, xmmDstHi;
+        __m128i xmm_src_lo, xmm_src_hi;
+        __m128i xmm_dst_lo, xmm_dst_hi;
 
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmSrcHi = combine4 ((__m128i*)ps, (__m128i*)pm);
-        xmmDstHi = load128Aligned ((__m128i*) pd);
+        xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*) pd);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        negate_2x128      (xmmSrcLo, xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        negate_2x128      (xmm_src_lo, xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
 
-        pixMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmSrcLo, &xmmSrcHi, &xmmDstLo, &xmmDstHi);
+        pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -915,7 +915,7 @@ coreCombineReverseOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm
         uint32_t s = combine1 (ps, pm);
         uint32_t d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (d), negate_1x64 (expandAlpha_1x64 (unpack_32_1x64 (s)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (s)))));
 	ps++;
 	if (pm)
 	    pm++;
@@ -924,19 +924,19 @@ coreCombineReverseOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm
 }
 
 static force_inline void
-coreCombineOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
+core_combine_out_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 {
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && ((unsigned long) pd & 15))
     {
         uint32_t s = combine1 (ps, pm);
         uint32_t d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (s), negate_1x64 (expandAlpha_1x64 (unpack_32_1x64 (d)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
         w--;
 	ps++;
 	if (pm)
@@ -944,32 +944,32 @@ coreCombineOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
-        __m128i xmmSrcLo, xmmSrcHi;
-        __m128i xmmDstLo, xmmDstHi;
+        __m128i xmm_src_lo, xmm_src_hi;
+        __m128i xmm_dst_lo, xmm_dst_hi;
 
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmSrcHi = combine4 ((__m128i*) ps, (__m128i*)pm);
-        xmmDstHi = load128Aligned ((__m128i*) pd);
+        xmm_src_hi = combine4 ((__m128i*) ps, (__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*) pd);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmDstLo, &xmmDstHi);
-        negate_2x128      (xmmDstLo, xmmDstHi, &xmmDstLo, &xmmDstHi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        negate_2x128      (xmm_dst_lo, xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmDstLo, &xmmDstHi, &xmmDstLo, &xmmDstHi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -983,7 +983,7 @@ coreCombineOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w
         uint32_t s = combine1 (ps, pm);
         uint32_t d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (s), negate_1x64 (expandAlpha_1x64 (unpack_32_1x64 (d)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
         w--;
 	ps++;
 	if (pm)
@@ -992,38 +992,38 @@ coreCombineOutUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w
 }
 
 static force_inline uint32_t
-coreCombineAtopUPixelsse2 (uint32_t src, uint32_t dst)
+core_combine_atop_u_pixel_sse2 (uint32_t src, uint32_t dst)
 {
     __m64 s = unpack_32_1x64 (src);
     __m64 d = unpack_32_1x64 (dst);
 
-    __m64 sa = negate_1x64 (expandAlpha_1x64 (s));
-    __m64 da = expandAlpha_1x64 (d);
+    __m64 sa = negate_1x64 (expand_alpha_1x64 (s));
+    __m64 da = expand_alpha_1x64 (d);
 
-    return pack_1x64_32 (pixAddMultiply_1x64 (&s, &da, &d, &sa));
+    return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
 }
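
    ATOP keeps the destination's shape: per channel the result is
    src * alpha(dst) / 255 + dst * (255 - alpha(src)) / 255, and the
    reverse/xor variants below only change which alphas get negated.  A
    scalar sketch for one pixel (illustration only; mul_div_255() is the
    hypothetical helper sketched above, and because it rounds each product
    separately the result can differ from the single-rounding
    pix_add_multiply path by at most one):

static uint32_t
atop_scalar (uint32_t src, uint32_t dst)
{
    uint32_t sa = src >> 24;
    uint32_t da = dst >> 24;
    uint32_t result = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
	uint32_t s = (src >> shift) & 0xff;
	uint32_t d = (dst >> shift) & 0xff;
	uint32_t c = mul_div_255 (s, da) + mul_div_255 (d, 255 - sa);

	if (c > 0xff)				/* saturate, matching the SIMD adds */
	    c = 0xff;

	result |= c << shift;
    }

    return result;
}
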
 
 static force_inline void
-coreCombineAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
+core_combine_atop_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 {
     uint32_t s, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmAlphaSrcLo, xmmAlphaSrcHi;
-    __m128i xmmAlphaDstLo, xmmAlphaDstHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+    __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && ((unsigned long) pd & 15))
     {
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineAtopUPixelsse2 (s, d);
+        *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
         w--;
 	ps++;
 	if (pm)
@@ -1031,33 +1031,33 @@ coreCombineAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmSrcHi = combine4 ((__m128i*)ps, (__m128i*)pm);
-        xmmDstHi = load128Aligned ((__m128i*) pd);
+        xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*) pd);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        negate_2x128 (xmmAlphaSrcLo, xmmAlphaSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
+        negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
 
-        pixAddMultiply_2x128 ( &xmmSrcLo, &xmmSrcHi, &xmmAlphaDstLo, &xmmAlphaDstHi,
-                               &xmmDstLo, &xmmDstHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi,
-                               &xmmDstLo, &xmmDstHi );
+        pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+                               &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+                               &xmm_dst_lo, &xmm_dst_hi );
 
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1071,7 +1071,7 @@ coreCombineAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineAtopUPixelsse2 (s, d);
+        *pd++ = core_combine_atop_u_pixel_sse2 (s, d);
         w--;
 	ps++;
 	if (pm)
@@ -1080,38 +1080,38 @@ coreCombineAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int
 }
 
 static force_inline uint32_t
-coreCombineReverseAtopUPixelsse2 (uint32_t src, uint32_t dst)
+core_combine_reverse_atop_u_pixel_sse2 (uint32_t src, uint32_t dst)
 {
     __m64 s = unpack_32_1x64 (src);
     __m64 d = unpack_32_1x64 (dst);
 
-    __m64 sa = expandAlpha_1x64 (s);
-    __m64 da = negate_1x64 (expandAlpha_1x64 (d));
+    __m64 sa = expand_alpha_1x64 (s);
+    __m64 da = negate_1x64 (expand_alpha_1x64 (d));
 
-    return pack_1x64_32 (pixAddMultiply_1x64 (&s, &da, &d, &sa));
+    return pack_1x64_32 (pix_add_multiply_1x64 (&s, &da, &d, &sa));
 }
 
 static force_inline void
-coreCombineReverseAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
+core_combine_reverse_atop_u_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* pm, int w)
 {
     uint32_t s, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmAlphaSrcLo, xmmAlphaSrcHi;
-    __m128i xmmAlphaDstLo, xmmAlphaDstHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+    __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && ((unsigned long) pd & 15))
     {
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineReverseAtopUPixelsse2 (s, d);
+        *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
 	ps++;
         w--;
 	if (pm)
@@ -1119,33 +1119,33 @@ coreCombineReverseAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* p
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmSrcHi = combine4 ((__m128i*)ps, (__m128i*)pm);
-        xmmDstHi = load128Aligned ((__m128i*) pd);
+        xmm_src_hi = combine4 ((__m128i*)ps, (__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*) pd);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        negate_2x128 (xmmAlphaDstLo, xmmAlphaDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        pixAddMultiply_2x128 ( &xmmSrcLo, &xmmSrcHi, &xmmAlphaDstLo, &xmmAlphaDstHi,
-                               &xmmDstLo, &xmmDstHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi,
-                               &xmmDstLo, &xmmDstHi );
+        pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+                               &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+                               &xmm_dst_lo, &xmm_dst_hi );
 
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1159,7 +1159,7 @@ coreCombineReverseAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* p
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineReverseAtopUPixelsse2 (s, d);
+        *pd++ = core_combine_reverse_atop_u_pixel_sse2 (s, d);
 	ps++;
         w--;
 	if (pm)
@@ -1168,19 +1168,19 @@ coreCombineReverseAtopUsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t* p
 }
 
 static force_inline uint32_t
-coreCombineXorUPixelsse2 (uint32_t src, uint32_t dst)
+core_combine_xor_u_pixel_sse2 (uint32_t src, uint32_t dst)
 {
     __m64 s = unpack_32_1x64 (src);
     __m64 d = unpack_32_1x64 (dst);
 
-    __m64 negD = negate_1x64 (expandAlpha_1x64 (d));
-    __m64 negS = negate_1x64 (expandAlpha_1x64 (s));
+    __m64 neg_d = negate_1x64 (expand_alpha_1x64 (d));
+    __m64 neg_s = negate_1x64 (expand_alpha_1x64 (s));
 
-    return pack_1x64_32 (pixAddMultiply_1x64 (&s, &negD, &d, &negS));
+    return pack_1x64_32 (pix_add_multiply_1x64 (&s, &neg_d, &d, &neg_s));
 }
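/* Illustrative sketch, not taken from the patch: core_combine_xor_u_pixel_sse2
 * appears to implement the Porter-Duff XOR operator,
 *
 *     result = src * (1 - dst_alpha) + dst * (1 - src_alpha)
 *
 * A hypothetical scalar per-channel equivalent (exact rounded division by
 * 255, <stdint.h> types, premultiplied inputs):
 */
static uint8_t
xor_channel (uint8_t s, uint8_t sa, uint8_t d, uint8_t da)
{
    return (uint8_t) (((uint32_t) s * (255 - da) +
                       (uint32_t) d * (255 - sa) + 127) / 255);
}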
 
 static force_inline void
-coreCombineXorUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t *mask, int width)
+core_combine_xor_u_sse2 (uint32_t* dst, const uint32_t* src, const uint32_t *mask, int width)
 {
     int w = width;
     uint32_t s, d;
@@ -1188,22 +1188,22 @@ coreCombineXorUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t *mask, i
     const uint32_t* ps = src;
     const uint32_t* pm = mask;
     
-    __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
-    __m128i xmmAlphaSrcLo, xmmAlphaSrcHi;
-    __m128i xmmAlphaDstLo, xmmAlphaDstHi;
+    __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+    __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && ((unsigned long) pd & 15))
     {
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineXorUPixelsse2 (s, d);
+        *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
         w--;
 	ps++;
 	if (pm)
@@ -1211,34 +1211,34 @@ coreCombineXorUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t *mask, i
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmSrc = combine4 ((__m128i*) ps, (__m128i*) pm);
-        xmmDst = load128Aligned ((__m128i*) pd);
+        xmm_src = combine4 ((__m128i*) ps, (__m128i*) pm);
+        xmm_dst = load_128_aligned ((__m128i*) pd);
 
-        unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        negate_2x128 (xmmAlphaSrcLo, xmmAlphaSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
-        negate_2x128 (xmmAlphaDstLo, xmmAlphaDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        negate_2x128 (xmm_alpha_src_lo, xmm_alpha_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+        negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        pixAddMultiply_2x128 ( &xmmSrcLo, &xmmSrcHi, &xmmAlphaDstLo, &xmmAlphaDstHi,
-                               &xmmDstLo, &xmmDstHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi,
-                               &xmmDstLo, &xmmDstHi );
+        pix_add_multiply_2x128 ( &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+                               &xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi,
+                               &xmm_dst_lo, &xmm_dst_hi );
 
-        save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1252,7 +1252,7 @@ coreCombineXorUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t *mask, i
         s = combine1 (ps, pm);
         d = *pd;
 
-        *pd++ = coreCombineXorUPixelsse2 (s, d);
+        *pd++ = core_combine_xor_u_pixel_sse2 (s, d);
         w--;
 	ps++;
 	if (pm)
@@ -1261,7 +1261,7 @@ coreCombineXorUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t *mask, i
 }
 
 static force_inline void
-coreCombineAddUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t* mask, int width)
+core_combine_add_u_sse2 (uint32_t* dst, const uint32_t* src, const uint32_t* mask, int width)
 {
     int w = width;
     uint32_t s,d;
@@ -1270,9 +1270,9 @@ coreCombineAddUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t* mask, i
     const uint32_t* pm = mask;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1286,23 +1286,23 @@ coreCombineAddUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t* mask, i
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
 	__m128i s;
 	
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
 	s = combine4((__m128i*)ps,(__m128i*)pm);
 	
-        save128Aligned( (__m128i*)pd,
-                        _mm_adds_epu8( s, load128Aligned  ((__m128i*)pd)) );
+        save_128_aligned( (__m128i*)pd,
+                        _mm_adds_epu8( s, load_128_aligned  ((__m128i*)pd)) );
         pd += 4;
         ps += 4;
 	if (pm)
@@ -1322,7 +1322,7 @@ coreCombineAddUsse2 (uint32_t* dst, const uint32_t* src, const uint32_t* mask, i
 }
 
 static force_inline uint32_t
-coreCombineSaturateUPixelsse2 (uint32_t src, uint32_t dst)
+core_combine_saturate_u_pixel_sse2 (uint32_t src, uint32_t dst)
 {
     __m64 ms = unpack_32_1x64 (src);
     __m64 md = unpack_32_1x64 (dst);
@@ -1331,30 +1331,30 @@ coreCombineSaturateUPixelsse2 (uint32_t src, uint32_t dst)
 
     if (sa > da)
     {
-        ms = pixMultiply_1x64 (ms, expandAlpha_1x64 (unpack_32_1x64 (DIV_UN8(da, sa) << 24)));
+        ms = pix_multiply_1x64 (ms, expand_alpha_1x64 (unpack_32_1x64 (DIV_UN8(da, sa) << 24)));
     }
 
     return pack_1x64_32 (_mm_adds_pu16 (md, ms));
 }
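/* Illustrative sketch, not taken from the patch: the saturate pixel helper
 * above adds src into dst, but when src_alpha exceeds the headroom left in
 * the destination (255 - dst_alpha) it first scales every src channel by
 * (255 - dst_alpha) / src_alpha so the saturating add does not clip.
 * A hypothetical scalar version for one a8r8g8b8 pixel (<stdint.h> types;
 * the rounding differs slightly from DIV_UN8):
 */
static uint32_t
saturate_pixel (uint32_t src, uint32_t dst)
{
    uint32_t sa     = src >> 24;
    uint32_t room   = 255 - (dst >> 24);    /* ~alpha of the destination */
    uint32_t result = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8)
    {
        uint32_t s = (src >> shift) & 0xff;
        uint32_t d = (dst >> shift) & 0xff;

        if (sa > room)                       /* scale src by room / sa */
            s = (s * room + sa / 2) / sa;

        s += d;
        result |= (s > 255 ? 255 : s) << shift;
    }

    return result;
}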
 
 static force_inline void
-coreCombineSaturateUsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_saturate_u_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s,d;
 
-    uint32_t packCmp;
-    __m128i xmmSrc, xmmDst;
+    uint32_t pack_cmp;
+    __m128i xmm_src, xmm_dst;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
         s = combine1 (ps, pm);
         d = *pd;
-        *pd++ = coreCombineSaturateUPixelsse2 (s, d);
+        *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
         w--;
 	ps++;
 	if (pm)
@@ -1362,53 +1362,53 @@ coreCombineSaturateUsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm,
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-	cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+	cache_prefetch_next ((__m128i*)pm);
 
-        xmmDst = load128Aligned  ((__m128i*)pd);
-        xmmSrc = combine4 ((__m128i*)ps, (__m128i*)pm);
+        xmm_dst = load_128_aligned  ((__m128i*)pd);
+        xmm_src = combine4 ((__m128i*)ps, (__m128i*)pm);
 
-        packCmp = _mm_movemask_epi8 (_mm_cmpgt_epi32 (_mm_srli_epi32 (xmmSrc, 24),
-                                                      _mm_srli_epi32 (_mm_xor_si128 (xmmDst, Maskff000000), 24)));
+        pack_cmp = _mm_movemask_epi8 (_mm_cmpgt_epi32 (_mm_srli_epi32 (xmm_src, 24),
+                                                      _mm_srli_epi32 (_mm_xor_si128 (xmm_dst, mask_ff000000), 24)));
 
         /* if some alpha src is greater than the respective ~alpha dst */
-        if (packCmp)
+        if (pack_cmp)
         {
             s = combine1 (ps++, pm);
             d = *pd;
-            *pd++ = coreCombineSaturateUPixelsse2 (s, d);
+            *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
 	    if (pm)
 		pm++;
 
             s = combine1 (ps++, pm);
             d = *pd;
-            *pd++ = coreCombineSaturateUPixelsse2 (s, d);
+            *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
 	    if (pm)
 		pm++;
 
             s = combine1 (ps++, pm);
             d = *pd;
-            *pd++ = coreCombineSaturateUPixelsse2 (s, d);
+            *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
 	    if (pm)
 		pm++;
 
             s = combine1 (ps++, pm);
             d = *pd;
-            *pd++ = coreCombineSaturateUPixelsse2 (s, d);
+            *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
 	    if (pm)
 		pm++;
         }
         else
         {
-            save128Aligned ((__m128i*)pd, _mm_adds_epu8 (xmmDst, xmmSrc));
+            save_128_aligned ((__m128i*)pd, _mm_adds_epu8 (xmm_dst, xmm_src));
 
             pd += 4;
             ps += 4;
@@ -1423,7 +1423,7 @@ coreCombineSaturateUsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm,
     {
         s = combine1 (ps, pm);
         d = *pd;
-        *pd++ = coreCombineSaturateUPixelsse2 (s, d);
+        *pd++ = core_combine_saturate_u_pixel_sse2 (s, d);
 	ps++;
 	if (pm)
 	    pm++;
@@ -1431,48 +1431,48 @@ coreCombineSaturateUsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm,
 }
 
 static force_inline void
-coreCombineSrcCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
+core_combine_src_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
 {
     uint32_t s, m;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmMaskLo, xmmMaskHi;
-    __m128i xmmDstLo, xmmDstHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
         s = *ps++;
         m = *pm++;
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1484,36 +1484,36 @@ coreCombineSrcCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w
     {
         s = *ps++;
         m = *pm++;
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)));
         w--;
     }
 }
 
 static force_inline uint32_t
-coreCombineOverCPixelsse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_over_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 s = unpack_32_1x64 (src);
-    __m64 expAlpha = expandAlpha_1x64 (s);
-    __m64 unpkMask = unpack_32_1x64 (mask);
-    __m64 unpkDst  = unpack_32_1x64 (dst);
+    __m64 expAlpha = expand_alpha_1x64 (s);
+    __m64 unpk_mask = unpack_32_1x64 (mask);
+    __m64 unpk_dst  = unpack_32_1x64 (dst);
 
-    return pack_1x64_32 (inOver_1x64 (&s, &expAlpha, &unpkMask, &unpkDst));
+    return pack_1x64_32 (in_over_1x64 (&s, &expAlpha, &unpk_mask, &unpk_dst));
 }
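/* Illustrative sketch, not taken from the patch: core_combine_over_c_pixel_sse2
 * appears to combine "src IN mask" OVER dst with a component-alpha mask,
 * i.e. per channel
 *
 *     result = src * mask + dst * (1 - src_alpha * mask)
 *
 * A hypothetical scalar equivalent (<stdint.h> types, exact rounded division):
 */
static uint8_t
in_over_channel (uint8_t s, uint8_t sa, uint8_t m, uint8_t d)
{
    uint32_t masked_src   = ((uint32_t) s * m + 127) / 255;
    uint32_t masked_alpha = ((uint32_t) sa * m + 127) / 255;

    return (uint8_t) (masked_src +
                      ((uint32_t) d * (255 - masked_alpha) + 127) / 255);
}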
 
 static force_inline void
-coreCombineOverCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
+core_combine_over_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmAlphaLo, xmmAlphaHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1521,35 +1521,35 @@ coreCombineOverCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineOverCPixelsse2 (s, m, d);
+        *pd++ = core_combine_over_c_pixel_sse2 (s, m, d);
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-        inOver_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi, &xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi);
+        in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1563,33 +1563,33 @@ coreCombineOverCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineOverCPixelsse2 (s, m, d);
+        *pd++ = core_combine_over_c_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline uint32_t
-coreCombineOverReverseCPixelsse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_over_reverse_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 d = unpack_32_1x64 (dst);
 
-	return pack_1x64_32(over_1x64 (d, expandAlpha_1x64 (d), pixMultiply_1x64 (unpack_32_1x64 (src), unpack_32_1x64 (mask))));
+	return pack_1x64_32(over_1x64 (d, expand_alpha_1x64 (d), pix_multiply_1x64 (unpack_32_1x64 (src), unpack_32_1x64 (mask))));
 }
 
 static force_inline void
-coreCombineOverReverseCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
+core_combine_over_reverse_c_sse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmAlphaLo, xmmAlphaHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1597,36 +1597,36 @@ coreCombineOverReverseCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *p
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineOverReverseCPixelsse2 (s, m, d);
+        *pd++ = core_combine_over_reverse_c_pixel_sse2 (s, m, d);
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaLo, &xmmAlphaHi);
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        over_2x128 (&xmmDstLo, &xmmDstHi, &xmmAlphaLo, &xmmAlphaHi, &xmmMaskLo, &xmmMaskHi);
+        over_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmMaskLo, xmmMaskHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
 
         ps += 4;
         pd += 4;
@@ -1640,25 +1640,25 @@ coreCombineOverReverseCsse2 (uint32_t* pd, const uint32_t* ps, const uint32_t *p
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineOverReverseCPixelsse2 (s, m, d);
+        *pd++ = core_combine_over_reverse_c_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline void
-coreCombineInCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_in_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmAlphaLo, xmmAlphaHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1666,37 +1666,37 @@ coreCombineInCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (pixMultiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
-                                                expandAlpha_1x64 (unpack_32_1x64 (d))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
+                                                expand_alpha_1x64 (unpack_32_1x64 (d))));
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaLo, &xmmAlphaHi);
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        pixMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmAlphaLo, &xmmAlphaHi, &xmmDstLo, &xmmDstHi);
+        pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1710,26 +1710,26 @@ coreCombineInCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (pixMultiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
-                                                expandAlpha_1x64 (unpack_32_1x64 (d))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
+                                                expand_alpha_1x64 (unpack_32_1x64 (d))));
         w--;
     }
 }
 
 static force_inline void
-coreCombineInReverseCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_in_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmAlphaLo, xmmAlphaHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1737,38 +1737,38 @@ coreCombineInReverseCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm,
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (d),
-                                                pixMultiply_1x64 (unpack_32_1x64 (m),
-                                                                  expandAlpha_1x64 (unpack_32_1x64 (s)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
+                                                pix_multiply_1x64 (unpack_32_1x64 (m),
+                                                                  expand_alpha_1x64 (unpack_32_1x64 (s)))));
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi);
-        pixMultiply_2x128 (&xmmMaskLo, &xmmMaskHi, &xmmAlphaLo, &xmmAlphaHi, &xmmAlphaLo, &xmmAlphaHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
+        pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-        pixMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmAlphaLo, &xmmAlphaHi, &xmmDstLo, &xmmDstHi);
+        pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1782,27 +1782,27 @@ coreCombineInReverseCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm,
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (d),
-                                                pixMultiply_1x64 (unpack_32_1x64 (m),
-                                                                  expandAlpha_1x64 (unpack_32_1x64 (s)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
+                                                pix_multiply_1x64 (unpack_32_1x64 (m),
+                                                                  expand_alpha_1x64 (unpack_32_1x64 (s)))));
         w--;
     }
 }
 
 static force_inline void
-coreCombineOutCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_out_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmAlphaLo, xmmAlphaHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1810,38 +1810,38 @@ coreCombineOutCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (pixMultiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
-                                                negate_1x64 (expandAlpha_1x64 (unpack_32_1x64 (d)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
+                                                negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaLo, &xmmAlphaHi);
-        negate_2x128 (xmmAlphaLo, xmmAlphaHi, &xmmAlphaLo, &xmmAlphaHi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi);
+        negate_2x128 (xmm_alpha_lo, xmm_alpha_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi);
-        pixMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmAlphaLo, &xmmAlphaHi, &xmmDstLo, &xmmDstHi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
+        pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1855,26 +1855,26 @@ coreCombineOutCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (pixMultiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
-                                                negate_1x64 (expandAlpha_1x64 (unpack_32_1x64 (d)))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (m)),
+                                                negate_1x64 (expand_alpha_1x64 (unpack_32_1x64 (d)))));
         w--;
     }
 }
 
 static force_inline void
-coreCombineOutReverseCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_out_reverse_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmAlphaLo, xmmAlphaHi;
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1882,41 +1882,41 @@ coreCombineOutReverseCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (d),
-                                                negate_1x64 (pixMultiply_1x64 (unpack_32_1x64 (m),
-                                                                               expandAlpha_1x64 (unpack_32_1x64 (s))))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
+                                                negate_1x64 (pix_multiply_1x64 (unpack_32_1x64 (m),
+                                                                               expand_alpha_1x64 (unpack_32_1x64 (s))))));
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-        pixMultiply_2x128 (&xmmMaskLo, &xmmMaskHi, &xmmAlphaLo, &xmmAlphaHi, &xmmMaskLo, &xmmMaskHi);
+        pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        negate_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        pixMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi);
+        pix_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -1930,43 +1930,43 @@ coreCombineOutReverseCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (d),
-                                                negate_1x64 (pixMultiply_1x64 (unpack_32_1x64 (m),
-                                                                               expandAlpha_1x64 (unpack_32_1x64 (s))))));
+        *pd++ = pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (d),
+                                                negate_1x64 (pix_multiply_1x64 (unpack_32_1x64 (m),
+                                                                               expand_alpha_1x64 (unpack_32_1x64 (s))))));
         w--;
     }
 }
 
 static force_inline uint32_t
-coreCombineAtopCPixelsse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 m = unpack_32_1x64 (mask);
     __m64 s = unpack_32_1x64 (src);
     __m64 d = unpack_32_1x64 (dst);
-    __m64 sa = expandAlpha_1x64 (s);
-    __m64 da = expandAlpha_1x64 (d);
+    __m64 sa = expand_alpha_1x64 (s);
+    __m64 da = expand_alpha_1x64 (d);
 
-    s = pixMultiply_1x64 (s, m);
-    m = negate_1x64 (pixMultiply_1x64 (m, sa));
+    s = pix_multiply_1x64 (s, m);
+    m = negate_1x64 (pix_multiply_1x64 (m, sa));
 
-    return pack_1x64_32 (pixAddMultiply_1x64 (&d, &m, &s, &da));
+    return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
 }
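/* Illustrative sketch, not taken from the patch: with a component-alpha mask,
 * the atop pixel helper above appears to compute, per channel,
 *
 *     result = dst * (1 - mask * src_alpha) + (src * mask) * dst_alpha
 *
 * Hypothetical scalar form (<stdint.h> types, exact rounded division):
 */
static uint8_t
atop_ca_channel (uint8_t s, uint8_t sa, uint8_t m, uint8_t d, uint8_t da)
{
    uint32_t masked_src   = ((uint32_t) s * m + 127) / 255;
    uint32_t masked_alpha = ((uint32_t) m * sa + 127) / 255;

    return (uint8_t) (((uint32_t) d * (255 - masked_alpha) +
                       masked_src * da + 127) / 255);
}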
 
 static force_inline void
-coreCombineAtopCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmAlphaSrcLo, xmmAlphaSrcHi;
-    __m128i xmmAlphaDstLo, xmmAlphaDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+    __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -1974,43 +1974,43 @@ coreCombineAtopCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineAtopCPixelsse2 (s, m, d);
+        *pd++ = core_combine_atop_c_pixel_sse2 (s, m, d);
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmSrcLo, &xmmSrcHi);
-        pixMultiply_2x128 (&xmmMaskLo, &xmmMaskHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi, &xmmMaskLo, &xmmMaskHi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
+        pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        negate_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        pixAddMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmMaskLo, &xmmMaskHi,
-                              &xmmSrcLo, &xmmSrcHi, &xmmAlphaDstLo, &xmmAlphaDstHi,
-                              &xmmDstLo, &xmmDstHi);
+        pix_add_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
+                              &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+                              &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -2024,42 +2024,42 @@ coreCombineAtopCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineAtopCPixelsse2 (s, m, d);
+        *pd++ = core_combine_atop_c_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline uint32_t
-coreCombineReverseAtopCPixelsse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_reverse_atop_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 m = unpack_32_1x64 (mask);
     __m64 s = unpack_32_1x64 (src);
     __m64 d = unpack_32_1x64 (dst);
 
-    __m64 da = negate_1x64 (expandAlpha_1x64 (d));
-    __m64 sa = expandAlpha_1x64 (s);
+    __m64 da = negate_1x64 (expand_alpha_1x64 (d));
+    __m64 sa = expand_alpha_1x64 (s);
 
-    s = pixMultiply_1x64 (s, m);
-    m = pixMultiply_1x64 (m, sa);
+    s = pix_multiply_1x64 (s, m);
+    m = pix_multiply_1x64 (m, sa);
 
-    return pack_1x64_32 (pixAddMultiply_1x64 (&d, &m, &s, &da));
+    return pack_1x64_32 (pix_add_multiply_1x64 (&d, &m, &s, &da));
 }
 
 static force_inline void
-coreCombineReverseAtopCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_reverse_atop_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmAlphaSrcLo, xmmAlphaSrcHi;
-    __m128i xmmAlphaDstLo, xmmAlphaDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+    __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -2067,43 +2067,43 @@ coreCombineReverseAtopCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *p
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineReverseAtopCPixelsse2 (s, m, d);
+        *pd++ = core_combine_reverse_atop_c_pixel_sse2 (s, m, d);
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmSrcLo, &xmmSrcHi);
-        pixMultiply_2x128 (&xmmMaskLo, &xmmMaskHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi, &xmmMaskLo, &xmmMaskHi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
+        pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        negate_2x128 (xmmAlphaDstLo, xmmAlphaDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        pixAddMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmMaskLo, &xmmMaskHi,
-                              &xmmSrcLo, &xmmSrcHi, &xmmAlphaDstLo, &xmmAlphaDstHi,
-                              &xmmDstLo, &xmmDstHi);
+        pix_add_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
+                              &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+                              &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -2117,43 +2117,43 @@ coreCombineReverseAtopCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *p
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineReverseAtopCPixelsse2 (s, m, d);
+        *pd++ = core_combine_reverse_atop_c_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline uint32_t
-coreCombineXorCPixelsse2 (uint32_t src, uint32_t mask, uint32_t dst)
+core_combine_xor_c_pixel_sse2 (uint32_t src, uint32_t mask, uint32_t dst)
 {
     __m64 a = unpack_32_1x64 (mask);
     __m64 s = unpack_32_1x64 (src);
     __m64 d = unpack_32_1x64 (dst);
 
-    __m64 alphaDst = negate_1x64 (pixMultiply_1x64 (a, expandAlpha_1x64 (s)));
-    __m64 dest      = pixMultiply_1x64 (s, a);
-    __m64 alphaSrc = negate_1x64 (expandAlpha_1x64 (d));
+    __m64 alpha_dst = negate_1x64 (pix_multiply_1x64 (a, expand_alpha_1x64 (s)));
+    __m64 dest      = pix_multiply_1x64 (s, a);
+    __m64 alpha_src = negate_1x64 (expand_alpha_1x64 (d));
 
-    return pack_1x64_32 (pixAddMultiply_1x64 (&d,
-                                              &alphaDst,
+    return pack_1x64_32 (pix_add_multiply_1x64 (&d,
+                                              &alpha_dst,
                                               &dest,
-                                              &alphaSrc));
+                                              &alpha_src));
 }
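/* Illustrative sketch, not taken from the patch: the component-alpha XOR
 * helper above appears to reduce, per channel, to
 *
 *     result = dst * (1 - mask * src_alpha) + (src * mask) * (1 - dst_alpha)
 *
 * Hypothetical scalar form (<stdint.h> types, exact rounded division):
 */
static uint8_t
xor_ca_channel (uint8_t s, uint8_t sa, uint8_t m, uint8_t d, uint8_t da)
{
    uint32_t masked_src   = ((uint32_t) s * m + 127) / 255;
    uint32_t masked_alpha = ((uint32_t) m * sa + 127) / 255;

    return (uint8_t) (((uint32_t) d * (255 - masked_alpha) +
                       masked_src * (255 - da) + 127) / 255);
}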
 
 static force_inline void
-coreCombineXorCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_xor_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmAlphaSrcLo, xmmAlphaSrcHi;
-    __m128i xmmAlphaDstLo, xmmAlphaDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_alpha_src_lo, xmm_alpha_src_hi;
+    __m128i xmm_alpha_dst_lo, xmm_alpha_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -2161,44 +2161,44 @@ coreCombineXorCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineXorCPixelsse2 (s, m, d);
+        *pd++ = core_combine_xor_c_pixel_sse2 (s, m, d);
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmDstHi = load128Aligned ((__m128i*)pd);
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
 
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi);
-        expandAlpha_2x128 (xmmDstLo, xmmDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
+        expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi);
+        expand_alpha_2x128 (xmm_dst_lo, xmm_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
 
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmSrcLo, &xmmSrcHi);
-        pixMultiply_2x128 (&xmmMaskLo, &xmmMaskHi, &xmmAlphaSrcLo, &xmmAlphaSrcHi, &xmmMaskLo, &xmmMaskHi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
+        pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_alpha_src_lo, &xmm_alpha_src_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        negate_2x128 (xmmAlphaDstLo, xmmAlphaDstHi, &xmmAlphaDstLo, &xmmAlphaDstHi);
-        negate_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+        negate_2x128 (xmm_alpha_dst_lo, xmm_alpha_dst_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi);
+        negate_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-        pixAddMultiply_2x128 (&xmmDstLo, &xmmDstHi, &xmmMaskLo, &xmmMaskHi,
-                              &xmmSrcLo, &xmmSrcHi, &xmmAlphaDstLo, &xmmAlphaDstHi,
-                              &xmmDstLo, &xmmDstHi);
+        pix_add_multiply_2x128 (&xmm_dst_lo, &xmm_dst_hi, &xmm_mask_lo, &xmm_mask_hi,
+                              &xmm_src_lo, &xmm_src_hi, &xmm_alpha_dst_lo, &xmm_alpha_dst_hi,
+                              &xmm_dst_lo, &xmm_dst_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
         ps += 4;
         pd += 4;
@@ -2212,24 +2212,24 @@ coreCombineXorCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w
         m = *pm++;
         d = *pd;
 
-        *pd++ = coreCombineXorCPixelsse2 (s, m, d);
+        *pd++ = core_combine_xor_c_pixel_sse2 (s, m, d);
         w--;
     }
 }
 
 static force_inline void
-coreCombineAddCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
+core_combine_add_c_sse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w)
 {
     uint32_t s, m, d;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
-    __m128i xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask_lo, xmm_mask_hi;
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w && (unsigned long)pd & 15)
     {
@@ -2237,36 +2237,36 @@ coreCombineAddCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (_mm_adds_pu8 (pixMultiply_1x64 (unpack_32_1x64 (s),
+        *pd++ = pack_1x64_32 (_mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
                                                               unpack_32_1x64 (m)),
                                             unpack_32_1x64 (d)));
         w--;
     }
 
     /* call prefetch hint to optimize cache load*/
-    cachePrefetch ((__m128i*)ps);
-    cachePrefetch ((__m128i*)pd);
-    cachePrefetch ((__m128i*)pm);
+    cache_prefetch ((__m128i*)ps);
+    cache_prefetch ((__m128i*)pd);
+    cache_prefetch ((__m128i*)pm);
 
     while (w >= 4)
     {
         /* fill cache line with next memory */
-        cachePrefetchNext ((__m128i*)ps);
-        cachePrefetchNext ((__m128i*)pd);
-        cachePrefetchNext ((__m128i*)pm);
+        cache_prefetch_next ((__m128i*)ps);
+        cache_prefetch_next ((__m128i*)pd);
+        cache_prefetch_next ((__m128i*)pm);
 
-        xmmSrcHi = load128Unaligned ((__m128i*)ps);
-        xmmMaskHi = load128Unaligned ((__m128i*)pm);
-        xmmDstHi = load128Aligned ((__m128i*)pd);
+        xmm_src_hi = load_128_unaligned ((__m128i*)ps);
+        xmm_mask_hi = load_128_unaligned ((__m128i*)pm);
+        xmm_dst_hi = load_128_aligned ((__m128i*)pd);
 
-        unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
-        unpack_128_2x128 (xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
-        unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+        unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
+        unpack_128_2x128 (xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+        unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-        pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmMaskLo, &xmmMaskHi, &xmmSrcLo, &xmmSrcHi);
+        pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_mask_lo, &xmm_mask_hi, &xmm_src_lo, &xmm_src_hi);
 
-        save128Aligned( (__m128i*)pd, pack_2x128_128 (_mm_adds_epu8 (xmmSrcLo, xmmDstLo),
-                                                      _mm_adds_epu8 (xmmSrcHi, xmmDstHi)));
+        save_128_aligned( (__m128i*)pd, pack_2x128_128 (_mm_adds_epu8 (xmm_src_lo, xmm_dst_lo),
+                                                      _mm_adds_epu8 (xmm_src_hi, xmm_dst_hi)));
 
         ps += 4;
         pd += 4;
@@ -2280,7 +2280,7 @@ coreCombineAddCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w
         m = *pm++;
         d = *pd;
 
-        *pd++ = pack_1x64_32 (_mm_adds_pu8 (pixMultiply_1x64 (unpack_32_1x64 (s),
+        *pd++ = pack_1x64_32 (_mm_adds_pu8 (pix_multiply_1x64 (unpack_32_1x64 (s),
                                                               unpack_32_1x64 (m)),
                                             unpack_32_1x64 (d)));
         w--;
@@ -2288,28 +2288,28 @@ coreCombineAddCsse2 (uint32_t *pd, const uint32_t *ps, const uint32_t *pm, int w
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbComposeSetupSSE2
+ * fb_compose_setup_sSE2
  */
 static force_inline __m64
-createMask_16_64 (uint16_t mask)
+create_mask_16_64 (uint16_t mask)
 {
     return _mm_set1_pi16 (mask);
 }
 
 static force_inline __m128i
-createMask_16_128 (uint16_t mask)
+create_mask_16_128 (uint16_t mask)
 {
     return _mm_set1_epi16 (mask);
 }
 
 static force_inline __m64
-createMask_2x32_64 (uint32_t mask0, uint32_t mask1)
+create_mask_2x32_64 (uint32_t mask0, uint32_t mask1)
 {
     return _mm_set_pi32 (mask0, mask1);
 }
 
 static force_inline __m128i
-createMask_2x32_128 (uint32_t mask0, uint32_t mask1)
+create_mask_2x32_128 (uint32_t mask0, uint32_t mask1)
 {
     return _mm_set_epi32 (mask0, mask1, mask0, mask1);
 }
@@ -2317,187 +2317,187 @@ createMask_2x32_128 (uint32_t mask0, uint32_t mask1)
 /* SSE2 code patch for fbcompose.c */
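
The sse2combine_* functions that follow are thin adapters between the core combiners above and pixman's generic combiner signature. Each ends with _mm_empty () because the scalar head/tail code uses __m64 (MMX) registers, which alias the x87 floating-point stack and must be cleared before ordinary floating-point code runs again. A minimal illustration of the pattern, using the file's own 1x64 helpers (example_combine is a made-up name):

    static void
    example_combine (uint32_t *dst, const uint32_t *src, int width)
    {
        while (width--)
            *dst++ = pack_1x64_32 (unpack_32_1x64 (*src++));  /* __m64 math */

        _mm_empty ();   /* leave MMX state before any x87 code can run */
    }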
 
 static void
-sse2CombineOverU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_over_u (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineOverUsse2 (dst, src, mask, width);
+    core_combine_over_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineOverReverseUsse2 (dst, src, mask, width);
+    core_combine_over_reverse_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineInU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineInUsse2 (dst, src, mask, width);
+    core_combine_in_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineReverseInUsse2 (dst, src, mask, width);
+    core_combine_reverse_in_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineOutU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineOutUsse2 (dst, src, mask, width);
+    core_combine_out_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineReverseOutUsse2 (dst, src, mask, width);
+    core_combine_reverse_out_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineAtopUsse2 (dst, src, mask, width);
+    core_combine_atop_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineReverseAtopUsse2 (dst, src, mask, width);
+    core_combine_reverse_atop_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineXorU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineXorUsse2 (dst, src, mask, width);
+    core_combine_xor_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineAddU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineAddUsse2 (dst, src, mask, width);
+    core_combine_add_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_saturate_u (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineSaturateUsse2 (dst, src, mask, width);
+    core_combine_saturate_u_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineSrcCsse2 (dst, src, mask, width);
+    core_combine_src_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineOverC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineOverCsse2 (dst, src, mask, width);
+    core_combine_over_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineOverReverseCsse2 (dst, src, mask, width);
+    core_combine_over_reverse_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineInC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineInCsse2 (dst, src, mask, width);
+    core_combine_in_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineInReverseCsse2 (dst, src, mask, width);
+    core_combine_in_reverse_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineOutC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineOutCsse2 (dst, src, mask, width);
+    core_combine_out_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineOutReverseCsse2 (dst, src, mask, width);
+    core_combine_out_reverse_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 		  uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineAtopCsse2 (dst, src, mask, width);
+    core_combine_atop_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineReverseAtopCsse2 (dst, src, mask, width);
+    core_combine_reverse_atop_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineXorC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineXorCsse2 (dst, src, mask, width);
+    core_combine_xor_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 static void
-sse2CombineAddC (pixman_implementation_t *imp, pixman_op_t op,
+sse2combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dst, const uint32_t *src, const uint32_t *mask, int width)
 {
-    coreCombineAddCsse2 (dst, src, mask, width);
+    core_combine_add_c_sse2 (dst, src, mask, width);
     _mm_empty();
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeOver_n_8888
+ * fast_composite_over_n_8888
  */
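
sse2_composite_over_n_8888 below expands the solid source once, then walks each scanline in three phases: single pixels until dst reaches a 16-byte boundary, an aligned 4-pixel SSE2 body, and a scalar tail. Per pixel it computes the usual premultiplied OVER; a scalar reference, reusing the illustrative mul_un8 helper sketched earlier:

    /* OVER with premultiplied alpha: dest = src + (1 - src.a) * dest,
     * per channel; the SIMD version adds with saturation */
    static inline uint8_t
    over_channel (uint8_t s, uint8_t sa, uint8_t d)
    {
        return s + mul_un8 (255 - sa, d);
    }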
 
 static void
-sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
+sse2_composite_over_n_8888 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
@@ -2512,56 +2512,56 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			    int32_t	height)
 {
     uint32_t	src;
-    uint32_t	*dstLine, *dst, d;
+    uint32_t	*dst_line, *dst, d;
     uint16_t	w;
-    int	dstStride;
-    __m128i xmmSrc, xmmAlpha;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
+    int	dst_stride;
+    __m128i xmm_src, xmm_alpha;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
-    xmmSrc = expandPixel_32_1x128 (src);
-    xmmAlpha = expandAlpha_1x128 (xmmSrc);
+    xmm_src = expand_pixel_32_1x128 (src);
+    xmm_alpha = expand_alpha_1x128 (xmm_src);
 
     while (height--)
     {
-        dst = dstLine;
+        dst = dst_line;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)dst);
 
-        dstLine += dstStride;
+        dst_line += dst_stride;
         w = width;
 
         while (w && (unsigned long)dst & 15)
         {
             d = *dst;
-            *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmmSrc),
-                                              _mm_movepi64_pi64 (xmmAlpha),
+            *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+                                              _mm_movepi64_pi64 (xmm_alpha),
                                               unpack_32_1x64 (d)));
             w--;
         }
 
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)dst);
 
-            xmmDst = load128Aligned ((__m128i*)dst);
+            xmm_dst = load_128_aligned ((__m128i*)dst);
 
-            unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+            unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-            over_2x128 (&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmDstLo, &xmmDstHi);
+            over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_dst_lo, &xmm_dst_hi);
 
             /* rebuild the 4 pixel data and save */
-            save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+            save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
             w -= 4;
             dst += 4;
@@ -2570,8 +2570,8 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
         while (w)
         {
             d = *dst;
-            *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmmSrc),
-                                              _mm_movepi64_pi64 (xmmAlpha),
+            *dst++ = pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+                                              _mm_movepi64_pi64 (xmm_alpha),
                                               unpack_32_1x64 (d)));
             w--;
         }
@@ -2581,10 +2581,10 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeOver_n_0565
+ * fast_composite_over_n_0565
  */
 static void
-sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
+sse2_composite_over_n_0565 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
@@ -2599,59 +2599,59 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			    int32_t	height)
 {
     uint32_t	src;
-    uint16_t	*dstLine, *dst, d;
+    uint16_t	*dst_line, *dst, d;
     uint16_t	w;
-    int	        dstStride;
-    __m128i xmmSrc, xmmAlpha;
-    __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
+    int	        dst_stride;
+    __m128i xmm_src, xmm_alpha;
+    __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
         return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
-    xmmSrc = expandPixel_32_1x128 (src);
-    xmmAlpha = expandAlpha_1x128 (xmmSrc);
+    xmm_src = expand_pixel_32_1x128 (src);
+    xmm_alpha = expand_alpha_1x128 (xmm_src);
 
     while (height--)
     {
-        dst = dstLine;
+        dst = dst_line;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)dst);
 
-        dstLine += dstStride;
+        dst_line += dst_stride;
         w = width;
 
         while (w && (unsigned long)dst & 15)
         {
             d = *dst;
 
-            *dst++ = pack565_32_16 (pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmmSrc),
-                                                             _mm_movepi64_pi64 (xmmAlpha),
+            *dst++ = pack_565_32_16 (pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+                                                             _mm_movepi64_pi64 (xmm_alpha),
                                                              expand565_16_1x64 (d))));
             w--;
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 8)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)dst);
 
-	    xmmDst = load128Aligned ((__m128i*)dst);
+	    xmm_dst = load_128_aligned ((__m128i*)dst);
 	    
-	    unpack565_128_4x128 (xmmDst, &xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3);
+	    unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
 	    
-            over_2x128 (&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmDst0, &xmmDst1);
-            over_2x128 (&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmDst2, &xmmDst3);
+            over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_dst0, &xmm_dst1);
+            over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_dst2, &xmm_dst3);
 
-            xmmDst = pack565_4x128_128 (&xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3);
-            save128Aligned ((__m128i*)dst, xmmDst);
+            xmm_dst = pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+            save_128_aligned ((__m128i*)dst, xmm_dst);
 
             dst += 8;
             w -= 8;
@@ -2660,8 +2660,8 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
         while (w--)
         {
             d = *dst;
-            *dst++ = pack565_32_16 (pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmmSrc),
-                                                             _mm_movepi64_pi64 (xmmAlpha),
+            *dst++ = pack_565_32_16 (pack_1x64_32 (over_1x64 (_mm_movepi64_pi64 (xmm_src),
+                                                             _mm_movepi64_pi64 (xmm_alpha),
                                                              expand565_16_1x64 (d))));
         }
     }
@@ -2670,11 +2670,11 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
 }
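
The r5g6b5 paths (sse2_composite_over_n_0565 above, sse2_composite_over_8888_0565 further down) work by widening each 16-bit pixel to 8-bit channels, compositing, and packing the result back. In rough scalar terms; the SSE2 expand565_*/pack_565_* helpers keep intermediates in wider registers and may round the widening slightly differently, so treat these as approximations only:

    static inline uint32_t
    convert_0565_to_8888 (uint16_t p)
    {
        uint32_t r = (p >> 11) & 0x1f, g = (p >> 5) & 0x3f, b = p & 0x1f;

        /* widen by bit replication, so 0x1f -> 0xff and 0x3f -> 0xff */
        r = (r << 3) | (r >> 2);
        g = (g << 2) | (g >> 4);
        b = (b << 3) | (b >> 2);

        return 0xff000000 | (r << 16) | (g << 8) | b;
    }

    static inline uint16_t
    convert_8888_to_0565 (uint32_t p)
    {
        /* keep the top 5/6/5 bits of each channel */
        return ((p >> 8) & 0xf800) | ((p >> 5) & 0x07e0) | ((p >> 3) & 0x001f);
    }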
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeOver_n_8888_8888_ca
+ * fast_composite_over_n_8888_8888_ca
  */
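
Two details of sse2_composite_over_n_8888_8888_ca below deserve a note. First, the 4-pixel loop tests the mask with _mm_cmpeq_epi32 followed by _mm_movemask_epi8; a result of 0xffff means every mask byte compared equal to zero, so the whole store is skipped. Second, the per-pixel math is the component-alpha form of IN followed by OVER, roughly (again with the illustrative mul_un8):

    /* in_over with a per-channel mask: the mask scales both the source
     * channel and the source alpha held against the destination */
    static inline uint8_t
    in_over_channel (uint8_t s, uint8_t sa, uint8_t m, uint8_t d)
    {
        return mul_un8 (s, m) + mul_un8 (255 - mul_un8 (sa, m), d);
    }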
 
 static void
-sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
+sse2_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 				       pixman_op_t op,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
@@ -2689,42 +2689,42 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				      int32_t	height)
 {
     uint32_t	src;
-    uint32_t	*dstLine, d;
-    uint32_t	*maskLine, m;
-    uint32_t    packCmp;
-    int	dstStride, maskStride;
+    uint32_t	*dst_line, d;
+    uint32_t	*mask_line, m;
+    uint32_t    pack_cmp;
+    int	dst_stride, mask_stride;
 
-    __m128i xmmSrc, xmmAlpha;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src, xmm_alpha;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
 
-    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmdest_x;
+    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmdest_x;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
-    xmmSrc = _mm_unpacklo_epi8 (createMask_2x32_128 (src, src), _mm_setzero_si128 ());
-    xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmsrc_x   = _mm_movepi64_pi64 (xmmSrc);
-    mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
+    xmm_src = _mm_unpacklo_epi8 (create_mask_2x32_128 (src, src), _mm_setzero_si128 ());
+    xmm_alpha = expand_alpha_1x128 (xmm_src);
+    mmsrc_x   = _mm_movepi64_pi64 (xmm_src);
+    mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
     {
         int w = width;
-        const uint32_t *pm = (uint32_t *)maskLine;
-        uint32_t *pd = (uint32_t *)dstLine;
+        const uint32_t *pm = (uint32_t *)mask_line;
+        uint32_t *pd = (uint32_t *)dst_line;
 
-        dstLine += dstStride;
-        maskLine += maskStride;
+        dst_line += dst_stride;
+        mask_line += mask_stride;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)pd);
-        cachePrefetch ((__m128i*)pm);
+        cache_prefetch ((__m128i*)pd);
+        cache_prefetch ((__m128i*)pm);
 
         while (w && (unsigned long)pd & 15)
         {
@@ -2736,8 +2736,8 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
                 mmmask_x = unpack_32_1x64 (m);
                 mmdest_x = unpack_32_1x64 (d);
 
-                *pd = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                 &mmxAlpha,
+                *pd = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                 &mmx_alpha,
                                                  &mmmask_x,
                                                  &mmdest_x));
             }
@@ -2747,30 +2747,30 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)pd);
-        cachePrefetch ((__m128i*)pm);
+        cache_prefetch ((__m128i*)pd);
+        cache_prefetch ((__m128i*)pm);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)pd);
-            cachePrefetchNext ((__m128i*)pm);
+            cache_prefetch_next ((__m128i*)pd);
+            cache_prefetch_next ((__m128i*)pm);
 
-            xmmMask = load128Unaligned ((__m128i*)pm);
+            xmm_mask = load_128_unaligned ((__m128i*)pm);
 
-            packCmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmmMask, _mm_setzero_si128()));
+            pack_cmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128()));
 
-            /* if all bits in mask are zero, packCmp are equal to 0xffff */
-            if (packCmp != 0xffff)
+            /* if all bits in mask are zero, pack_cmp are equal to 0xffff */
+            if (pack_cmp != 0xffff)
             {
-                xmmDst = load128Aligned ((__m128i*)pd);
+                xmm_dst = load_128_aligned ((__m128i*)pd);
 
-                unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
-                unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+                unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+                unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-                inOver_2x128 (&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi);
+                in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-                save128Aligned ((__m128i*)pd, pack_2x128_128 (xmmDstLo, xmmDstHi));
+                save_128_aligned ((__m128i*)pd, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
             }
 
             pd += 4;
@@ -2788,8 +2788,8 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
                 mmmask_x = unpack_32_1x64 (m);
                 mmdest_x = unpack_32_1x64 (d);
 
-                *pd = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                 &mmxAlpha,
+                *pd = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                 &mmx_alpha,
                                                  &mmmask_x,
                                                  &mmdest_x));
             }
@@ -2822,34 +2822,34 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 			       int32_t     width,
 			       int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
     uint32_t	mask;
     uint16_t	w;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
 
-    __m128i xmmMask;
-    __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
-    __m128i xmmAlphaLo, xmmAlphaHi;
+    __m128i xmm_mask;
+    __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
-    xmmMask = createMask_16_128 (mask >> 24);
+    xmm_mask = create_mask_16_128 (mask >> 24);
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        src = srcLine;
-        srcLine += srcStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        src = src_line;
+        src_line += src_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)dst);
-        cachePrefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
 
         while (w && (unsigned long)dst & 15)
         {
@@ -2857,38 +2857,38 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
             uint32_t d = *dst;
 
             __m64 ms = unpack_32_1x64 (s);
-            __m64 alpha    = expandAlpha_1x64 (ms);
-            __m64 dest     = _mm_movepi64_pi64 (xmmMask);
-            __m64 alphaDst = unpack_32_1x64 (d);
+            __m64 alpha    = expand_alpha_1x64 (ms);
+            __m64 dest     = _mm_movepi64_pi64 (xmm_mask);
+            __m64 alpha_dst = unpack_32_1x64 (d);
 
-            *dst++ = pack_1x64_32 (inOver_1x64 (&ms,
+            *dst++ = pack_1x64_32 (in_over_1x64 (&ms,
                                                 &alpha,
                                                 &dest,
-                                                &alphaDst));
+                                                &alpha_dst));
 
             w--;
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)dst);
-        cachePrefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)dst);
-            cachePrefetchNext ((__m128i*)src);
+            cache_prefetch_next ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)src);
 
-            xmmSrc = load128Unaligned ((__m128i*)src);
-            xmmDst = load128Aligned ((__m128i*)dst);
+            xmm_src = load_128_unaligned ((__m128i*)src);
+            xmm_dst = load_128_aligned ((__m128i*)dst);
 
-            unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
-            unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
-            expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi);
+            unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+            unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+            expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-            inOver_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi, &xmmMask, &xmmMask, &xmmDstLo, &xmmDstHi);
+            in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi);
 
-            save128Aligned( (__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+            save_128_aligned( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
             dst += 4;
             src += 4;
@@ -2901,11 +2901,11 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
             uint32_t d = *dst;
 
             __m64 ms = unpack_32_1x64 (s);
-            __m64 alpha = expandAlpha_1x64 (ms);
-            __m64 mask  = _mm_movepi64_pi64 (xmmMask);
+            __m64 alpha = expand_alpha_1x64 (ms);
+            __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
             __m64 dest  = unpack_32_1x64 (d);
 
-            *dst++ = pack_1x64_32 (inOver_1x64 (&ms,
+            *dst++ = pack_1x64_32 (in_over_1x64 (&ms,
                                                 &alpha,
                                                 &mask,
                                                 &dest));
@@ -2918,10 +2918,10 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_Composite_over_x888_n_8888
+ * fast_composite_over_x888_n_8888
  */
 static void
-sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
+sse2_composite_over_x888_n_8888 (pixman_implementation_t *imp,
 				pixman_op_t op,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
@@ -2935,34 +2935,34 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 			       int32_t     width,
 			       int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
     uint32_t	mask;
-    int	dstStride, srcStride;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
-    __m128i xmmMask, xmmAlpha;
-    __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
+    __m128i xmm_mask, xmm_alpha;
+    __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
-    xmmMask = createMask_16_128 (mask >> 24);
-    xmmAlpha = Mask00ff;
+    xmm_mask = create_mask_16_128 (mask >> 24);
+    xmm_alpha = mask_00ff;
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        src = srcLine;
-        srcLine += srcStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        src = src_line;
+        src_line += src_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)dst);
-        cachePrefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
 
         while (w && (unsigned long)dst & 15)
         {
@@ -2970,11 +2970,11 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
             uint32_t d = *dst;
 
             __m64 src   = unpack_32_1x64 (s);
-            __m64 alpha = _mm_movepi64_pi64 (xmmAlpha);
-            __m64 mask  = _mm_movepi64_pi64 (xmmMask);
+            __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
+            __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
             __m64 dest  = unpack_32_1x64 (d);
 
-            *dst++ = pack_1x64_32 (inOver_1x64 (&src,
+            *dst++ = pack_1x64_32 (in_over_1x64 (&src,
                                                 &alpha,
                                                 &mask,
                                                 &dest));
@@ -2983,24 +2983,24 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)dst);
-        cachePrefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)dst);
-            cachePrefetchNext ((__m128i*)src);
+            cache_prefetch_next ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)src);
 
-            xmmSrc = _mm_or_si128 (load128Unaligned ((__m128i*)src), Maskff000000);
-            xmmDst = load128Aligned ((__m128i*)dst);
+            xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
+            xmm_dst = load_128_aligned ((__m128i*)dst);
 
-            unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
-            unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+            unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+            unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-            inOver_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmAlpha, &xmmAlpha, &xmmMask, &xmmMask, &xmmDstLo, &xmmDstHi);
+            in_over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha, &xmm_alpha, &xmm_mask, &xmm_mask, &xmm_dst_lo, &xmm_dst_hi);
 
-            save128Aligned( (__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+            save_128_aligned( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
             dst += 4;
             src += 4;
@@ -3014,11 +3014,11 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
             uint32_t d = *dst;
 
             __m64 src  = unpack_32_1x64 (s);
-            __m64 alpha = _mm_movepi64_pi64 (xmmAlpha);
-            __m64 mask  = _mm_movepi64_pi64 (xmmMask);
+            __m64 alpha = _mm_movepi64_pi64 (xmm_alpha);
+            __m64 mask  = _mm_movepi64_pi64 (xmm_mask);
             __m64 dest  = unpack_32_1x64 (d);
 
-            *dst++ = pack_1x64_32 (inOver_1x64 (&src,
+            *dst++ = pack_1x64_32 (in_over_1x64 (&src,
                                                 &alpha,
                                                 &mask,
                                                 &dest));
@@ -3048,22 +3048,22 @@ sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
 			     int32_t     width,
 			     int32_t     height)
 {
-    int	        dstStride, srcStride;
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
+    int	        dst_stride, src_stride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
-    dst = dstLine;
-    src = srcLine;
+    dst = dst_line;
+    src = src_line;
 
     while (height--)
     {
-        coreCombineOverUsse2 (dst, src, NULL, width);
+        core_combine_over_u_sse2 (dst, src, NULL, width);
 
-        dst += dstStride;
-        src += srcStride;
+        dst += dst_stride;
+        src += src_stride;
     }
     _mm_empty();
 }
@@ -3077,8 +3077,8 @@ fast_composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
     __m64       ms;
 
     ms = unpack_32_1x64 (src);
-    return pack565_32_16( pack_1x64_32 (over_1x64 (ms,
-                                                   expandAlpha_1x64 (ms),
+    return pack_565_32_16( pack_1x64_32 (over_1x64 (ms,
+                                                   expand_alpha_1x64 (ms),
                                                    expand565_16_1x64 (dst))));
 }
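
fast_composite_over_8888_0565pixel above is the scalar helper used by the surrounding 8888-over-0565 path; expressed with the illustrative helpers sketched earlier, one pixel is simply widen, blend, narrow:

    static inline uint16_t
    over_8888_0565_scalar (uint32_t s, uint16_t d)
    {
        uint32_t d8 = convert_0565_to_8888 (d);                    /* widen  */
        uint8_t  sa = s >> 24;
        uint8_t  r  = over_channel ((s >> 16) & 0xff, sa, (d8 >> 16) & 0xff);
        uint8_t  g  = over_channel ((s >>  8) & 0xff, sa, (d8 >>  8) & 0xff);
        uint8_t  b  = over_channel ( s        & 0xff, sa,  d8        & 0xff);

        return convert_8888_to_0565 ((r << 16) | (g << 8) | b);    /* narrow */
    }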
 
@@ -3097,17 +3097,17 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 			     int32_t     width,
 			     int32_t     height)
 {
-    uint16_t	*dstLine, *dst, d;
-    uint32_t	*srcLine, *src, s;
-    int	dstStride, srcStride;
+    uint16_t	*dst_line, *dst, d;
+    uint32_t	*src_line, *src, s;
+    int	dst_stride, src_stride;
     uint16_t	w;
 
-    __m128i xmmAlphaLo, xmmAlphaHi;
-    __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
-    __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
+    __m128i xmm_alpha_lo, xmm_alpha_hi;
+    __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
 #if 0
     /* FIXME
@@ -3120,15 +3120,15 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 
     while (height--)
     {
-        dst = dstLine;
-        src = srcLine;
+        dst = dst_line;
+        src = src_line;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
-        dstLine += dstStride;
-        srcLine += srcStride;
+        dst_line += dst_stride;
+        src_line += src_stride;
         w = width;
 
         /* Align dst on a 16-byte boundary */
@@ -3143,37 +3143,37 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
         /* It's an 8 pixel loop */
         while (w >= 8)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)src);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)src);
+            cache_prefetch_next ((__m128i*)dst);
 
             /* I'm loading unaligned because I'm not sure about the address alignment. */
-            xmmSrc = load128Unaligned ((__m128i*) src);
-            xmmDst = load128Aligned ((__m128i*) dst);
+            xmm_src = load_128_unaligned ((__m128i*) src);
+            xmm_dst = load_128_aligned ((__m128i*) dst);
 
             /* Unpacking */
-            unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
-            unpack565_128_4x128 (xmmDst, &xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3);
-            expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi);
+            unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+            unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+            expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
             /* I'm loading the next 4 pixels from memory ahead of time to optimize the memory read. */
-            xmmSrc = load128Unaligned ((__m128i*) (src+4));
+            xmm_src = load_128_unaligned ((__m128i*) (src+4));
 
-            over_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi, &xmmDst0, &xmmDst1);
+            over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst0, &xmm_dst1);
 
             /* Unpacking */
-            unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
-            expandAlpha_2x128 (xmmSrcLo, xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi);
+            unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+            expand_alpha_2x128 (xmm_src_lo, xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi);
 
-            over_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmAlphaLo, &xmmAlphaHi, &xmmDst2, &xmmDst3);
+            over_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_alpha_lo, &xmm_alpha_hi, &xmm_dst2, &xmm_dst3);
 
-            save128Aligned ((__m128i*)dst, pack565_4x128_128 (&xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3));
+            save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
 
             w -= 8;
             dst += 8;
@@ -3193,11 +3193,11 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeOver_n_8_8888
+ * fast_composite_over_n_8_8888
  */
 
 static void
-sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
+sse2_composite_over_n_8_8888 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -3212,17 +3212,17 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				  int32_t     height)
 {
     uint32_t	src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint32_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t m, d;
 
-    __m128i xmmSrc, xmmAlpha, xmmDef;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src, xmm_alpha, xmm_def;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
 
-    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmxDest;
+    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -3230,26 +3230,26 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
-    xmmDef = createMask_2x32_128 (src, src);
-    xmmSrc = expandPixel_32_1x128 (src);
-    xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmsrc_x   = _mm_movepi64_pi64 (xmmSrc);
-    mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
+    xmm_def = create_mask_2x32_128 (src, src);
+    xmm_src = expand_pixel_32_1x128 (src);
+    xmm_alpha = expand_alpha_1x128 (xmm_src);
+    mmsrc_x   = _mm_movepi64_pi64 (xmm_src);
+    mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        mask = maskLine;
-        maskLine += maskStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        mask = mask_line;
+        mask_line += mask_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && (unsigned long)dst & 15)
         {
@@ -3258,13 +3258,13 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expandPixel_8_1x64 (m);
-                mmxDest = unpack_32_1x64 (d);
+                mmmask_x = expand_pixel_8_1x64 (m);
+                mmx_dest = unpack_32_1x64 (d);
 
-                *dst = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                  &mmxAlpha,
+                *dst = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                  &mmx_alpha,
                                                   &mmmask_x,
-                                                  &mmxDest));
+                                                  &mmx_dest));
             }
 
             w--;
@@ -3272,36 +3272,36 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)mask);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)mask);
+            cache_prefetch_next ((__m128i*)dst);
 
             m = *((uint32_t*)mask);
 
             if (srca == 0xff && m == 0xffffffff)
             {
-                save128Aligned ((__m128i*)dst, xmmDef);
+                save_128_aligned ((__m128i*)dst, xmm_def);
             }
             else if (m)
             {
-                xmmDst = load128Aligned ((__m128i*) dst);
-                xmmMask = unpack_32_1x128 (m);
-                xmmMask = _mm_unpacklo_epi8 (xmmMask, _mm_setzero_si128());
+                xmm_dst = load_128_aligned ((__m128i*) dst);
+                xmm_mask = unpack_32_1x128 (m);
+                xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
 
                 /* Unpacking */
-                unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
-                unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
+                unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
+                unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
 
-                expandAlphaRev_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+                expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-                inOver_2x128 (&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi);
+                in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-                save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+                save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
             }
 
             w -= 4;
@@ -3316,13 +3316,13 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expandPixel_8_1x64 (m);
-                mmxDest = unpack_32_1x64 (d);
+                mmmask_x = expand_pixel_8_1x64 (m);
+                mmx_dest = unpack_32_1x64 (d);
 
-                *dst = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                  &mmxAlpha,
+                *dst = pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                  &mmx_alpha,
                                                   &mmmask_x,
-                                                  &mmxDest));
+                                                  &mmx_dest));
             }
 
             w--;
@@ -3334,11 +3334,11 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 }
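
In sse2_composite_over_n_8_8888 above (and sse2_composite_src_n_8_8888 further down), the a8 mask is fetched four bytes at a time as one uint32_t so the common extremes can short-circuit: all-0xff coverage with an opaque source is a plain solid store of xmm_def, and an all-zero mask writes nothing. In outline; blend_one is a placeholder for the per-channel in_over of the earlier sketches, not a pixman function:

    static uint32_t blend_one (uint32_t d, uint32_t src, uint8_t srca, uint8_t m);

    static void
    over_n_8_8888_four (uint32_t *dst, uint32_t src, uint8_t srca,
                        const uint8_t *mask)
    {
        uint32_t m4 = *(const uint32_t *) mask;   /* four a8 values at once */
        int i;

        if (m4 == 0)
            return;                               /* nothing to do for OVER */

        if (m4 == 0xffffffff && srca == 0xff)
        {
            for (i = 0; i < 4; i++)               /* fully covered, opaque  */
                dst[i] = src;
        }
        else
        {
            for (i = 0; i < 4; i++)               /* mixed coverage         */
                dst[i] = blend_one (dst[i], src, srca, mask[i]);
        }
    }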
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeOver_n_8_8888
+ * fast_composite_over_n_8_8888
  */
 
 pixman_bool_t
-pixmanFillsse2 (uint32_t *bits,
+pixman_fill_sse2 (uint32_t *bits,
 		 int stride,
 		 int bpp,
 		 int x,
@@ -3350,7 +3350,7 @@ pixmanFillsse2 (uint32_t *bits,
     uint32_t	byte_width;
     uint8_t	    *byte_line;
 
-    __m128i xmmDef;
+    __m128i xmm_def;
 
     if (bpp == 16 && (data >> 16 != (data & 0xffff)))
 	return FALSE;
@@ -3373,8 +3373,8 @@ pixmanFillsse2 (uint32_t *bits,
         stride *= 4;
     }
 
-    cachePrefetch ((__m128i*)byte_line);
-    xmmDef = createMask_2x32_128 (data, data);
+    cache_prefetch ((__m128i*)byte_line);
+    xmm_def = create_mask_2x32_128 (data, data);
 
     while (height--)
     {
@@ -3384,7 +3384,7 @@ pixmanFillsse2 (uint32_t *bits,
         w = byte_width;
 
 
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)d);
 
         while (w >= 2 && ((unsigned long)d & 3))
         {
@@ -3401,20 +3401,20 @@ pixmanFillsse2 (uint32_t *bits,
             d += 4;
         }
 
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)d);
 
         while (w >= 128)
         {
-            cachePrefetch (((__m128i*)d) + 12);
+            cache_prefetch (((__m128i*)d) + 12);
 
-            save128Aligned ((__m128i*)(d),     xmmDef);
-            save128Aligned ((__m128i*)(d+16),  xmmDef);
-            save128Aligned ((__m128i*)(d+32),  xmmDef);
-            save128Aligned ((__m128i*)(d+48),  xmmDef);
-            save128Aligned ((__m128i*)(d+64),  xmmDef);
-            save128Aligned ((__m128i*)(d+80),  xmmDef);
-            save128Aligned ((__m128i*)(d+96),  xmmDef);
-            save128Aligned ((__m128i*)(d+112), xmmDef);
+            save_128_aligned ((__m128i*)(d),     xmm_def);
+            save_128_aligned ((__m128i*)(d+16),  xmm_def);
+            save_128_aligned ((__m128i*)(d+32),  xmm_def);
+            save_128_aligned ((__m128i*)(d+48),  xmm_def);
+            save_128_aligned ((__m128i*)(d+64),  xmm_def);
+            save_128_aligned ((__m128i*)(d+80),  xmm_def);
+            save_128_aligned ((__m128i*)(d+96),  xmm_def);
+            save_128_aligned ((__m128i*)(d+112), xmm_def);
 
             d += 128;
             w -= 128;
@@ -3422,23 +3422,23 @@ pixmanFillsse2 (uint32_t *bits,
 
         if (w >= 64)
         {
-            cachePrefetch (((__m128i*)d) + 8);
+            cache_prefetch (((__m128i*)d) + 8);
 
-            save128Aligned ((__m128i*)(d),     xmmDef);
-            save128Aligned ((__m128i*)(d+16),  xmmDef);
-            save128Aligned ((__m128i*)(d+32),  xmmDef);
-            save128Aligned ((__m128i*)(d+48),  xmmDef);
+            save_128_aligned ((__m128i*)(d),     xmm_def);
+            save_128_aligned ((__m128i*)(d+16),  xmm_def);
+            save_128_aligned ((__m128i*)(d+32),  xmm_def);
+            save_128_aligned ((__m128i*)(d+48),  xmm_def);
 
             d += 64;
             w -= 64;
         }
 
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)d);
 
         if (w >= 32)
         {
-            save128Aligned ((__m128i*)(d),     xmmDef);
-            save128Aligned ((__m128i*)(d+16),  xmmDef);
+            save_128_aligned ((__m128i*)(d),     xmm_def);
+            save_128_aligned ((__m128i*)(d+16),  xmm_def);
 
             d += 32;
             w -= 32;
@@ -3446,13 +3446,13 @@ pixmanFillsse2 (uint32_t *bits,
 
         if (w >= 16)
         {
-            save128Aligned ((__m128i*)(d),     xmmDef);
+            save_128_aligned ((__m128i*)(d),     xmm_def);
 
             d += 16;
             w -= 16;
         }
 
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)d);
 
         while (w >= 4)
         {
@@ -3475,7 +3475,7 @@ pixmanFillsse2 (uint32_t *bits,
 }
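
pixman_fill_sse2 above follows the usual wide-fill recipe: replicate the value into a 128-bit register (xmm_def), write single units until the pointer is 16-byte aligned, stream aligned stores in unrolled 128/64/32/16-byte blocks with prefetching, then finish the remainder. The same shape in plain C, for orientation only:

    #include <stdint.h>
    #include <string.h>

    static void
    fill_bytes (uint8_t *d, uint8_t v, size_t n)
    {
        while (n && ((uintptr_t) d & 15))   /* head: walk up to alignment    */
        {
            *d++ = v;
            n--;
        }

        while (n >= 16)                     /* body: aligned 16-byte stores, */
        {                                   /* standing in for the unrolled  */
            memset (d, v, 16);              /* save_128_aligned runs         */
            d += 16;
            n -= 16;
        }

        while (n--)                         /* tail                          */
            *d++ = v;
    }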
 
 static void
-sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
+sse2_composite_src_n_8_8888 (pixman_implementation_t *imp,
 				      pixman_op_t op,
 				     pixman_image_t * src_image,
 				     pixman_image_t * mask_image,
@@ -3490,43 +3490,43 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				     int32_t     height)
 {
     uint32_t	src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint32_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t    m;
 
-    __m128i xmmSrc, xmmDef;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src, xmm_def;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
     {
-        pixmanFillsse2 (dst_image->bits.bits, dst_image->bits.rowstride,
+        pixman_fill_sse2 (dst_image->bits.bits, dst_image->bits.rowstride,
                         PIXMAN_FORMAT_BPP (dst_image->bits.format),
                         dest_x, dest_y, width, height, 0);
         return;
     }
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
-    xmmDef = createMask_2x32_128 (src, src);
-    xmmSrc = expandPixel_32_1x128 (src);
+    xmm_def = create_mask_2x32_128 (src, src);
+    xmm_src = expand_pixel_32_1x128 (src);
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        mask = maskLine;
-        maskLine += maskStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        mask = mask_line;
+        mask_line += mask_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && (unsigned long)dst & 15)
         {
@@ -3534,7 +3534,7 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 
             if (m)
             {
-                *dst = pack_1x64_32 (pixMultiply_1x64 (_mm_movepi64_pi64 (xmmSrc), expandPixel_8_1x64 (m)));
+                *dst = pack_1x64_32 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
             }
             else
             {
@@ -3546,38 +3546,38 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)mask);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)mask);
+            cache_prefetch_next ((__m128i*)dst);
 
             m = *((uint32_t*)mask);
 
             if (srca == 0xff && m == 0xffffffff)
             {
-                save128Aligned ((__m128i*)dst, xmmDef);
+                save_128_aligned ((__m128i*)dst, xmm_def);
             }
             else if (m)
             {
-                xmmMask = unpack_32_1x128 (m);
-                xmmMask = _mm_unpacklo_epi8 (xmmMask, _mm_setzero_si128());
+                xmm_mask = unpack_32_1x128 (m);
+                xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
 
                 /* Unpacking */
-                unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
+                unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
 
-                expandAlphaRev_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+                expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-                pixMultiply_2x128 (&xmmSrc, &xmmSrc, &xmmMaskLo, &xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+                pix_multiply_2x128 (&xmm_src, &xmm_src, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-                save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmMaskLo, xmmMaskHi));
+                save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_mask_lo, xmm_mask_hi));
             }
             else
             {
-                save128Aligned ((__m128i*)dst, _mm_setzero_si128());
+                save_128_aligned ((__m128i*)dst, _mm_setzero_si128());
             }
 
             w -= 4;
@@ -3591,7 +3591,7 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 
             if (m)
             {
-                *dst = pack_1x64_32 (pixMultiply_1x64 (_mm_movepi64_pi64 (xmmSrc), expandPixel_8_1x64 (m)));
+                *dst = pack_1x64_32 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_src), expand_pixel_8_1x64 (m)));
             }
             else
             {
@@ -3607,11 +3607,11 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeOver_n_8_0565
+ * fast_composite_over_n_8_0565
  */
 
 static void
-sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
+sse2_composite_over_n_8_0565 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -3626,16 +3626,16 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  int32_t     height)
 {
     uint32_t	src, srca;
-    uint16_t	*dstLine, *dst, d;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint16_t	*dst_line, *dst, d;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t m;
-    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmxDest;
+    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
 
-    __m128i xmmSrc, xmmAlpha;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
-    __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
+    __m128i xmm_src, xmm_alpha;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+    __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -3643,25 +3643,25 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
-    xmmSrc = expandPixel_32_1x128 (src);
-    xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmsrc_x = _mm_movepi64_pi64 (xmmSrc);
-    mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
+    xmm_src = expand_pixel_32_1x128 (src);
+    xmm_alpha = expand_alpha_1x128 (xmm_src);
+    mmsrc_x = _mm_movepi64_pi64 (xmm_src);
+    mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        mask = maskLine;
-        maskLine += maskStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        mask = mask_line;
+        mask_line += mask_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && (unsigned long)dst & 15)
         {
@@ -3670,13 +3670,13 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expandAlphaRev_1x64 (unpack_32_1x64 (m));
-                mmxDest = expand565_16_1x64 (d);
+                mmmask_x = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+                mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                                 &mmxAlpha,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                                 &mmx_alpha,
                                                                  &mmmask_x,
-                                                                 &mmxDest)));
+                                                                 &mmx_dest)));
             }
 
             w--;
@@ -3684,31 +3684,31 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 8)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)mask);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)mask);
+            cache_prefetch_next ((__m128i*)dst);
 
-            xmmDst = load128Aligned ((__m128i*) dst);
-            unpack565_128_4x128 (xmmDst, &xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3);
+            xmm_dst = load_128_aligned ((__m128i*) dst);
+            unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
 
             m = *((uint32_t*)mask);
             mask += 4;
 
             if (m)
             {
-                xmmMask = unpack_32_1x128 (m);
-                xmmMask = _mm_unpacklo_epi8 (xmmMask, _mm_setzero_si128());
+                xmm_mask = unpack_32_1x128 (m);
+                xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
 
                 /* Unpacking */
-                unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
+                unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
 
-                expandAlphaRev_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
-                inOver_2x128 (&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmDst0, &xmmDst1);
+                expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+                in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst0, &xmm_dst1);
             }
 
             m = *((uint32_t*)mask);
@@ -3716,17 +3716,17 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 
             if (m)
             {
-                xmmMask = unpack_32_1x128 (m);
-                xmmMask = _mm_unpacklo_epi8 (xmmMask, _mm_setzero_si128());
+                xmm_mask = unpack_32_1x128 (m);
+                xmm_mask = _mm_unpacklo_epi8 (xmm_mask, _mm_setzero_si128());
 
                 /* Unpacking */
-                unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
+                unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
 
-                expandAlphaRev_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
-                inOver_2x128 (&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmDst2, &xmmDst3);
+                expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+                in_over_2x128 (&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst2, &xmm_dst3);
             }
 
-            save128Aligned ((__m128i*)dst, pack565_4x128_128 (&xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3));
+            save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
 
             w -= 8;
             dst += 8;
@@ -3739,13 +3739,13 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmmask_x = expandAlphaRev_1x64 (unpack_32_1x64 (m));
-                mmxDest = expand565_16_1x64 (d);
+                mmmask_x = expand_alpha_rev_1x64 (unpack_32_1x64 (m));
+                mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                                 &mmxAlpha,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                                 &mmx_alpha,
                                                                  &mmmask_x,
-                                                                 &mmxDest)));
+                                                                 &mmx_dest)));
             }
 
             w--;
@@ -3757,11 +3757,11 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_Composite_over_pixbuf_0565
+ * fast_composite_over_pixbuf_0565
  */
 
 static void
-sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
+sse2_composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -3775,18 +3775,18 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				  int32_t     width,
 				  int32_t     height)
 {
-    uint16_t	*dstLine, *dst, d;
-    uint32_t	*srcLine, *src, s;
-    int		dstStride, srcStride;
+    uint16_t	*dst_line, *dst, d;
+    uint32_t	*src_line, *src, s;
+    int		dst_stride, src_stride;
     uint16_t	w;
     uint32_t    opaque, zero;
 
     __m64 ms;
-    __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
-    __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
+    __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
 #if 0
     /* FIXME
@@ -3799,15 +3799,15 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        src = srcLine;
-        srcLine += srcStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        src = src_line;
+        src_line += src_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && (unsigned long)dst & 15)
         {
@@ -3816,58 +3816,58 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
             ms = unpack_32_1x64 (s);
 
-            *dst++ = pack565_32_16 (pack_1x64_32 (overRevNonPre_1x64(ms, expand565_16_1x64 (d))));
+            *dst++ = pack_565_32_16 (pack_1x64_32 (over_rev_non_pre_1x64(ms, expand565_16_1x64 (d))));
             w--;
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 8)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)src);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)src);
+            cache_prefetch_next ((__m128i*)dst);
 
             /* First round */
-            xmmSrc = load128Unaligned((__m128i*)src);
-            xmmDst = load128Aligned  ((__m128i*)dst);
+            xmm_src = load_128_unaligned((__m128i*)src);
+            xmm_dst = load_128_aligned  ((__m128i*)dst);
 
-            opaque = isOpaque (xmmSrc);
-	    zero = isZero (xmmSrc);
+            opaque = is_opaque (xmm_src);
+	    zero = is_zero (xmm_src);
 
-	    unpack565_128_4x128 (xmmDst, &xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3);
-            unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
+	    unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+            unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
 
             /* preload next round*/
-            xmmSrc = load128Unaligned((__m128i*)(src+4));
+            xmm_src = load_128_unaligned((__m128i*)(src+4));
 	    
             if (opaque)
             {
-                invertColors_2x128 (xmmSrcLo, xmmSrcHi, &xmmDst0, &xmmDst1);
+                invert_colors_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst0, &xmm_dst1);
             }
             else if (!zero)
             {
-                overRevNonPre_2x128 (xmmSrcLo, xmmSrcHi, &xmmDst0, &xmmDst1);
+                over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst0, &xmm_dst1);
             }
 
             /* Second round */
-	    opaque = isOpaque (xmmSrc);
-	    zero = isZero (xmmSrc);
+	    opaque = is_opaque (xmm_src);
+	    zero = is_zero (xmm_src);
 
-            unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
+            unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
 
             if (opaque)
             {
-                invertColors_2x128 (xmmSrcLo, xmmSrcHi, &xmmDst2, &xmmDst3);
+                invert_colors_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst2, &xmm_dst3);
             }
             else if (zero)
             {
-                overRevNonPre_2x128 (xmmSrcLo, xmmSrcHi, &xmmDst2, &xmmDst3);
+                over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst2, &xmm_dst3);
             }
 
-            save128Aligned ((__m128i*)dst, pack565_4x128_128 (&xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3));
+            save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
 
             w -= 8;
             src += 8;
@@ -3881,7 +3881,7 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
             ms = unpack_32_1x64 (s);
 
-            *dst++ = pack565_32_16 (pack_1x64_32 (overRevNonPre_1x64(ms, expand565_16_1x64 (d))));
+            *dst++ = pack_565_32_16 (pack_1x64_32 (over_rev_non_pre_1x64(ms, expand565_16_1x64 (d))));
             w--;
         }
     }
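
For reference, the is_opaque / is_zero tests in the hunk above classify each block of four a8r8g8b8 source pixels so that fully opaque blocks can skip the blend and fully transparent blocks can skip the store. A minimal scalar sketch of that idea; the helper names below are illustrative, not pixman API:

    #include <stdint.h>

    /* "Opaque" when every alpha byte is 0xff, "zero" when all bits are 0. */
    static int
    block_is_opaque (const uint32_t s[4])
    {
        return ((s[0] & s[1] & s[2] & s[3]) >> 24) == 0xff;
    }

    static int
    block_is_zero (const uint32_t s[4])
    {
        return (s[0] | s[1] | s[2] | s[3]) == 0;
    }
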
@@ -3890,11 +3890,11 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_Composite_over_pixbuf_8888
+ * fast_composite_over_pixbuf_8888
  */
 
 static void
-sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
+sse2_composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
@@ -3908,17 +3908,17 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  int32_t     width,
 				  int32_t     height)
 {
-    uint32_t	*dstLine, *dst, d;
-    uint32_t	*srcLine, *src, s;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst, d;
+    uint32_t	*src_line, *src, s;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint32_t    opaque, zero;
 
-    __m128i xmmSrcLo, xmmSrcHi;
-    __m128i xmmDstLo, xmmDstHi;
+    __m128i xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst_lo, xmm_dst_hi;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
 #if 0
     /* FIXME
@@ -3931,58 +3931,58 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        src = srcLine;
-        srcLine += srcStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        src = src_line;
+        src_line += src_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && (unsigned long)dst & 15)
         {
             s = *src++;
             d = *dst;
 
-            *dst++ = pack_1x64_32 (overRevNonPre_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
+            *dst++ = pack_1x64_32 (over_rev_non_pre_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
 
             w--;
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)src);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)src);
+            cache_prefetch_next ((__m128i*)dst);
 
-            xmmSrcHi = load128Unaligned((__m128i*)src);
+            xmm_src_hi = load_128_unaligned((__m128i*)src);
 
-            opaque = isOpaque (xmmSrcHi);
-	    zero = isZero (xmmSrcHi);
+            opaque = is_opaque (xmm_src_hi);
+	    zero = is_zero (xmm_src_hi);
 
-            unpack_128_2x128 (xmmSrcHi, &xmmSrcLo, &xmmSrcHi);
+            unpack_128_2x128 (xmm_src_hi, &xmm_src_lo, &xmm_src_hi);
 
             if (opaque)
             {
-                invertColors_2x128( xmmSrcLo, xmmSrcHi, &xmmDstLo, &xmmDstHi);
+                invert_colors_2x128( xmm_src_lo, xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-                save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+                save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
             }
             else if (!zero)
             {
-                xmmDstHi = load128Aligned  ((__m128i*)dst);
+                xmm_dst_hi = load_128_aligned  ((__m128i*)dst);
 
-                unpack_128_2x128 (xmmDstHi, &xmmDstLo, &xmmDstHi);
+                unpack_128_2x128 (xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-                overRevNonPre_2x128 (xmmSrcLo, xmmSrcHi, &xmmDstLo, &xmmDstHi);
+                over_rev_non_pre_2x128 (xmm_src_lo, xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-                save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+                save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
             }
 
             w -= 4;
@@ -3995,7 +3995,7 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
             s = *src++;
             d = *dst;
 
-            *dst++ = pack_1x64_32 (overRevNonPre_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
+            *dst++ = pack_1x64_32 (over_rev_non_pre_1x64 (unpack_32_1x64 (s), unpack_32_1x64 (d)));
 
             w--;
         }
@@ -4005,11 +4005,11 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeOver_n_8888_0565_ca
+ * fast_composite_over_n_8888_0565_ca
  */
 
 static void
-sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
+sse2_composite_over_n_8888_0565_ca (pixman_implementation_t *imp,
 				       pixman_op_t op,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
@@ -4024,42 +4024,42 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				      int32_t     height)
 {
     uint32_t	src;
-    uint16_t	*dstLine, *dst, d;
-    uint32_t	*maskLine, *mask, m;
-    int	dstStride, maskStride;
+    uint16_t	*dst_line, *dst, d;
+    uint32_t	*mask_line, *mask, m;
+    int	dst_stride, mask_stride;
     int w;
-    uint32_t packCmp;
+    uint32_t pack_cmp;
 
-    __m128i xmmSrc, xmmAlpha;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
-    __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
+    __m128i xmm_src, xmm_alpha;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+    __m128i xmm_dst, xmm_dst0, xmm_dst1, xmm_dst2, xmm_dst3;
 
-    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmxDest;
+    __m64 mmsrc_x, mmx_alpha, mmmask_x, mmx_dest;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
         return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
-    xmmSrc = expandPixel_32_1x128 (src);
-    xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmsrc_x = _mm_movepi64_pi64 (xmmSrc);
-    mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
+    xmm_src = expand_pixel_32_1x128 (src);
+    xmm_alpha = expand_alpha_1x128 (xmm_src);
+    mmsrc_x = _mm_movepi64_pi64 (xmm_src);
+    mmx_alpha = _mm_movepi64_pi64 (xmm_alpha);
 
     while (height--)
     {
         w = width;
-        mask = maskLine;
-        dst = dstLine;
-        maskLine += maskStride;
-        dstLine += dstStride;
+        mask = mask_line;
+        dst = dst_line;
+        mask_line += mask_stride;
+        dst_line += dst_stride;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && ((unsigned long)dst & 15))
         {
@@ -4069,12 +4069,12 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
             {
                 d = *dst;
                 mmmask_x = unpack_32_1x64 (m);
-                mmxDest = expand565_16_1x64 (d);
+                mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                                 &mmxAlpha,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                                 &mmx_alpha,
                                                                  &mmmask_x,
-                                                                 &mmxDest)));
+                                                                 &mmx_dest)));
             }
 
             w--;
@@ -4083,44 +4083,44 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 8)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)mask);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)mask);
+            cache_prefetch_next ((__m128i*)dst);
 
             /* First round */
-            xmmMask = load128Unaligned((__m128i*)mask);
-            xmmDst = load128Aligned((__m128i*)dst);
+            xmm_mask = load_128_unaligned((__m128i*)mask);
+            xmm_dst = load_128_aligned((__m128i*)dst);
 
-            packCmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmmMask, _mm_setzero_si128()));
+            pack_cmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128()));
 
-            unpack565_128_4x128 (xmmDst, &xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3);
-            unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
+            unpack_565_128_4x128 (xmm_dst, &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
+            unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
 
             /* preload next round*/
-            xmmMask = load128Unaligned((__m128i*)(mask+4));
+            xmm_mask = load_128_unaligned((__m128i*)(mask+4));
             /* preload next round*/
 
-            if (packCmp != 0xffff)
+            if (pack_cmp != 0xffff)
             {
-                inOver_2x128(&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmDst0, &xmmDst1);
+                in_over_2x128(&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst0, &xmm_dst1);
             }
 
             /* Second round */
-            packCmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmmMask, _mm_setzero_si128()));
+            pack_cmp = _mm_movemask_epi8 (_mm_cmpeq_epi32 (xmm_mask, _mm_setzero_si128()));
 
-            unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
+            unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
 
-            if (packCmp != 0xffff)
+            if (pack_cmp != 0xffff)
             {
-                inOver_2x128(&xmmSrc, &xmmSrc, &xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmDst2, &xmmDst3);
+                in_over_2x128(&xmm_src, &xmm_src, &xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_dst2, &xmm_dst3);
             }
 
-            save128Aligned ((__m128i*)dst, pack565_4x128_128 (&xmmDst0, &xmmDst1, &xmmDst2, &xmmDst3));
+            save_128_aligned ((__m128i*)dst, pack_565_4x128_128 (&xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3));
 
             w -= 8;
             dst += 8;
@@ -4135,12 +4135,12 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
             {
                 d = *dst;
                 mmmask_x = unpack_32_1x64 (m);
-                mmxDest = expand565_16_1x64 (d);
+                mmx_dest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
-                                                                 &mmxAlpha,
+                *dst = pack_565_32_16 (pack_1x64_32 (in_over_1x64 (&mmsrc_x,
+                                                                 &mmx_alpha,
                                                                  &mmmask_x,
-                                                                 &mmxDest)));
+                                                                 &mmx_dest)));
             }
 
             w--;
@@ -4153,11 +4153,11 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeIn_n_8_8
+ * fast_composite_in_n_8_8
  */
 
 static void
-sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
+sse2_composite_in_n_8_8 (pixman_implementation_t *imp,
 			 pixman_op_t op,
 			pixman_image_t * src_image,
 			pixman_image_t * mask_image,
@@ -4171,19 +4171,19 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			int32_t     width,
 			int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w, d, m;
     uint32_t	src;
     uint8_t	sa;
 
-    __m128i xmmAlpha;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
+    __m128i xmm_alpha;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -4191,50 +4191,50 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     if (sa == 0)
         return;
 
-    xmmAlpha = expandAlpha_1x128 (expandPixel_32_1x128 (src));
+    xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        mask = maskLine;
-        maskLine += maskStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        mask = mask_line;
+        mask_line += mask_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && ((unsigned long)dst & 15))
         {
             m = (uint32_t) *mask++;
             d = (uint32_t) *dst;
 
-            *dst++ = (uint8_t) pack_1x64_32 (pixMultiply_1x64 (pixMultiply_1x64 (_mm_movepi64_pi64 (xmmAlpha), unpack_32_1x64 (m)),
+            *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
                                                                unpack_32_1x64 (d)));
             w--;
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 16)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)mask);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)mask);
+            cache_prefetch_next ((__m128i*)dst);
 
-            xmmMask = load128Unaligned((__m128i*)mask);
-            xmmDst = load128Aligned((__m128i*)dst);
+            xmm_mask = load_128_unaligned((__m128i*)mask);
+            xmm_dst = load_128_aligned((__m128i*)dst);
 
-            unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
-            unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+            unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+            unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-            pixMultiply_2x128 (&xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
-            pixMultiply_2x128 (&xmmMaskLo, &xmmMaskHi, &xmmDstLo, &xmmDstHi, &xmmDstLo, &xmmDstHi);
+            pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
+            pix_multiply_2x128 (&xmm_mask_lo, &xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-            save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+            save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
             mask += 16;
             dst += 16;
@@ -4246,7 +4246,7 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
             m = (uint32_t) *mask++;
             d = (uint32_t) *dst;
 
-            *dst++ = (uint8_t) pack_1x64_32 (pixMultiply_1x64 (pixMultiply_1x64 (_mm_movepi64_pi64 (xmmAlpha), unpack_32_1x64 (m)),
+            *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
                                                                unpack_32_1x64 (d)));
             w--;
         }
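
For reference, the loops above reduce to a per-byte IN: the destination byte is scaled by the solid source's alpha and by the a8 mask, both with a rounded divide by 255. A minimal scalar sketch under that reading; mul_un8 and in_n_8_8_pixel are illustrative names, not the library's:

    #include <stdint.h>

    /* Rounded (x * y) / 255, the per-channel multiply the SSE2 helpers implement. */
    static uint8_t
    mul_un8 (uint8_t x, uint8_t y)
    {
        uint32_t t = (uint32_t) x * y + 0x80;
        return (uint8_t) ((t + (t >> 8)) >> 8);
    }

    /* dst = dst IN (solid_alpha IN mask), as in the path above */
    static uint8_t
    in_n_8_8_pixel (uint8_t solid_alpha, uint8_t m, uint8_t d)
    {
        return mul_un8 (mul_un8 (solid_alpha, m), d);
    }
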
@@ -4256,11 +4256,11 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeIn_8_8
+ * fast_composite_in_8_8
  */
 
 static void
-sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
+sse2_composite_in_8_8 (pixman_implementation_t *imp,
 		       pixman_op_t op,
 		      pixman_image_t * src_image,
 		      pixman_image_t * mask_image,
@@ -4274,58 +4274,58 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      int32_t     width,
 		      int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	srcStride, dstStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*src_line, *src;
+    int	src_stride, dst_stride;
     uint16_t	w;
     uint32_t    s, d;
 
-    __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
+    __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        src = srcLine;
-        srcLine += srcStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        src = src_line;
+        src_line += src_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && ((unsigned long)dst & 15))
         {
             s = (uint32_t) *src++;
             d = (uint32_t) *dst;
 
-            *dst++ = (uint8_t) pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (s),unpack_32_1x64 (d)));
+            *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s),unpack_32_1x64 (d)));
             w--;
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 16)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)src);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)src);
+            cache_prefetch_next ((__m128i*)dst);
 
-            xmmSrc = load128Unaligned((__m128i*)src);
-            xmmDst = load128Aligned((__m128i*)dst);
+            xmm_src = load_128_unaligned((__m128i*)src);
+            xmm_dst = load_128_aligned((__m128i*)dst);
 
-            unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
-            unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+            unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+            unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-            pixMultiply_2x128 (&xmmSrcLo, &xmmSrcHi, &xmmDstLo, &xmmDstHi, &xmmDstLo, &xmmDstHi);
+            pix_multiply_2x128 (&xmm_src_lo, &xmm_src_hi, &xmm_dst_lo, &xmm_dst_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-            save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+            save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
             src += 16;
             dst += 16;
@@ -4337,7 +4337,7 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
             s = (uint32_t) *src++;
             d = (uint32_t) *dst;
 
-            *dst++ = (uint8_t) pack_1x64_32 (pixMultiply_1x64 (unpack_32_1x64 (s),unpack_32_1x64 (d)));
+            *dst++ = (uint8_t) pack_1x64_32 (pix_multiply_1x64 (unpack_32_1x64 (s),unpack_32_1x64 (d)));
             w--;
         }
     }
@@ -4346,11 +4346,11 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeAdd_8888_8_8
+ * fast_composite_add_8888_8_8
  */
 
 static void
-sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
+sse2_composite_add_8888_8_8 (pixman_implementation_t *imp,
 				pixman_op_t op,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
@@ -4364,20 +4364,20 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			       int32_t     width,
 			       int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int	dstStride, maskStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*mask_line, *mask;
+    int	dst_stride, mask_stride;
     uint16_t	w;
     uint32_t	src;
     uint8_t	sa;
     uint32_t m, d;
 
-    __m128i xmmAlpha;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
+    __m128i xmm_alpha;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -4385,52 +4385,52 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     if (sa == 0)
         return;
 
-    xmmAlpha = expandAlpha_1x128 (expandPixel_32_1x128 (src));
+    xmm_alpha = expand_alpha_1x128 (expand_pixel_32_1x128 (src));
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        mask = maskLine;
-        maskLine += maskStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        mask = mask_line;
+        mask_line += mask_stride;
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w && ((unsigned long)dst & 15))
         {
             m = (uint32_t) *mask++;
             d = (uint32_t) *dst;
 
-            *dst++ = (uint8_t) pack_1x64_32 (_mm_adds_pu16 (pixMultiply_1x64 (_mm_movepi64_pi64 (xmmAlpha), unpack_32_1x64 (m)),
+            *dst++ = (uint8_t) pack_1x64_32 (_mm_adds_pu16 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
                                                                               unpack_32_1x64 (d)));
             w--;
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)mask);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)dst);
 
         while (w >= 16)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)mask);
-            cachePrefetchNext ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)mask);
+            cache_prefetch_next ((__m128i*)dst);
 
-            xmmMask = load128Unaligned((__m128i*)mask);
-            xmmDst = load128Aligned((__m128i*)dst);
+            xmm_mask = load_128_unaligned((__m128i*)mask);
+            xmm_dst = load_128_aligned((__m128i*)dst);
 
-            unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
-            unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+            unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+            unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-            pixMultiply_2x128 (&xmmAlpha, &xmmAlpha, &xmmMaskLo, &xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+            pix_multiply_2x128 (&xmm_alpha, &xmm_alpha, &xmm_mask_lo, &xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-            xmmDstLo = _mm_adds_epu16 (xmmMaskLo, xmmDstLo);
-            xmmDstHi = _mm_adds_epu16 (xmmMaskHi, xmmDstHi);
+            xmm_dst_lo = _mm_adds_epu16 (xmm_mask_lo, xmm_dst_lo);
+            xmm_dst_hi = _mm_adds_epu16 (xmm_mask_hi, xmm_dst_hi);
 
-            save128Aligned ((__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+            save_128_aligned ((__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
 
             mask += 16;
             dst += 16;
@@ -4442,7 +4442,7 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
             m = (uint32_t) *mask++;
             d = (uint32_t) *dst;
 
-            *dst++ = (uint8_t) pack_1x64_32 (_mm_adds_pu16 (pixMultiply_1x64 (_mm_movepi64_pi64 (xmmAlpha), unpack_32_1x64 (m)),
+            *dst++ = (uint8_t) pack_1x64_32 (_mm_adds_pu16 (pix_multiply_1x64 (_mm_movepi64_pi64 (xmm_alpha), unpack_32_1x64 (m)),
                                                                               unpack_32_1x64 (d)));
             w--;
         }
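
For reference, the ADD path above comes down to a saturating per-byte sum of the destination and the mask-scaled solid alpha, which is what the _mm_adds_* intrinsics provide. A minimal scalar sketch under that reading; the function name is illustrative, not pixman API:

    #include <stdint.h>

    /* dst = clamp (dst + solid_alpha * mask / 255): rounded multiply, then saturating add */
    static uint8_t
    add_n_8_8_pixel (uint8_t solid_alpha, uint8_t m, uint8_t d)
    {
        uint32_t t   = (uint32_t) solid_alpha * m + 0x80;
        uint32_t sm  = (t + (t >> 8)) >> 8;          /* ~ solid_alpha * m / 255 */
        uint32_t sum = sm + d;
        return (uint8_t) (sum > 0xff ? 0xff : sum);  /* saturate like _mm_adds_pu16 */
    }
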
@@ -4452,11 +4452,11 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeAdd_8000_8000
+ * fast_composite_add_8000_8000
  */
 
 static void
-sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
+sse2_composite_add_8000_8000 (pixman_implementation_t *imp,
 				 pixman_op_t op,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
@@ -4470,26 +4470,26 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				int32_t     width,
 				int32_t     height)
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint8_t	*dst_line, *dst;
+    uint8_t	*src_line, *src;
+    int	dst_stride, src_stride;
     uint16_t	w;
     uint16_t	t;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-        dst = dstLine;
-        src = srcLine;
+        dst = dst_line;
+        src = src_line;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
 
-        dstLine += dstStride;
-        srcLine += srcStride;
+        dst_line += dst_stride;
+        src_line += src_stride;
         w = width;
 
         /* Small head */
@@ -4500,7 +4500,7 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
             w--;
         }
 
-        coreCombineAddUsse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
+        core_combine_add_u_sse2 ((uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
 
         /* Small tail */
         dst += w & 0xfffc;
@@ -4520,10 +4520,10 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeAdd_8888_8888
+ * fast_composite_add_8888_8888
  */
 static void
-sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
+sse2_composite_add_8888_8888 (pixman_implementation_t *imp,
 				 pixman_op_t 	op,
 				pixman_image_t *	src_image,
 				pixman_image_t *	mask_image,
@@ -4537,32 +4537,32 @@ sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				int32_t     width,
 				int32_t     height)
 {
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
+    uint32_t	*dst_line, *dst;
+    uint32_t	*src_line, *src;
+    int	dst_stride, src_stride;
 
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-        dst = dstLine;
-        dstLine += dstStride;
-        src = srcLine;
-        srcLine += srcStride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        src = src_line;
+        src_line += src_stride;
 
-        coreCombineAddUsse2 (dst, src, NULL, width);
+        core_combine_add_u_sse2 (dst, src, NULL, width);
     }
 
     _mm_empty();
 }
 
 /* -------------------------------------------------------------------------------------------------
- * sse2_CompositeCopyArea
+ * sse2_composite_copy_area
  */
 
 static pixman_bool_t
-pixmanBltsse2 (uint32_t *src_bits,
+pixman_blt_sse2 (uint32_t *src_bits,
 	       uint32_t *dst_bits,
 	       int src_stride,
 	       int dst_stride,
@@ -4604,8 +4604,8 @@ pixmanBltsse2 (uint32_t *src_bits,
         return FALSE;
     }
 
-    cachePrefetch ((__m128i*)src_bytes);
-    cachePrefetch ((__m128i*)dst_bytes);
+    cache_prefetch ((__m128i*)src_bytes);
+    cache_prefetch ((__m128i*)dst_bytes);
 
     while (height--)
     {
@@ -4616,8 +4616,8 @@ pixmanBltsse2 (uint32_t *src_bits,
         dst_bytes += dst_stride;
         w = byte_width;
 
-        cachePrefetchNext ((__m128i*)s);
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)s);
+        cache_prefetch_next ((__m128i*)d);
 
         while (w >= 2 && ((unsigned long)d & 3))
         {
@@ -4636,46 +4636,46 @@ pixmanBltsse2 (uint32_t *src_bits,
             d += 4;
         }
 
-        cachePrefetchNext ((__m128i*)s);
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)s);
+        cache_prefetch_next ((__m128i*)d);
 
         while (w >= 64)
         {
             __m128i xmm0, xmm1, xmm2, xmm3;
 
             /* 128 bytes ahead */
-            cachePrefetch (((__m128i*)s) + 8);
-            cachePrefetch (((__m128i*)d) + 8);
+            cache_prefetch (((__m128i*)s) + 8);
+            cache_prefetch (((__m128i*)d) + 8);
 
-            xmm0 = load128Unaligned ((__m128i*)(s));
-            xmm1 = load128Unaligned ((__m128i*)(s+16));
-            xmm2 = load128Unaligned ((__m128i*)(s+32));
-            xmm3 = load128Unaligned ((__m128i*)(s+48));
+            xmm0 = load_128_unaligned ((__m128i*)(s));
+            xmm1 = load_128_unaligned ((__m128i*)(s+16));
+            xmm2 = load_128_unaligned ((__m128i*)(s+32));
+            xmm3 = load_128_unaligned ((__m128i*)(s+48));
 
-            save128Aligned ((__m128i*)(d),    xmm0);
-            save128Aligned ((__m128i*)(d+16), xmm1);
-            save128Aligned ((__m128i*)(d+32), xmm2);
-            save128Aligned ((__m128i*)(d+48), xmm3);
+            save_128_aligned ((__m128i*)(d),    xmm0);
+            save_128_aligned ((__m128i*)(d+16), xmm1);
+            save_128_aligned ((__m128i*)(d+32), xmm2);
+            save_128_aligned ((__m128i*)(d+48), xmm3);
 
             s += 64;
             d += 64;
             w -= 64;
         }
 
-        cachePrefetchNext ((__m128i*)s);
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)s);
+        cache_prefetch_next ((__m128i*)d);
 
         while (w >= 16)
         {
-            save128Aligned ((__m128i*)d, load128Unaligned ((__m128i*)s) );
+            save_128_aligned ((__m128i*)d, load_128_unaligned ((__m128i*)s) );
 
             w -= 16;
             d += 16;
             s += 16;
         }
 
-        cachePrefetchNext ((__m128i*)s);
-        cachePrefetchNext ((__m128i*)d);
+        cache_prefetch_next ((__m128i*)s);
+        cache_prefetch_next ((__m128i*)d);
 
         while (w >= 4)
         {
@@ -4701,7 +4701,7 @@ pixmanBltsse2 (uint32_t *src_bits,
 }
 
 static void
-sse2_CompositeCopyArea (pixman_implementation_t *imp,
+sse2_composite_copy_area (pixman_implementation_t *imp,
 			 pixman_op_t       op,
 			pixman_image_t *	src_image,
 			pixman_image_t *	mask_image,
@@ -4715,7 +4715,7 @@ sse2_CompositeCopyArea (pixman_implementation_t *imp,
 			int32_t		width,
 			int32_t		height)
 {
-    pixmanBltsse2 (src_image->bits.bits,
+    pixman_blt_sse2 (src_image->bits.bits,
 		    dst_image->bits.bits,
 		    src_image->bits.rowstride,
 		    dst_image->bits.rowstride,
@@ -4727,7 +4727,7 @@ sse2_CompositeCopyArea (pixman_implementation_t *imp,
 #if 0
 /* This code are buggy in MMX version, now the bug was translated to SSE2 version */
 void
-sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
+sse2_composite_over_x888_8_8888 (pixman_implementation_t *imp,
 				 pixman_op_t      op,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
@@ -4741,36 +4741,36 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				int32_t     width,
 				int32_t     height)
 {
-    uint32_t	*src, *srcLine, s;
-    uint32_t    *dst, *dstLine, d;
-    uint8_t	    *mask, *maskLine;
+    uint32_t	*src, *src_line, s;
+    uint32_t    *dst, *dst_line, d;
+    uint8_t	    *mask, *mask_line;
     uint32_t    m;
-    int		 srcStride, maskStride, dstStride;
+    int		 src_stride, mask_stride, dst_stride;
     uint16_t w;
 
-    __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
-    __m128i xmmDst, xmmDstLo, xmmDstHi;
-    __m128i xmmMask, xmmMaskLo, xmmMaskHi;
+    __m128i xmm_src, xmm_src_lo, xmm_src_hi;
+    __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
+    __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
 
     while (height--)
     {
-        src = srcLine;
-        srcLine += srcStride;
-        dst = dstLine;
-        dstLine += dstStride;
-        mask = maskLine;
-        maskLine += maskStride;
+        src = src_line;
+        src_line += src_stride;
+        dst = dst_line;
+        dst_line += dst_stride;
+        mask = mask_line;
+        mask_line += mask_stride;
 
         w = width;
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
-        cachePrefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
 
         while (w && (unsigned long)dst & 15)
         {
@@ -4782,9 +4782,9 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 
             if (m != 0xff)
             {
-                ms = inOver_1x64 (ms,
+                ms = in_over_1x64 (ms,
                                   mask_x00ff,
-                                  expandAlphaRev_1x64 (unpack_32_1x64 (m)),
+                                  expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
                                   unpack_32_1x64 (d));
             }
 
@@ -4793,39 +4793,39 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
         }
 
         /* call prefetch hint to optimize cache load*/
-        cachePrefetch ((__m128i*)src);
-        cachePrefetch ((__m128i*)dst);
-        cachePrefetch ((__m128i*)mask);
+        cache_prefetch ((__m128i*)src);
+        cache_prefetch ((__m128i*)dst);
+        cache_prefetch ((__m128i*)mask);
 
         while (w >= 4)
         {
             /* fill cache line with next memory */
-            cachePrefetchNext ((__m128i*)src);
-            cachePrefetchNext ((__m128i*)dst);
-            cachePrefetchNext ((__m128i*)mask);
+            cache_prefetch_next ((__m128i*)src);
+            cache_prefetch_next ((__m128i*)dst);
+            cache_prefetch_next ((__m128i*)mask);
 
             m = *(uint32_t*) mask;
-            xmmSrc = _mm_or_si128 (load128Unaligned ((__m128i*)src), Maskff000000);
+            xmm_src = _mm_or_si128 (load_128_unaligned ((__m128i*)src), mask_ff000000);
 
             if (m == 0xffffffff)
             {
-                save128Aligned ((__m128i*)dst, xmmSrc);
+                save_128_aligned ((__m128i*)dst, xmm_src);
             }
             else
             {
-                xmmDst = load128Aligned ((__m128i*)dst);
+                xmm_dst = load_128_aligned ((__m128i*)dst);
 
-                xmmMask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128());
+                xmm_mask = _mm_unpacklo_epi16 (unpack_32_1x128 (m), _mm_setzero_si128());
 
-                unpack_128_2x128 (xmmSrc, &xmmSrcLo, &xmmSrcHi);
-                unpack_128_2x128 (xmmMask, &xmmMaskLo, &xmmMaskHi);
-                unpack_128_2x128 (xmmDst, &xmmDstLo, &xmmDstHi);
+                unpack_128_2x128 (xmm_src, &xmm_src_lo, &xmm_src_hi);
+                unpack_128_2x128 (xmm_mask, &xmm_mask_lo, &xmm_mask_hi);
+                unpack_128_2x128 (xmm_dst, &xmm_dst_lo, &xmm_dst_hi);
 
-                expandAlphaRev_2x128 (xmmMaskLo, xmmMaskHi, &xmmMaskLo, &xmmMaskHi);
+                expand_alpha_rev_2x128 (xmm_mask_lo, xmm_mask_hi, &xmm_mask_lo, &xmm_mask_hi);
 
-                inOver_2x128 (xmmSrcLo, xmmSrcHi, Mask00ff, Mask00ff, xmmMaskLo, xmmMaskHi, &xmmDstLo, &xmmDstHi);
+                in_over_2x128 (xmm_src_lo, xmm_src_hi, mask_00ff, mask_00ff, xmm_mask_lo, xmm_mask_hi, &xmm_dst_lo, &xmm_dst_hi);
 
-                save128Aligned( (__m128i*)dst, pack_2x128_128 (xmmDstLo, xmmDstHi));
+                save_128_aligned( (__m128i*)dst, pack_2x128_128 (xmm_dst_lo, xmm_dst_hi));
             }
 
             src += 4;
@@ -4850,9 +4850,9 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
                 {
                     d = *dst;
 
-                    *dst = pack_1x64_32 (inOver_1x64 (unpack_32_1x64 (s),
+                    *dst = pack_1x64_32 (in_over_1x64 (unpack_32_1x64 (s),
                                                       mask_x00ff,
-                                                      expandAlphaRev_1x64 (unpack_32_1x64 (m)),
+                                                      expand_alpha_rev_1x64 (unpack_32_1x64 (m)),
                                                       unpack_32_1x64 (d)));
                 }
 
@@ -4870,77 +4870,77 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 
 static const pixman_fast_path_t sse2_fast_paths[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   sse2_CompositeOver_n_8_0565,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   sse2_CompositeOver_n_8_0565,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8888,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8888,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_CompositeOver_n_0565,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   sse2_composite_over_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   sse2_composite_over_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_n_8888,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_n_8888,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_n_0565,           0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888,          0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888,          0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888,          0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888,          0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_8888_0565,          0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_over_8888_0565,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeOver_n_8_8888,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_n_8_8888,     0 },
 #if 0
     /* FIXME: This code is buggy in the MMX version; the bug was carried over to the SSE2 version */
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeOver_x888_8_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_x888_8_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeOver_x888_8_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_8_8888,       0 },
 #endif
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_x888_n_8888,        NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeCopyArea,               0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_CompositeCopyArea,               0 },
-
-    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       sse2_CompositeAdd_8000_8000,       0 },
-    { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeAdd_8888_8888,       0 },
-    { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_CompositeAdd_8888_8888,       0 },
-    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       sse2_CompositeAdd_8888_8_8,        0 },
-
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeSrc_n_8_8888,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeSrc_n_8_8888,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeSrc_n_8_8888,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeSrc_n_8_8888,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeCopyArea,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_CompositeCopyArea,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeCopyArea,		0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, sse2_CompositeCopyArea,		0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeCopyArea,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_CompositeCopyArea,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_CompositeCopyArea,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   sse2_CompositeCopyArea,               0 },
-
-    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       sse2_CompositeIn_8_8,                 0 },
-    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       sse2_CompositeIn_n_8_8,               0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_composite_over_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,               0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,               0 },
+
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       sse2_composite_add_8000_8000,       0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_add_8888_8888,       0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_add_8888_8888,       0 },
+    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       sse2_composite_add_8888_8_8,        0 },
+
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_src_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_src_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_src_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_src_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_copy_area,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_copy_area,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,		0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, sse2_composite_copy_area,		0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_copy_area,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_copy_area,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_copy_area,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_copy_area,               0 },
+
+    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       sse2_composite_in_8_8,                 0 },
+    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       sse2_composite_in_n_8_8,               0 },
 
     { PIXMAN_OP_NONE },
 };
@@ -5012,7 +5012,7 @@ sse2_blt (pixman_implementation_t *imp,
 	  int dst_x, int dst_y,
 	  int width, int height)
 {
-    if (!pixmanBltsse2 (
+    if (!pixman_blt_sse2 (
 	    src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
 	    src_x, src_y, dst_x, dst_y, width, height))
 
@@ -5040,7 +5040,7 @@ sse2_fill (pixman_implementation_t *imp,
 	   int height,
 	   uint32_t xor)
 {
-    if (!pixmanFillsse2 (bits, stride, bpp, x, y, width, height, xor))
+    if (!pixman_fill_sse2 (bits, stride, bpp, x, y, width, height, xor))
     {
 	return _pixman_implementation_fill (
 	    imp->delegate, bits, stride, bpp, x, y, width, height, xor);
@@ -5056,60 +5056,60 @@ _pixman_implementation_create_sse2 (void)
     pixman_implementation_t *imp = _pixman_implementation_create (mmx);
 
     /* SSE2 constants */
-    Mask565r  = createMask_2x32_128 (0x00f80000, 0x00f80000);
-    Mask565g1 = createMask_2x32_128 (0x00070000, 0x00070000);
-    Mask565g2 = createMask_2x32_128 (0x000000e0, 0x000000e0);
-    Mask565b  = createMask_2x32_128 (0x0000001f, 0x0000001f);
-    MaskRed   = createMask_2x32_128 (0x00f80000, 0x00f80000);
-    MaskGreen = createMask_2x32_128 (0x0000fc00, 0x0000fc00);
-    MaskBlue  = createMask_2x32_128 (0x000000f8, 0x000000f8);
-    Mask565FixRB = createMask_2x32_128 (0x00e000e0, 0x00e000e0);
-    Mask565FixG = createMask_2x32_128  (0x0000c000, 0x0000c000);
-    Mask0080 = createMask_16_128 (0x0080);
-    Mask00ff = createMask_16_128 (0x00ff);
-    Mask0101 = createMask_16_128 (0x0101);
-    Maskffff = createMask_16_128 (0xffff);
-    Maskff000000 = createMask_2x32_128 (0xff000000, 0xff000000);
-    MaskAlpha = createMask_2x32_128 (0x00ff0000, 0x00000000);
+    mask_565_r  = create_mask_2x32_128 (0x00f80000, 0x00f80000);
+    mask_565_g1 = create_mask_2x32_128 (0x00070000, 0x00070000);
+    mask_565_g2 = create_mask_2x32_128 (0x000000e0, 0x000000e0);
+    mask_565_b  = create_mask_2x32_128 (0x0000001f, 0x0000001f);
+    mask_red   = create_mask_2x32_128 (0x00f80000, 0x00f80000);
+    mask_green = create_mask_2x32_128 (0x0000fc00, 0x0000fc00);
+    mask_blue  = create_mask_2x32_128 (0x000000f8, 0x000000f8);
+    mask_565_fix_rb = create_mask_2x32_128 (0x00e000e0, 0x00e000e0);
+    mask_565_fix_g = create_mask_2x32_128  (0x0000c000, 0x0000c000);
+    mask_0080 = create_mask_16_128 (0x0080);
+    mask_00ff = create_mask_16_128 (0x00ff);
+    mask_0101 = create_mask_16_128 (0x0101);
+    mask_ffff = create_mask_16_128 (0xffff);
+    mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000);
+    mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000);
     
     /* MMX constants */
-    mask_x565rgb = createMask_2x32_64 (0x000001f0, 0x003f001f);
-    mask_x565Unpack = createMask_2x32_64 (0x00000084, 0x04100840);
+    mask_x565_rgb = create_mask_2x32_64 (0x000001f0, 0x003f001f);
+    mask_x565_unpack = create_mask_2x32_64 (0x00000084, 0x04100840);
     
-    mask_x0080 = createMask_16_64 (0x0080);
-    mask_x00ff = createMask_16_64 (0x00ff);
-    mask_x0101 = createMask_16_64 (0x0101);
-    mask_xAlpha = createMask_2x32_64 (0x00ff0000, 0x00000000);
+    mask_x0080 = create_mask_16_64 (0x0080);
+    mask_x00ff = create_mask_16_64 (0x00ff);
+    mask_x0101 = create_mask_16_64 (0x0101);
+    mask_x_alpha = create_mask_2x32_64 (0x00ff0000, 0x00000000);
 
     _mm_empty();
 
     /* Set up function pointers */
     
     /* SSE code patch for fbcompose.c */
-    imp->combine_32[PIXMAN_OP_OVER] = sse2CombineOverU;
-    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2CombineOverReverseU;
-    imp->combine_32[PIXMAN_OP_IN] = sse2CombineInU;
-    imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2CombineInReverseU;
-    imp->combine_32[PIXMAN_OP_OUT] = sse2CombineOutU;
-    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2CombineOutReverseU;
-    imp->combine_32[PIXMAN_OP_ATOP] = sse2CombineAtopU;
-    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2CombineAtopReverseU;
-    imp->combine_32[PIXMAN_OP_XOR] = sse2CombineXorU;
-    imp->combine_32[PIXMAN_OP_ADD] = sse2CombineAddU;
+    imp->combine_32[PIXMAN_OP_OVER] = sse2combine_over_u;
+    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = sse2combine_over_reverse_u;
+    imp->combine_32[PIXMAN_OP_IN] = sse2combine_in_u;
+    imp->combine_32[PIXMAN_OP_IN_REVERSE] = sse2combine_in_reverse_u;
+    imp->combine_32[PIXMAN_OP_OUT] = sse2combine_out_u;
+    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = sse2combine_out_reverse_u;
+    imp->combine_32[PIXMAN_OP_ATOP] = sse2combine_atop_u;
+    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = sse2combine_atop_reverse_u;
+    imp->combine_32[PIXMAN_OP_XOR] = sse2combine_xor_u;
+    imp->combine_32[PIXMAN_OP_ADD] = sse2combine_add_u;
     
-    imp->combine_32[PIXMAN_OP_SATURATE] = sse2CombineSaturateU;
+    imp->combine_32[PIXMAN_OP_SATURATE] = sse2combine_saturate_u;
     
-    imp->combine_32_ca[PIXMAN_OP_SRC] = sse2CombineSrcC;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = sse2CombineOverC;
-    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2CombineOverReverseC;
-    imp->combine_32_ca[PIXMAN_OP_IN] = sse2CombineInC;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2CombineInReverseC;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = sse2CombineOutC;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2CombineOutReverseC;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2CombineAtopC;
-    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2CombineAtopReverseC;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = sse2CombineXorC;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = sse2CombineAddC;
+    imp->combine_32_ca[PIXMAN_OP_SRC] = sse2combine_src_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER] = sse2combine_over_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = sse2combine_over_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_IN] = sse2combine_in_c;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = sse2combine_in_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT] = sse2combine_out_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = sse2combine_out_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP] = sse2combine_atop_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = sse2combine_atop_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_XOR] = sse2combine_xor_c;
+    imp->combine_32_ca[PIXMAN_OP_ADD] = sse2combine_add_c;
     
     imp->composite = sse2_composite;
     imp->blt = sse2_blt;
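
For reference, sse2_blt() and sse2_fill() above follow pixman's delegation pattern: the SSE2 fast path handles the cases it supports and returns FALSE otherwise, at which point the call is forwarded to the delegate implementation. A minimal sketch of that pattern, with invented names and a plain C loop standing in for the SSE2 body:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Fast path: only 32 bpp is handled here; anything else is declined. */
    static bool
    fill_fast (uint32_t *bits, int stride, int bpp,
               int x, int y, int width, int height, uint32_t xor)
    {
        if (bpp != 32)
            return false;

        for (int i = 0; i < height; i++)
        {
            uint32_t *row = bits + (size_t) (y + i) * stride + x;

            for (int j = 0; j < width; j++)
                row[j] = xor;
        }

        return true;
    }

    /* Wrapper: try the fast path first, fall back to a general routine. */
    static void
    fill (uint32_t *bits, int stride, int bpp,
          int x, int y, int width, int height, uint32_t xor)
    {
        if (!fill_fast (bits, stride, bpp, x, y, width, height, xor))
        {
            /* ... delegate to the slower, format-agnostic implementation ... */
        }
    }
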
diff --git a/pixman/pixman-utils.c b/pixman/pixman-utils.c
index 354ae15..e4563d0 100644
--- a/pixman/pixman-utils.c
+++ b/pixman/pixman-utils.c
@@ -386,10 +386,10 @@ walk_region_internal (pixman_implementation_t *imp,
 		      int16_t dest_y,
 		      uint16_t width,
 		      uint16_t height,
-		      pixman_bool_t srcRepeat,
-		      pixman_bool_t maskRepeat,
+		      pixman_bool_t src_repeat,
+		      pixman_bool_t mask_repeat,
 		      pixman_region32_t *region,
-		      pixman_composite_func_t compositeRect)
+		      pixman_composite_func_t composite_rect)
 {
     int n;
     const pixman_box32_t *pbox;
@@ -411,13 +411,13 @@ walk_region_internal (pixman_implementation_t *imp,
 	    x_msk = pbox->x1 - dest_x + mask_x;
 	    x_dst = pbox->x1;
 	    
-	    if (maskRepeat)
+	    if (mask_repeat)
 	    {
 		y_msk = MOD (y_msk, mask_image->bits.height);
 		if (h_this > mask_image->bits.height - y_msk)
 		    h_this = mask_image->bits.height - y_msk;
 	    }
-	    if (srcRepeat)
+	    if (src_repeat)
 	    {
 		y_src = MOD (y_src, src_image->bits.height);
 		if (h_this > src_image->bits.height - y_src)
@@ -426,19 +426,19 @@ walk_region_internal (pixman_implementation_t *imp,
 	    while (w)
 	    {
 		w_this = w;
-		if (maskRepeat)
+		if (mask_repeat)
 		{
 		    x_msk = MOD (x_msk, mask_image->bits.width);
 		    if (w_this > mask_image->bits.width - x_msk)
 			w_this = mask_image->bits.width - x_msk;
 		}
-		if (srcRepeat)
+		if (src_repeat)
 		{
 		    x_src = MOD (x_src, src_image->bits.width);
 		    if (w_this > src_image->bits.width - x_src)
 			w_this = src_image->bits.width - x_src;
 		}
-		(*compositeRect) (imp,
+		(*composite_rect) (imp,
 				  op, src_image, mask_image, dst_image,
 				  x_src, y_src, x_msk, y_msk, x_dst, y_dst,
 				  w_this, h_this);
@@ -470,7 +470,7 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
 			       int16_t dest_y,
 			       uint16_t width,
 			       uint16_t height,
-			       pixman_composite_func_t compositeRect)
+			       pixman_composite_func_t composite_rect)
 {
     pixman_region32_t region;
     
@@ -484,7 +484,7 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
 			      src_x, src_y, mask_x, mask_y, dest_x, dest_y,
 			      width, height, FALSE, FALSE,
 			      &region,
-			      compositeRect);
+			      composite_rect);
     }
 
     pixman_region32_fini (&region);
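
A note on the logic being renamed here: walk_region_internal() splits each clip box into sub-rectangles so that a repeating source or mask never has to be read across its own edge. The wrap-and-clip step looks roughly like this (MOD() and clip_repeat_span() are illustrative helpers, not pixman API):

    /* Wrap a coordinate into [0, image_size) and clip the span so it stays
     * inside one tile of the repeating image. */
    #define MOD(a, b)  (((a) % (b) + (b)) % (b))

    static int
    clip_repeat_span (int *coord, int span, int image_size)
    {
        *coord = MOD (*coord, image_size);

        if (span > image_size - *coord)
            span = image_size - *coord;

        return span;
    }

    /* Usage, mirroring the inner loop above:
     *
     *     while (w)
     *     {
     *         w_this = w;
     *         if (src_repeat)
     *             w_this = clip_repeat_span (&x_src, w_this, src_width);
     *         composite_rect (...);
     *         w -= w_this;
     *         x_src += w_this;
     *     }
     */
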
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 67fbbe9..bc0c0b8 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -217,7 +217,7 @@ over (vector unsigned int src, vector unsigned int srca,
         vec_st ((vector unsigned int) tmp1, 0, dest );
 
 static void
-vmxCombineOverU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_over_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -250,7 +250,7 @@ vmxCombineOverU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOverU_mask (uint32_t *dest,
+vmx_combine_over_u_mask (uint32_t *dest,
                      const uint32_t *src,
                      const uint32_t *mask,
                      int width)
@@ -292,18 +292,18 @@ vmxCombineOverU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineOverU(pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_over_u(pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask,
                 int width)
 {
     if (mask)
-        vmxCombineOverU_mask(dest, src, mask, width);
+        vmx_combine_over_u_mask(dest, src, mask, width);
     else
-        vmxCombineOverU_no_mask(dest, src, width);
+        vmx_combine_over_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineOverReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_over_reverse_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -336,7 +336,7 @@ vmxCombineOverReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOverReverseU_mask (uint32_t *dest,
+vmx_combine_over_reverse_u_mask (uint32_t *dest,
                             const uint32_t *src,
                             const uint32_t *mask,
                             int width)
@@ -376,18 +376,18 @@ vmxCombineOverReverseU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_over_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src,
                         const uint32_t *mask, int width)
 {
     if (mask)
-        vmxCombineOverReverseU_mask(dest, src, mask, width);
+        vmx_combine_over_reverse_u_mask(dest, src, mask, width);
     else
-        vmxCombineOverReverseU_no_mask(dest, src, width);
+        vmx_combine_over_reverse_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineInU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_in_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -419,7 +419,7 @@ vmxCombineInU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineInU_mask (uint32_t *dest,
+vmx_combine_in_u_mask (uint32_t *dest,
                    const uint32_t *src,
                    const uint32_t *mask,
                    int width)
@@ -458,18 +458,18 @@ vmxCombineInU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineInU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_in_u (pixman_implementation_t *imp, pixman_op_t op,
 	       uint32_t *dest, const uint32_t *src, const uint32_t *mask,
                int width)
 {
     if (mask)
-        vmxCombineInU_mask(dest, src, mask, width);
+        vmx_combine_in_u_mask(dest, src, mask, width);
     else
-        vmxCombineInU_no_mask(dest, src, width);
+        vmx_combine_in_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineInReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_in_reverse_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -500,7 +500,7 @@ vmxCombineInReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineInReverseU_mask (uint32_t *dest,
+vmx_combine_in_reverse_u_mask (uint32_t *dest,
                           const uint32_t *src,
                           const uint32_t *mask,
                           int width)
@@ -540,18 +540,18 @@ vmxCombineInReverseU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_in_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dest, const uint32_t *src,
                       const uint32_t *mask, int width)
 {
     if (mask)
-        vmxCombineInReverseU_mask(dest, src, mask, width);
+        vmx_combine_in_reverse_u_mask(dest, src, mask, width);
     else
-        vmxCombineInReverseU_no_mask(dest, src, width);
+        vmx_combine_in_reverse_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineOutU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_out_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -582,7 +582,7 @@ vmxCombineOutU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOutU_mask (uint32_t *dest,
+vmx_combine_out_u_mask (uint32_t *dest,
                     const uint32_t *src,
                     const uint32_t *mask,
                     int width)
@@ -621,18 +621,18 @@ vmxCombineOutU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_out_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask,
                 int width)
 {
     if (mask)
-        vmxCombineOutU_mask(dest, src, mask, width);
+        vmx_combine_out_u_mask(dest, src, mask, width);
     else
-        vmxCombineOutU_no_mask(dest, src, width);
+        vmx_combine_out_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineOutReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_out_reverse_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -663,7 +663,7 @@ vmxCombineOutReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOutReverseU_mask (uint32_t *dest,
+vmx_combine_out_reverse_u_mask (uint32_t *dest,
                            const uint32_t *src,
                            const uint32_t *mask,
                            int width)
@@ -703,20 +703,20 @@ vmxCombineOutReverseU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_out_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dest,
                        const uint32_t *src,
                        const uint32_t *mask,
                        int width)
 {
     if (mask)
-        vmxCombineOutReverseU_mask(dest, src, mask, width);
+        vmx_combine_out_reverse_u_mask(dest, src, mask, width);
     else
-        vmxCombineOutReverseU_no_mask(dest, src, width);
+        vmx_combine_out_reverse_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineAtopU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_atop_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -751,7 +751,7 @@ vmxCombineAtopU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineAtopU_mask (uint32_t *dest,
+vmx_combine_atop_u_mask (uint32_t *dest,
                      const uint32_t *src,
                      const uint32_t *mask,
                      int width)
@@ -795,20 +795,20 @@ vmxCombineAtopU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_atop_u (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest,
                  const uint32_t *src,
                  const uint32_t *mask,
                  int width)
 {
     if (mask)
-        vmxCombineAtopU_mask(dest, src, mask, width);
+        vmx_combine_atop_u_mask(dest, src, mask, width);
     else
-        vmxCombineAtopU_no_mask(dest, src, width);
+        vmx_combine_atop_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineAtopReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_atop_reverse_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -843,7 +843,7 @@ vmxCombineAtopReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineAtopReverseU_mask (uint32_t *dest,
+vmx_combine_atop_reverse_u_mask (uint32_t *dest,
                             const uint32_t *src,
                             const uint32_t *mask,
                             int width)
@@ -887,20 +887,20 @@ vmxCombineAtopReverseU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_atop_reverse_u (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest,
                         const uint32_t *src,
                         const uint32_t *mask,
                         int width)
 {
     if (mask)
-        vmxCombineAtopReverseU_mask(dest, src, mask, width);
+        vmx_combine_atop_reverse_u_mask(dest, src, mask, width);
     else
-        vmxCombineAtopReverseU_no_mask(dest, src, width);
+        vmx_combine_atop_reverse_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineXorU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_xor_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -935,7 +935,7 @@ vmxCombineXorU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineXorU_mask (uint32_t *dest,
+vmx_combine_xor_u_mask (uint32_t *dest,
                     const uint32_t *src,
                     const uint32_t *mask,
                     int width)
@@ -979,20 +979,20 @@ vmxCombineXorU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_xor_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest,
                 const uint32_t *src,
                 const uint32_t *mask,
                 int width)
 {
     if (mask)
-        vmxCombineXorU_mask(dest, src, mask, width);
+        vmx_combine_xor_u_mask(dest, src, mask, width);
     else
-        vmxCombineXorU_no_mask(dest, src, width);
+        vmx_combine_xor_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineAddU_no_mask (uint32_t *dest, const uint32_t *src, int width)
+vmx_combine_add_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -1022,7 +1022,7 @@ vmxCombineAddU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineAddU_mask (uint32_t *dest,
+vmx_combine_add_u_mask (uint32_t *dest,
                     const uint32_t *src,
                     const uint32_t *mask,
                     int width)
@@ -1061,20 +1061,20 @@ vmxCombineAddU_mask (uint32_t *dest,
 }
 
 static void
-vmxCombineAddU (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_add_u (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest,
                 const uint32_t *src,
                 const uint32_t *mask,
                 int width)
 {
     if (mask)
-        vmxCombineAddU_mask(dest, src, mask, width);
+        vmx_combine_add_u_mask(dest, src, mask, width);
     else
-        vmxCombineAddU_no_mask(dest, src, width);
+        vmx_combine_add_u_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_src_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1106,7 +1106,7 @@ vmxCombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_over_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1140,7 +1140,7 @@ vmxCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_over_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1175,7 +1175,7 @@ vmxCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineInC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_in_c (pixman_implementation_t *imp, pixman_op_t op,
 	       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1210,7 +1210,7 @@ vmxCombineInC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_in_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		      uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1245,7 +1245,7 @@ vmxCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_out_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1281,7 +1281,7 @@ vmxCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_out_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 		       uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1318,7 +1318,7 @@ vmxCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_atop_c (pixman_implementation_t *imp, pixman_op_t op,
 		 uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1360,7 +1360,7 @@ vmxCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_atop_reverse_c (pixman_implementation_t *imp, pixman_op_t op,
 			uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1402,7 +1402,7 @@ vmxCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_xor_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1444,7 +1444,7 @@ vmxCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-vmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
+vmx_combine_add_c (pixman_implementation_t *imp, pixman_op_t op,
 		uint32_t *dest, const uint32_t *src, const uint32_t *mask, int width)
 {
     int i;
@@ -1482,7 +1482,7 @@ vmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 
 #if 0
 void
-vmx_CompositeOver_n_8888 (pixman_operator_t	op,
+vmx_composite_over_n_8888 (pixman_operator_t	op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
@@ -1496,26 +1496,26 @@ vmx_CompositeOver_n_8888 (pixman_operator_t	op,
 			    uint16_t	height)
 {
     uint32_t	src;
-    uint32_t	*dstLine, *dst;
-    int	dstStride;
+    uint32_t	*dst_line, *dst;
+    int	dst_stride;
 
     _pixman_image_get_solid (src_image, dst_image, src);
 
     if (src >> 24 == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-	/* XXX vmxCombineOverU (dst, src, width); */
+	dst = dst_line;
+	dst_line += dst_stride;
+	/* XXX vmx_combine_over_u (dst, src, width); */
     }
 }
 
 void
-vmx_CompositeOver_n_0565 (pixman_operator_t	op,
+vmx_composite_over_n_0565 (pixman_operator_t	op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
@@ -1529,22 +1529,22 @@ vmx_CompositeOver_n_0565 (pixman_operator_t	op,
 			    uint16_t	height)
 {
     uint32_t	src;
-    uint16_t	*dstLine, *dst;
+    uint16_t	*dst_line, *dst;
     uint16_t	w;
-    int	dstStride;
+    int	dst_stride;
 
     _pixman_image_get_solid (src_image, dst_image, src);
 
     if (src >> 24 == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
 
     while (height--)
     {
-	dst = dstLine;
-	dstLine += dstStride;
-       vmxCombineOverU565(dst, src, width);
+	dst = dst_line;
+	dst_line += dst_stride;
+       vmx_combine_over_u565(dst, src, width);
     }
 }
 
@@ -1566,29 +1566,29 @@ _pixman_implementation_create_vmx (void)
     /* Set up function pointers */
     
     /* SSE code patch for fbcompose.c */
-    imp->combine_32[PIXMAN_OP_OVER] = vmxCombineOverU;
-    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = vmxCombineOverReverseU;
-    imp->combine_32[PIXMAN_OP_IN] = vmxCombineInU;
-    imp->combine_32[PIXMAN_OP_IN_REVERSE] = vmxCombineInReverseU;
-    imp->combine_32[PIXMAN_OP_OUT] = vmxCombineOutU;
-    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = vmxCombineOutReverseU;
-    imp->combine_32[PIXMAN_OP_ATOP] = vmxCombineAtopU;
-    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = vmxCombineAtopReverseU;
-    imp->combine_32[PIXMAN_OP_XOR] = vmxCombineXorU;
-
-    imp->combine_32[PIXMAN_OP_ADD] = vmxCombineAddU;
-
-    imp->combine_32_ca[PIXMAN_OP_SRC] = vmxCombineSrcC;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = vmxCombineOverC;
-    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmxCombineOverReverseC;
-    imp->combine_32_ca[PIXMAN_OP_IN] = vmxCombineInC;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = vmxCombineInReverseC;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = vmxCombineOutC;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = vmxCombineOutReverseC;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = vmxCombineAtopC;
-    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmxCombineAtopReverseC;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = vmxCombineXorC;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = vmxCombineAddC;
+    imp->combine_32[PIXMAN_OP_OVER] = vmx_combine_over_u;
+    imp->combine_32[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_u;
+    imp->combine_32[PIXMAN_OP_IN] = vmx_combine_in_u;
+    imp->combine_32[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_u;
+    imp->combine_32[PIXMAN_OP_OUT] = vmx_combine_out_u;
+    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_u;
+    imp->combine_32[PIXMAN_OP_ATOP] = vmx_combine_atop_u;
+    imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_u;
+    imp->combine_32[PIXMAN_OP_XOR] = vmx_combine_xor_u;
+
+    imp->combine_32[PIXMAN_OP_ADD] = vmx_combine_add_u;
+
+    imp->combine_32_ca[PIXMAN_OP_SRC] = vmx_combine_src_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER] = vmx_combine_over_c;
+    imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_IN] = vmx_combine_in_c;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT] = vmx_combine_out_c;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP] = vmx_combine_atop_c;
+    imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_c;
+    imp->combine_32_ca[PIXMAN_OP_XOR] = vmx_combine_xor_c;
+    imp->combine_32_ca[PIXMAN_OP_ADD] = vmx_combine_add_c;
     
     return imp;
 }
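
The structure of every combiner renamed in pixman-vmx.c is the same: a _no_mask body, a _mask body, and a thin entry point that picks one depending on whether a mask was supplied. A scalar sketch of that shape for OVER (rounding simplified; this is not the VMX code itself):

    #include <stdint.h>

    /* src OVER dest for one premultiplied a8r8g8b8 pixel. */
    static uint32_t
    over (uint32_t src, uint32_t dest)
    {
        uint32_t ia = ~src >> 24;        /* 255 - source alpha */
        uint32_t result = 0;

        for (int shift = 0; shift < 32; shift += 8)
        {
            uint32_t s = (src  >> shift) & 0xff;
            uint32_t d = (dest >> shift) & 0xff;
            uint32_t c = s + (d * ia + 127) / 255;

            result |= (c > 0xff ? 0xff : c) << shift;
        }

        return result;
    }

    static void
    combine_over_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
    {
        for (int i = 0; i < width; i++)
            dest[i] = over (src[i], dest[i]);
    }

    static void
    combine_over_u_mask (uint32_t *dest, const uint32_t *src,
                         const uint32_t *mask, int width)
    {
        for (int i = 0; i < width; i++)
        {
            uint32_t m = mask[i] >> 24;  /* unified alpha from the mask */
            uint32_t s = 0;

            /* src IN mask, channel by channel */
            for (int shift = 0; shift < 32; shift += 8)
                s |= ((((src[i] >> shift) & 0xff) * m + 127) / 255) << shift;

            dest[i] = over (s, dest[i]);
        }
    }

    static void
    combine_over_u (uint32_t *dest, const uint32_t *src,
                    const uint32_t *mask, int width)
    {
        if (mask)
            combine_over_u_mask (dest, src, mask, width);
        else
            combine_over_u_no_mask (dest, src, width);
    }
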
diff --git a/pixman/pixman.c b/pixman/pixman.c
index 0bd5e0b..4c393c5 100644
--- a/pixman/pixman.c
+++ b/pixman/pixman.c
@@ -34,9 +34,9 @@
 typedef struct
 {
     pixman_op_t			op;
-    pixman_op_t			opSrcDstOpaque;
-    pixman_op_t			opSrcOpaque;
-    pixman_op_t			opDstOpaque;
+    pixman_op_t			op_src_dst_opaque;
+    pixman_op_t			op_src_opaque;
+    pixman_op_t			op_dst_opaque;
 } optimized_operator_info_t;
 
 static const optimized_operator_info_t optimized_operators[] =
@@ -92,11 +92,11 @@ pixman_optimize_operator(pixman_op_t op, pixman_image_t *src_image, pixman_image
         return op;
 
     if(is_source_opaque && is_dest_opaque)
-        return info->opSrcDstOpaque;
+        return info->op_src_dst_opaque;
     else if(is_source_opaque)
-        return info->opSrcOpaque;
+        return info->op_src_opaque;
     else if(is_dest_opaque)
-        return info->opDstOpaque;
+        return info->op_dst_opaque;
 
     return op;
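
The struct renamed here backs a small lookup: when pixman_optimize_operator() knows the source and/or destination is fully opaque, it substitutes a cheaper operator. The OVER row below is the classic case; treat the table as an illustration, not a copy of pixman's:

    #include <pixman.h>

    typedef struct
    {
        pixman_op_t op;
        pixman_op_t op_src_dst_opaque;
        pixman_op_t op_src_opaque;
        pixman_op_t op_dst_opaque;
    } optimized_operator_info_t;

    /* With an opaque source, OVER degenerates to SRC, because
     * dest = src + dest * (1 - src_alpha) equals src when src_alpha == 1;
     * with only the destination opaque, OVER still has to blend.
     * (The real table ends with a PIXMAN_OP_NONE sentinel.) */
    static const optimized_operator_info_t example_operators[] =
    {
        /*  op              both opaque     src opaque      dst opaque     */
        { PIXMAN_OP_OVER,   PIXMAN_OP_SRC,  PIXMAN_OP_SRC,  PIXMAN_OP_OVER },
    };

A caller then returns op_src_dst_opaque, op_src_opaque, op_dst_opaque, or the original op, exactly as in the hunk above.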
 
diff --git a/pixman/pixman.h b/pixman/pixman.h
index 58dfd42..92ed696 100644
--- a/pixman/pixman.h
+++ b/pixman/pixman.h
@@ -398,7 +398,7 @@ typedef struct pixman_region16		pixman_region16_t;
 
 struct pixman_region16_data {
     long		size;
-    long		numRects;
+    long		num_rects;
 /*  pixman_box16_t	rects[size];   in memory but not explicitly declared */
 };
 
@@ -455,10 +455,10 @@ void                    pixman_region_translate           (pixman_region16_t
 							   int                     y);
 pixman_bool_t           pixman_region_copy                (pixman_region16_t      *dest,
 							   pixman_region16_t      *source);
-pixman_bool_t           pixman_region_intersect           (pixman_region16_t      *newReg,
+pixman_bool_t           pixman_region_intersect           (pixman_region16_t      *new_reg,
 							   pixman_region16_t      *reg1,
 							   pixman_region16_t      *reg2);
-pixman_bool_t           pixman_region_union               (pixman_region16_t      *newReg,
+pixman_bool_t           pixman_region_union               (pixman_region16_t      *new_reg,
 							   pixman_region16_t      *reg1,
 							   pixman_region16_t      *reg2);
 pixman_bool_t           pixman_region_union_rect          (pixman_region16_t      *dest,
@@ -467,12 +467,12 @@ pixman_bool_t           pixman_region_union_rect          (pixman_region16_t
 							   int                     y,
 							   unsigned int            width,
 							   unsigned int            height);
-pixman_bool_t           pixman_region_subtract            (pixman_region16_t      *regD,
-							   pixman_region16_t      *regM,
-							   pixman_region16_t      *regS);
-pixman_bool_t           pixman_region_inverse             (pixman_region16_t      *newReg,
+pixman_bool_t           pixman_region_subtract            (pixman_region16_t      *reg_d,
+							   pixman_region16_t      *reg_m,
+							   pixman_region16_t      *reg_s);
+pixman_bool_t           pixman_region_inverse             (pixman_region16_t      *new_reg,
 							   pixman_region16_t      *reg1,
-							   pixman_box16_t         *invRect);
+							   pixman_box16_t         *inv_rect);
 pixman_bool_t           pixman_region_contains_point      (pixman_region16_t      *region,
 							   int                     x,
 							   int                     y,
@@ -500,7 +500,7 @@ typedef struct pixman_region32		pixman_region32_t;
 
 struct pixman_region32_data {
     long		size;
-    long		numRects;
+    long		num_rects;
 /*  pixman_box32_t	rects[size];   in memory but not explicitly declared */
 };
 
@@ -542,10 +542,10 @@ void                    pixman_region32_translate          (pixman_region32_t *r
 							    int                y);
 pixman_bool_t           pixman_region32_copy               (pixman_region32_t *dest,
 							    pixman_region32_t *source);
-pixman_bool_t           pixman_region32_intersect          (pixman_region32_t *newReg,
+pixman_bool_t           pixman_region32_intersect          (pixman_region32_t *new_reg,
 							    pixman_region32_t *reg1,
 							    pixman_region32_t *reg2);
-pixman_bool_t           pixman_region32_union              (pixman_region32_t *newReg,
+pixman_bool_t           pixman_region32_union              (pixman_region32_t *new_reg,
 							    pixman_region32_t *reg1,
 							    pixman_region32_t *reg2);
 pixman_bool_t           pixman_region32_union_rect         (pixman_region32_t *dest,
@@ -554,12 +554,12 @@ pixman_bool_t           pixman_region32_union_rect         (pixman_region32_t *d
 							    int                y,
 							    unsigned int       width,
 							    unsigned int       height);
-pixman_bool_t           pixman_region32_subtract           (pixman_region32_t *regD,
-							    pixman_region32_t *regM,
-							    pixman_region32_t *regS);
-pixman_bool_t           pixman_region32_inverse            (pixman_region32_t *newReg,
+pixman_bool_t           pixman_region32_subtract           (pixman_region32_t *reg_d,
+							    pixman_region32_t *reg_m,
+							    pixman_region32_t *reg_s);
+pixman_bool_t           pixman_region32_inverse            (pixman_region32_t *new_reg,
 							    pixman_region32_t *reg1,
-							    pixman_box32_t    *invRect);
+							    pixman_box32_t    *inv_rect);
 pixman_bool_t           pixman_region32_contains_point     (pixman_region32_t *region,
 							    int                x,
 							    int                y,
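
For anyone reading the renamed prototypes above, the calling convention is unchanged: new_reg is an output region and the pixman_bool_t return reports whether the operation (allocation included) succeeded. A small usage sketch:

    #include <pixman.h>

    /* Intersect two rectangles and report whether they overlap. */
    static pixman_bool_t
    rects_overlap (void)
    {
        pixman_region32_t a, b, clip;
        pixman_bool_t overlap;

        pixman_region32_init_rect (&a, 0, 0, 100, 100);
        pixman_region32_init_rect (&b, 50, 50, 100, 100);
        pixman_region32_init (&clip);

        pixman_region32_intersect (&clip, &a, &b);  /* clip = a intersected with b */
        overlap = pixman_region32_not_empty (&clip);

        pixman_region32_fini (&clip);
        pixman_region32_fini (&b);
        pixman_region32_fini (&a);

        return overlap;
    }
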
commit 437ab049872063c78ee934766596dc6859749a3d
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 01:34:07 2009 -0400

    Remove reference to 8888_RevNP

diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index d8fb187..5813649 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -3889,8 +3889,6 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
     _mm_empty();
 }
 
-/* "8888RevNP" is GdkPixbuf's format: ABGR, non premultiplied */
-
 /* -------------------------------------------------------------------------------------------------
  * fast_Composite_over_pixbuf_8888
  */
commit 55e63bd0f09290cf1165030edbb4e92efb09ee6e
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:55:45 2009 -0400

    Remove reference to 8888RevNP

diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index 48bb3e3..60259b4 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -2219,8 +2219,6 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
     _mm_empty();
 }
 
-/* "8888RevNP" is GdkPixbuf's format: ABGR, non premultiplied */
-
 static void
 mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
commit 01994a59ca642f4e5ce126d3ad01e864d3daa0bb
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:41:53 2009 -0400

    NoFeatures => NO_FEATURES

diff --git a/pixman/pixman-cpu.c b/pixman/pixman-cpu.c
index e50408e..bf075f6 100644
--- a/pixman/pixman-cpu.c
+++ b/pixman/pixman-cpu.c
@@ -290,7 +290,7 @@ pixman_have_arm_neon (void)
 #endif
 
 typedef enum {
-    NoFeatures = 0,
+    NO_FEATURES = 0,
     MMX = 0x1,
     MMX_EXTENSIONS = 0x2,
     SSE = 0x6,
commit 309d358ea673b5d4c163670c3c449fb855df7775
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:31:07 2009 -0400

    s/FbScrRight/SCREEN_SHIFT_RIGHT/g

diff --git a/pixman/pixman-edge-imp.h b/pixman/pixman-edge-imp.h
index 58957e3..36fc5e3 100644
--- a/pixman/pixman-edge-imp.h
+++ b/pixman/pixman-edge-imp.h
@@ -81,15 +81,15 @@ RASTERIZE_EDGES (pixman_image_t  *image,
 
 #ifdef WORDS_BIGENDIAN
 #   define SCREEN_SHIFT_LEFT(x,n)	((x) << (n))
-#   define FbScrRight(x,n)	((x) >> (n))
+#   define SCREEN_SHIFT_RIGHT(x,n)	((x) >> (n))
 #else
 #   define SCREEN_SHIFT_LEFT(x,n)	((x) >> (n))
-#   define FbScrRight(x,n)	((x) << (n))
+#   define SCREEN_SHIFT_RIGHT(x,n)	((x) << (n))
 #endif
 
 #define LEFT_MASK(x)							\
 		(((x) & 0x1f) ?						\
-		 FbScrRight (0xffffffff, (x) & 0x1f) : 0)
+		 SCREEN_SHIFT_RIGHT (0xffffffff, (x) & 0x1f) : 0)
 #define RIGHT_MASK(x)							\
 		(((32 - (x)) & 0x1f) ?					\
 		 SCREEN_SHIFT_LEFT (0xffffffff, (32 - (x)) & 0x1f) : 0)
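
A worked example of what the renamed macros compute, taking the WORDS_BIGENDIAN branch where screen-left is the most significant bit:

    /*   LEFT_MASK (5)  == 0xffffffff >> 5         == 0x07ffffff
     *       -> drops the 5 left-most pixels of a 32-pixel word
     *   RIGHT_MASK (5) == 0xffffffff << (32 - 5)  == 0xf8000000
     *       -> keeps only the 5 left-most pixels
     *
     * so the pixels [x1, x2) inside a single word can be selected with
     *
     *     mask = LEFT_MASK (x1) & RIGHT_MASK (x2);
     *
     * The (x) & 0x1f guards avoid an undefined 32-bit shift by 32: when the
     * boundary falls exactly on a word edge, the macro yields 0, i.e. no
     * partial mask is needed.
     */
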
commit 71fe4e3e5c64f177a8756e51eddc190b3a08ea40
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:26:30 2009 -0400

    CPUFeatures => cpu_features

diff --git a/pixman/pixman-cpu.c b/pixman/pixman-cpu.c
index a61dd91..e50408e 100644
--- a/pixman/pixman-cpu.c
+++ b/pixman/pixman-cpu.c
@@ -289,16 +289,17 @@ pixman_have_arm_neon (void)
 #include <sys/auxv.h>
 #endif
 
-enum CPUFeatures {
+typedef enum {
     NoFeatures = 0,
     MMX = 0x1,
     MMX_EXTENSIONS = 0x2,
     SSE = 0x6,
     SSE2 = 0x8,
     CMOV = 0x10
-};
+} cpu_features_t;
+    
 
-static unsigned int detectCPUFeatures(void) {
+static unsigned int detect_cpu_features(void) {
     unsigned int features = 0;
     unsigned int result = 0;
 
@@ -469,7 +470,7 @@ pixman_have_mmx (void)
 
     if (!initialized)
     {
-        unsigned int features = detectCPUFeatures();
+        unsigned int features = detect_cpu_features();
 	mmx_present = (features & (MMX|MMX_EXTENSIONS)) == (MMX|MMX_EXTENSIONS);
         initialized = TRUE;
     }
@@ -486,7 +487,7 @@ pixman_have_sse2 (void)
 
     if (!initialized)
     {
-        unsigned int features = detectCPUFeatures();
+        unsigned int features = detect_cpu_features();
         sse2_present = (features & (MMX|MMX_EXTENSIONS|SSE|SSE2)) == (MMX|MMX_EXTENSIONS|SSE|SSE2);
         initialized = TRUE;
     }
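
Note that the tests above demand every bit in the set, not just one of them: the comparison is (features & wanted) == wanted. A tiny helper makes the idiom explicit (have_all() is illustrative, not pixman API):

    #include <stdbool.h>

    static bool
    have_all (unsigned int features, unsigned int wanted)
    {
        return (features & wanted) == wanted;
    }

    /* pixman_have_sse2() above is then equivalent to
     *
     *     have_all (detect_cpu_features (), MMX | MMX_EXTENSIONS | SSE | SSE2)
     *
     * with the result cached in a static so the detection runs only once. */
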
commit 255ddbe5358b0ed4a7a01ef0ab127833dba94b02
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:19:18 2009 -0400

    Rename FbGet8 to GET8

diff --git a/pixman/pixman-bits-image.c b/pixman/pixman-bits-image.c
index 8feb495..f22765a 100644
--- a/pixman/pixman-bits-image.c
+++ b/pixman/pixman-bits-image.c
@@ -320,19 +320,19 @@ bits_image_fetch_bilinear_pixels (bits_image_t *image, uint32_t *buffer, int n_p
 	    idistx = 256 - distx;
 	    idisty = 256 - disty;
 
-#define FbGet8(v,i)   ((uint16_t) (uint8_t) ((v) >> i))
+#define GET8(v,i)   ((uint16_t) (uint8_t) ((v) >> i))
 	    
-	    ft = FbGet8(tl,0) * idistx + FbGet8(tr,0) * distx;
-	    fb = FbGet8(bl,0) * idistx + FbGet8(br,0) * distx;
+	    ft = GET8(tl,0) * idistx + GET8(tr,0) * distx;
+	    fb = GET8(bl,0) * idistx + GET8(br,0) * distx;
 	    r = (((ft * idisty + fb * disty) >> 16) & 0xff);
-	    ft = FbGet8(tl,8) * idistx + FbGet8(tr,8) * distx;
-	    fb = FbGet8(bl,8) * idistx + FbGet8(br,8) * distx;
+	    ft = GET8(tl,8) * idistx + GET8(tr,8) * distx;
+	    fb = GET8(bl,8) * idistx + GET8(br,8) * distx;
 	    r |= (((ft * idisty + fb * disty) >> 8) & 0xff00);
-	    ft = FbGet8(tl,16) * idistx + FbGet8(tr,16) * distx;
-	    fb = FbGet8(bl,16) * idistx + FbGet8(br,16) * distx;
+	    ft = GET8(tl,16) * idistx + GET8(tr,16) * distx;
+	    fb = GET8(bl,16) * idistx + GET8(br,16) * distx;
 	    r |= (((ft * idisty + fb * disty)) & 0xff0000);
-	    ft = FbGet8(tl,24) * idistx + FbGet8(tr,24) * distx;
-	    fb = FbGet8(bl,24) * idistx + FbGet8(br,24) * distx;
+	    ft = GET8(tl,24) * idistx + GET8(tr,24) * distx;
+	    fb = GET8(bl,24) * idistx + GET8(br,24) * distx;
 	    r |= (((ft * idisty + fb * disty) << 8) & 0xff000000);
 
 	    buffer[i++] = r;
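
The GET8() arithmetic being renamed here is a per-channel bilinear blend: distx/disty are the fractional sample positions scaled to 0..256, each 8-bit channel is mixed horizontally between the two top and the two bottom pixels, then the two results are mixed vertically. A standalone sketch (the in-tree code keeps the shifts fused with the channel masks, but computes the same values):

    #include <stdint.h>

    #define GET8(v,i)   ((uint16_t) (uint8_t) ((v) >> (i)))

    static uint32_t
    bilinear_blend (uint32_t tl, uint32_t tr, uint32_t bl, uint32_t br,
                    int distx, int disty)            /* both in 0..256 */
    {
        int idistx = 256 - distx;
        int idisty = 256 - disty;
        uint32_t r = 0;

        for (int i = 0; i < 32; i += 8)
        {
            uint32_t ft = GET8 (tl, i) * idistx + GET8 (tr, i) * distx;
            uint32_t fb = GET8 (bl, i) * idistx + GET8 (br, i) * distx;

            r |= (((ft * idisty + fb * disty) >> 16) & 0xff) << i;
        }

        return r;
    }
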
commit 446276c36fd336531745fc1427c4af2ccdbe9875
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:11:57 2009 -0400

    Rename RBmask/Gmask => rb_mask/g_mask in pixman-arm-neon.c

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 98f35e3..f88785c 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -1141,8 +1141,8 @@ neon_CompositeSrc_24_16 (
 		uint16_t *dstPtr = dstLine;
 		uint32_t *srcPtr = srcLine;
 		uint32_t count = width;
-		const uint32_t RBmask = 0x1F;
-		const uint32_t Gmask = 0x3F;
+		const uint32_t rb_mask = 0x1F;
+		const uint32_t g_mask = 0x3F;
 
 		// If you're going to complain about a goto, take a long hard look
 		// at the massive blocks of assembler this skips over.  ;-)
@@ -1268,13 +1268,13 @@ neon_CompositeSrc_24_16 (
 			uint32_t dstPixelsA;
 			uint32_t dstPixelsB;
 
-			dstPixelsA  = ((srcPixelA >>  3) & RBmask);
-			dstPixelsA |= ((srcPixelA >> 10) &  Gmask) << 5;
-			dstPixelsA |= ((srcPixelA >> 19) & RBmask) << 11;
+			dstPixelsA  = ((srcPixelA >>  3) & rb_mask);
+			dstPixelsA |= ((srcPixelA >> 10) &  g_mask) << 5;
+			dstPixelsA |= ((srcPixelA >> 19) & rb_mask) << 11;
 
-			dstPixelsB  = ((srcPixelB >>  3) & RBmask);
-			dstPixelsB |= ((srcPixelB >> 10) &  Gmask) << 5;
-			dstPixelsB |= ((srcPixelB >> 19) & RBmask) << 11;
+			dstPixelsB  = ((srcPixelB >>  3) & rb_mask);
+			dstPixelsB |= ((srcPixelB >> 10) &  g_mask) << 5;
+			dstPixelsB |= ((srcPixelB >> 19) & rb_mask) << 11;
 
 			// little-endian mode only
 			*((uint32_t*) dstPtr) = dstPixelsA | (dstPixelsB << 16);
@@ -1287,9 +1287,9 @@ neon_CompositeSrc_24_16 (
 
 			// ARM is really good at shift-then-ALU ops.
 			// This block should end up as three shift-ANDs and two shift-ORs.
-			uint32_t tmpBlue  = (srcPixel >>  3) & RBmask;
-			uint32_t tmpGreen = (srcPixel >> 10) & Gmask;
-			uint32_t tmpRed   = (srcPixel >> 19) & RBmask;
+			uint32_t tmpBlue  = (srcPixel >>  3) & rb_mask;
+			uint32_t tmpGreen = (srcPixel >> 10) & g_mask;
+			uint32_t tmpRed   = (srcPixel >> 19) & rb_mask;
 			uint16_t dstPixel = (tmpRed << 11) | (tmpGreen << 5) | tmpBlue;
 
 			*dstPtr++ = dstPixel;
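
The scalar tail loop renamed here is a plain x8r8g8b8 to r5g6b5 pack: keep the top 5/6/5 bits of each channel and re-pack them. A standalone version of the same arithmetic (pack_x888_to_0565() is an invented name):

    #include <stdint.h>

    static uint16_t
    pack_x888_to_0565 (uint32_t src)
    {
        const uint32_t rb_mask = 0x1f;        /* 5 bits for red and blue */
        const uint32_t g_mask  = 0x3f;        /* 6 bits for green        */

        uint32_t b = (src >>  3) & rb_mask;   /* blue:  source bits  7..3  */
        uint32_t g = (src >> 10) & g_mask;    /* green: source bits 15..10 */
        uint32_t r = (src >> 19) & rb_mask;   /* red:   source bits 23..19 */

        return (uint16_t) ((r << 11) | (g << 5) | b);
    }
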
commit 412b4b50f7bd8ac29e4c9b20e613154c1b5e371a
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:06:59 2009 -0400

    Use ALPHA_8 in pixman-image.c instead of Alpha

diff --git a/pixman/pixman-image.c b/pixman/pixman-image.c
index bc1bbc7..5c0a15f 100644
--- a/pixman/pixman-image.c
+++ b/pixman/pixman-image.c
@@ -30,8 +30,7 @@
 #include <assert.h>
 
 #include "pixman-private.h"
-
-#define Alpha(x) ((x) >> 24)
+#include "pixman-combine32.h"
 
 pixman_bool_t
 _pixman_init_gradient (gradient_t     *gradient,
@@ -568,7 +567,7 @@ _pixman_image_is_opaque (pixman_image_t *image)
 	break;
 	
     case SOLID:
-	if (Alpha (image->solid.color) != 0xff)
+	if (ALPHA_8 (image->solid.color) != 0xff)
             return FALSE;
         break;
     }
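
The removed local macro and its replacement come down to the same shift, so the
opacity test still just inspects the top byte of the premultiplied a8r8g8b8 solid
color (illustrative summary, not part of the patch):

/* Alpha (x)   was  ((x) >> 24)
 * ALPHA_8 (x) is the same shift expressed via A_SHIFT in the combine
 * headers, with A_SHIFT == 24 for 32-bit pixels.                      */
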
commit 887383b0adab89bcc131a9a28c4d60af9e4773d1
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:05:42 2009 -0400

    Uppercase a few more macros in pixman-combine.c.template

diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index c8a9586..030bbc9 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -1052,13 +1052,13 @@ combineConjointInPart (comp1_t a, comp1_t b)
     return DIV_UNc(b,a);     /* b/a */
 }
 
-#define GetComp(v,i)   ((comp2_t) (comp1_t) ((v) >> i))
+#define GET_COMP(v,i)   ((comp2_t) (comp1_t) ((v) >> i))
 
-#define Add(x,y,i,t)   ((t) = GetComp(x,i) + GetComp(y,i),              \
+#define ADD(x,y,i,t)   ((t) = GET_COMP(x,i) + GET_COMP(y,i),              \
                         (comp4_t) ((comp1_t) ((t) | (0 - ((t) >> G_SHIFT)))) << (i))
 
-#define FbGen(x,y,i,ax,ay,t,u,v) ((t) = (MUL_UNc(GetComp(y,i),ay,(u)) + \
-					 MUL_UNc(GetComp(x,i),ax,(v))), \
+#define GENERIC(x,y,i,ax,ay,t,u,v) ((t) = (MUL_UNc(GET_COMP(y,i),ay,(u)) + \
+					 MUL_UNc(GET_COMP(x,i),ax,(v))), \
 				  	 (comp4_t) ((comp1_t) ((t) |		\
 					 (0 - ((t) >> G_SHIFT)))) << (i))
 
@@ -1103,10 +1103,10 @@ combineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = MASK;
             break;
         }
-        m = FbGen (s,d,0,Fa,Fb,t, u, v);
-        n = FbGen (s,d,G_SHIFT,Fa,Fb,t, u, v);
-        o = FbGen (s,d,R_SHIFT,Fa,Fb,t, u, v);
-        p = FbGen (s,d,A_SHIFT,Fa,Fb,t, u, v);
+        m = GENERIC (s,d,0,Fa,Fb,t, u, v);
+        n = GENERIC (s,d,G_SHIFT,Fa,Fb,t, u, v);
+        o = GENERIC (s,d,R_SHIFT,Fa,Fb,t, u, v);
+        p = GENERIC (s,d,A_SHIFT,Fa,Fb,t, u, v);
         s = m|n|o|p;
 	*(dest + i) = s;
     }
@@ -1225,10 +1225,10 @@ combineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = MASK;
             break;
         }
-        m = FbGen (s,d,0,Fa,Fb,t, u, v);
-        n = FbGen (s,d,G_SHIFT,Fa,Fb,t, u, v);
-        o = FbGen (s,d,R_SHIFT,Fa,Fb,t, u, v);
-        p = FbGen (s,d,A_SHIFT,Fa,Fb,t, u, v);
+        m = GENERIC (s,d,0,Fa,Fb,t, u, v);
+        n = GENERIC (s,d,G_SHIFT,Fa,Fb,t, u, v);
+        o = GENERIC (s,d,R_SHIFT,Fa,Fb,t, u, v);
+        p = GENERIC (s,d,A_SHIFT,Fa,Fb,t, u, v);
         s = m|n|o|p;
 	*(dest + i) = s;
     }
@@ -1595,24 +1595,24 @@ combineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
         da = ~d >> A_SHIFT;
 
         if (sb <= da)
-            m = Add(s,d,0,t);
+            m = ADD(s,d,0,t);
         else
-            m = FbGen (s, d, 0, (da << G_SHIFT) / sb, MASK, t, u, v);
+            m = GENERIC (s, d, 0, (da << G_SHIFT) / sb, MASK, t, u, v);
 
         if (sg <= da)
-            n = Add(s,d,G_SHIFT,t);
+            n = ADD(s,d,G_SHIFT,t);
         else
-            n = FbGen (s, d, G_SHIFT, (da << G_SHIFT) / sg, MASK, t, u, v);
+            n = GENERIC (s, d, G_SHIFT, (da << G_SHIFT) / sg, MASK, t, u, v);
 
         if (sr <= da)
-            o = Add(s,d,R_SHIFT,t);
+            o = ADD(s,d,R_SHIFT,t);
         else
-            o = FbGen (s, d, R_SHIFT, (da << G_SHIFT) / sr, MASK, t, u, v);
+            o = GENERIC (s, d, R_SHIFT, (da << G_SHIFT) / sr, MASK, t, u, v);
 
         if (sa <= da)
-            p = Add(s,d,A_SHIFT,t);
+            p = ADD(s,d,A_SHIFT,t);
         else
-            p = FbGen (s, d, A_SHIFT, (da << G_SHIFT) / sa, MASK, t, u, v);
+            p = GENERIC (s, d, A_SHIFT, (da << G_SHIFT) / sa, MASK, t, u, v);
 
 	*(dest + i) = m|n|o|p;
     }
@@ -1685,10 +1685,10 @@ combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = ~0;
             break;
         }
-        m = FbGen (s,d,0,GetComp(Fa,0),GetComp(Fb,0),t, u, v);
-        n = FbGen (s,d,G_SHIFT,GetComp(Fa,G_SHIFT),GetComp(Fb,G_SHIFT),t, u, v);
-        o = FbGen (s,d,R_SHIFT,GetComp(Fa,R_SHIFT),GetComp(Fb,R_SHIFT),t, u, v);
-        p = FbGen (s,d,A_SHIFT,GetComp(Fa,A_SHIFT),GetComp(Fb,A_SHIFT),t, u, v);
+        m = GENERIC (s,d,0,GET_COMP(Fa,0),GET_COMP(Fb,0),t, u, v);
+        n = GENERIC (s,d,G_SHIFT,GET_COMP(Fa,G_SHIFT),GET_COMP(Fb,G_SHIFT),t, u, v);
+        o = GENERIC (s,d,R_SHIFT,GET_COMP(Fa,R_SHIFT),GET_COMP(Fb,R_SHIFT),t, u, v);
+        p = GENERIC (s,d,A_SHIFT,GET_COMP(Fa,A_SHIFT),GET_COMP(Fb,A_SHIFT),t, u, v);
         s = m|n|o|p;
 	*(dest + i) = s;
     }
@@ -1817,10 +1817,10 @@ combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
             Fb = ~0;
             break;
         }
-        m = FbGen (s,d,0,GetComp(Fa,0),GetComp(Fb,0),t, u, v);
-        n = FbGen (s,d,G_SHIFT,GetComp(Fa,G_SHIFT),GetComp(Fb,G_SHIFT),t, u, v);
-        o = FbGen (s,d,R_SHIFT,GetComp(Fa,R_SHIFT),GetComp(Fb,R_SHIFT),t, u, v);
-        p = FbGen (s,d,A_SHIFT,GetComp(Fa,A_SHIFT),GetComp(Fb,A_SHIFT),t, u, v);
+        m = GENERIC (s,d,0,GET_COMP(Fa,0),GET_COMP(Fb,0),t, u, v);
+        n = GENERIC (s,d,G_SHIFT,GET_COMP(Fa,G_SHIFT),GET_COMP(Fb,G_SHIFT),t, u, v);
+        o = GENERIC (s,d,R_SHIFT,GET_COMP(Fa,R_SHIFT),GET_COMP(Fb,R_SHIFT),t, u, v);
+        p = GENERIC (s,d,A_SHIFT,GET_COMP(Fa,A_SHIFT),GET_COMP(Fb,A_SHIFT),t, u, v);
         s = m|n|o|p;
 	*(dest + i) = s;
     }
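
To unpack the renamed macros: GET_COMP pulls one component out of a packed pixel,
ADD sums two components with saturation, and GENERIC is the general Fa*s + Fb*d
per-component blend.  A rough C rendering for 8-bit components (illustrative only;
the real macros work in terms of comp1_t/comp2_t/comp4_t and the MUL_UNc helper):

static uint32_t
generic_channel (uint32_t x, uint32_t y, int i, uint32_t ax, uint32_t ay)
{
    uint32_t xc = (x >> i) & 0xff;            /* GET_COMP (x, i) */
    uint32_t yc = (y >> i) & 0xff;            /* GET_COMP (y, i) */
    uint32_t u, v, t;

    /* MUL_UNc: multiply an 8-bit component by an 8-bit factor and
     * renormalize the product back to the 0..255 range              */
    v = yc * ay + 0x80;  v = (v + (v >> 8)) >> 8;
    u = xc * ax + 0x80;  u = (u + (u >> 8)) >> 8;

    t = u + v;
    if (t > 0xff)      /* what the (t | (0 - (t >> G_SHIFT))) cast achieves */
        t = 0xff;

    return t << i;
}
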
commit 4153361c52f332bce9e9cc32adf1e01064014e15
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jul 5 00:02:45 2009 -0400

    Rename macros for non-separable blend modes
    
    Lum => LUM
    Sat => SAT
    Min => CH_MIN
    Max => CH_MAX

diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index 45dc170..c8a9586 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -668,10 +668,10 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  * to operate in Hsl space, with Cmax, Cmid, Cmin referring to the max, mid 
  * and min value of the red, green and blue components.
  * 
- * Lum (C) = 0.3 × Cred + 0.59 × Cgreen + 0.11 × Cblue
+ * LUM (C) = 0.3 × Cred + 0.59 × Cgreen + 0.11 × Cblue
  *
  * ClipColor (C):
- *   l = Lum (C)
+ *   l = LUM (C)
  *   min = Cmin
  *   max = Cmax
  *   if n < 0.0
@@ -680,14 +680,14 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  *     C = l + ( ( ( C – l ) × ( 1 – l ) ) ⁄ ( max – l ) )
  *   return C
  *
- * SetLum (C, l):
- *   d = l – Lum (C)
+ * set_lum (C, l):
+ *   d = l – LUM (C)
  *   C += d
  *   return ClipColor (C)
  *
- * Sat (C) = Max (C) - Min (C)
+ * SAT (C) = CH_MAX (C) - CH_MIN (C)
  *
- * SetSat (C, s):
+ * set_sat (C, s):
  *  if Cmax > Cmin
  *    Cmid = ( ( ( Cmid – Cmin ) × s ) ⁄ ( Cmax – Cmin ) )
  *    Cmax = s
@@ -698,9 +698,9 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  */
 
 /* For premultiplied colors, we need to know what happens when C is
- * multiplied by a real number. Lum and Sat are linear:
+ * multiplied by a real number. LUM and SAT are linear:
  *
- *    Lum (r × C) = r × Lum (C)		Sat (r * C) = r * Sat (C)
+ *    LUM (r × C) = r × LUM (C)		SAT (r * C) = r * SAT (C)
  *
  * If we extend ClipColor with an extra argument a and change
  *
@@ -716,25 +716,25 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  *
  * for positive r.
  *
- * Similarly, we can extend SetLum with an extra argument that is just passed
+ * Similarly, we can extend set_lum with an extra argument that is just passed
  * on to ClipColor:
  *
- *   r * SetLum ( C, l, a)
+ *   r * set_lum ( C, l, a)
  *
- *   = r × ClipColor ( C + l - Lum (C), a)
+ *   = r × ClipColor ( C + l - LUM (C), a)
  *
- *   = ClipColor ( r * C + r × l - r * Lum (C), r * a)
+ *   = ClipColor ( r * C + r × l - r * LUM (C), r * a)
  *
- *   = SetLum ( r * C, r * l, r * a)
+ *   = set_lum ( r * C, r * l, r * a)
  *
- * Finally, SetSat:
+ * Finally, set_sat:
  *
- *    r * SetSat (C, s) = SetSat (x * C, r * s)
+ *    r * set_sat (C, s) = set_sat (x * C, r * s)
  *
 * The above holds for all non-zero x, because the x's in the fraction for
  * C_mid cancel out. Specifically, it holds for x = r:
  *
- *    r * SetSat (C, s) = SetSat (rC, rs)
+ *    r * set_sat (C, s) = set_sat (rC, rs)
  *  
  */
 
@@ -744,36 +744,36 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
  *   Color:
  *
  *     a_s * a_d * B(s, d)
- *   = a_s * a_d * SetLum (S/a_s, Lum (D/a_d), 1)
- *   = SetLum (S * a_d, a_s * Lum (D), a_s * a_d)
+ *   = a_s * a_d * set_lum (S/a_s, LUM (D/a_d), 1)
+ *   = set_lum (S * a_d, a_s * LUM (D), a_s * a_d)
  *
  *
  *   Luminosity:
  *
  *     a_s * a_d * B(s, d)
- *   = a_s * a_d * SetLum (D/a_d, Lum(S/a_s), 1)
- *   = SetLum (a_s * D, a_d * Lum(S), a_s * a_d)
+ *   = a_s * a_d * set_lum (D/a_d, LUM(S/a_s), 1)
+ *   = set_lum (a_s * D, a_d * LUM(S), a_s * a_d)
  *
  *
  *   Saturation:
  *
  *     a_s * a_d * B(s, d)
- *   = a_s * a_d * SetLum (SetSat (D/a_d, Sat (S/a_s)), Lum (D/a_d), 1)
- *   = SetLum (a_s * a_d * SetSat (D/a_d, Sat (S/a_s)), a_s * Lum (D), a_s * a_d)
- *   = SetLum (SetSat (a_s * D, a_d * Sat (S), a_s * Lum (D), a_s * a_d))
+ *   = a_s * a_d * set_lum (set_sat (D/a_d, SAT (S/a_s)), LUM (D/a_d), 1)
+ *   = set_lum (a_s * a_d * set_sat (D/a_d, SAT (S/a_s)), a_s * LUM (D), a_s * a_d)
+ *   = set_lum (set_sat (a_s * D, a_d * SAT (S), a_s * LUM (D), a_s * a_d))
  *
  *   Hue:
  *
  *     a_s * a_d * B(s, d)
- *   = a_s * a_d * SetLum (SetSat (S/a_s, Sat (D/a_d)), Lum (D/a_d), 1)
- *   = a_s * a_d * SetLum (SetSat (a_d * S, a_s * Sat (D)), a_s * Lum (D), a_s * a_d)
+ *   = a_s * a_d * set_lum (set_sat (S/a_s, SAT (D/a_d)), LUM (D/a_d), 1)
+ *   = a_s * a_d * set_lum (set_sat (a_d * S, a_s * SAT (D)), a_s * LUM (D), a_s * a_d)
  *
  */
     
-#define Min(c) (c[0] < c[1] ? (c[0] < c[2] ? c[0] : c[2]) : (c[1] < c[2] ? c[1] : c[2]))
-#define Max(c) (c[0] > c[1] ? (c[0] > c[2] ? c[0] : c[2]) : (c[1] > c[2] ? c[1] : c[2]))
-#define Lum(c) ((c[0] * 30 + c[1] * 59 + c[2] * 11) / 100)
-#define Sat(c) (Max (c) - Min (c))
+#define CH_MIN(c) (c[0] < c[1] ? (c[0] < c[2] ? c[0] : c[2]) : (c[1] < c[2] ? c[1] : c[2]))
+#define CH_MAX(c) (c[0] > c[1] ? (c[0] > c[2] ? c[0] : c[2]) : (c[1] > c[2] ? c[1] : c[2]))
+#define LUM(c) ((c[0] * 30 + c[1] * 59 + c[2] * 11) / 100)
+#define SAT(c) (CH_MAX (c) - CH_MIN (c))
 
 #define PDF_NON_SEPARABLE_BLEND_MODE(name)					\
 static void								\
@@ -810,7 +810,7 @@ combine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
 }									
 
 static void
-SetLum (comp4_t dest[3], comp4_t src[3], comp4_t sa, comp4_t lum)
+set_lum (comp4_t dest[3], comp4_t src[3], comp4_t sa, comp4_t lum)
 {
   double a, l, min, max;
   double tmp[3];
@@ -820,15 +820,15 @@ SetLum (comp4_t dest[3], comp4_t src[3], comp4_t sa, comp4_t lum)
   tmp[0] = src[0] * (1.0 / MASK);
   tmp[1] = src[1] * (1.0 / MASK);
   tmp[2] = src[2] * (1.0 / MASK);
-  l = l - Lum (tmp);
+  l = l - LUM (tmp);
   tmp[0] += l;
   tmp[1] += l;
   tmp[2] += l;
 
   /* ClipColor */
-  l = Lum (tmp);
-  min = Min (tmp);
-  max = Max (tmp);
+  l = LUM (tmp);
+  min = CH_MIN (tmp);
+  max = CH_MAX (tmp);
 
   if (min < 0) {
     tmp[0] = l + (tmp[0] - l) * l / (l - min);
@@ -846,7 +846,7 @@ SetLum (comp4_t dest[3], comp4_t src[3], comp4_t sa, comp4_t lum)
 }
 
 static void
-SetSat (comp4_t dest[3], comp4_t src[3], comp4_t sat)
+set_sat (comp4_t dest[3], comp4_t src[3], comp4_t sat)
 {
   int id[3];
   comp4_t min, max;
@@ -895,7 +895,7 @@ SetSat (comp4_t dest[3], comp4_t src[3], comp4_t sat)
 
 /*
  * Hue:
- * B(Cb, Cs) = SetLum (SetSat (Cs, Sat (Cb)), Lum (Cb))
+ * B(Cb, Cs) = set_lum (set_sat (Cs, SAT (Cb)), LUM (Cb))
  */
 static inline void
 blend_HslHue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
@@ -903,15 +903,15 @@ blend_HslHue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa
     c[0] = sc[0] * da;
     c[1] = sc[1] * da;
     c[2] = sc[2] * da;
-    SetSat (c, c, Sat (dc) * sa);
-    SetLum (c, c, sa * da, Lum (dc) * sa);
+    set_sat (c, c, SAT (dc) * sa);
+    set_lum (c, c, sa * da, LUM (dc) * sa);
 }
 
 PDF_NON_SEPARABLE_BLEND_MODE (HslHue)
 
 /*
  * Saturation:
- * B(Cb, Cs) = SetLum (SetSat (Cb, Sat (Cs)), Lum (Cb))
+ * B(Cb, Cs) = set_lum (set_sat (Cb, SAT (Cs)), LUM (Cb))
  */
 static inline void
 blend_HslSaturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
@@ -919,15 +919,15 @@ blend_HslSaturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], com
     c[0] = dc[0] * sa;
     c[1] = dc[1] * sa;
     c[2] = dc[2] * sa;
-    SetSat (c, c, Sat (sc) * da);
-    SetLum (c, c, sa * da, Lum (dc) * sa);
+    set_sat (c, c, SAT (sc) * da);
+    set_lum (c, c, sa * da, LUM (dc) * sa);
 }
 
 PDF_NON_SEPARABLE_BLEND_MODE (HslSaturation)
 
 /*
  * Color:
- * B(Cb, Cs) = SetLum (Cs, Lum (Cb))
+ * B(Cb, Cs) = set_lum (Cs, LUM (Cb))
  */
 static inline void
 blend_HslColor (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
@@ -935,14 +935,14 @@ blend_HslColor (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t
     c[0] = sc[0] * da;
     c[1] = sc[1] * da;
     c[2] = sc[2] * da;
-    SetLum (c, c, sa * da, Lum (dc) * sa);
+    set_lum (c, c, sa * da, LUM (dc) * sa);
 }
 
 PDF_NON_SEPARABLE_BLEND_MODE (HslColor)
 
 /*
  * Luminosity:
- * B(Cb, Cs) = SetLum (Cb, Lum (Cs))
+ * B(Cb, Cs) = set_lum (Cb, LUM (Cs))
  */
 static inline void
 blend_HslLuminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
@@ -950,15 +950,15 @@ blend_HslLuminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], com
     c[0] = dc[0] * sa;
     c[1] = dc[1] * sa;
     c[2] = dc[2] * sa;
-    SetLum (c, c, sa * da, Lum (sc) * da);
+    set_lum (c, c, sa * da, LUM (sc) * da);
 }
 
 PDF_NON_SEPARABLE_BLEND_MODE (HslLuminosity)
 
-#undef Sat
-#undef Lum
-#undef Max
-#undef Min
+#undef SAT
+#undef LUM
+#undef CH_MAX
+#undef CH_MIN
 #undef PDF_NON_SEPARABLE_BLEND_MODE
 
 /* Overlay
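
As a quick sanity check that the uppercased channel macros still match the comment
block above (illustrative numbers only):

/* For an 8-bit color c = { r, g, b } = { 200, 100, 50 }:
 *
 *   CH_MIN (c) = 50
 *   CH_MAX (c) = 200
 *   SAT (c)    = CH_MAX (c) - CH_MIN (c)         = 150
 *   LUM (c)    = (200*30 + 100*59 + 50*11) / 100 = 124
 *
 * i.e. the integer form of LUM (C) = 0.3*Cred + 0.59*Cgreen + 0.11*Cblue.
 */
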
commit 68405c326db4cd087bdb6290ae42953a98b81838
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sat Jul 4 23:45:01 2009 -0400

    Rename some macros in pixman-combine.c.template
    
    s/Combine([AB])([a-zA-Z]+)([^a-zA-Z])/COMBINE_$1_\U$2$3/g;
    s/CombineA/COMBINE_A/g;
    s/CombineB/COMBINE_B/g;
    s/CombineXor/COMBINE_XOR/g;
    s/CombineClear/COMBINE_CLEAR/g;

diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index 55a3947..45dc170 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -987,19 +987,19 @@ PDF_NON_SEPARABLE_BLEND_MODE (HslLuminosity)
 
 */
 
-#define CombineAOut 1
-#define CombineAIn  2
-#define CombineBOut 4
-#define CombineBIn  8
-
-#define CombineClear	0
-#define CombineA	(CombineAOut|CombineAIn)
-#define CombineB	(CombineBOut|CombineBIn)
-#define CombineAOver	(CombineAOut|CombineBOut|CombineAIn)
-#define CombineBOver	(CombineAOut|CombineBOut|CombineBIn)
-#define CombineAAtop	(CombineBOut|CombineAIn)
-#define CombineBAtop	(CombineAOut|CombineBIn)
-#define CombineXor	(CombineAOut|CombineBOut)
+#define COMBINE_A_OUT 1
+#define COMBINE_A_IN  2
+#define COMBINE_B_OUT 4
+#define COMBINE_B_IN  8
+
+#define COMBINE_CLEAR	0
+#define COMBINE_A	(COMBINE_A_OUT|COMBINE_A_IN)
+#define COMBINE_B	(COMBINE_B_OUT|COMBINE_B_IN)
+#define COMBINE_A_OVER	(COMBINE_A_OUT|COMBINE_B_OUT|COMBINE_A_IN)
+#define COMBINE_B_OVER	(COMBINE_A_OUT|COMBINE_B_OUT|COMBINE_B_IN)
+#define COMBINE_A_ATOP	(COMBINE_B_OUT|COMBINE_A_IN)
+#define COMBINE_B_ATOP	(COMBINE_A_OUT|COMBINE_B_IN)
+#define COMBINE_XOR	(COMBINE_A_OUT|COMBINE_B_OUT)
 
 /* portion covered by a but not b */
 static comp1_t
@@ -1074,32 +1074,32 @@ combineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
         comp1_t sa = s >> A_SHIFT;
         comp1_t da = d >> A_SHIFT;
 
-        switch (combine & CombineA) {
+        switch (combine & COMBINE_A) {
         default:
             Fa = 0;
             break;
-        case CombineAOut:
+        case COMBINE_A_OUT:
             Fa = combineDisjointOutPart (sa, da);
             break;
-        case CombineAIn:
+        case COMBINE_A_IN:
             Fa = combineDisjointInPart (sa, da);
             break;
-        case CombineA:
+        case COMBINE_A:
             Fa = MASK;
             break;
         }
 
-        switch (combine & CombineB) {
+        switch (combine & COMBINE_B) {
         default:
             Fb = 0;
             break;
-        case CombineBOut:
+        case COMBINE_B_OUT:
             Fb = combineDisjointOutPart (da, sa);
             break;
-        case CombineBIn:
+        case COMBINE_B_IN:
             Fb = combineDisjointInPart (da, sa);
             break;
-        case CombineB:
+        case COMBINE_B:
             Fb = MASK;
             break;
         }
@@ -1139,49 +1139,49 @@ static void
 combineDisjointInU (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, CombineAIn);
+    combineDisjointGeneralU (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
 combineDisjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, CombineBIn);
+    combineDisjointGeneralU (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
 combineDisjointOutU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, CombineAOut);
+    combineDisjointGeneralU (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
 combineDisjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, CombineBOut);
+    combineDisjointGeneralU (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
 combineDisjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, CombineAAtop);
+    combineDisjointGeneralU (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
 combineDisjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, CombineBAtop);
+    combineDisjointGeneralU (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
 combineDisjointXorU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralU (dest, src, mask, width, CombineXor);
+    combineDisjointGeneralU (dest, src, mask, width, COMBINE_XOR);
 }
 
 static void
@@ -1196,32 +1196,32 @@ combineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
         comp1_t sa = s >> A_SHIFT;
         comp1_t da = d >> A_SHIFT;
 
-        switch (combine & CombineA) {
+        switch (combine & COMBINE_A) {
         default:
             Fa = 0;
             break;
-        case CombineAOut:
+        case COMBINE_A_OUT:
             Fa = combineConjointOutPart (sa, da);
             break;
-        case CombineAIn:
+        case COMBINE_A_IN:
             Fa = combineConjointInPart (sa, da);
             break;
-        case CombineA:
+        case COMBINE_A:
             Fa = MASK;
             break;
         }
 
-        switch (combine & CombineB) {
+        switch (combine & COMBINE_B) {
         default:
             Fb = 0;
             break;
-        case CombineBOut:
+        case COMBINE_B_OUT:
             Fb = combineConjointOutPart (da, sa);
             break;
-        case CombineBIn:
+        case COMBINE_B_IN:
             Fb = combineConjointInPart (da, sa);
             break;
-        case CombineB:
+        case COMBINE_B:
             Fb = MASK;
             break;
         }
@@ -1238,7 +1238,7 @@ static void
 combineConjointOverU (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineAOver);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 
@@ -1246,7 +1246,7 @@ static void
 combineConjointOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineBOver);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_OVER);
 }
 
 
@@ -1254,7 +1254,7 @@ static void
 combineConjointInU (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineAIn);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_IN);
 }
 
 
@@ -1262,42 +1262,42 @@ static void
 combineConjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineBIn);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
 combineConjointOutU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineAOut);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
 combineConjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineBOut);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
 combineConjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineAAtop);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
 combineConjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineBAtop);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
 combineConjointXorU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralU (dest, src, mask, width, CombineXor);
+    combineConjointGeneralU (dest, src, mask, width, COMBINE_XOR);
 }
 
 /********************************************************************************/
@@ -1640,48 +1640,48 @@ combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
 
 	sa = m;
 
-        switch (combine & CombineA) {
+        switch (combine & COMBINE_A) {
         default:
             Fa = 0;
             break;
-        case CombineAOut:
+        case COMBINE_A_OUT:
             m = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> 0), da);
             n = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
             o = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
             p = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
-        case CombineAIn:
+        case COMBINE_A_IN:
             m = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> 0), da);
             n = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
             o = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
             p = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
-        case CombineA:
+        case COMBINE_A:
             Fa = ~0;
             break;
         }
 
-        switch (combine & CombineB) {
+        switch (combine & COMBINE_B) {
         default:
             Fb = 0;
             break;
-        case CombineBOut:
+        case COMBINE_B_OUT:
             m = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> 0));
             n = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
             o = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
             p = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
-        case CombineBIn:
+        case COMBINE_B_IN:
             m = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> 0));
             n = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
             o = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
             p = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
-        case CombineB:
+        case COMBINE_B:
             Fb = ~0;
             break;
         }
@@ -1698,56 +1698,56 @@ static void
 combineDisjointOverC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineAOver);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 static void
 combineDisjointInC (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineAIn);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
 combineDisjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineBIn);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
 combineDisjointOutC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineAOut);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
 combineDisjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineBOut);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
 combineDisjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineAAtop);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
 combineDisjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineBAtop);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
 combineDisjointXorC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineDisjointGeneralC (dest, src, mask, width, CombineXor);
+    combineDisjointGeneralC (dest, src, mask, width, COMBINE_XOR);
 }
 
 static void
@@ -1772,48 +1772,48 @@ combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask,
 
         sa = m;
 
-        switch (combine & CombineA) {
+        switch (combine & COMBINE_A) {
         default:
             Fa = 0;
             break;
-        case CombineAOut:
+        case COMBINE_A_OUT:
             m = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> 0), da);
             n = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
             o = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
             p = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
-        case CombineAIn:
+        case COMBINE_A_IN:
             m = (comp4_t)combineConjointInPart ((comp1_t) (sa >> 0), da);
             n = (comp4_t)combineConjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
             o = (comp4_t)combineConjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
             p = (comp4_t)combineConjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
-        case CombineA:
+        case COMBINE_A:
             Fa = ~0;
             break;
         }
 
-        switch (combine & CombineB) {
+        switch (combine & COMBINE_B) {
         default:
             Fb = 0;
             break;
-        case CombineBOut:
+        case COMBINE_B_OUT:
             m = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> 0));
             n = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
             o = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
             p = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
-        case CombineBIn:
+        case COMBINE_B_IN:
             m = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> 0));
             n = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
             o = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
             p = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
-        case CombineB:
+        case COMBINE_B:
             Fb = ~0;
             break;
         }
@@ -1830,63 +1830,63 @@ static void
 combineConjointOverC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineAOver);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_OVER);
 }
 
 static void
 combineConjointOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineBOver);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_OVER);
 }
 
 static void
 combineConjointInC (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineAIn);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_IN);
 }
 
 static void
 combineConjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineBIn);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_IN);
 }
 
 static void
 combineConjointOutC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineAOut);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_OUT);
 }
 
 static void
 combineConjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineBOut);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_OUT);
 }
 
 static void
 combineConjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineAAtop);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_A_ATOP);
 }
 
 static void
 combineConjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineBAtop);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_B_ATOP);
 }
 
 static void
 combineConjointXorC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    combineConjointGeneralC (dest, src, mask, width, CombineXor);
+    combineConjointGeneralC (dest, src, mask, width, COMBINE_XOR);
 }
 
 void
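
An illustrative note on the renamed flags (not part of the patch): they are a four-bit
decomposition of a Porter-Duff operator into its source (A) and destination (B)
contributions, for example:

/* COMBINE_A_OVER = COMBINE_A_OUT | COMBINE_B_OUT | COMBINE_A_IN
 *
 * In combineDisjointGeneralU () / combineConjointGeneralU () above, OVER
 * therefore selects
 *
 *   Fa = MASK                        (combine & COMBINE_A == COMBINE_A)
 *   Fb = combine*OutPart (da, sa)    (combine & COMBINE_B == COMBINE_B_OUT)
 *
 * after which each channel is blended as GENERIC (s, d, shift, Fa, Fb, ...).
 */
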
commit 835520b28ff1412bd9b00460a107e72c9ea21e35
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sat Jul 4 23:24:27 2009 -0400

    Rename U{no}mask => U_{no_}mask in pixman-vmx.c

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 94c31c8..67fbbe9 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -217,7 +217,7 @@ over (vector unsigned int src, vector unsigned int srca,
         vec_st ((vector unsigned int) tmp1, 0, dest );
 
 static void
-vmxCombineOverUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineOverU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -250,7 +250,7 @@ vmxCombineOverUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOverUmask (uint32_t *dest,
+vmxCombineOverU_mask (uint32_t *dest,
                      const uint32_t *src,
                      const uint32_t *mask,
                      int width)
@@ -297,13 +297,13 @@ vmxCombineOverU(pixman_implementation_t *imp, pixman_op_t op,
                 int width)
 {
     if (mask)
-        vmxCombineOverUmask(dest, src, mask, width);
+        vmxCombineOverU_mask(dest, src, mask, width);
     else
-        vmxCombineOverUnomask(dest, src, width);
+        vmxCombineOverU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineOverReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineOverReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -336,7 +336,7 @@ vmxCombineOverReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOverReverseUmask (uint32_t *dest,
+vmxCombineOverReverseU_mask (uint32_t *dest,
                             const uint32_t *src,
                             const uint32_t *mask,
                             int width)
@@ -381,13 +381,13 @@ vmxCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
                         const uint32_t *mask, int width)
 {
     if (mask)
-        vmxCombineOverReverseUmask(dest, src, mask, width);
+        vmxCombineOverReverseU_mask(dest, src, mask, width);
     else
-        vmxCombineOverReverseUnomask(dest, src, width);
+        vmxCombineOverReverseU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineInUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineInU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -419,7 +419,7 @@ vmxCombineInUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineInUmask (uint32_t *dest,
+vmxCombineInU_mask (uint32_t *dest,
                    const uint32_t *src,
                    const uint32_t *mask,
                    int width)
@@ -463,13 +463,13 @@ vmxCombineInU (pixman_implementation_t *imp, pixman_op_t op,
                int width)
 {
     if (mask)
-        vmxCombineInUmask(dest, src, mask, width);
+        vmxCombineInU_mask(dest, src, mask, width);
     else
-        vmxCombineInUnomask(dest, src, width);
+        vmxCombineInU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineInReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineInReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -500,7 +500,7 @@ vmxCombineInReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineInReverseUmask (uint32_t *dest,
+vmxCombineInReverseU_mask (uint32_t *dest,
                           const uint32_t *src,
                           const uint32_t *mask,
                           int width)
@@ -545,13 +545,13 @@ vmxCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
                       const uint32_t *mask, int width)
 {
     if (mask)
-        vmxCombineInReverseUmask(dest, src, mask, width);
+        vmxCombineInReverseU_mask(dest, src, mask, width);
     else
-        vmxCombineInReverseUnomask(dest, src, width);
+        vmxCombineInReverseU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineOutUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineOutU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -582,7 +582,7 @@ vmxCombineOutUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOutUmask (uint32_t *dest,
+vmxCombineOutU_mask (uint32_t *dest,
                     const uint32_t *src,
                     const uint32_t *mask,
                     int width)
@@ -626,13 +626,13 @@ vmxCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
                 int width)
 {
     if (mask)
-        vmxCombineOutUmask(dest, src, mask, width);
+        vmxCombineOutU_mask(dest, src, mask, width);
     else
-        vmxCombineOutUnomask(dest, src, width);
+        vmxCombineOutU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineOutReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineOutReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -663,7 +663,7 @@ vmxCombineOutReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineOutReverseUmask (uint32_t *dest,
+vmxCombineOutReverseU_mask (uint32_t *dest,
                            const uint32_t *src,
                            const uint32_t *mask,
                            int width)
@@ -710,13 +710,13 @@ vmxCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
                        int width)
 {
     if (mask)
-        vmxCombineOutReverseUmask(dest, src, mask, width);
+        vmxCombineOutReverseU_mask(dest, src, mask, width);
     else
-        vmxCombineOutReverseUnomask(dest, src, width);
+        vmxCombineOutReverseU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineAtopUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineAtopU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -751,7 +751,7 @@ vmxCombineAtopUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineAtopUmask (uint32_t *dest,
+vmxCombineAtopU_mask (uint32_t *dest,
                      const uint32_t *src,
                      const uint32_t *mask,
                      int width)
@@ -802,13 +802,13 @@ vmxCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
                  int width)
 {
     if (mask)
-        vmxCombineAtopUmask(dest, src, mask, width);
+        vmxCombineAtopU_mask(dest, src, mask, width);
     else
-        vmxCombineAtopUnomask(dest, src, width);
+        vmxCombineAtopU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineAtopReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineAtopReverseU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -843,7 +843,7 @@ vmxCombineAtopReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineAtopReverseUmask (uint32_t *dest,
+vmxCombineAtopReverseU_mask (uint32_t *dest,
                             const uint32_t *src,
                             const uint32_t *mask,
                             int width)
@@ -894,13 +894,13 @@ vmxCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
                         int width)
 {
     if (mask)
-        vmxCombineAtopReverseUmask(dest, src, mask, width);
+        vmxCombineAtopReverseU_mask(dest, src, mask, width);
     else
-        vmxCombineAtopReverseUnomask(dest, src, width);
+        vmxCombineAtopReverseU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineXorUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineXorU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -935,7 +935,7 @@ vmxCombineXorUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineXorUmask (uint32_t *dest,
+vmxCombineXorU_mask (uint32_t *dest,
                     const uint32_t *src,
                     const uint32_t *mask,
                     int width)
@@ -986,13 +986,13 @@ vmxCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
                 int width)
 {
     if (mask)
-        vmxCombineXorUmask(dest, src, mask, width);
+        vmxCombineXorU_mask(dest, src, mask, width);
     else
-        vmxCombineXorUnomask(dest, src, width);
+        vmxCombineXorU_no_mask(dest, src, width);
 }
 
 static void
-vmxCombineAddUnomask (uint32_t *dest, const uint32_t *src, int width)
+vmxCombineAddU_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     int i;
     vector unsigned int  vdest, vsrc;
@@ -1022,7 +1022,7 @@ vmxCombineAddUnomask (uint32_t *dest, const uint32_t *src, int width)
 }
 
 static void
-vmxCombineAddUmask (uint32_t *dest,
+vmxCombineAddU_mask (uint32_t *dest,
                     const uint32_t *src,
                     const uint32_t *mask,
                     int width)
@@ -1068,9 +1068,9 @@ vmxCombineAddU (pixman_implementation_t *imp, pixman_op_t op,
                 int width)
 {
     if (mask)
-        vmxCombineAddUmask(dest, src, mask, width);
+        vmxCombineAddU_mask(dest, src, mask, width);
     else
-        vmxCombineAddUnomask(dest, src, width);
+        vmxCombineAddU_no_mask(dest, src, width);
 }
 
 static void
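
The shape the renaming makes easier to see is the same for every operator in the
file: the public combiner picks a masked or an unmasked worker once per call instead
of testing the mask inside the pixel loop.  A sketch with a hypothetical operator
name (Xxx stands in for Over, In, Out, Atop, Xor or Add):

static void
vmxCombineXxxU (pixman_implementation_t *imp, pixman_op_t op,
                uint32_t *dest, const uint32_t *src,
                const uint32_t *mask, int width)
{
    if (mask)
        vmxCombineXxxU_mask (dest, src, mask, width);     /* per-pixel mask    */
    else
        vmxCombineXxxU_no_mask (dest, src, width);        /* no-mask fast path */
}
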
commit f9bdd1a82c7629a360109bdf4519c73ba5a99225
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sat Jul 4 23:13:55 2009 -0400

    Change name fbComposeGetStart to PIXMAN_IMAGE_GET_LINE.
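
For readers who only know the old name: the macro is only renamed here, not
redefined.  Approximately (a sketch of the existing definition, which is not part of
this diff), it computes the stride in units of `type' and a typed pointer to pixel
(x, y); `mul' is the number of `type' units per pixel, which is why the 0888 fast
paths below pass 3 together with uint8_t:

#define PIXMAN_IMAGE_GET_LINE(image, x, y, type, out_stride, line, mul)           \
    do {                                                                          \
        uint32_t *bits__   = (image)->bits.bits;                                  \
        int       stride__ = (image)->bits.rowstride;   /* in uint32_t units */   \
                                                                                  \
        (out_stride) = stride__ * (int) sizeof (uint32_t) / (int) sizeof (type);  \
        (line) = ((type *) bits__) + (out_stride) * (y) + (mul) * (x);            \
    } while (0)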

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index c1a2cb4..98f35e3 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -144,8 +144,8 @@ neon_CompositeAdd_8000_8000 (
     int dstStride, srcStride;
     uint16_t    w;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     if (width>=8)
     {
@@ -294,8 +294,8 @@ neon_composite_over_8888_8888 (
     int	dstStride, srcStride;
     uint32_t	w;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     if (width>=8)
     {
@@ -457,8 +457,8 @@ neon_composite_over_8888_n_8888 (
     uint32_t	w;
     uint8x8_t mask_alpha;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask_alpha = vdup_n_u8((mask) >> 24);
@@ -670,8 +670,8 @@ neon_CompositeOver_n_8_8888 (
     sval8.val[2]=vdup_lane_u8(sval2,2);
     sval8.val[3]=vdup_lane_u8(sval2,3);
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     if (width>=8)
     {
@@ -861,8 +861,8 @@ neon_CompositeAdd_8888_8_8 (
     uint32_t    src;
     uint8x8_t   sa;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = vdup_n_u8((src) >> 24);
 
@@ -980,8 +980,8 @@ neon_CompositeSrc_16_16 (
 		return;
 
 	/* We simply copy 16-bit-aligned pixels from one place to another. */
-	fbComposeGetStart (src_image, src_x, src_y, uint16_t, srcStride, srcLine, 1);
-	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, srcStride, srcLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
 	/* Preload the first input scanline */
 	{
@@ -1108,8 +1108,8 @@ neon_CompositeSrc_24_16 (
 		return;
 
 	/* We simply copy pixels from one place to another, assuming that the source's alpha is opaque. */
-	fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
 	/* Preload the first input scanline */
 	{
@@ -1754,8 +1754,8 @@ neon_CompositeOver_n_8_0565 (
 		return;
 	}
 
-	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-	fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
@@ -1908,7 +1908,7 @@ neon_CompositeOver_n_0565 (
 		return;
 	}
 
-	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
@@ -2044,8 +2044,8 @@ neon_CompositeOver_8888_0565 (
 		return;
 	}
 
-	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-	fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+	PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
diff --git a/pixman/pixman-arm-simd.c b/pixman/pixman-arm-simd.c
index 7a5b345..71689fe 100644
--- a/pixman/pixman-arm-simd.c
+++ b/pixman/pixman-arm-simd.c
@@ -51,8 +51,8 @@ arm_CompositeAdd_8000_8000 (
     uint16_t	w;
     uint8_t	s, d;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -123,8 +123,8 @@ arm_composite_over_8888_8888 (
     uint32_t upper_component_mask = 0xff00ff00;
     uint32_t alpha_mask = 0xff;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -216,8 +216,8 @@ arm_composite_over_8888_n_8888 (
     uint32_t component_half = 0x800080;
     uint32_t alpha_mask = 0xff;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = (mask) >> 24;
@@ -336,8 +336,8 @@ arm_CompositeOver_n_8_8888 (
     uint32_t src_hi = (src >> 8) & component_mask;
     uint32_t src_lo = src & component_mask;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index e519078..db8a51c 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -122,9 +122,9 @@ fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     uint32_t s, d;
     uint16_t w;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -183,8 +183,8 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 
     srca = src >> 24;
 
-    fbComposeGetStart (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (iMask, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (iMask, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     if (srca == 0xff) {
 	while (height--)
@@ -261,8 +261,8 @@ fast_CompositeIn_8_8 (pixman_implementation_t *imp,
     uint8_t	s;
     uint16_t	t;
 
-    fbComposeGetStart (iSrc, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (iSrc, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -315,8 +315,8 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -373,8 +373,8 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -439,8 +439,8 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -502,8 +502,8 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -569,8 +569,8 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 
     src16 = CONVERT_8888_TO_0565(src);
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -634,8 +634,8 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
     uint8_t	a;
     uint16_t	w;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -680,8 +680,8 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -731,8 +731,8 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -782,8 +782,8 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -824,8 +824,8 @@ fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
     uint8_t	s, d;
     uint16_t	t;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -874,8 +874,8 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
     uint16_t	w;
     uint32_t	s, d;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -925,8 +925,8 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     uint32_t	src;
     uint8_t	sa;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = (src >> 24);
 
@@ -1012,8 +1012,8 @@ fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
     int		 dstStride, srcStride;
     uint32_t	 n_bytes = width * sizeof (uint32_t);
 
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, src, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, src, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
 
     while (height--)
     {
@@ -1094,10 +1094,10 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
     int             i, j;
     pixman_vector_t v;
     
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
     /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
      * transformed from destination space to source space */
-    fbComposeGetStart (src_image, 0, 0, uint32_t, srcStride, src, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, srcStride, src, 1);
     
     /* reference point is the center of the pixel */
     v.vector[0] = pixman_int_to_fixed(src_x) + pixman_fixed_1 / 2;
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index 9f4551f..48bb3e3 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -946,7 +946,7 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1025,7 +1025,7 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1112,8 +1112,8 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     vsrc = load8888(src);
     vsrca = expand_alpha(vsrc);
@@ -1212,8 +1212,8 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
@@ -1297,8 +1297,8 @@ mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
@@ -1431,8 +1431,8 @@ mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -1482,8 +1482,8 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
@@ -1595,8 +1595,8 @@ mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1871,8 +1871,8 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1997,8 +1997,8 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -2121,8 +2121,8 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
@@ -2243,8 +2243,8 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
@@ -2350,8 +2350,8 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -2452,8 +2452,8 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -2533,8 +2533,8 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
     int	srcStride, dstStride;
     uint16_t	w;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -2601,8 +2601,8 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -2681,8 +2681,8 @@ mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -2753,8 +2753,8 @@ mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -2974,9 +2974,9 @@ mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     int		 srcStride, maskStride, dstStride;
     uint16_t w;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman-private.h b/pixman/pixman-private.h
index 1c238df..1dc68da 100644
--- a/pixman/pixman-private.h
+++ b/pixman/pixman-private.h
@@ -256,7 +256,7 @@ uint32_t
 _pixman_image_get_solid (pixman_image_t *image,
 			pixman_format_code_t format);
 
-#define fbComposeGetStart(pict,x,y,type,out_stride,line,mul) do {	\
+#define PIXMAN_IMAGE_GET_LINE(pict,x,y,type,out_stride,line,mul) do {	\
 	uint32_t	*__bits__;					\
 	int		__stride__;					\
 									\
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index 3ebabe3..d8fb187 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -2523,7 +2523,7 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2610,7 +2610,7 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
     if (src == 0)
         return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2705,8 +2705,8 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     xmmSrc = _mm_unpacklo_epi8 (createMask_2x32_128 (src, src), _mm_setzero_si128 ());
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2833,8 +2833,8 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmAlphaLo, xmmAlphaHi;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     xmmMask = createMask_16_128 (mask >> 24);
@@ -2945,8 +2945,8 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     xmmMask = createMask_16_128 (mask >> 24);
@@ -3052,8 +3052,8 @@ sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
     uint32_t	*dstLine, *dst;
     uint32_t	*srcLine, *src;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     dst = dstLine;
     src = srcLine;
@@ -3106,8 +3106,8 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -3230,8 +3230,8 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     xmmDef = createMask_2x32_128 (src, src);
     xmmSrc = expandPixel_32_1x128 (src);
@@ -3510,8 +3510,8 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
         return;
     }
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     xmmDef = createMask_2x32_128 (src, src);
     xmmSrc = expandPixel_32_1x128 (src);
@@ -3643,8 +3643,8 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -3785,8 +3785,8 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -3919,8 +3919,8 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
     __m128i xmmSrcLo, xmmSrcHi;
     __m128i xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -4043,8 +4043,8 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     if (src == 0)
         return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -4184,8 +4184,8 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -4285,8 +4285,8 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -4378,8 +4378,8 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -4478,8 +4478,8 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
     uint16_t	w;
     uint16_t	t;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -4543,8 +4543,8 @@ sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
     uint32_t	*srcLine, *src;
     int	dstStride, srcStride;
 
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -4754,9 +4754,9 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 608334c..94c31c8 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -1504,7 +1504,7 @@ vmx_CompositeOver_n_8888 (pixman_operator_t	op,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -1538,7 +1538,7 @@ vmx_CompositeOver_n_0565 (pixman_operator_t	op,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
commit e064aa761831296c8570e0fdfaa0c3585c4a3871
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sat Jul 4 23:12:18 2009 -0400

    Rename fbCombine* to combine*
    
        s/fbCombine/combine/g;
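    (Illustration only, restating the substitution above with one of the
    combiners from the diff below — the signature and body are untouched,
    only the fbCombine prefix is shortened to combine:)

        /* before */
        static void
        fbCombineOverU (pixman_implementation_t *imp, pixman_op_t op,
                        comp4_t *dest, const comp4_t *src,
                        const comp4_t *mask, int width);

        /* after */
        static void
        combineOverU (pixman_implementation_t *imp, pixman_op_t op,
                      comp4_t *dest, const comp4_t *src,
                      const comp4_t *mask, int width);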

diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index b944677..55a3947 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -12,7 +12,7 @@
 /*** per channel helper functions ***/
 
 static void
-fbCombineMaskC (comp4_t *src, comp4_t *mask)
+combineMaskC (comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *mask;
 
@@ -43,7 +43,7 @@ fbCombineMaskC (comp4_t *src, comp4_t *mask)
 }
 
 static void
-fbCombineMaskValueC (comp4_t *src, const comp4_t *mask)
+combineMaskValueC (comp4_t *src, const comp4_t *mask)
 {
     comp4_t a = *mask;
     comp4_t	x;
@@ -63,7 +63,7 @@ fbCombineMaskValueC (comp4_t *src, const comp4_t *mask)
 }
 
 static void
-fbCombineMaskAlphaC (const comp4_t *src, comp4_t *mask)
+combineMaskAlphaC (const comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *(mask);
     comp4_t	x;
@@ -123,14 +123,14 @@ combineMask (const comp4_t *src, const comp4_t *mask, int i)
 }
 
 static void
-fbCombineClear (pixman_implementation_t *imp, pixman_op_t op,
+combineClear (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     memset(dest, 0, width*sizeof(comp4_t));
 }
 
 static void
-fbCombineSrcU (pixman_implementation_t *imp, pixman_op_t op,
+combineSrcU (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -148,9 +148,9 @@ fbCombineSrcU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call fbCombineSrcU */
+/* if the Src is opaque, call combineSrcU */
 static void
-fbCombineOverU (pixman_implementation_t *imp, pixman_op_t op,
+combineOverU (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -166,7 +166,7 @@ fbCombineOverU (pixman_implementation_t *imp, pixman_op_t op,
 
 /* if the Dst is opaque, this is a noop */
 static void
-fbCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -179,9 +179,9 @@ fbCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Dst is opaque, call fbCombineSrcU */
+/* if the Dst is opaque, call combineSrcU */
 static void
-fbCombineInU (pixman_implementation_t *imp, pixman_op_t op,
+combineInU (pixman_implementation_t *imp, pixman_op_t op,
 	      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -195,7 +195,7 @@ fbCombineInU (pixman_implementation_t *imp, pixman_op_t op,
 
 /* if the Src is opaque, this is a noop */
 static void
-fbCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
 		     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -208,9 +208,9 @@ fbCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Dst is opaque, call fbCombineClear */
+/* if the Dst is opaque, call combineClear */
 static void
-fbCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
+combineOutU (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -222,9 +222,9 @@ fbCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call fbCombineClear */
+/* if the Src is opaque, call combineClear */
 static void
-fbCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -237,11 +237,11 @@ fbCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call fbCombineInU */
-/* if the Dst is opaque, call fbCombineOverU */
-/* if both the Src and Dst are opaque, call fbCombineSrcU */
+/* if the Src is opaque, call combineInU */
+/* if the Dst is opaque, call combineOverU */
+/* if both the Src and Dst are opaque, call combineSrcU */
 static void
-fbCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
+combineAtopU (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -256,11 +256,11 @@ fbCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call fbCombineOverReverseU */
-/* if the Dst is opaque, call fbCombineInReverseU */
-/* if both the Src and Dst are opaque, call fbCombineDstU */
+/* if the Src is opaque, call combineOverReverseU */
+/* if the Dst is opaque, call combineInReverseU */
+/* if both the Src and Dst are opaque, call combineDstU */
 static void
-fbCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -275,11 +275,11 @@ fbCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call fbCombineOverU */
-/* if the Dst is opaque, call fbCombineOverReverseU */
-/* if both the Src and Dst are opaque, call fbCombineClear */
+/* if the Src is opaque, call combineOverU */
+/* if the Dst is opaque, call combineOverReverseU */
+/* if both the Src and Dst are opaque, call combineClear */
 static void
-fbCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
+combineXorU (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -295,7 +295,7 @@ fbCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineAddU (pixman_implementation_t *imp, pixman_op_t op,
+combineAddU (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -307,11 +307,11 @@ fbCombineAddU (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-/* if the Src is opaque, call fbCombineAddU */
-/* if the Dst is opaque, call fbCombineAddU */
-/* if both the Src and Dst are opaque, call fbCombineAddU */
+/* if the Src is opaque, call combineAddU */
+/* if the Dst is opaque, call combineAddU */
+/* if both the Src and Dst are opaque, call combineAddU */
 static void
-fbCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
+combineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
 		    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -361,7 +361,7 @@ fbCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
  */
 
 static void
-fbCombineMultiplyU (pixman_implementation_t *imp, pixman_op_t op,
+combineMultiplyU (pixman_implementation_t *imp, pixman_op_t op,
 		    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -380,7 +380,7 @@ fbCombineMultiplyU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
+combineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
                     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -391,7 +391,7 @@ fbCombineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t r = d;
 	comp4_t dest_ia = ALPHA_c (~d);
 
-	fbCombineMaskValueC (&s, &m);
+	combineMaskValueC (&s, &m);
 
 	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (r, ~m, s, dest_ia);
 	UNcx4_MUL_UNcx4 (d, s);
@@ -403,7 +403,7 @@ fbCombineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
 
 #define PDF_SEPARABLE_BLEND_MODE(name)		    \
 static void					    \
-fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
+combine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
                         comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
 {						    \
     int i;					    \
@@ -428,7 +428,7 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
 }						    \
 						    \
 static void				    \
-fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
+combine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
 {						    \
     int i;					    \
@@ -440,7 +440,7 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 	comp1_t ida = ~da;			    \
 	comp4_t result;				    \
 						    \
-	fbCombineMaskValueC (&s, &m);		    \
+	combineMaskValueC (&s, &m);		    \
 						    \
 	result = d;				    \
 	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (result, ~m, s, ida);	    \
@@ -777,7 +777,7 @@ PDF_SEPARABLE_BLEND_MODE (Exclusion)
 
 #define PDF_NON_SEPARABLE_BLEND_MODE(name)					\
 static void								\
-fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
+combine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
 {									\
     int i;								\
@@ -1003,7 +1003,7 @@ PDF_NON_SEPARABLE_BLEND_MODE (HslLuminosity)
 
 /* portion covered by a but not b */
 static comp1_t
-fbCombineDisjointOutPart (comp1_t a, comp1_t b)
+combineDisjointOutPart (comp1_t a, comp1_t b)
 {
     /* min (1, (1-b) / a) */
 
@@ -1015,7 +1015,7 @@ fbCombineDisjointOutPart (comp1_t a, comp1_t b)
 
 /* portion covered by both a and b */
 static comp1_t
-fbCombineDisjointInPart (comp1_t a, comp1_t b)
+combineDisjointInPart (comp1_t a, comp1_t b)
 {
     /* max (1-(1-b)/a,0) */
     /*  = - min ((1-b)/a - 1, 0) */
@@ -1029,7 +1029,7 @@ fbCombineDisjointInPart (comp1_t a, comp1_t b)
 
 /* portion covered by a but not b */
 static comp1_t
-fbCombineConjointOutPart (comp1_t a, comp1_t b)
+combineConjointOutPart (comp1_t a, comp1_t b)
 {
     /* max (1-b/a,0) */
     /* = 1-min(b/a,1) */
@@ -1043,7 +1043,7 @@ fbCombineConjointOutPart (comp1_t a, comp1_t b)
 
 /* portion covered by both a and b */
 static comp1_t
-fbCombineConjointInPart (comp1_t a, comp1_t b)
+combineConjointInPart (comp1_t a, comp1_t b)
 {
     /* min (1,b/a) */
 
@@ -1063,7 +1063,7 @@ fbCombineConjointInPart (comp1_t a, comp1_t b)
 					 (0 - ((t) >> G_SHIFT)))) << (i))
 
 static void
-fbCombineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
     for (i = 0; i < width; ++i) {
@@ -1079,10 +1079,10 @@ fbCombineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fa = 0;
             break;
         case CombineAOut:
-            Fa = fbCombineDisjointOutPart (sa, da);
+            Fa = combineDisjointOutPart (sa, da);
             break;
         case CombineAIn:
-            Fa = fbCombineDisjointInPart (sa, da);
+            Fa = combineDisjointInPart (sa, da);
             break;
         case CombineA:
             Fa = MASK;
@@ -1094,10 +1094,10 @@ fbCombineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fb = 0;
             break;
         case CombineBOut:
-            Fb = fbCombineDisjointOutPart (da, sa);
+            Fb = combineDisjointOutPart (da, sa);
             break;
         case CombineBIn:
-            Fb = fbCombineDisjointInPart (da, sa);
+            Fb = combineDisjointInPart (da, sa);
             break;
         case CombineB:
             Fb = MASK;
@@ -1113,7 +1113,7 @@ fbCombineDisjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mas
 }
 
 static void
-fbCombineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1126,7 +1126,7 @@ fbCombineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
             if (a != MASK)
             {
                 comp4_t d = *(dest + i);
-                a = fbCombineDisjointOutPart (d >> A_SHIFT, a);
+                a = combineDisjointOutPart (d >> A_SHIFT, a);
                 UNcx4_MUL_UNc_ADD_UNcx4(d, a, s);
                 s = d;
             }
@@ -1136,56 +1136,56 @@ fbCombineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineDisjointInU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointInU (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralU (dest, src, mask, width, CombineAIn);
+    combineDisjointGeneralU (dest, src, mask, width, CombineAIn);
 }
 
 static void
-fbCombineDisjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralU (dest, src, mask, width, CombineBIn);
+    combineDisjointGeneralU (dest, src, mask, width, CombineBIn);
 }
 
 static void
-fbCombineDisjointOutU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointOutU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralU (dest, src, mask, width, CombineAOut);
+    combineDisjointGeneralU (dest, src, mask, width, CombineAOut);
 }
 
 static void
-fbCombineDisjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralU (dest, src, mask, width, CombineBOut);
+    combineDisjointGeneralU (dest, src, mask, width, CombineBOut);
 }
 
 static void
-fbCombineDisjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralU (dest, src, mask, width, CombineAAtop);
+    combineDisjointGeneralU (dest, src, mask, width, CombineAAtop);
 }
 
 static void
-fbCombineDisjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralU (dest, src, mask, width, CombineBAtop);
+    combineDisjointGeneralU (dest, src, mask, width, CombineBAtop);
 }
 
 static void
-fbCombineDisjointXorU (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointXorU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralU (dest, src, mask, width, CombineXor);
+    combineDisjointGeneralU (dest, src, mask, width, CombineXor);
 }
 
 static void
-fbCombineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
     for (i = 0; i < width; ++i) {
@@ -1201,10 +1201,10 @@ fbCombineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fa = 0;
             break;
         case CombineAOut:
-            Fa = fbCombineConjointOutPart (sa, da);
+            Fa = combineConjointOutPart (sa, da);
             break;
         case CombineAIn:
-            Fa = fbCombineConjointInPart (sa, da);
+            Fa = combineConjointInPart (sa, da);
             break;
         case CombineA:
             Fa = MASK;
@@ -1216,10 +1216,10 @@ fbCombineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fb = 0;
             break;
         case CombineBOut:
-            Fb = fbCombineConjointOutPart (da, sa);
+            Fb = combineConjointOutPart (da, sa);
             break;
         case CombineBIn:
-            Fb = fbCombineConjointInPart (da, sa);
+            Fb = combineConjointInPart (da, sa);
             break;
         case CombineB:
             Fb = MASK;
@@ -1235,69 +1235,69 @@ fbCombineConjointGeneralU (comp4_t *dest, const comp4_t *src, const comp4_t *mas
 }
 
 static void
-fbCombineConjointOverU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOverU (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineAOver);
+    combineConjointGeneralU (dest, src, mask, width, CombineAOver);
 }
 
 
 static void
-fbCombineConjointOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineBOver);
+    combineConjointGeneralU (dest, src, mask, width, CombineBOver);
 }
 
 
 static void
-fbCombineConjointInU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointInU (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineAIn);
+    combineConjointGeneralU (dest, src, mask, width, CombineAIn);
 }
 
 
 static void
-fbCombineConjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointInReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineBIn);
+    combineConjointGeneralU (dest, src, mask, width, CombineBIn);
 }
 
 static void
-fbCombineConjointOutU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOutU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineAOut);
+    combineConjointGeneralU (dest, src, mask, width, CombineAOut);
 }
 
 static void
-fbCombineConjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineBOut);
+    combineConjointGeneralU (dest, src, mask, width, CombineBOut);
 }
 
 static void
-fbCombineConjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointAtopU (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineAAtop);
+    combineConjointGeneralU (dest, src, mask, width, CombineAAtop);
 }
 
 static void
-fbCombineConjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineBAtop);
+    combineConjointGeneralU (dest, src, mask, width, CombineBAtop);
 }
 
 static void
-fbCombineConjointXorU (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointXorU (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralU (dest, src, mask, width, CombineXor);
+    combineConjointGeneralU (dest, src, mask, width, CombineXor);
 }
 
 /********************************************************************************/
@@ -1305,14 +1305,14 @@ fbCombineConjointXorU (pixman_implementation_t *imp, pixman_op_t op,
 /********************************************************************************/
 
 static void
-fbCombineClearC (pixman_implementation_t *imp, pixman_op_t op,
+combineClearC (pixman_implementation_t *imp, pixman_op_t op,
 		 comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     memset(dest, 0, width*sizeof(comp4_t));
 }
 
 static void
-fbCombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
+combineSrcC (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1321,14 +1321,14 @@ fbCombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t s = *(src + i);
 	comp4_t m = *(mask + i);
 
-	fbCombineMaskValueC (&s, &m);
+	combineMaskValueC (&s, &m);
 
 	*(dest) = s;
     }
 }
 
 static void
-fbCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
+combineOverC (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1338,7 +1338,7 @@ fbCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	fbCombineMaskC (&s, &m);
+	combineMaskC (&s, &m);
 
 	a = ~m;
         if (a != ~0)
@@ -1355,7 +1355,7 @@ fbCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1369,7 +1369,7 @@ fbCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
             comp4_t s = *(src + i);
 	    comp4_t m = *(mask + i);
 
-	    fbCombineMaskValueC (&s, &m);
+	    combineMaskValueC (&s, &m);
 
             if (a != MASK)
             {
@@ -1381,7 +1381,7 @@ fbCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineInC (pixman_implementation_t *imp, pixman_op_t op,
+combineInC (pixman_implementation_t *imp, pixman_op_t op,
 	      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1395,7 +1395,7 @@ fbCombineInC (pixman_implementation_t *imp, pixman_op_t op,
 	    comp4_t m = *(mask + i);
 
 	    s = *(src + i);
-	    fbCombineMaskValueC (&s, &m);
+	    combineMaskValueC (&s, &m);
             if (a != MASK)
             {
                 UNcx4_MUL_UNc(s, a);
@@ -1406,7 +1406,7 @@ fbCombineInC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 		     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1416,7 +1416,7 @@ fbCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t a;
 
-	fbCombineMaskAlphaC (&s, &m);
+	combineMaskAlphaC (&s, &m);
 
 	a = m;
         if (a != ~0)
@@ -1433,7 +1433,7 @@ fbCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
+combineOutC (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1447,7 +1447,7 @@ fbCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
 	    comp4_t m = *(mask + i);
 
 	    s = *(src + i);
-	    fbCombineMaskValueC (&s, &m);
+	    combineMaskValueC (&s, &m);
 
             if (a != MASK)
             {
@@ -1459,7 +1459,7 @@ fbCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1469,7 +1469,7 @@ fbCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	fbCombineMaskAlphaC (&s, &m);
+	combineMaskAlphaC (&s, &m);
 
         a = ~m;
         if (a != ~0)
@@ -1486,7 +1486,7 @@ fbCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
+combineAtopC (pixman_implementation_t *imp, pixman_op_t op,
 		comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1498,7 +1498,7 @@ fbCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = d >> A_SHIFT;
 
-	fbCombineMaskC (&s, &m);
+	combineMaskC (&s, &m);
 
         ad = ~m;
 
@@ -1508,7 +1508,7 @@ fbCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1521,7 +1521,7 @@ fbCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = ~d >> A_SHIFT;
 
-	fbCombineMaskC (&s, &m);
+	combineMaskC (&s, &m);
 
 	ad = m;
 
@@ -1531,7 +1531,7 @@ fbCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
+combineXorC (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1543,7 +1543,7 @@ fbCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t ad;
         comp2_t as = ~d >> A_SHIFT;
 
-	fbCombineMaskC (&s, &m);
+	combineMaskC (&s, &m);
 
 	ad = ~m;
 
@@ -1553,7 +1553,7 @@ fbCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
+combineAddC (pixman_implementation_t *imp, pixman_op_t op,
 	       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1563,7 +1563,7 @@ fbCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t d = *(dest + i);
 
-	fbCombineMaskValueC (&s, &m);
+	combineMaskValueC (&s, &m);
 
         UNcx4_ADD_UNcx4(d, s);
 	*(dest + i) = d;
@@ -1571,7 +1571,7 @@ fbCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
+combineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
 		    comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
     int i;
@@ -1586,7 +1586,7 @@ fbCombineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
         s = *(src + i);
 	m = *(mask + i);
 
-	fbCombineMaskC (&s, &m);
+	combineMaskC (&s, &m);
 
         sa = (m >> A_SHIFT);
         sr = (m >> R_SHIFT) & MASK;
@@ -1619,7 +1619,7 @@ fbCombineSaturateC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 static void
-fbCombineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
 
@@ -1636,7 +1636,7 @@ fbCombineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
         d = *(dest + i);
         da = d >> A_SHIFT;
 
-	fbCombineMaskC (&s, &m);
+	combineMaskC (&s, &m);
 
 	sa = m;
 
@@ -1645,17 +1645,17 @@ fbCombineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fa = 0;
             break;
         case CombineAOut:
-            m = (comp4_t)fbCombineDisjointOutPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)fbCombineDisjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)fbCombineDisjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)fbCombineDisjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combineDisjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case CombineAIn:
-            m = (comp4_t)fbCombineDisjointInPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)fbCombineDisjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)fbCombineDisjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)fbCombineDisjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combineDisjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case CombineA:
@@ -1668,17 +1668,17 @@ fbCombineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fb = 0;
             break;
         case CombineBOut:
-            m = (comp4_t)fbCombineDisjointOutPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)fbCombineDisjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)fbCombineDisjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)fbCombineDisjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combineDisjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case CombineBIn:
-            m = (comp4_t)fbCombineDisjointInPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)fbCombineDisjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)fbCombineDisjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)fbCombineDisjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combineDisjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case CombineB:
@@ -1695,63 +1695,63 @@ fbCombineDisjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
 }
 
 static void
-fbCombineDisjointOverC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointOverC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineAOver);
+    combineDisjointGeneralC (dest, src, mask, width, CombineAOver);
 }
 
 static void
-fbCombineDisjointInC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointInC (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineAIn);
+    combineDisjointGeneralC (dest, src, mask, width, CombineAIn);
 }
 
 static void
-fbCombineDisjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineBIn);
+    combineDisjointGeneralC (dest, src, mask, width, CombineBIn);
 }
 
 static void
-fbCombineDisjointOutC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointOutC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineAOut);
+    combineDisjointGeneralC (dest, src, mask, width, CombineAOut);
 }
 
 static void
-fbCombineDisjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineBOut);
+    combineDisjointGeneralC (dest, src, mask, width, CombineBOut);
 }
 
 static void
-fbCombineDisjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineAAtop);
+    combineDisjointGeneralC (dest, src, mask, width, CombineAAtop);
 }
 
 static void
-fbCombineDisjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineBAtop);
+    combineDisjointGeneralC (dest, src, mask, width, CombineBAtop);
 }
 
 static void
-fbCombineDisjointXorC (pixman_implementation_t *imp, pixman_op_t op,
+combineDisjointXorC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineDisjointGeneralC (dest, src, mask, width, CombineXor);
+    combineDisjointGeneralC (dest, src, mask, width, CombineXor);
 }
 
 static void
-fbCombineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
+combineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width, comp1_t combine)
 {
     int i;
 
@@ -1768,7 +1768,7 @@ fbCombineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
         d = *(dest + i);
         da = d >> A_SHIFT;
 
-	fbCombineMaskC (&s, &m);
+	combineMaskC (&s, &m);
 
         sa = m;
 
@@ -1777,17 +1777,17 @@ fbCombineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fa = 0;
             break;
         case CombineAOut:
-            m = (comp4_t)fbCombineConjointOutPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)fbCombineConjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)fbCombineConjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)fbCombineConjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combineConjointOutPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case CombineAIn:
-            m = (comp4_t)fbCombineConjointInPart ((comp1_t) (sa >> 0), da);
-            n = (comp4_t)fbCombineConjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
-            o = (comp4_t)fbCombineConjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
-            p = (comp4_t)fbCombineConjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
+            m = (comp4_t)combineConjointInPart ((comp1_t) (sa >> 0), da);
+            n = (comp4_t)combineConjointInPart ((comp1_t) (sa >> G_SHIFT), da) << G_SHIFT;
+            o = (comp4_t)combineConjointInPart ((comp1_t) (sa >> R_SHIFT), da) << R_SHIFT;
+            p = (comp4_t)combineConjointInPart ((comp1_t) (sa >> A_SHIFT), da) << A_SHIFT;
             Fa = m|n|o|p;
             break;
         case CombineA:
@@ -1800,17 +1800,17 @@ fbCombineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
             Fb = 0;
             break;
         case CombineBOut:
-            m = (comp4_t)fbCombineConjointOutPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)fbCombineConjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)fbCombineConjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)fbCombineConjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combineConjointOutPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case CombineBIn:
-            m = (comp4_t)fbCombineConjointInPart (da, (comp1_t) (sa >> 0));
-            n = (comp4_t)fbCombineConjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
-            o = (comp4_t)fbCombineConjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
-            p = (comp4_t)fbCombineConjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
+            m = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> 0));
+            n = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> G_SHIFT)) << G_SHIFT;
+            o = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> R_SHIFT)) << R_SHIFT;
+            p = (comp4_t)combineConjointInPart (da, (comp1_t) (sa >> A_SHIFT)) << A_SHIFT;
             Fb = m|n|o|p;
             break;
         case CombineB:
@@ -1827,186 +1827,186 @@ fbCombineConjointGeneralC (comp4_t *dest, const comp4_t *src, const comp4_t *mas
 }
 
 static void
-fbCombineConjointOverC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOverC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineAOver);
+    combineConjointGeneralC (dest, src, mask, width, CombineAOver);
 }
 
 static void
-fbCombineConjointOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineBOver);
+    combineConjointGeneralC (dest, src, mask, width, CombineBOver);
 }
 
 static void
-fbCombineConjointInC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointInC (pixman_implementation_t *imp, pixman_op_t op,
 		      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineAIn);
+    combineConjointGeneralC (dest, src, mask, width, CombineAIn);
 }
 
 static void
-fbCombineConjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointInReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			     comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineBIn);
+    combineConjointGeneralC (dest, src, mask, width, CombineBIn);
 }
 
 static void
-fbCombineConjointOutC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOutC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineAOut);
+    combineConjointGeneralC (dest, src, mask, width, CombineAOut);
 }
 
 static void
-fbCombineConjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			      comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineBOut);
+    combineConjointGeneralC (dest, src, mask, width, CombineBOut);
 }
 
 static void
-fbCombineConjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointAtopC (pixman_implementation_t *imp, pixman_op_t op,
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineAAtop);
+    combineConjointGeneralC (dest, src, mask, width, CombineAAtop);
 }
 
 static void
-fbCombineConjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 			       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineBAtop);
+    combineConjointGeneralC (dest, src, mask, width, CombineBAtop);
 }
 
 static void
-fbCombineConjointXorC (pixman_implementation_t *imp, pixman_op_t op,
+combineConjointXorC (pixman_implementation_t *imp, pixman_op_t op,
 		       comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width)
 {
-    fbCombineConjointGeneralC (dest, src, mask, width, CombineXor);
+    combineConjointGeneralC (dest, src, mask, width, CombineXor);
 }
 
 void
 _pixman_setup_combiner_functions_width (pixman_implementation_t *imp)
 {
     /* Unified alpha */
-    imp->combine_width[PIXMAN_OP_CLEAR] = fbCombineClear;
-    imp->combine_width[PIXMAN_OP_SRC] = fbCombineSrcU;
+    imp->combine_width[PIXMAN_OP_CLEAR] = combineClear;
+    imp->combine_width[PIXMAN_OP_SRC] = combineSrcU;
     /* dest */
-    imp->combine_width[PIXMAN_OP_OVER] = fbCombineOverU;
-    imp->combine_width[PIXMAN_OP_OVER_REVERSE] = fbCombineOverReverseU;
-    imp->combine_width[PIXMAN_OP_IN] = fbCombineInU;
-    imp->combine_width[PIXMAN_OP_IN_REVERSE] = fbCombineInReverseU;
-    imp->combine_width[PIXMAN_OP_OUT] = fbCombineOutU;
-    imp->combine_width[PIXMAN_OP_OUT_REVERSE] = fbCombineOutReverseU;
-    imp->combine_width[PIXMAN_OP_ATOP] = fbCombineAtopU;
-    imp->combine_width[PIXMAN_OP_ATOP_REVERSE] = fbCombineAtopReverseU;
-    imp->combine_width[PIXMAN_OP_XOR] = fbCombineXorU;
-    imp->combine_width[PIXMAN_OP_ADD] = fbCombineAddU;
-    imp->combine_width[PIXMAN_OP_SATURATE] = fbCombineSaturateU;
+    imp->combine_width[PIXMAN_OP_OVER] = combineOverU;
+    imp->combine_width[PIXMAN_OP_OVER_REVERSE] = combineOverReverseU;
+    imp->combine_width[PIXMAN_OP_IN] = combineInU;
+    imp->combine_width[PIXMAN_OP_IN_REVERSE] = combineInReverseU;
+    imp->combine_width[PIXMAN_OP_OUT] = combineOutU;
+    imp->combine_width[PIXMAN_OP_OUT_REVERSE] = combineOutReverseU;
+    imp->combine_width[PIXMAN_OP_ATOP] = combineAtopU;
+    imp->combine_width[PIXMAN_OP_ATOP_REVERSE] = combineAtopReverseU;
+    imp->combine_width[PIXMAN_OP_XOR] = combineXorU;
+    imp->combine_width[PIXMAN_OP_ADD] = combineAddU;
+    imp->combine_width[PIXMAN_OP_SATURATE] = combineSaturateU;
 
     /* Disjoint, unified */
-    imp->combine_width[PIXMAN_OP_DISJOINT_CLEAR] = fbCombineClear;
-    imp->combine_width[PIXMAN_OP_DISJOINT_SRC] = fbCombineSrcU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_CLEAR] = combineClear;
+    imp->combine_width[PIXMAN_OP_DISJOINT_SRC] = combineSrcU;
     /* dest */
-    imp->combine_width[PIXMAN_OP_DISJOINT_OVER] = fbCombineDisjointOverU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_OVER_REVERSE] = fbCombineSaturateU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_IN] = fbCombineDisjointInU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_IN_REVERSE] = fbCombineDisjointInReverseU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_OUT] = fbCombineDisjointOutU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_OUT_REVERSE] = fbCombineDisjointOutReverseU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP] = fbCombineDisjointAtopU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = fbCombineDisjointAtopReverseU;
-    imp->combine_width[PIXMAN_OP_DISJOINT_XOR] = fbCombineDisjointXorU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OVER] = combineDisjointOverU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combineSaturateU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_IN] = combineDisjointInU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_IN_REVERSE] = combineDisjointInReverseU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OUT] = combineDisjointOutU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combineDisjointOutReverseU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP] = combineDisjointAtopU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combineDisjointAtopReverseU;
+    imp->combine_width[PIXMAN_OP_DISJOINT_XOR] = combineDisjointXorU;
 
     /* Conjoint, unified */
-    imp->combine_width[PIXMAN_OP_CONJOINT_CLEAR] = fbCombineClear;
-    imp->combine_width[PIXMAN_OP_CONJOINT_SRC] = fbCombineSrcU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_CLEAR] = combineClear;
+    imp->combine_width[PIXMAN_OP_CONJOINT_SRC] = combineSrcU;
     /* dest */
-    imp->combine_width[PIXMAN_OP_CONJOINT_OVER] = fbCombineConjointOverU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_OVER_REVERSE] = fbCombineConjointOverReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_IN] = fbCombineConjointInU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_IN_REVERSE] = fbCombineConjointInReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_OUT] = fbCombineConjointOutU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_OUT_REVERSE] = fbCombineConjointOutReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP] = fbCombineConjointAtopU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = fbCombineConjointAtopReverseU;
-    imp->combine_width[PIXMAN_OP_CONJOINT_XOR] = fbCombineConjointXorU;
-
-    imp->combine_width[PIXMAN_OP_MULTIPLY] = fbCombineMultiplyU;
-    imp->combine_width[PIXMAN_OP_SCREEN] = fbCombineScreenU;
-    imp->combine_width[PIXMAN_OP_OVERLAY] = fbCombineOverlayU;
-    imp->combine_width[PIXMAN_OP_DARKEN] = fbCombineDarkenU;
-    imp->combine_width[PIXMAN_OP_LIGHTEN] = fbCombineLightenU;
-    imp->combine_width[PIXMAN_OP_COLOR_DODGE] = fbCombineColorDodgeU;
-    imp->combine_width[PIXMAN_OP_COLOR_BURN] = fbCombineColorBurnU;
-    imp->combine_width[PIXMAN_OP_HARD_LIGHT] = fbCombineHardLightU;
-    imp->combine_width[PIXMAN_OP_SOFT_LIGHT] = fbCombineSoftLightU;
-    imp->combine_width[PIXMAN_OP_DIFFERENCE] = fbCombineDifferenceU;
-    imp->combine_width[PIXMAN_OP_EXCLUSION] = fbCombineExclusionU;
-    imp->combine_width[PIXMAN_OP_HSL_HUE] = fbCombineHslHueU;
-    imp->combine_width[PIXMAN_OP_HSL_SATURATION] = fbCombineHslSaturationU;
-    imp->combine_width[PIXMAN_OP_HSL_COLOR] = fbCombineHslColorU;
-    imp->combine_width[PIXMAN_OP_HSL_LUMINOSITY] = fbCombineHslLuminosityU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OVER] = combineConjointOverU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combineConjointOverReverseU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_IN] = combineConjointInU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_IN_REVERSE] = combineConjointInReverseU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OUT] = combineConjointOutU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combineConjointOutReverseU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP] = combineConjointAtopU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combineConjointAtopReverseU;
+    imp->combine_width[PIXMAN_OP_CONJOINT_XOR] = combineConjointXorU;
+
+    imp->combine_width[PIXMAN_OP_MULTIPLY] = combineMultiplyU;
+    imp->combine_width[PIXMAN_OP_SCREEN] = combineScreenU;
+    imp->combine_width[PIXMAN_OP_OVERLAY] = combineOverlayU;
+    imp->combine_width[PIXMAN_OP_DARKEN] = combineDarkenU;
+    imp->combine_width[PIXMAN_OP_LIGHTEN] = combineLightenU;
+    imp->combine_width[PIXMAN_OP_COLOR_DODGE] = combineColorDodgeU;
+    imp->combine_width[PIXMAN_OP_COLOR_BURN] = combineColorBurnU;
+    imp->combine_width[PIXMAN_OP_HARD_LIGHT] = combineHardLightU;
+    imp->combine_width[PIXMAN_OP_SOFT_LIGHT] = combineSoftLightU;
+    imp->combine_width[PIXMAN_OP_DIFFERENCE] = combineDifferenceU;
+    imp->combine_width[PIXMAN_OP_EXCLUSION] = combineExclusionU;
+    imp->combine_width[PIXMAN_OP_HSL_HUE] = combineHslHueU;
+    imp->combine_width[PIXMAN_OP_HSL_SATURATION] = combineHslSaturationU;
+    imp->combine_width[PIXMAN_OP_HSL_COLOR] = combineHslColorU;
+    imp->combine_width[PIXMAN_OP_HSL_LUMINOSITY] = combineHslLuminosityU;
 
     /* Component alpha combiners */
-    imp->combine_width_ca[PIXMAN_OP_CLEAR] = fbCombineClearC;
-    imp->combine_width_ca[PIXMAN_OP_SRC] = fbCombineSrcC;
+    imp->combine_width_ca[PIXMAN_OP_CLEAR] = combineClearC;
+    imp->combine_width_ca[PIXMAN_OP_SRC] = combineSrcC;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_OVER] = fbCombineOverC;
-    imp->combine_width_ca[PIXMAN_OP_OVER_REVERSE] = fbCombineOverReverseC;
-    imp->combine_width_ca[PIXMAN_OP_IN] = fbCombineInC;
-    imp->combine_width_ca[PIXMAN_OP_IN_REVERSE] = fbCombineInReverseC;
-    imp->combine_width_ca[PIXMAN_OP_OUT] = fbCombineOutC;
-    imp->combine_width_ca[PIXMAN_OP_OUT_REVERSE] = fbCombineOutReverseC;
-    imp->combine_width_ca[PIXMAN_OP_ATOP] = fbCombineAtopC;
-    imp->combine_width_ca[PIXMAN_OP_ATOP_REVERSE] = fbCombineAtopReverseC;
-    imp->combine_width_ca[PIXMAN_OP_XOR] = fbCombineXorC;
-    imp->combine_width_ca[PIXMAN_OP_ADD] = fbCombineAddC;
-    imp->combine_width_ca[PIXMAN_OP_SATURATE] = fbCombineSaturateC;
+    imp->combine_width_ca[PIXMAN_OP_OVER] = combineOverC;
+    imp->combine_width_ca[PIXMAN_OP_OVER_REVERSE] = combineOverReverseC;
+    imp->combine_width_ca[PIXMAN_OP_IN] = combineInC;
+    imp->combine_width_ca[PIXMAN_OP_IN_REVERSE] = combineInReverseC;
+    imp->combine_width_ca[PIXMAN_OP_OUT] = combineOutC;
+    imp->combine_width_ca[PIXMAN_OP_OUT_REVERSE] = combineOutReverseC;
+    imp->combine_width_ca[PIXMAN_OP_ATOP] = combineAtopC;
+    imp->combine_width_ca[PIXMAN_OP_ATOP_REVERSE] = combineAtopReverseC;
+    imp->combine_width_ca[PIXMAN_OP_XOR] = combineXorC;
+    imp->combine_width_ca[PIXMAN_OP_ADD] = combineAddC;
+    imp->combine_width_ca[PIXMAN_OP_SATURATE] = combineSaturateC;
 
     /* Disjoint CA */
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_CLEAR] = fbCombineClearC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_SRC] = fbCombineSrcC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_CLEAR] = combineClearC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_SRC] = combineSrcC;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER] = fbCombineDisjointOverC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = fbCombineSaturateC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN] = fbCombineDisjointInC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = fbCombineDisjointInReverseC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT] = fbCombineDisjointOutC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = fbCombineDisjointOutReverseC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP] = fbCombineDisjointAtopC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = fbCombineDisjointAtopReverseC;
-    imp->combine_width_ca[PIXMAN_OP_DISJOINT_XOR] = fbCombineDisjointXorC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER] = combineDisjointOverC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OVER_REVERSE] = combineSaturateC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN] = combineDisjointInC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_IN_REVERSE] = combineDisjointInReverseC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT] = combineDisjointOutC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_OUT_REVERSE] = combineDisjointOutReverseC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP] = combineDisjointAtopC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_ATOP_REVERSE] = combineDisjointAtopReverseC;
+    imp->combine_width_ca[PIXMAN_OP_DISJOINT_XOR] = combineDisjointXorC;
 
     /* Conjoint CA */
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_CLEAR] = fbCombineClearC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_SRC] = fbCombineSrcC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_CLEAR] = combineClearC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_SRC] = combineSrcC;
     /* dest */
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER] = fbCombineConjointOverC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = fbCombineConjointOverReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN] = fbCombineConjointInC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = fbCombineConjointInReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT] = fbCombineConjointOutC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = fbCombineConjointOutReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP] = fbCombineConjointAtopC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = fbCombineConjointAtopReverseC;
-    imp->combine_width_ca[PIXMAN_OP_CONJOINT_XOR] = fbCombineConjointXorC;
-
-    imp->combine_width_ca[PIXMAN_OP_MULTIPLY] = fbCombineMultiplyC;
-    imp->combine_width_ca[PIXMAN_OP_SCREEN] = fbCombineScreenC;
-    imp->combine_width_ca[PIXMAN_OP_OVERLAY] = fbCombineOverlayC;
-    imp->combine_width_ca[PIXMAN_OP_DARKEN] = fbCombineDarkenC;
-    imp->combine_width_ca[PIXMAN_OP_LIGHTEN] = fbCombineLightenC;
-    imp->combine_width_ca[PIXMAN_OP_COLOR_DODGE] = fbCombineColorDodgeC;
-    imp->combine_width_ca[PIXMAN_OP_COLOR_BURN] = fbCombineColorBurnC;
-    imp->combine_width_ca[PIXMAN_OP_HARD_LIGHT] = fbCombineHardLightC;
-    imp->combine_width_ca[PIXMAN_OP_SOFT_LIGHT] = fbCombineSoftLightC;
-    imp->combine_width_ca[PIXMAN_OP_DIFFERENCE] = fbCombineDifferenceC;
-    imp->combine_width_ca[PIXMAN_OP_EXCLUSION] = fbCombineExclusionC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER] = combineConjointOverC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OVER_REVERSE] = combineConjointOverReverseC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN] = combineConjointInC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_IN_REVERSE] = combineConjointInReverseC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT] = combineConjointOutC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_OUT_REVERSE] = combineConjointOutReverseC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP] = combineConjointAtopC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_ATOP_REVERSE] = combineConjointAtopReverseC;
+    imp->combine_width_ca[PIXMAN_OP_CONJOINT_XOR] = combineConjointXorC;
+
+    imp->combine_width_ca[PIXMAN_OP_MULTIPLY] = combineMultiplyC;
+    imp->combine_width_ca[PIXMAN_OP_SCREEN] = combineScreenC;
+    imp->combine_width_ca[PIXMAN_OP_OVERLAY] = combineOverlayC;
+    imp->combine_width_ca[PIXMAN_OP_DARKEN] = combineDarkenC;
+    imp->combine_width_ca[PIXMAN_OP_LIGHTEN] = combineLightenC;
+    imp->combine_width_ca[PIXMAN_OP_COLOR_DODGE] = combineColorDodgeC;
+    imp->combine_width_ca[PIXMAN_OP_COLOR_BURN] = combineColorBurnC;
+    imp->combine_width_ca[PIXMAN_OP_HARD_LIGHT] = combineHardLightC;
+    imp->combine_width_ca[PIXMAN_OP_SOFT_LIGHT] = combineSoftLightC;
+    imp->combine_width_ca[PIXMAN_OP_DIFFERENCE] = combineDifferenceC;
+    imp->combine_width_ca[PIXMAN_OP_EXCLUSION] = combineExclusionC;
     /* It is not clear that these make sense, so leave them out for now */
     imp->combine_width_ca[PIXMAN_OP_HSL_HUE] = NULL;
     imp->combine_width_ca[PIXMAN_OP_HSL_SATURATION] = NULL;
commit f61855e186519a490b5d013d2de67dcc8da7a0ac
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 12:51:28 2009 -0400

    Fix overeager search-and-replace

diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index 5fc07e7..b944677 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -63,7 +63,7 @@ fbCombineMaskValueC (comp4_t *src, const comp4_t *mask)
 }
 
 static void
-fbCombineMaskALPHA_cC (const comp4_t *src, comp4_t *mask)
+fbCombineMaskAlphaC (const comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *(mask);
     comp4_t	x;
@@ -1416,7 +1416,7 @@ fbCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t a;
 
-	fbCombineMaskALPHA_cC (&s, &m);
+	fbCombineMaskAlphaC (&s, &m);
 
 	a = m;
         if (a != ~0)
@@ -1469,7 +1469,7 @@ fbCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	fbCombineMaskALPHA_cC (&s, &m);
+	fbCombineMaskAlphaC (&s, &m);
 
         a = ~m;
         if (a != ~0)
commit 1de32ae2ef8044b349f3ec87ae339fdcedeb83ef
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 12:07:56 2009 -0400

    Uppercase some more macro names

diff --git a/pixman/pixman-region.c b/pixman/pixman-region.c
index ea81a09..10db5fd 100644
--- a/pixman/pixman-region.c
+++ b/pixman/pixman-region.c
@@ -474,7 +474,7 @@ pixman_coalesce (
 
 /* Quicky macro to avoid trivial reject procedure calls to pixman_coalesce */
 
-#define Coalesce(newReg, prevBand, curBand)				\
+#define COALESCE(newReg, prevBand, curBand)				\
     if (curBand - prevBand == newReg->data->numRects - curBand) {	\
 	prevBand = pixman_coalesce(newReg, prevBand, curBand);		\
     } else {								\
@@ -527,7 +527,7 @@ pixman_region_appendNonO (
     return TRUE;
 }
 
-#define FindBand(r, rBandEnd, rEnd, ry1)		    \
+#define FIND_BAND(r, rBandEnd, rEnd, ry1)		    \
 {							    \
     ry1 = r->y1;					    \
     rBandEnd = r+1;					    \
@@ -536,7 +536,7 @@ pixman_region_appendNonO (
     }							    \
 }
 
-#define	AppendRegions(newReg, r, rEnd)					\
+#define	APPEND_REGIONS(newReg, r, rEnd)					\
 {									\
     int newRects;							\
     if ((newRects = rEnd - r)) {					\
@@ -703,8 +703,8 @@ pixman_op(
 	assert(r1 != r1End);
 	assert(r2 != r2End);
 
-	FindBand(r1, r1BandEnd, r1End, r1y1);
-	FindBand(r2, r2BandEnd, r2End, r2y1);
+	FIND_BAND(r1, r1BandEnd, r1End, r1y1);
+	FIND_BAND(r2, r2BandEnd, r2End, r2y1);
 
 	/*
 	 * First handle the band that doesn't intersect, if any.
@@ -722,7 +722,7 @@ pixman_op(
 		    curBand = newReg->data->numRects;
 		    if (!pixman_region_appendNonO(newReg, r1, r1BandEnd, top, bot))
 			goto bail;
-		    Coalesce(newReg, prevBand, curBand);
+		    COALESCE(newReg, prevBand, curBand);
 		}
 	    }
 	    ytop = r2y1;
@@ -734,7 +734,7 @@ pixman_op(
 		    curBand = newReg->data->numRects;
 		    if (!pixman_region_appendNonO(newReg, r2, r2BandEnd, top, bot))
 			goto bail;
-		    Coalesce(newReg, prevBand, curBand);
+		    COALESCE(newReg, prevBand, curBand);
 		}
 	    }
 	    ytop = r1y1;
@@ -755,7 +755,7 @@ pixman_op(
 				 ytop, ybot,
 				 overlap))
 		goto bail;
-	    Coalesce(newReg, prevBand, curBand);
+	    COALESCE(newReg, prevBand, curBand);
 	}
 
 	/*
@@ -777,27 +777,27 @@ pixman_op(
 
     if ((r1 != r1End) && appendNon1) {
 	/* Do first nonOverlap1Func call, which may be able to coalesce */
-	FindBand(r1, r1BandEnd, r1End, r1y1);
+	FIND_BAND(r1, r1BandEnd, r1End, r1y1);
 	curBand = newReg->data->numRects;
 	if (!pixman_region_appendNonO(newReg,
 				      r1, r1BandEnd,
 				      MAX(r1y1, ybot), r1->y2))
 	    goto bail;
-	Coalesce(newReg, prevBand, curBand);
+	COALESCE(newReg, prevBand, curBand);
 	/* Just append the rest of the boxes  */
-	AppendRegions(newReg, r1BandEnd, r1End);
+	APPEND_REGIONS(newReg, r1BandEnd, r1End);
 
     } else if ((r2 != r2End) && appendNon2) {
 	/* Do first nonOverlap2Func call, which may be able to coalesce */
-	FindBand(r2, r2BandEnd, r2End, r2y1);
+	FIND_BAND(r2, r2BandEnd, r2End, r2y1);
 	curBand = newReg->data->numRects;
 	if (!pixman_region_appendNonO(newReg,
 				      r2, r2BandEnd,
 				      MAX(r2y1, ybot), r2->y2))
 	    goto bail;
-	Coalesce(newReg, prevBand, curBand);
+	COALESCE(newReg, prevBand, curBand);
 	/* Append rest of boxes */
-	AppendRegions(newReg, r2BandEnd, r2End);
+	APPEND_REGIONS(newReg, r2BandEnd, r2End);
     }
 
     if (oldData)
@@ -1204,7 +1204,7 @@ PREFIX(_union) (region_type_t *newReg,
  *	    Batch Rectangle Union
  *====================================================================*/
 
-#define ExchangeRects(a, b) \
+#define EXCHANGE_RECTS(a, b) \
 {			    \
     box_type_t     t;	    \
     t = rects[a];	    \
@@ -1230,12 +1230,12 @@ QuickSortRects(
 	{
 	    if (rects[0].y1 > rects[1].y1 ||
 		    (rects[0].y1 == rects[1].y1 && rects[0].x1 > rects[1].x1))
-		ExchangeRects(0, 1);
+		EXCHANGE_RECTS(0, 1);
 	    return;
 	}
 
 	/* Choose partition element, stick in location 0 */
-        ExchangeRects(0, numRects >> 1);
+        EXCHANGE_RECTS(0, numRects >> 1);
 	y1 = rects[0].y1;
 	x1 = rects[0].x1;
 
@@ -1258,11 +1258,11 @@ QuickSortRects(
 		j--;
             } while (y1 < r->y1 || (y1 == r->y1 && x1 < r->x1));
             if (i < j)
-		ExchangeRects(i, j);
+		EXCHANGE_RECTS(i, j);
         } while (i < j);
 
         /* Move partition element back to middle */
-        ExchangeRects(0, j);
+        EXCHANGE_RECTS(0, j);
 
 	/* Recurse */
         if (numRects-j-1 > 1)
@@ -1415,7 +1415,7 @@ validate (region_type_t * badreg,
 		/* Put box into new band */
 		if (reg->extents.x2 < riBox->x2) reg->extents.x2 = riBox->x2;
 		if (reg->extents.x1 > box->x1)   reg->extents.x1 = box->x1;
-		Coalesce(reg, rit->prevBand, rit->curBand);
+		COALESCE(reg, rit->prevBand, rit->curBand);
 		rit->curBand = reg->data->numRects;
 		RECTALLOC_BAIL(reg, 1, bail);
 		*PIXREGION_TOP(reg) = *box;
@@ -1459,7 +1459,7 @@ validate (region_type_t * badreg,
 NextRect: ;
     } /* for i */
 
-    /* Make a final pass over each region in order to Coalesce and set
+    /* Make a final pass over each region in order to COALESCE and set
        extents.x2 and extents.y2 */
 
     for (j = numRI, rit = ri; --j >= 0; rit++)
@@ -1468,7 +1468,7 @@ NextRect: ;
 	riBox = PIXREGION_END(reg);
 	reg->extents.y2 = riBox->y2;
 	if (reg->extents.x2 < riBox->x2) reg->extents.x2 = riBox->x2;
-	Coalesce(reg, rit->prevBand, rit->curBand);
+	COALESCE(reg, rit->prevBand, rit->curBand);
 	if (reg->data->numRects == 1) /* keep unions happy below */
 	{
 	    FREE_DATA(reg);
commit 47296209dae2e3d33426532a3e896e06373fc088
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 11:40:21 2009 -0400

    Consolidate channel macros in pixman-combine.h
    
    There are now RED_8/RED_16 etc. macros instead of the old Red/Green/Blue.
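
For reference, make-combine.pl (first hunk below) instantiates the new _c channel macros once per component size, so the 8-bit build gets ALPHA_8/RED_8/GREEN_8/BLUE_8 and the 16-bit build gets the _16 variants. A minimal sketch of what the expanded 8-bit macros amount to, going by the packed-ARGB layout these files use (the 16/8/0 shifts and the 0xff mask are visible in the removed Red/Green/Blue macros below; the alpha shift of 24 follows from the same layout, so this is not copied verbatim from the generated header):

    /* Packed ARGB in a uint32_t, alpha in the top byte -- the same layout
     * as the Red/Green/Blue macros removed from pixman-bits-image.c below. */
    #define ALPHA_8(x) ((x) >> 24)
    #define RED_8(x)   (((x) >> 16) & 0xff)
    #define GREEN_8(x) (((x) >> 8) & 0xff)
    #define BLUE_8(x)  ((x) & 0xff)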

diff --git a/pixman/make-combine.pl b/pixman/make-combine.pl
index d0b0adc..210a5da 100644
--- a/pixman/make-combine.pl
+++ b/pixman/make-combine.pl
@@ -65,6 +65,10 @@ while (<STDIN>) {
     s/combine_width/combine_$pixel_size/;
     s/_pixman_setup_combiner_functions_width/_pixman_setup_combiner_functions_$pixel_size/;
     s/UNc/UN$size/g;
+    s/ALPHA_c/ALPHA_$size/g;
+    s/RED_c/RED_$size/g;
+    s/GREEN_c/GREEN_$size/g;
+    s/BLUE_c/BLUE_$size/g;
 
     # Convert comp*_t values into the appropriate real types.
     s/comp1_t/uint${size}_t/g;
diff --git a/pixman/pixman-bits-image.c b/pixman/pixman-bits-image.c
index 864c17f..8feb495 100644
--- a/pixman/pixman-bits-image.c
+++ b/pixman/pixman-bits-image.c
@@ -31,10 +31,6 @@
 #include "pixman-private.h"
 #include "pixman-combine32.h"
 
-#define Red(x) (((x) >> 16) & 0xff)
-#define Green(x) (((x) >> 8) & 0xff)
-#define Blue(x) ((x) & 0xff)
-
 /* Store functions */
 
 static void
@@ -441,10 +437,10 @@ bits_image_fetch_convolution_pixels (bits_image_t *image,
 		{
 		    uint32_t c = *u++;
 
-		    srtot += Red(c) * f;
-		    sgtot += Green(c) * f;
-		    sbtot += Blue(c) * f;
-		    satot += Alpha(c) * f;
+		    srtot += RED_8(c) * f;
+		    sgtot += GREEN_8(c) * f;
+		    sbtot += BLUE_8(c) * f;
+		    satot += ALPHA_8(c) * f;
 		}
 	    }
 
diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index a963518..5fc07e7 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -9,10 +9,6 @@
 
 #include "pixman-combine.h"
 
-#define Red(x) (((x) >> R_SHIFT) & MASK)
-#define Green(x) (((x) >> G_SHIFT) & MASK)
-#define Blue(x) ((x) & MASK)
-
 /*** per channel helper functions ***/
 
 static void
@@ -67,7 +63,7 @@ fbCombineMaskValueC (comp4_t *src, const comp4_t *mask)
 }
 
 static void
-fbCombineMaskAlphaC (const comp4_t *src, comp4_t *mask)
+fbCombineMaskALPHA_cC (const comp4_t *src, comp4_t *mask)
 {
     comp4_t a = *(mask);
     comp4_t	x;
@@ -161,7 +157,7 @@ fbCombineOverU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
-        comp4_t ia = Alpha(~s);
+        comp4_t ia = ALPHA_c(~s);
 
         UNcx4_MUL_UNc_ADD_UNcx4(d, ia, s);
 	*(dest + i) = d;
@@ -177,7 +173,7 @@ fbCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
-        comp4_t ia = Alpha(~*(dest + i));
+        comp4_t ia = ALPHA_c(~*(dest + i));
         UNcx4_MUL_UNc_ADD_UNcx4(s, ia, d);
 	*(dest + i) = s;
     }
@@ -191,7 +187,7 @@ fbCombineInU (pixman_implementation_t *imp, pixman_op_t op,
     int i;
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
-        comp4_t a = Alpha(*(dest + i));
+        comp4_t a = ALPHA_c(*(dest + i));
         UNcx4_MUL_UNc(s, a);
 	*(dest + i) = s;
     }
@@ -206,7 +202,7 @@ fbCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
 	comp4_t s = combineMask (src, mask, i);
 	comp4_t d = *(dest + i);
-        comp4_t a = Alpha(s);
+        comp4_t a = ALPHA_c(s);
         UNcx4_MUL_UNc(d, a);
 	*(dest + i) = d;
     }
@@ -220,7 +216,7 @@ fbCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
     int i;
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
-        comp4_t a = Alpha(~*(dest + i));
+        comp4_t a = ALPHA_c(~*(dest + i));
         UNcx4_MUL_UNc(s, a);
 	*(dest + i) = s;
     }
@@ -235,7 +231,7 @@ fbCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
 	comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
-        comp4_t a = Alpha(~s);
+        comp4_t a = ALPHA_c(~s);
         UNcx4_MUL_UNc(d, a);
 	*(dest + i) = d;
     }
@@ -252,8 +248,8 @@ fbCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
-        comp4_t dest_a = Alpha(d);
-        comp4_t src_ia = Alpha(~s);
+        comp4_t dest_a = ALPHA_c(d);
+        comp4_t src_ia = ALPHA_c(~s);
 
         UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(s, dest_a, d, src_ia);
 	*(dest + i) = s;
@@ -271,8 +267,8 @@ fbCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
-        comp4_t src_a = Alpha(s);
-        comp4_t dest_ia = Alpha(~d);
+        comp4_t src_a = ALPHA_c(s);
+        comp4_t dest_ia = ALPHA_c(~d);
 
         UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(s, dest_ia, d, src_a);
 	*(dest + i) = s;
@@ -290,8 +286,8 @@ fbCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
-        comp4_t src_ia = Alpha(~s);
-        comp4_t dest_ia = Alpha(~d);
+        comp4_t src_ia = ALPHA_c(~s);
+        comp4_t dest_ia = ALPHA_c(~d);
 
         UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(s, dest_ia, d, src_ia);
 	*(dest + i) = s;
@@ -373,8 +369,8 @@ fbCombineMultiplyU (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
 	comp4_t ss = s;
-        comp4_t src_ia = Alpha (~s);
-	comp4_t dest_ia = Alpha (~d);
+        comp4_t src_ia = ALPHA_c (~s);
+	comp4_t dest_ia = ALPHA_c (~d);
 
 	UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc (ss, dest_ia, d, src_ia);
 	UNcx4_MUL_UNcx4 (d, s);
@@ -393,7 +389,7 @@ fbCombineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t s = *(src + i);
 	comp4_t d = *(dest + i);
 	comp4_t r = d;
-	comp4_t dest_ia = Alpha (~d);
+	comp4_t dest_ia = ALPHA_c (~d);
 
 	fbCombineMaskValueC (&s, &m);
 
@@ -414,9 +410,9 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
     for (i = 0; i < width; ++i) {		    \
         comp4_t s = combineMask (src, mask, i);     \
         comp4_t d = *(dest + i);		    \
-        comp1_t sa = Alpha(s);			    \
+        comp1_t sa = ALPHA_c(s);			    \
         comp1_t isa = ~sa;			    \
-        comp1_t da = Alpha(d);	  		    \
+        comp1_t da = ALPHA_c(d);	  		    \
         comp1_t ida = ~da;			    \
 	comp4_t	result;				    \
 						    \
@@ -425,9 +421,9 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
 						    \
 	*(dest + i) = result +			    \
 	    (DIV_ONE_UNc (sa * da) << A_SHIFT) +	    \
-	    (blend_ ## name (Red (d), da, Red (s), sa) << R_SHIFT) + \
-	    (blend_ ## name (Green (d), da, Green (s), sa) << G_SHIFT) + \
-	    (blend_ ## name (Blue (d), da, Blue (s), sa)); \
+	    (blend_ ## name (RED_c (d), da, RED_c (s), sa) << R_SHIFT) + \
+	    (blend_ ## name (GREEN_c (d), da, GREEN_c (s), sa) << G_SHIFT) + \
+	    (blend_ ## name (BLUE_c (d), da, BLUE_c (s), sa)); \
     }						    \
 }						    \
 						    \
@@ -440,7 +436,7 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 	comp4_t m = *(mask + i);		    \
 	comp4_t s = *(src + i);                     \
 	comp4_t d = *(dest + i);		    \
-	comp1_t da = Alpha(d);  		    \
+	comp1_t da = ALPHA_c(d);  		    \
 	comp1_t ida = ~da;			    \
 	comp4_t result;				    \
 						    \
@@ -450,10 +446,10 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (result, ~m, s, ida);	    \
 						    \
 	result +=				    \
-	    (DIV_ONE_UNc (Alpha (m) * da) << A_SHIFT) +				\
-	    (blend_ ## name (Red (d), da, Red (s), Red (m)) << R_SHIFT) +	\
-	    (blend_ ## name (Green (d), da, Green (s), Green (m)) << G_SHIFT) +	\
-	    (blend_ ## name (Blue (d), da, Blue (s), Blue (m)));			\
+	    (DIV_ONE_UNc (ALPHA_c (m) * da) << A_SHIFT) +				\
+	    (blend_ ## name (RED_c (d), da, RED_c (s), RED_c (m)) << R_SHIFT) +	\
+	    (blend_ ## name (GREEN_c (d), da, GREEN_c (s), GREEN_c (m)) << G_SHIFT) +	\
+	    (blend_ ## name (BLUE_c (d), da, BLUE_c (s), BLUE_c (m)));			\
 						    \
 	*(dest + i) = result;			    \
     }						    \
@@ -788,21 +784,21 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
     for (i = 0; i < width; ++i) {					\
         comp4_t s = combineMask (src, mask, i);				\
         comp4_t d = *(dest + i);					\
-        comp1_t sa = Alpha(s);						\
+        comp1_t sa = ALPHA_c(s);						\
         comp1_t isa = ~sa;						\
-        comp1_t da = Alpha(d);						\
+        comp1_t da = ALPHA_c(d);						\
         comp1_t ida = ~da;						\
 	comp4_t	result;							\
 	comp4_t sc[3], dc[3], c[3];					\
 									\
 	result = d;							\
         UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(result, isa, s, ida);				\
-	dc[0] = Red (d);						\
-	sc[0] = Red (s);						\
-	dc[1] = Green (d);						\
-	sc[1] = Green (s);						\
-	dc[2] = Blue (d);						\
-	sc[2] = Blue (s);						\
+	dc[0] = RED_c (d);						\
+	sc[0] = RED_c (s);						\
+	dc[1] = GREEN_c (d);						\
+	sc[1] = GREEN_c (s);						\
+	dc[2] = BLUE_c (d);						\
+	sc[2] = BLUE_c (s);						\
 	blend_ ## name (c, dc, da, sc, sa);				\
 									\
 	*(dest + i) = result +						\
@@ -1420,7 +1416,7 @@ fbCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t m = *(mask + i);
         comp4_t a;
 
-	fbCombineMaskAlphaC (&s, &m);
+	fbCombineMaskALPHA_cC (&s, &m);
 
 	a = m;
         if (a != ~0)
@@ -1473,7 +1469,7 @@ fbCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t m = *(mask + i);
 	comp4_t a;
 
-	fbCombineMaskAlphaC (&s, &m);
+	fbCombineMaskALPHA_cC (&s, &m);
 
         a = ~m;
         if (a != ~0)
diff --git a/pixman/pixman-combine.h.template b/pixman/pixman-combine.h.template
index 46a1aba..302d36a 100644
--- a/pixman/pixman-combine.h.template
+++ b/pixman/pixman-combine.h.template
@@ -15,7 +15,10 @@
 #define RB_ONE_HALF
 #define RB_MASK_PLUS_ONE
 
-#define Alpha(x) ((x) >> A_SHIFT)
+#define ALPHA_c(x) ((x) >> A_SHIFT)
+#define RED_c(x) (((x) >> R_SHIFT) & MASK)
+#define GREEN_c(x) (((x) >> G_SHIFT) & MASK)
+#define BLUE_c(x) ((x) & MASK)
 
 /*
  * Helper macros.
commit 2f3e3d62f7727a652090ea003c98218f3b550818
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 11:17:28 2009 -0400

    Change some macro names to be all uppercase

diff --git a/pixman/pixman-matrix.c b/pixman/pixman-matrix.c
index dda6214..29f6025 100644
--- a/pixman/pixman-matrix.c
+++ b/pixman/pixman-matrix.c
@@ -299,13 +299,13 @@ within_epsilon(pixman_fixed_t a, pixman_fixed_t b, pixman_fixed_t epsilon)
 	return t <= epsilon;
 }
 
-#define epsilon	(pixman_fixed_t) (2)
+#define EPSILON	(pixman_fixed_t) (2)
 
-#define IS_SAME(a,b) (within_epsilon(a, b, epsilon))
-#define IS_ZERO(a)   (within_epsilon(a, 0, epsilon))
-#define IS_ONE(a)    (within_epsilon(a, F(1), epsilon))
-#define IS_UNIT(a)   (within_epsilon(a, F( 1), epsilon) || \
-		      within_epsilon(a, F(-1), epsilon) || \
+#define IS_SAME(a,b) (within_epsilon(a, b, EPSILON))
+#define IS_ZERO(a)   (within_epsilon(a, 0, EPSILON))
+#define IS_ONE(a)    (within_epsilon(a, F(1), EPSILON))
+#define IS_UNIT(a)   (within_epsilon(a, F( 1), EPSILON) || \
+		      within_epsilon(a, F(-1), EPSILON) || \
 		      IS_ZERO(a))
 #define IS_INT(a)    (IS_ZERO(pixman_fixed_frac(a)))
 
diff --git a/pixman/pixman-region.c b/pixman/pixman-region.c
index 52ce83e..ea81a09 100644
--- a/pixman/pixman-region.c
+++ b/pixman/pixman-region.c
@@ -63,8 +63,7 @@ SOFTWARE.
 #define PIXREGION_TOP(reg) PIXREGION_BOX(reg, (reg)->data->numRects)
 #define PIXREGION_END(reg) PIXREGION_BOX(reg, (reg)->data->numRects - 1)
 
-
-#define good(reg) assert(PREFIX(_selfcheck) (reg))
+#define GOOD(reg) assert(PREFIX(_selfcheck) (reg))
 
 static const box_type_t PREFIX(_emptyBox_) = {0, 0, 0, 0};
 static const region_data_type_t PREFIX(_emptyData_) = {0, 0};
@@ -160,7 +159,7 @@ PIXREGION_SZOF(size_t n)
 }
 
 static void *
-allocData(size_t n)
+alloc_data(size_t n)
 {
     size_t sz = PIXREGION_SZOF(n);
     if (!sz)
@@ -169,7 +168,7 @@ allocData(size_t n)
     return malloc(sz);
 }
 
-#define freeData(reg) if ((reg)->data && (reg)->data->size) free((reg)->data)
+#define FREE_DATA(reg) if ((reg)->data && (reg)->data->size) free((reg)->data)
 
 #define RECTALLOC_BAIL(region,n,bail) \
 if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)) \
@@ -293,8 +292,8 @@ PREFIX(_init_with_extents) (region_type_t *region, box_type_t *extents)
 PIXMAN_EXPORT void
 PREFIX(_fini) (region_type_t *region)
 {
-    good (region);
-    freeData (region);
+    GOOD (region);
+    FREE_DATA (region);
 }
 
 PIXMAN_EXPORT int
@@ -316,7 +315,7 @@ PREFIX(_rectangles) (region_type_t *region,
 static pixman_bool_t
 pixman_break (region_type_t *region)
 {
-    freeData (region);
+    FREE_DATA (region);
     region->extents = *pixman_region_emptyBox;
     region->data = pixman_brokendata;
     return FALSE;
@@ -330,7 +329,7 @@ pixman_rect_alloc (region_type_t * region, int n)
     if (!region->data)
     {
 	n++;
-	region->data = allocData(n);
+	region->data = alloc_data(n);
 	if (!region->data)
 	    return pixman_break (region);
 	region->data->numRects = 1;
@@ -338,7 +337,7 @@ pixman_rect_alloc (region_type_t * region, int n)
     }
     else if (!region->data->size)
     {
-	region->data = allocData(n);
+	region->data = alloc_data(n);
 	if (!region->data)
 	    return pixman_break (region);
 	region->data->numRects = 0;
@@ -369,21 +368,21 @@ pixman_rect_alloc (region_type_t * region, int n)
 PIXMAN_EXPORT pixman_bool_t
 PREFIX(_copy) (region_type_t *dst, region_type_t *src)
 {
-    good(dst);
-    good(src);
+    GOOD(dst);
+    GOOD(src);
     if (dst == src)
 	return TRUE;
     dst->extents = src->extents;
     if (!src->data || !src->data->size)
     {
-	freeData(dst);
+	FREE_DATA(dst);
 	dst->data = src->data;
 	return TRUE;
     }
     if (!dst->data || (dst->data->size < src->data->numRects))
     {
-	freeData(dst);
-	dst->data = allocData(src->data->numRects);
+	FREE_DATA(dst);
+	dst->data = alloc_data(src->data->numRects);
 	if (!dst->data)
 	    return pixman_break (dst);
 	dst->data->size = src->data->numRects;
@@ -806,13 +805,13 @@ pixman_op(
 
     if (!(numRects = newReg->data->numRects))
     {
-	freeData(newReg);
+	FREE_DATA(newReg);
 	newReg->data = pixman_region_emptyData;
     }
     else if (numRects == 1)
     {
 	newReg->extents = *PIXREGION_BOXPTR(newReg);
-	freeData(newReg);
+	FREE_DATA(newReg);
 	newReg->data = (region_data_type_t *)NULL;
     }
     else
@@ -864,7 +863,7 @@ pixman_set_extents (region_type_t *region)
      * Since box is the first rectangle in the region, it must have the
      * smallest y1 and since boxEnd is the last rectangle in the region,
      * it must have the largest y2, because of banding. Initialize x1 and
-     * x2 from  box and boxEnd, resp., as good things to initialize them
+     * x2 from  box and boxEnd, resp., as GOOD things to initialize them
      * to...
      */
     region->extents.x1 = box->x1;
@@ -952,15 +951,15 @@ PREFIX(_intersect) (region_type_t * 	newReg,
 			 region_type_t * 	reg1,
 			 region_type_t *	reg2)
 {
-    good(reg1);
-    good(reg2);
-    good(newReg);
+    GOOD(reg1);
+    GOOD(reg2);
+    GOOD(newReg);
    /* check for trivial reject */
     if (PIXREGION_NIL(reg1)  || PIXREGION_NIL(reg2) ||
 	!EXTENTCHECK(&reg1->extents, &reg2->extents))
     {
 	/* Covers about 20% of all cases */
-	freeData(newReg);
+	FREE_DATA(newReg);
 	newReg->extents.x2 = newReg->extents.x1;
 	newReg->extents.y2 = newReg->extents.y1;
 	if (PIXREGION_NAR(reg1) || PIXREGION_NAR(reg2))
@@ -978,7 +977,7 @@ PREFIX(_intersect) (region_type_t * 	newReg,
 	newReg->extents.y1 = MAX(reg1->extents.y1, reg2->extents.y1);
 	newReg->extents.x2 = MIN(reg1->extents.x2, reg2->extents.x2);
 	newReg->extents.y2 = MIN(reg1->extents.y2, reg2->extents.y2);
-	freeData(newReg);
+	FREE_DATA(newReg);
 	newReg->data = (region_data_type_t *)NULL;
     }
     else if (!reg2->data && SUBSUMES(&reg2->extents, &reg1->extents))
@@ -1003,7 +1002,7 @@ PREFIX(_intersect) (region_type_t * 	newReg,
 	pixman_set_extents(newReg);
     }
 
-    good(newReg);
+    GOOD(newReg);
     return(TRUE);
 }
 
@@ -1133,9 +1132,9 @@ PREFIX(_union) (region_type_t *newReg,
     /* Return TRUE if some overlap
      * between reg1, reg2
      */
-    good(reg1);
-    good(reg2);
-    good(newReg);
+    GOOD(reg1);
+    GOOD(reg2);
+    GOOD(newReg);
     /*  checks all the simple cases */
 
     /*
@@ -1197,7 +1196,7 @@ PREFIX(_union) (region_type_t *newReg,
     newReg->extents.y1 = MIN(reg1->extents.y1, reg2->extents.y1);
     newReg->extents.x2 = MAX(reg1->extents.x2, reg2->extents.x2);
     newReg->extents.y2 = MAX(reg1->extents.y2, reg2->extents.y2);
-    good(newReg);
+    GOOD(newReg);
     return TRUE;
 }
 
@@ -1334,7 +1333,7 @@ validate (region_type_t * badreg,
     *overlap = FALSE;
     if (!badreg->data)
     {
-	good(badreg);
+	GOOD(badreg);
 	return TRUE;
     }
     numRects = badreg->data->numRects;
@@ -1342,21 +1341,21 @@ validate (region_type_t * badreg,
     {
 	if (PIXREGION_NAR(badreg))
 	    return FALSE;
-	good(badreg);
+	GOOD(badreg);
 	return TRUE;
     }
     if (badreg->extents.x1 < badreg->extents.x2)
     {
 	if ((numRects) == 1)
 	{
-	    freeData(badreg);
+	    FREE_DATA(badreg);
 	    badreg->data = (region_data_type_t *) NULL;
 	}
 	else
 	{
 	    DOWNSIZE(badreg, numRects);
 	}
-	good(badreg);
+	GOOD(badreg);
 	return TRUE;
     }
 
@@ -1472,7 +1471,7 @@ NextRect: ;
 	Coalesce(reg, rit->prevBand, rit->curBand);
 	if (reg->data->numRects == 1) /* keep unions happy below */
 	{
-	    freeData(reg);
+	    FREE_DATA(reg);
 	    reg->data = (region_data_type_t *)NULL;
 	}
     }
@@ -1495,7 +1494,7 @@ NextRect: ;
 		reg->extents.x2 = hreg->extents.x2;
 	    if (hreg->extents.y2 > reg->extents.y2)
 		reg->extents.y2 = hreg->extents.y2;
-	    freeData(hreg);
+	    FREE_DATA(hreg);
 	}
 	numRI -= half;
 	if (!ret)
@@ -1504,11 +1503,11 @@ NextRect: ;
     *badreg = ri[0].reg;
     if (ri != stack_regions)
 	free(ri);
-    good(badreg);
+    GOOD(badreg);
     return ret;
 bail:
     for (i = 0; i < numRI; i++)
-	freeData(&ri[i].reg);
+	FREE_DATA(&ri[i].reg);
     if (ri != stack_regions)
 	free (ri);
 
@@ -1664,9 +1663,9 @@ PREFIX(_subtract) (region_type_t *	regD,
 {
     int overlap; /* result ignored */
 
-    good(regM);
-    good(regS);
-    good(regD);
+    GOOD(regM);
+    GOOD(regS);
+    GOOD(regD);
    /* check for trivial rejects */
     if (PIXREGION_NIL(regM) || PIXREGION_NIL(regS) ||
 	!EXTENTCHECK(&regM->extents, &regS->extents))
@@ -1677,7 +1676,7 @@ PREFIX(_subtract) (region_type_t *	regD,
     }
     else if (regM == regS)
     {
-	freeData(regD);
+	FREE_DATA(regD);
 	regD->extents.x2 = regD->extents.x1;
 	regD->extents.y2 = regD->extents.y1;
 	regD->data = pixman_region_emptyData;
@@ -1698,7 +1697,7 @@ PREFIX(_subtract) (region_type_t *	regD,
      * due to coalescing, so we have to examine fewer rectangles.
      */
     pixman_set_extents(regD);
-    good(regD);
+    GOOD(regD);
     return TRUE;
 }
 
@@ -1730,15 +1729,15 @@ PIXMAN_EXPORT PREFIX(_inverse) (region_type_t * 	  newReg,       /* Destination
 				 * bounding box */
     int	  overlap;	/* result ignored */
 
-    good(reg1);
-    good(newReg);
+    GOOD(reg1);
+    GOOD(newReg);
    /* check for trivial rejects */
     if (PIXREGION_NIL(reg1) || !EXTENTCHECK(invRect, &reg1->extents))
     {
 	if (PIXREGION_NAR(reg1))
 	    return pixman_break (newReg);
 	newReg->extents = *invRect;
-	freeData(newReg);
+	FREE_DATA(newReg);
 	newReg->data = (region_data_type_t *)NULL;
         return TRUE;
     }
@@ -1759,7 +1758,7 @@ PIXMAN_EXPORT PREFIX(_inverse) (region_type_t * 	  newReg,       /* Destination
      * due to coalescing, so we have to examine fewer rectangles.
      */
     pixman_set_extents(newReg);
-    good(newReg);
+    GOOD(newReg);
     return TRUE;
 }
 
@@ -1791,7 +1790,7 @@ PIXMAN_EXPORT PREFIX(_contains_rectangle) (region_type_t *  region,
     int			partIn, partOut;
     int			numRects;
 
-    good(region);
+    GOOD(region);
     numRects = PIXREGION_NUM_RECTS(region);
     /* useful optimization */
     if (!numRects || !EXTENTCHECK(&region->extents, prect))
@@ -1892,7 +1891,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
     int nbox;
     box_type_t * pbox;
 
-    good(region);
+    GOOD(region);
     region->extents.x1 = x1 = region->extents.x1 + x;
     region->extents.y1 = y1 = region->extents.y1 + y;
     region->extents.x2 = x2 = region->extents.x2 + x;
@@ -1915,7 +1914,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
     {
 	region->extents.x2 = region->extents.x1;
 	region->extents.y2 = region->extents.y1;
-	freeData(region);
+	FREE_DATA(region);
 	region->data = pixman_region_emptyData;
 	return;
     }
@@ -1958,7 +1957,7 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
 	    if (region->data->numRects == 1)
 	    {
 		region->extents = *PIXREGION_BOXPTR(region);
-		freeData(region);
+		FREE_DATA(region);
 		region->data = (region_data_type_t *)NULL;
 	    }
 	    else
@@ -1970,11 +1969,11 @@ PREFIX(_translate) (region_type_t * region, int x, int y)
 PIXMAN_EXPORT void
 PREFIX(_reset) (region_type_t *region, box_type_t *box)
 {
-    good(region);
+    GOOD(region);
     assert(box->x1<=box->x2);
     assert(box->y1<=box->y2);
     region->extents = *box;
-    freeData(region);
+    FREE_DATA(region);
     region->data = (region_data_type_t *)NULL;
 }
 
@@ -1987,7 +1986,7 @@ PREFIX(_contains_point) (region_type_t * region,
     box_type_t *pbox, *pboxEnd;
     int numRects;
 
-    good(region);
+    GOOD(region);
     numRects = PIXREGION_NUM_RECTS(region);
     if (!numRects || !INBOX(&region->extents, x, y))
         return(FALSE);
@@ -2020,14 +2019,14 @@ PREFIX(_contains_point) (region_type_t * region,
 PIXMAN_EXPORT int
 PREFIX(_not_empty) (region_type_t * region)
 {
-    good(region);
+    GOOD(region);
     return(!PIXREGION_NIL(region));
 }
 
 PIXMAN_EXPORT box_type_t *
 PREFIX(_extents) (region_type_t * region)
 {
-    good(region);
+    GOOD(region);
     return(&region->extents);
 }
 
commit 8339a4abc4edcaee6fafbde1a147ba7fcaa9c108
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 09:29:32 2009 -0400

    Change names of the FbByte* macros to be more descriptive.
    
    Unfortunately, they are also more cryptic. For example, FbByteMul()
    becomes UN8x4_MUL_UN8() to indicate that it multiplies four UN8
    values by one UN8 value.
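
To make the new names concrete: UN8x4_MUL_UN8 treats a uint32_t as four unsigned-normalized 8-bit components and scales each of them by a single UN8 factor, i.e. a component-wise x*a/255 with rounding. A reference sketch of that semantics in plain C (the helper name here is made up for illustration; the real macro is statement-like and modifies its first argument in place, as the call sites in the diff below show):

    #include <stdint.h>

    /* Reference semantics of UN8x4_MUL_UN8: multiply each of the four
     * 8-bit components packed in x by a, dividing by 255 with rounding. */
    static uint32_t
    un8x4_mul_un8_ref (uint32_t x, uint8_t a)
    {
        uint32_t result = 0;
        int shift;

        for (shift = 0; shift < 32; shift += 8)
        {
            uint32_t c = (x >> shift) & 0xff;

            c = (c * a + 127) / 255;   /* rounded UN8 multiply */
            result |= c << shift;
        }

        return result;
    }

Read the same way, the UNcx4_MUL_UNc_ADD_UNcx4 (d, ia, s) call in fbCombineOverU below computes d = d*ia/255 + s per component, which is the OVER operator for premultiplied pixels.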

diff --git a/pixman/make-combine.pl b/pixman/make-combine.pl
index 88b1156..d0b0adc 100644
--- a/pixman/make-combine.pl
+++ b/pixman/make-combine.pl
@@ -64,10 +64,7 @@ while (<STDIN>) {
     s/\bFbComposeFunctions\b/FbComposeFunctions$pixel_size/;
     s/combine_width/combine_$pixel_size/;
     s/_pixman_setup_combiner_functions_width/_pixman_setup_combiner_functions_$pixel_size/;
-    s/MUL_UN_width/MUL_UN$size/g;
-    s/DIV_UN_width/DIV_UN$size/g;
-    s/ADD_UN_width/ADD_UN$size/g;
-    s/DIV_ONE_UN_width/DIV_ONE_UN$size/g;
+    s/UNc/UN$size/g;
 
     # Convert comp*_t values into the appropriate real types.
     s/comp1_t/uint${size}_t/g;
diff --git a/pixman/pixman-bits-image.c b/pixman/pixman-bits-image.c
index 3a54a52..864c17f 100644
--- a/pixman/pixman-bits-image.c
+++ b/pixman/pixman-bits-image.c
@@ -160,7 +160,7 @@ bits_image_fetch_alpha_pixels (bits_image_t *image, uint32_t *buffer, int n_pixe
 	    int a = alpha_pixels[j] >> 24;
 	    uint32_t p = buffer[2 * i - j] | 0xff000000;
 
-	    FbByteMul (p, a);
+	    UN8x4_MUL_UN8 (p, a);
 
 	    buffer[i++] = p;
 	}
diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index 87d3fdb..a963518 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -40,9 +40,9 @@ fbCombineMaskC (comp4_t *src, comp4_t *mask)
     }
 
     xa = x >> A_SHIFT;
-    FbByteMulC(x, a);
+    UNcx4_MUL_UNcx4(x, a);
     *(src) = x;
-    FbByteMul(a, xa);
+    UNcx4_MUL_UNc(a, xa);
     *(mask) = a;
 }
 
@@ -62,7 +62,7 @@ fbCombineMaskValueC (comp4_t *src, const comp4_t *mask)
 	return;
 
     x = *(src);
-    FbByteMulC(x, a);
+    UNcx4_MUL_UNcx4(x, a);
     *(src) =x;
 }
 
@@ -87,7 +87,7 @@ fbCombineMaskAlphaC (const comp4_t *src, comp4_t *mask)
 	return;
     }
 
-    FbByteMul(a, x);
+    UNcx4_MUL_UNc(a, x);
     *(mask) = a;
 }
 
@@ -121,7 +121,7 @@ combineMask (const comp4_t *src, const comp4_t *mask, int i)
     s = *(src + i);
 
     if (mask)
-	FbByteMul (s, m);
+	UNcx4_MUL_UNc (s, m);
 
     return s;
 }
@@ -163,7 +163,7 @@ fbCombineOverU (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t d = *(dest + i);
         comp4_t ia = Alpha(~s);
 
-        FbByteMulAdd(d, ia, s);
+        UNcx4_MUL_UNc_ADD_UNcx4(d, ia, s);
 	*(dest + i) = d;
     }
 }
@@ -178,7 +178,7 @@ fbCombineOverReverseU (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t ia = Alpha(~*(dest + i));
-        FbByteMulAdd(s, ia, d);
+        UNcx4_MUL_UNc_ADD_UNcx4(s, ia, d);
 	*(dest + i) = s;
     }
 }
@@ -192,7 +192,7 @@ fbCombineInU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t a = Alpha(*(dest + i));
-        FbByteMul(s, a);
+        UNcx4_MUL_UNc(s, a);
 	*(dest + i) = s;
     }
 }
@@ -207,7 +207,7 @@ fbCombineInReverseU (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t s = combineMask (src, mask, i);
 	comp4_t d = *(dest + i);
         comp4_t a = Alpha(s);
-        FbByteMul(d, a);
+        UNcx4_MUL_UNc(d, a);
 	*(dest + i) = d;
     }
 }
@@ -221,7 +221,7 @@ fbCombineOutU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t a = Alpha(~*(dest + i));
-        FbByteMul(s, a);
+        UNcx4_MUL_UNc(s, a);
 	*(dest + i) = s;
     }
 }
@@ -236,7 +236,7 @@ fbCombineOutReverseU (pixman_implementation_t *imp, pixman_op_t op,
 	comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
         comp4_t a = Alpha(~s);
-        FbByteMul(d, a);
+        UNcx4_MUL_UNc(d, a);
 	*(dest + i) = d;
     }
 }
@@ -255,7 +255,7 @@ fbCombineAtopU (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t dest_a = Alpha(d);
         comp4_t src_ia = Alpha(~s);
 
-        FbByteAddMul(s, dest_a, d, src_ia);
+        UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(s, dest_a, d, src_ia);
 	*(dest + i) = s;
     }
 }
@@ -274,7 +274,7 @@ fbCombineAtopReverseU (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t src_a = Alpha(s);
         comp4_t dest_ia = Alpha(~d);
 
-        FbByteAddMul(s, dest_ia, d, src_a);
+        UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(s, dest_ia, d, src_a);
 	*(dest + i) = s;
     }
 }
@@ -293,7 +293,7 @@ fbCombineXorU (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t src_ia = Alpha(~s);
         comp4_t dest_ia = Alpha(~d);
 
-        FbByteAddMul(s, dest_ia, d, src_ia);
+        UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(s, dest_ia, d, src_ia);
 	*(dest + i) = s;
     }
 }
@@ -306,7 +306,7 @@ fbCombineAddU (pixman_implementation_t *imp, pixman_op_t op,
     for (i = 0; i < width; ++i) {
         comp4_t s = combineMask (src, mask, i);
         comp4_t d = *(dest + i);
-        FbByteAdd(d, s);
+        UNcx4_ADD_UNcx4(d, s);
 	*(dest + i) = d;
     }
 }
@@ -328,10 +328,10 @@ fbCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
         da = ~d >> A_SHIFT;
         if (sa > da)
         {
-            sa = DIV_UN_width(da, sa);
-            FbByteMul(s, sa);
+            sa = DIV_UNc(da, sa);
+            UNcx4_MUL_UNc(s, sa);
         };
-        FbByteAdd(d, s);
+        UNcx4_ADD_UNcx4(d, s);
 	*(dest + i) = d;
     }
 }
@@ -376,9 +376,9 @@ fbCombineMultiplyU (pixman_implementation_t *imp, pixman_op_t op,
         comp4_t src_ia = Alpha (~s);
 	comp4_t dest_ia = Alpha (~d);
 
-	FbByteAddMul (ss, dest_ia, d, src_ia);
-	FbByteMulC (d, s);
-	FbByteAdd (d, ss); 	
+	UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc (ss, dest_ia, d, src_ia);
+	UNcx4_MUL_UNcx4 (d, s);
+	UNcx4_ADD_UNcx4 (d, ss); 	
 	*(dest + i) = d;
     }
 }
@@ -397,9 +397,9 @@ fbCombineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
 
 	fbCombineMaskValueC (&s, &m);
 
-	FbByteAddMulC (r, ~m, s, dest_ia);
-	FbByteMulC (d, s);
-	FbByteAdd (r, d);
+	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (r, ~m, s, dest_ia);
+	UNcx4_MUL_UNcx4 (d, s);
+	UNcx4_ADD_UNcx4 (r, d);
 
 	*(dest + i) = r;
     }
@@ -421,10 +421,10 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
 	comp4_t	result;				    \
 						    \
 	result = d;				    \
-        FbByteAddMul(result, isa, s, ida);	    \
+        UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(result, isa, s, ida);	    \
 						    \
 	*(dest + i) = result +			    \
-	    (DIV_ONE_UN_width (sa * da) << A_SHIFT) +	    \
+	    (DIV_ONE_UNc (sa * da) << A_SHIFT) +	    \
 	    (blend_ ## name (Red (d), da, Red (s), sa) << R_SHIFT) + \
 	    (blend_ ## name (Green (d), da, Green (s), sa) << G_SHIFT) + \
 	    (blend_ ## name (Blue (d), da, Blue (s), sa)); \
@@ -447,10 +447,10 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 	fbCombineMaskValueC (&s, &m);		    \
 						    \
 	result = d;				    \
-	FbByteAddMulC (result, ~m, s, ida);	    \
+	UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc (result, ~m, s, ida);	    \
 						    \
 	result +=				    \
-	    (DIV_ONE_UN_width (Alpha (m) * da) << A_SHIFT) +				\
+	    (DIV_ONE_UNc (Alpha (m) * da) << A_SHIFT) +				\
 	    (blend_ ## name (Red (d), da, Red (s), Red (m)) << R_SHIFT) +	\
 	    (blend_ ## name (Green (d), da, Green (s), Green (m)) << G_SHIFT) +	\
 	    (blend_ ## name (Blue (d), da, Blue (s), Blue (m)));			\
@@ -467,7 +467,7 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 static inline comp4_t
 blend_Screen (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
-  return DIV_ONE_UN_width (sca * da + dca * sa - sca * dca);
+  return DIV_ONE_UNc (sca * da + dca * sa - sca * dca);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Screen)
@@ -490,7 +490,7 @@ blend_Overlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 	rca = 2 * sca * dca;
     else
 	rca = sa * da - 2 * (da - dca) * (sa - sca);
-    return DIV_ONE_UN_width (rca);
+    return DIV_ONE_UNc (rca);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Overlay)
@@ -507,7 +507,7 @@ blend_Darken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     
     s = sca * da;
     d = dca * sa;
-    return DIV_ONE_UN_width (s > d ? d : s);
+    return DIV_ONE_UNc (s > d ? d : s);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Darken)
@@ -524,7 +524,7 @@ blend_Lighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     
     s = sca * da;
     d = dca * sa;
-    return DIV_ONE_UN_width (s > d ? s : d);
+    return DIV_ONE_UNc (s > d ? s : d);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Lighten)
@@ -542,10 +542,10 @@ static inline comp4_t
 blend_ColorDodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (sca >= sa) {
-	return DIV_ONE_UN_width (sa * da);
+	return DIV_ONE_UNc (sa * da);
     } else {
 	comp4_t rca = dca * sa * sa / (sa - sca);
-	return DIV_ONE_UN_width (rca > sa * da ? sa * da : rca);
+	return DIV_ONE_UNc (rca > sa * da ? sa * da : rca);
     }
 }
 
@@ -568,7 +568,7 @@ blend_ColorBurn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     } else {
 	comp4_t sada = sa * da;
 	comp4_t rca = (da - dca) * sa * sa / sca;
-	return DIV_ONE_UN_width (rca > sada ? 0 : sada - rca);
+	return DIV_ONE_UNc (rca > sada ? 0 : sada - rca);
     }
 }
 
@@ -586,9 +586,9 @@ static inline comp4_t
 blend_HardLight (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (2 * sca < sa)
-	return DIV_ONE_UN_width (2 * sca * dca);
+	return DIV_ONE_UNc (2 * sca * dca);
     else
-	return DIV_ONE_UN_width (sa * da - 2 * (da - dca) * (sa - sca));
+	return DIV_ONE_UNc (sa * da - 2 * (da - dca) * (sa - sca));
 }
 
 PDF_SEPARABLE_BLEND_MODE (HardLight)
@@ -642,9 +642,9 @@ blend_Difference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     comp4_t scada = sca * da;
 
     if (scada < dcasa)
-	return DIV_ONE_UN_width (dcasa - scada);
+	return DIV_ONE_UNc (dcasa - scada);
     else
-	return DIV_ONE_UN_width (scada - dcasa);
+	return DIV_ONE_UNc (scada - dcasa);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Difference)
@@ -660,7 +660,7 @@ PDF_SEPARABLE_BLEND_MODE (Difference)
 static inline comp4_t
 blend_Exclusion (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
-    return DIV_ONE_UN_width (sca * da + dca * sa - 2 * dca * sca);
+    return DIV_ONE_UNc (sca * da + dca * sa - 2 * dca * sca);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Exclusion)
@@ -796,7 +796,7 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
 	comp4_t sc[3], dc[3], c[3];					\
 									\
 	result = d;							\
-        FbByteAddMul(result, isa, s, ida);				\
+        UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(result, isa, s, ida);				\
 	dc[0] = Red (d);						\
 	sc[0] = Red (s);						\
 	dc[1] = Green (d);						\
@@ -806,10 +806,10 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
 	blend_ ## name (c, dc, da, sc, sa);				\
 									\
 	*(dest + i) = result +						\
-	    (DIV_ONE_UN_width (sa * da) << A_SHIFT) +				\
-	    (DIV_ONE_UN_width (c[0]) << R_SHIFT) +				\
-	    (DIV_ONE_UN_width (c[1]) << G_SHIFT) +				\
-	    (DIV_ONE_UN_width (c[2]));						\
+	    (DIV_ONE_UNc (sa * da) << A_SHIFT) +				\
+	    (DIV_ONE_UNc (c[0]) << R_SHIFT) +				\
+	    (DIV_ONE_UNc (c[1]) << G_SHIFT) +				\
+	    (DIV_ONE_UNc (c[2]));						\
     }									\
 }									
 
@@ -1014,7 +1014,7 @@ fbCombineDisjointOutPart (comp1_t a, comp1_t b)
     b = ~b;		    /* 1 - b */
     if (b >= a)		    /* 1 - b >= a -> (1-b)/a >= 1 */
 	return MASK;	    /* 1 */
-    return DIV_UN_width(b,a);     /* (1-b) / a */
+    return DIV_UNc(b,a);     /* (1-b) / a */
 }
 
 /* portion covered by both a and b */
@@ -1028,7 +1028,7 @@ fbCombineDisjointInPart (comp1_t a, comp1_t b)
     b = ~b;		    /* 1 - b */
     if (b >= a)		    /* 1 - b >= a -> (1-b)/a >= 1 */
 	return 0;	    /* 1 - 1 */
-    return ~DIV_UN_width(b,a);    /* 1 - (1-b) / a */
+    return ~DIV_UNc(b,a);    /* 1 - (1-b) / a */
 }
 
 /* portion covered by a but not b */
@@ -1042,7 +1042,7 @@ fbCombineConjointOutPart (comp1_t a, comp1_t b)
 
     if (b >= a)		    /* b >= a -> b/a >= 1 */
 	return 0x00;	    /* 0 */
-    return ~DIV_UN_width(b,a);    /* 1 - b/a */
+    return ~DIV_UNc(b,a);    /* 1 - b/a */
 }
 
 /* portion covered by both a and b */
@@ -1053,7 +1053,7 @@ fbCombineConjointInPart (comp1_t a, comp1_t b)
 
     if (b >= a)		    /* b >= a -> b/a >= 1 */
 	return MASK;	    /* 1 */
-    return DIV_UN_width(b,a);     /* b/a */
+    return DIV_UNc(b,a);     /* b/a */
 }
 
 #define GetComp(v,i)   ((comp2_t) (comp1_t) ((v) >> i))
@@ -1061,8 +1061,8 @@ fbCombineConjointInPart (comp1_t a, comp1_t b)
 #define Add(x,y,i,t)   ((t) = GetComp(x,i) + GetComp(y,i),              \
                         (comp4_t) ((comp1_t) ((t) | (0 - ((t) >> G_SHIFT)))) << (i))
 
-#define FbGen(x,y,i,ax,ay,t,u,v) ((t) = (MUL_UN_width(GetComp(y,i),ay,(u)) + \
-					 MUL_UN_width(GetComp(x,i),ax,(v))), \
+#define FbGen(x,y,i,ax,ay,t,u,v) ((t) = (MUL_UNc(GetComp(y,i),ay,(u)) + \
+					 MUL_UNc(GetComp(x,i),ax,(v))), \
 				  	 (comp4_t) ((comp1_t) ((t) |		\
 					 (0 - ((t) >> G_SHIFT)))) << (i))
 
@@ -1131,7 +1131,7 @@ fbCombineDisjointOverU (pixman_implementation_t *imp, pixman_op_t op,
             {
                 comp4_t d = *(dest + i);
                 a = fbCombineDisjointOutPart (d >> A_SHIFT, a);
-                FbByteMulAdd(d, a, s);
+                UNcx4_MUL_UNc_ADD_UNcx4(d, a, s);
                 s = d;
             }
 	    *(dest + i) = s;
@@ -1350,7 +1350,7 @@ fbCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
             if (a)
             {
                 comp4_t d = *(dest + i);
-                FbByteMulAddC(d, a, s);
+                UNcx4_MUL_UNcx4_ADD_UNcx4(d, a, s);
                 s = d;
             }
 	    *(dest + i) = s;
@@ -1377,7 +1377,7 @@ fbCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
 
             if (a != MASK)
             {
-                FbByteMulAdd(s, a, d);
+                UNcx4_MUL_UNc_ADD_UNcx4(s, a, d);
             }
 	    *(dest + i) = s;
         }
@@ -1402,7 +1402,7 @@ fbCombineInC (pixman_implementation_t *imp, pixman_op_t op,
 	    fbCombineMaskValueC (&s, &m);
             if (a != MASK)
             {
-                FbByteMul(s, a);
+                UNcx4_MUL_UNc(s, a);
             }
         }
 	*(dest + i) = s;
@@ -1429,7 +1429,7 @@ fbCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
             if (a)
             {
                 d = *(dest + i);
-                FbByteMulC(d, a);
+                UNcx4_MUL_UNcx4(d, a);
             }
 	    *(dest + i) = d;
         }
@@ -1455,7 +1455,7 @@ fbCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
 
             if (a != MASK)
             {
-                FbByteMul(s, a);
+                UNcx4_MUL_UNc(s, a);
             }
         }
 	*(dest + i) = s;
@@ -1482,7 +1482,7 @@ fbCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
             if (a)
             {
                 d = *(dest + i);
-                FbByteMulC(d, a);
+                UNcx4_MUL_UNcx4(d, a);
             }
 	    *(dest + i) = d;
         }
@@ -1506,7 +1506,7 @@ fbCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
 
         ad = ~m;
 
-        FbByteAddMulC(d, ad, s, as);
+        UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc(d, ad, s, as);
 	*(dest + i) = d;
     }
 }
@@ -1529,7 +1529,7 @@ fbCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
 
 	ad = m;
 
-        FbByteAddMulC(d, ad, s, as);
+        UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc(d, ad, s, as);
 	*(dest + i) = d;
     }
 }
@@ -1551,7 +1551,7 @@ fbCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
 
 	ad = ~m;
 
-        FbByteAddMulC(d, ad, s, as);
+        UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc(d, ad, s, as);
 	*(dest + i) = d;
     }
 }
@@ -1569,7 +1569,7 @@ fbCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 
 	fbCombineMaskValueC (&s, &m);
 
-        FbByteAdd(d, s);
+        UNcx4_ADD_UNcx4(d, s);
 	*(dest + i) = d;
     }
 }
diff --git a/pixman/pixman-combine.h.template b/pixman/pixman-combine.h.template
index 3b92e56..46a1aba 100644
--- a/pixman/pixman-combine.h.template
+++ b/pixman/pixman-combine.h.template
@@ -21,13 +21,13 @@
  * Helper macros.
  */
 
-#define MUL_UN_width(a,b,t) ( (t) = (a) * (b) + ONE_HALF, ( ( ( (t)>>G_SHIFT ) + (t) )>>G_SHIFT ) )
-#define DIV_UN_width(a,b)    (((comp2_t) (a) * MASK) / (b))
-#define ADD_UN_width(x,y,t) (							\
+#define MUL_UNc(a,b,t) ( (t) = (a) * (b) + ONE_HALF, ( ( ( (t)>>G_SHIFT ) + (t) )>>G_SHIFT ) )
+#define DIV_UNc(a,b)    (((comp2_t) (a) * MASK) / (b))
+#define ADD_UNc(x,y,t) (							\
 	(t) = x + y,                                                    \
 	(comp4_t) (comp1_t) ((t) | (0 - ((t) >> G_SHIFT))))
 
-#define DIV_ONE_UN_width(x)      (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)
+#define DIV_ONE_UNc(x)      (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)
 
 /*
   The methods below use some tricks to be able to do two color
@@ -37,7 +37,7 @@
 /*
   x_c = (x_c * a) / 255
 */
-#define FbByteMul(x, a) do {                                            \
+#define UNcx4_MUL_UNc(x, a) do {                                            \
         comp4_t t = ((x & RB_MASK) * a) + RB_ONE_HALF;                  \
         t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE;  \
         t &= RB_MASK;                                                   \
@@ -51,7 +51,7 @@
 /*
   x_c = (x_c * a) / 255 + y_c
 */
-#define FbByteMulAdd(x, a, y) do {                                      \
+#define UNcx4_MUL_UNc_ADD_UNcx4(x, a, y) do {                                      \
         /* multiply and divide: trunc((i + 128)*257/65536) */           \
         comp4_t t = ((x & RB_MASK) * a) + RB_ONE_HALF;                  \
         t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE;  \
@@ -84,7 +84,7 @@
 /*
   x_c = (x_c * a + y_c * b) / 255
 */
-#define FbByteAddMul(x, a, y, b) do {                                   \
+#define UNcx4_MUL_UNc_ADD_UNcx4_MUL_UNc(x, a, y, b) do {                                   \
         comp4_t t;                                                      \
         comp4_t r = (x >> A_SHIFT) * a + (y >> A_SHIFT) * b + ONE_HALF; \
         r += (r >> G_SHIFT);                                            \
@@ -116,7 +116,7 @@
 /*
   x_c = (x_c * a_c) / 255
 */
-#define FbByteMulC(x, a) do {                                           \
+#define UNcx4_MUL_UNcx4(x, a) do {                                           \
         comp4_t t;                                                      \
         comp4_t r = (x & MASK) * (a & MASK);                            \
         r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK);                    \
@@ -135,7 +135,7 @@
 /*
   x_c = (x_c * a_c) / 255 + y_c
 */
-#define FbByteMulAddC(x, a, y) do {                                     \
+#define UNcx4_MUL_UNcx4_ADD_UNcx4(x, a, y) do {                                     \
         comp4_t t;                                                      \
         comp4_t r = (x & MASK) * (a & MASK);                            \
         r |= (x & R_MASK) * ((a >> R_SHIFT) & MASK);                    \
@@ -161,7 +161,7 @@
 /*
   x_c = (x_c * a_c + y_c * b) / 255
 */
-#define FbByteAddMulC(x, a, y, b) do {                                  \
+#define UNcx4_MUL_UNcx4_ADD_UNcx4_MUL_UNc(x, a, y, b) do {                                  \
         comp4_t t;                                                      \
         comp4_t r = (x >> A_SHIFT) * (a >> A_SHIFT) +                   \
                      (y >> A_SHIFT) * b;                                \
@@ -194,7 +194,7 @@
 /*
   x_c = min(x_c + y_c, 255)
 */
-#define FbByteAdd(x, y) do {                                            \
+#define UNcx4_ADD_UNcx4(x, y) do {                                            \
         comp4_t t;                                                      \
         comp4_t r = (x & RB_MASK) + (y & RB_MASK);                      \
         r |= RB_MASK_PLUS_ONE - ((r >> G_SHIFT) & RB_MASK);             \
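
The UN8x4_* helpers above rely on the two-components-at-a-time trick
mentioned in the comment quoted above: the red and blue channels of a
packed 32-bit pixel sit in alternating bytes, so one multiply and one
rounding step handle both.  A hypothetical standalone sketch of that
half of UN8x4_MUL_UN8, assuming the 8-bit generated constants
(RB_MASK = 0x00ff00ff, RB_ONE_HALF = 0x00800080, COMPONENT_SIZE = 8);
the alpha and green channels get the same treatment after being
shifted down:

#include <stdint.h>

#define RB_MASK        0x00ff00ffu
#define RB_ONE_HALF    0x00800080u
#define COMPONENT_SIZE 8

/* Multiply the red and blue channels of a packed a8r8g8b8 word by an
 * 8-bit value in one pass.  Each 16-bit intermediate product stays in
 * its own byte pair, so a single rounding step fixes up both lanes. */
static uint32_t
mul_rb_un8 (uint32_t x, uint8_t a)
{
    uint32_t t = (x & RB_MASK) * a + RB_ONE_HALF;
    t = (t + ((t >> COMPONENT_SIZE) & RB_MASK)) >> COMPONENT_SIZE;
    return t & RB_MASK;
}

int
main (void)
{
    /* red 0x80 and blue 0x20 scaled by 0x80 -> 0x40 and 0x10 */
    return mul_rb_un8 (0x00804020, 0x80) == 0x00400010 ? 0 : 1;
}

Built this way, UN8x4_MUL_UN8_ADD_UN8x4 (dest, ~alpha, src) in fbOver
just below is the premultiplied OVER operator
dest = src + (1 - src.alpha) * dest expressed with these primitives.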
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index 01f0915..e519078 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -79,7 +79,7 @@ fbOver (uint32_t src, uint32_t dest)
 {
     uint32_t a = ~src >> 24; 
 
-    FbByteMulAdd(dest, a, src);
+    UN8x4_MUL_UN8_ADD_UN8x4(dest, a, src);
 
     return dest;
 }
@@ -89,7 +89,7 @@ fbIn (uint32_t x, uint8_t y)
 {
     uint16_t  a = y;
 
-    FbByteMul (x, a);
+    UN8x4_MUL_UN8 (x, a);
 
     return x;
 }
@@ -398,10 +398,10 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 	    {
 		d = *dst;
 
-		FbByteMulC (src, ma);
-		FbByteMul (ma, srca);
+		UN8x4_MUL_UN8x4 (src, ma);
+		UN8x4_MUL_UN8 (ma, srca);
 		ma = ~ma;
-		FbByteMulAddC (d, ma, src);
+		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, src);
 
 		*dst = d;
 	    }
@@ -601,10 +601,10 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 		d = *dst;
 		d = CONVERT_0565_TO_0888(d);
 
-		FbByteMulC (src, ma);
-		FbByteMul (ma, srca);
+		UN8x4_MUL_UN8x4 (src, ma);
+		UN8x4_MUL_UN8 (ma, srca);
 		ma = ~ma;
-		FbByteMulAddC (d, ma, src);
+		UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, src);
 		
 		*dst = CONVERT_8888_TO_0565(d);
 	    }
@@ -894,7 +894,7 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 		{
 		    d = *dst;
 		    if (d)
-			FbByteAdd(s,d);
+			UN8x4_ADD_UN8x4(s,d);
 		}
 		*dst = s;
 	    }
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 21f80c7..608334c 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -244,7 +244,7 @@ vmxCombineOverUnomask (uint32_t *dest, const uint32_t *src, int width)
         uint32_t d = dest[i];
         uint32_t ia = Alpha (~s);
 
-        FbByteMulAdd (d, ia, s);
+        UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
         dest[i] = d;
     }
 }
@@ -282,11 +282,11 @@ vmxCombineOverUmask (uint32_t *dest,
         uint32_t d = dest[i];
         uint32_t ia;
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
         ia = Alpha (~s);
 
-        FbByteMulAdd (d, ia, s);
+        UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
         dest[i] = d;
     }
 }
@@ -330,7 +330,7 @@ vmxCombineOverReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
         uint32_t d = dest[i];
         uint32_t ia = Alpha (~dest[i]);
 
-        FbByteMulAdd (s, ia, d);
+        UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
         dest[i] = s;
     }
 }
@@ -368,9 +368,9 @@ vmxCombineOverReverseUmask (uint32_t *dest,
         uint32_t d = dest[i];
         uint32_t ia = Alpha (~dest[i]);
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
-        FbByteMulAdd (s, ia, d);
+        UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
         dest[i] = s;
     }
 }
@@ -413,7 +413,7 @@ vmxCombineInUnomask (uint32_t *dest, const uint32_t *src, int width)
 
         uint32_t s = src[i];
         uint32_t a = Alpha (dest[i]);
-        FbByteMul (s, a);
+        UN8x4_MUL_UN8 (s, a);
         dest[i] = s;
     }
 }
@@ -450,9 +450,9 @@ vmxCombineInUmask (uint32_t *dest,
         uint32_t s = src[i];
         uint32_t a = Alpha (dest[i]);
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
-        FbByteMul (s, a);
+        UN8x4_MUL_UN8 (s, a);
         dest[i] = s;
     }
 }
@@ -494,7 +494,7 @@ vmxCombineInReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
     for (i = width%4; --i >=0;) {
         uint32_t d = dest[i];
         uint32_t a = Alpha (src[i]);
-        FbByteMul (d, a);
+        UN8x4_MUL_UN8 (d, a);
         dest[i] = d;
     }
 }
@@ -531,10 +531,10 @@ vmxCombineInReverseUmask (uint32_t *dest,
         uint32_t d = dest[i];
         uint32_t a = src[i];
 
-        FbByteMul (a, m);
+        UN8x4_MUL_UN8 (a, m);
 
         a = Alpha (a);
-        FbByteMul (d, a);
+        UN8x4_MUL_UN8 (d, a);
         dest[i] = d;
     }
 }
@@ -576,7 +576,7 @@ vmxCombineOutUnomask (uint32_t *dest, const uint32_t *src, int width)
     for (i = width%4; --i >=0;) {
         uint32_t s = src[i];
         uint32_t a = Alpha (~dest[i]);
-        FbByteMul (s, a);
+        UN8x4_MUL_UN8 (s, a);
         dest[i] = s;
     }
 }
@@ -613,9 +613,9 @@ vmxCombineOutUmask (uint32_t *dest,
         uint32_t s = src[i];
         uint32_t a = Alpha (~dest[i]);
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
-        FbByteMul (s, a);
+        UN8x4_MUL_UN8 (s, a);
         dest[i] = s;
     }
 }
@@ -657,7 +657,7 @@ vmxCombineOutReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
     for (i = width%4; --i >=0;) {
         uint32_t d = dest[i];
         uint32_t a = Alpha (~src[i]);
-        FbByteMul (d, a);
+        UN8x4_MUL_UN8 (d, a);
         dest[i] = d;
     }
 }
@@ -694,10 +694,10 @@ vmxCombineOutReverseUmask (uint32_t *dest,
         uint32_t d = dest[i];
         uint32_t a = src[i];
 
-        FbByteMul (a, m);
+        UN8x4_MUL_UN8 (a, m);
 
         a = Alpha (~a);
-        FbByteMul (d, a);
+        UN8x4_MUL_UN8 (d, a);
         dest[i] = d;
     }
 }
@@ -745,7 +745,7 @@ vmxCombineAtopUnomask (uint32_t *dest, const uint32_t *src, int width)
         uint32_t dest_a = Alpha (d);
         uint32_t src_ia = Alpha (~s);
 
-        FbByteAddMul (s, dest_a, d, src_ia);
+        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia);
         dest[i] = s;
     }
 }
@@ -785,11 +785,11 @@ vmxCombineAtopUmask (uint32_t *dest,
         uint32_t dest_a = Alpha (d);
         uint32_t src_ia;
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
         src_ia = Alpha (~s);
 
-        FbByteAddMul (s, dest_a, d, src_ia);
+        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia);
         dest[i] = s;
     }
 }
@@ -837,7 +837,7 @@ vmxCombineAtopReverseUnomask (uint32_t *dest, const uint32_t *src, int width)
         uint32_t src_a = Alpha (s);
         uint32_t dest_ia = Alpha (~d);
 
-        FbByteAddMul (s, dest_ia, d, src_a);
+        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);
         dest[i] = s;
     }
 }
@@ -877,11 +877,11 @@ vmxCombineAtopReverseUmask (uint32_t *dest,
         uint32_t src_a;
         uint32_t dest_ia = Alpha (~d);
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
         src_a = Alpha (s);
 
-        FbByteAddMul (s, dest_ia, d, src_a);
+        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);
         dest[i] = s;
     }
 }
@@ -929,7 +929,7 @@ vmxCombineXorUnomask (uint32_t *dest, const uint32_t *src, int width)
         uint32_t src_ia = Alpha (~s);
         uint32_t dest_ia = Alpha (~d);
 
-        FbByteAddMul (s, dest_ia, d, src_ia);
+        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);
         dest[i] = s;
     }
 }
@@ -969,11 +969,11 @@ vmxCombineXorUmask (uint32_t *dest,
         uint32_t src_ia;
         uint32_t dest_ia = Alpha (~d);
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
         src_ia = Alpha (~s);
 
-        FbByteAddMul (s, dest_ia, d, src_ia);
+        UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);
         dest[i] = s;
     }
 }
@@ -1016,7 +1016,7 @@ vmxCombineAddUnomask (uint32_t *dest, const uint32_t *src, int width)
     for (i = width%4; --i >=0;) {
         uint32_t s = src[i];
         uint32_t d = dest[i];
-        FbByteAdd (d, s);
+        UN8x4_ADD_UN8x4 (d, s);
         dest[i] = d;
     }
 }
@@ -1053,9 +1053,9 @@ vmxCombineAddUmask (uint32_t *dest,
         uint32_t s = src[i];
         uint32_t d = dest[i];
 
-        FbByteMul (s, m);
+        UN8x4_MUL_UN8 (s, m);
 
-        FbByteAdd (d, s);
+        UN8x4_ADD_UN8x4 (d, s);
         dest[i] = d;
     }
 }
@@ -1100,7 +1100,7 @@ vmxCombineSrcC (pixman_implementation_t *imp, pixman_op_t op,
     for (i = width%4; --i >=0;) {
         uint32_t a = mask[i];
         uint32_t s = src[i];
-        FbByteMulC (s, a);
+        UN8x4_MUL_UN8x4 (s, a);
         dest[i] = s;
     }
 }
@@ -1133,8 +1133,8 @@ vmxCombineOverC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t a = mask[i];
         uint32_t s = src[i];
         uint32_t d = dest[i];
-        FbByteMulC (s, a);
-        FbByteMulAddC (d, ~a, s);
+        UN8x4_MUL_UN8x4 (s, a);
+        UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ~a, s);
         dest[i] = d;
     }
 }
@@ -1168,8 +1168,8 @@ vmxCombineOverReverseC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t s = src[i];
         uint32_t d = dest[i];
         uint32_t da = Alpha (d);
-        FbByteMulC (s, a);
-        FbByteMulAddC (s, ~da, d);
+        UN8x4_MUL_UN8x4 (s, a);
+        UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ~da, d);
         dest[i] = s;
     }
 }
@@ -1203,8 +1203,8 @@ vmxCombineInC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t a = mask[i];
         uint32_t s = src[i];
         uint32_t da = Alpha (dest[i]);
-        FbByteMul (s, a);
-        FbByteMul (s, da);
+        UN8x4_MUL_UN8 (s, a);
+        UN8x4_MUL_UN8 (s, da);
         dest[i] = s;
     }
 }
@@ -1238,8 +1238,8 @@ vmxCombineInReverseC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t a = mask[i];
         uint32_t d = dest[i];
         uint32_t sa = Alpha (src[i]);
-        FbByteMul (a, sa);
-        FbByteMulC (d, a);
+        UN8x4_MUL_UN8 (a, sa);
+        UN8x4_MUL_UN8x4 (d, a);
         dest[i] = d;
     }
 }
@@ -1274,8 +1274,8 @@ vmxCombineOutC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t s = src[i];
         uint32_t d = dest[i];
         uint32_t da = Alpha (~d);
-        FbByteMulC (s, a);
-        FbByteMulC (s, da);
+        UN8x4_MUL_UN8x4 (s, a);
+        UN8x4_MUL_UN8x4 (s, da);
         dest[i] = s;
     }
 }
@@ -1311,8 +1311,8 @@ vmxCombineOutReverseC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t s = src[i];
         uint32_t d = dest[i];
         uint32_t sa = Alpha (s);
-        FbByteMulC (a, sa);
-        FbByteMulC (d, ~a);
+        UN8x4_MUL_UN8x4 (a, sa);
+        UN8x4_MUL_UN8x4 (d, ~a);
         dest[i] = d;
     }
 }
@@ -1352,9 +1352,9 @@ vmxCombineAtopC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t sa = Alpha (s);
         uint32_t da = Alpha (d);
 
-        FbByteMulC (s, a);
-        FbByteMul (a, sa);
-        FbByteAddMulC (d, ~a, s, da);
+        UN8x4_MUL_UN8x4 (s, a);
+        UN8x4_MUL_UN8 (a, sa);
+        UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, da);
         dest[i] = d;
     }
 }
@@ -1394,9 +1394,9 @@ vmxCombineAtopReverseC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t sa = Alpha (s);
         uint32_t da = Alpha (d);
 
-        FbByteMulC (s, a);
-        FbByteMul (a, sa);
-        FbByteAddMulC (d, a, s, ~da);
+        UN8x4_MUL_UN8x4 (s, a);
+        UN8x4_MUL_UN8 (a, sa);
+        UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, a, s, ~da);
         dest[i] = d;
     }
 }
@@ -1436,9 +1436,9 @@ vmxCombineXorC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t sa = Alpha (s);
         uint32_t da = Alpha (d);
 
-        FbByteMulC (s, a);
-        FbByteMul (a, sa);
-        FbByteAddMulC (d, ~a, s, ~da);
+        UN8x4_MUL_UN8x4 (s, a);
+        UN8x4_MUL_UN8 (a, sa);
+        UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ~a, s, ~da);
         dest[i] = d;
     }
 }
@@ -1473,8 +1473,8 @@ vmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t s = src[i];
         uint32_t d = dest[i];
 
-        FbByteMulC (s, a);
-        FbByteAdd (s, d);
+        UN8x4_MUL_UN8x4 (s, a);
+        UN8x4_ADD_UN8x4 (s, d);
         dest[i] = s;
     }
 }
commit e7f162a5a81221ca6abca79a9a77924d39bf4e16
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 08:42:19 2009 -0400

    Clarify a couple of comments

diff --git a/pixman/pixman-combine.h.template b/pixman/pixman-combine.h.template
index 5e62b4b..3b92e56 100644
--- a/pixman/pixman-combine.h.template
+++ b/pixman/pixman-combine.h.template
@@ -49,7 +49,7 @@
     } while (0)
 
 /*
-  x_c = (x_c * a) / 255 + y
+  x_c = (x_c * a) / 255 + y_c
 */
 #define FbByteMulAdd(x, a, y) do {                                      \
         /* multiply and divide: trunc((i + 128)*257/65536) */           \
@@ -133,7 +133,7 @@
     } while (0)
 
 /*
-  x_c = (x_c * a_c) / 255 + y
+  x_c = (x_c * a_c) / 255 + y_c
 */
 #define FbByteMulAddC(x, a, y) do {                                     \
         comp4_t t;                                                      \
commit b02c33e7da3eb733ca4ada66a6c35b293a191144
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 08:30:36 2009 -0400

    Change the names of macros that operate on normalized integers.
    
    For example, IntMul becomes MUL_UN8 to indicate that it multiplies
    two unsigned normalized 8-bit integers.
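
A minimal standalone sketch (not pixman source) of what the renamed
8-bit multiply computes: a close approximation of (a * b) / 255 with
rounding, for values normalized so that 0xff means 1.0.  The ONE_HALF
and G_SHIFT constants below are the values assumed for the generated
32-bpp combiners (0x80 and 8).

#include <stdint.h>
#include <stdio.h>

#define ONE_HALF 0x80
#define G_SHIFT  8

/* MUL_UN8-style multiply: t = a*b + 0x80, then (t + (t >> 8)) >> 8,
 * i.e. round(a * b / 255) for unsigned normalized 8-bit inputs. */
static uint8_t
mul_un8 (uint8_t a, uint8_t b)
{
    unsigned t = (unsigned) a * b + ONE_HALF;
    return (uint8_t) ((t + (t >> G_SHIFT)) >> G_SHIFT);
}

int
main (void)
{
    printf ("0x80 * 0x80 -> 0x%02x\n", mul_un8 (0x80, 0x80)); /* 0x40 */
    printf ("0xff * 0xff -> 0x%02x\n", mul_un8 (0xff, 0xff)); /* 0xff */
    return 0;
}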

diff --git a/pixman/make-combine.pl b/pixman/make-combine.pl
index 8f31259..88b1156 100644
--- a/pixman/make-combine.pl
+++ b/pixman/make-combine.pl
@@ -64,6 +64,10 @@ while (<STDIN>) {
     s/\bFbComposeFunctions\b/FbComposeFunctions$pixel_size/;
     s/combine_width/combine_$pixel_size/;
     s/_pixman_setup_combiner_functions_width/_pixman_setup_combiner_functions_$pixel_size/;
+    s/MUL_UN_width/MUL_UN$size/g;
+    s/DIV_UN_width/DIV_UN$size/g;
+    s/ADD_UN_width/ADD_UN$size/g;
+    s/DIV_ONE_UN_width/DIV_ONE_UN$size/g;
 
     # Convert comp*_t values into the appropriate real types.
     s/comp1_t/uint${size}_t/g;
diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index 1b43610..87d3fdb 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -328,7 +328,7 @@ fbCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
         da = ~d >> A_SHIFT;
         if (sa > da)
         {
-            sa = IntDiv(da, sa);
+            sa = DIV_UN_width(da, sa);
             FbByteMul(s, sa);
         };
         FbByteAdd(d, s);
@@ -424,7 +424,7 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
         FbByteAddMul(result, isa, s, ida);	    \
 						    \
 	*(dest + i) = result +			    \
-	    (DivOne (sa * da) << A_SHIFT) +	    \
+	    (DIV_ONE_UN_width (sa * da) << A_SHIFT) +	    \
 	    (blend_ ## name (Red (d), da, Red (s), sa) << R_SHIFT) + \
 	    (blend_ ## name (Green (d), da, Green (s), sa) << G_SHIFT) + \
 	    (blend_ ## name (Blue (d), da, Blue (s), sa)); \
@@ -450,7 +450,7 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 	FbByteAddMulC (result, ~m, s, ida);	    \
 						    \
 	result +=				    \
-	    (DivOne (Alpha (m) * da) << A_SHIFT) +				\
+	    (DIV_ONE_UN_width (Alpha (m) * da) << A_SHIFT) +				\
 	    (blend_ ## name (Red (d), da, Red (s), Red (m)) << R_SHIFT) +	\
 	    (blend_ ## name (Green (d), da, Green (s), Green (m)) << G_SHIFT) +	\
 	    (blend_ ## name (Blue (d), da, Blue (s), Blue (m)));			\
@@ -467,7 +467,7 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 static inline comp4_t
 blend_Screen (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
-  return DivOne (sca * da + dca * sa - sca * dca);
+  return DIV_ONE_UN_width (sca * da + dca * sa - sca * dca);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Screen)
@@ -490,7 +490,7 @@ blend_Overlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 	rca = 2 * sca * dca;
     else
 	rca = sa * da - 2 * (da - dca) * (sa - sca);
-    return DivOne (rca);
+    return DIV_ONE_UN_width (rca);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Overlay)
@@ -507,7 +507,7 @@ blend_Darken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     
     s = sca * da;
     d = dca * sa;
-    return DivOne (s > d ? d : s);
+    return DIV_ONE_UN_width (s > d ? d : s);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Darken)
@@ -524,7 +524,7 @@ blend_Lighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     
     s = sca * da;
     d = dca * sa;
-    return DivOne (s > d ? s : d);
+    return DIV_ONE_UN_width (s > d ? s : d);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Lighten)
@@ -542,10 +542,10 @@ static inline comp4_t
 blend_ColorDodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (sca >= sa) {
-	return DivOne (sa * da);
+	return DIV_ONE_UN_width (sa * da);
     } else {
 	comp4_t rca = dca * sa * sa / (sa - sca);
-	return DivOne (rca > sa * da ? sa * da : rca);
+	return DIV_ONE_UN_width (rca > sa * da ? sa * da : rca);
     }
 }
 
@@ -568,7 +568,7 @@ blend_ColorBurn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     } else {
 	comp4_t sada = sa * da;
 	comp4_t rca = (da - dca) * sa * sa / sca;
-	return DivOne (rca > sada ? 0 : sada - rca);
+	return DIV_ONE_UN_width (rca > sada ? 0 : sada - rca);
     }
 }
 
@@ -586,9 +586,9 @@ static inline comp4_t
 blend_HardLight (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (2 * sca < sa)
-	return DivOne (2 * sca * dca);
+	return DIV_ONE_UN_width (2 * sca * dca);
     else
-	return DivOne (sa * da - 2 * (da - dca) * (sa - sca));
+	return DIV_ONE_UN_width (sa * da - 2 * (da - dca) * (sa - sca));
 }
 
 PDF_SEPARABLE_BLEND_MODE (HardLight)
@@ -642,9 +642,9 @@ blend_Difference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     comp4_t scada = sca * da;
 
     if (scada < dcasa)
-	return DivOne (dcasa - scada);
+	return DIV_ONE_UN_width (dcasa - scada);
     else
-	return DivOne (scada - dcasa);
+	return DIV_ONE_UN_width (scada - dcasa);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Difference)
@@ -660,7 +660,7 @@ PDF_SEPARABLE_BLEND_MODE (Difference)
 static inline comp4_t
 blend_Exclusion (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
-    return DivOne (sca * da + dca * sa - 2 * dca * sca);
+    return DIV_ONE_UN_width (sca * da + dca * sa - 2 * dca * sca);
 }
 
 PDF_SEPARABLE_BLEND_MODE (Exclusion)
@@ -806,10 +806,10 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
 	blend_ ## name (c, dc, da, sc, sa);				\
 									\
 	*(dest + i) = result +						\
-	    (DivOne (sa * da) << A_SHIFT) +				\
-	    (DivOne (c[0]) << R_SHIFT) +				\
-	    (DivOne (c[1]) << G_SHIFT) +				\
-	    (DivOne (c[2]));						\
+	    (DIV_ONE_UN_width (sa * da) << A_SHIFT) +				\
+	    (DIV_ONE_UN_width (c[0]) << R_SHIFT) +				\
+	    (DIV_ONE_UN_width (c[1]) << G_SHIFT) +				\
+	    (DIV_ONE_UN_width (c[2]));						\
     }									\
 }									
 
@@ -1014,7 +1014,7 @@ fbCombineDisjointOutPart (comp1_t a, comp1_t b)
     b = ~b;		    /* 1 - b */
     if (b >= a)		    /* 1 - b >= a -> (1-b)/a >= 1 */
 	return MASK;	    /* 1 */
-    return IntDiv(b,a);     /* (1-b) / a */
+    return DIV_UN_width(b,a);     /* (1-b) / a */
 }
 
 /* portion covered by both a and b */
@@ -1028,7 +1028,7 @@ fbCombineDisjointInPart (comp1_t a, comp1_t b)
     b = ~b;		    /* 1 - b */
     if (b >= a)		    /* 1 - b >= a -> (1-b)/a >= 1 */
 	return 0;	    /* 1 - 1 */
-    return ~IntDiv(b,a);    /* 1 - (1-b) / a */
+    return ~DIV_UN_width(b,a);    /* 1 - (1-b) / a */
 }
 
 /* portion covered by a but not b */
@@ -1042,7 +1042,7 @@ fbCombineConjointOutPart (comp1_t a, comp1_t b)
 
     if (b >= a)		    /* b >= a -> b/a >= 1 */
 	return 0x00;	    /* 0 */
-    return ~IntDiv(b,a);    /* 1 - b/a */
+    return ~DIV_UN_width(b,a);    /* 1 - b/a */
 }
 
 /* portion covered by both a and b */
@@ -1053,7 +1053,7 @@ fbCombineConjointInPart (comp1_t a, comp1_t b)
 
     if (b >= a)		    /* b >= a -> b/a >= 1 */
 	return MASK;	    /* 1 */
-    return IntDiv(b,a);     /* b/a */
+    return DIV_UN_width(b,a);     /* b/a */
 }
 
 #define GetComp(v,i)   ((comp2_t) (comp1_t) ((v) >> i))
@@ -1061,8 +1061,8 @@ fbCombineConjointInPart (comp1_t a, comp1_t b)
 #define Add(x,y,i,t)   ((t) = GetComp(x,i) + GetComp(y,i),              \
                         (comp4_t) ((comp1_t) ((t) | (0 - ((t) >> G_SHIFT)))) << (i))
 
-#define FbGen(x,y,i,ax,ay,t,u,v) ((t) = (IntMult(GetComp(y,i),ay,(u)) + \
-					 IntMult(GetComp(x,i),ax,(v))), \
+#define FbGen(x,y,i,ax,ay,t,u,v) ((t) = (MUL_UN_width(GetComp(y,i),ay,(u)) + \
+					 MUL_UN_width(GetComp(x,i),ax,(v))), \
 				  	 (comp4_t) ((comp1_t) ((t) |		\
 					 (0 - ((t) >> G_SHIFT)))) << (i))
 
diff --git a/pixman/pixman-combine.h.template b/pixman/pixman-combine.h.template
index 35b4c18..5e62b4b 100644
--- a/pixman/pixman-combine.h.template
+++ b/pixman/pixman-combine.h.template
@@ -21,13 +21,13 @@
  * Helper macros.
  */
 
-#define IntMult(a,b,t) ( (t) = (a) * (b) + ONE_HALF, ( ( ( (t)>>G_SHIFT ) + (t) )>>G_SHIFT ) )
-#define IntDiv(a,b)    (((comp2_t) (a) * MASK) / (b))
-#define IntAdd(x,y,t) (							\
+#define MUL_UN_width(a,b,t) ( (t) = (a) * (b) + ONE_HALF, ( ( ( (t)>>G_SHIFT ) + (t) )>>G_SHIFT ) )
+#define DIV_UN_width(a,b)    (((comp2_t) (a) * MASK) / (b))
+#define ADD_UN_width(x,y,t) (							\
 	(t) = x + y,                                                    \
 	(comp4_t) (comp1_t) ((t) | (0 - ((t) >> G_SHIFT))))
 
-#define DivOne(x)      (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)
+#define DIV_ONE_UN_width(x)      (((x) + ONE_HALF + (((x) + ONE_HALF) >> G_SHIFT)) >> G_SHIFT)
 
 /*
   The methods below use some tricks to be able to do two color
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index 1bdb323..01f0915 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -204,7 +204,7 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 		}
 		else if (m != 0xff)
 		{
-		    *dst = IntMult(m, *dst, t);
+		    *dst = MUL_UN8(m, *dst, t);
 		}
 		dst++;
 	    }
@@ -223,14 +223,14 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 	    while (w--)
 	    {
 		m = *mask++;
-		m = IntMult(m, srca, t);
+		m = MUL_UN8(m, srca, t);
 		if (m == 0)
 		{
 		    *dst = 0;
 		}
 		else if (m != 0xff)
 		{
-		    *dst = IntMult(m, *dst, t);
+		    *dst = MUL_UN8(m, *dst, t);
 		}
 		dst++;
 	    }
@@ -281,7 +281,7 @@ fast_CompositeIn_8_8 (pixman_implementation_t *imp,
 	    }
 	    else if (s != 0xff)
 	    {
-		*dst = IntMult(s, *dst, t);
+		*dst = MUL_UN8(s, *dst, t);
 	    }
 	    dst++;
 	}
@@ -948,8 +948,8 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 	    a = *mask++;
 	    d = *dst;
 
-	    m = IntMult (sa, a, tmp);
-	    r = IntAdd (m, d, tmp);
+	    m = MUL_UN8 (sa, a, tmp);
+	    r = ADD_UN8 (m, d, tmp);
 
 	    *dst++ = r;
 	}
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index e9e5071..9f4551f 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -672,7 +672,7 @@ mmxCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
         uint32_t da = ~d >> 24;
 
         if (sa > da) {
-            __m64 msa = load8888(IntDiv(da, sa) << 24);
+            __m64 msa = load8888(DIV_UN8(da, sa) << 24);
             msa = expand_alpha(msa);
             ms = pix_multiply(ms, msa);
         }
@@ -2503,8 +2503,8 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 	    a = *mask++;
 	    d = *dst;
 	    
-	    m = IntMult (sa, a, tmp);
-	    d = IntMult (m, d, tmp);
+	    m = MUL_UN8 (sa, a, tmp);
+	    d = MUL_UN8 (m, d, tmp);
 	    
 	    *dst++ = d;
 	}
@@ -2568,7 +2568,7 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 	    s = *src;
 	    d = *dst;
 
-	    *dst = IntMult (s, d, tmp);
+	    *dst = MUL_UN8 (s, d, tmp);
 
 	    src++;
 	    dst++;
@@ -2647,8 +2647,8 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 	    a = *mask++;
 	    d = *dst;
 
-	    m = IntMult (sa, a, tmp);
-	    r = IntAdd (m, d, tmp);
+	    m = MUL_UN8 (sa, a, tmp);
+	    r = ADD_UN8 (m, d, tmp);
 
 	    *dst++ = r;
 	}
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index 02d4511..3ebabe3 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -1331,7 +1331,7 @@ coreCombineSaturateUPixelsse2 (uint32_t src, uint32_t dst)
 
     if (sa > da)
     {
-        ms = pixMultiply_1x64 (ms, expandAlpha_1x64 (unpack_32_1x64 (IntDiv(da, sa) << 24)));
+        ms = pixMultiply_1x64 (ms, expandAlpha_1x64 (unpack_32_1x64 (DIV_UN8(da, sa) << 24)));
     }
 
     return pack_1x64_32 (_mm_adds_pu16 (md, ms));
commit d4a366193b12cf241980a621a15ec0ee67e8f6bb
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 08:10:20 2009 -0400

    Fix names in the trap rasterizer.
    
    s/Shift4/SHIFT_4/g;
    s/Get4/GET_4/g;
    s/Put4/PUT_4/g;
    s/DefineAlpha/DEFINE_ALPHA/g;
    s/AddAlpha/ADD_ALPHA/g;
    s/StepAlpha/STEP_ALPHA/g;
    s/add_saturate_8/ADD_SATURATE_8/g;
    s/RenderEdgeStepSmall/RENDER_EDGE_STEP_SMALL/g;
    s/RenderEdgeStepBig/RENDER_EDGE_STEP_BIG/g;
    s/fbRasterizeEdges/b00_re/g;
    s/rasterizeEdges/RASTERIZE_EDGES/g;
    s/b00_re/rasterize_edges_/g;

diff --git a/pixman/pixman-edge-imp.h b/pixman/pixman-edge-imp.h
index 1687c0b..58957e3 100644
--- a/pixman/pixman-edge-imp.h
+++ b/pixman/pixman-edge-imp.h
@@ -24,7 +24,7 @@
 #endif
 
 static void
-rasterizeEdges (pixman_image_t  *image,
+RASTERIZE_EDGES (pixman_image_t  *image,
 		pixman_edge_t	*l,
 		pixman_edge_t	*r,
 		pixman_fixed_t		t,
@@ -132,7 +132,7 @@ rasterizeEdges (pixman_image_t  *image,
 	    }
 #else
 	    {
-		DefineAlpha(line,lxi);
+		DEFINE_ALPHA(line,lxi);
 		int	    lxs;
 		int     rxs;
 
@@ -143,20 +143,20 @@ rasterizeEdges (pixman_image_t  *image,
 		/* Add coverage across row */
 		if (lxi == rxi)
 		{
-		    AddAlpha (rxs - lxs);
+		    ADD_ALPHA (rxs - lxs);
 		}
 		else
 		{
 		    int	xi;
 
-		    AddAlpha (N_X_FRAC(N_BITS) - lxs);
-		    StepAlpha;
+		    ADD_ALPHA (N_X_FRAC(N_BITS) - lxs);
+		    STEP_ALPHA;
 		    for (xi = lxi + 1; xi < rxi; xi++)
 		    {
-			AddAlpha (N_X_FRAC(N_BITS));
-			StepAlpha;
+			ADD_ALPHA (N_X_FRAC(N_BITS));
+			STEP_ALPHA;
 		    }
-		    AddAlpha (rxs);
+		    ADD_ALPHA (rxs);
 		}
 	    }
 #endif
@@ -168,15 +168,15 @@ rasterizeEdges (pixman_image_t  *image,
 #if N_BITS > 1
 	if (pixman_fixed_frac (y) != Y_FRAC_LAST(N_BITS))
 	{
-	    RenderEdgeStepSmall (l);
-	    RenderEdgeStepSmall (r);
+	    RENDER_EDGE_STEP_SMALL (l);
+	    RENDER_EDGE_STEP_SMALL (r);
 	    y += STEP_Y_SMALL(N_BITS);
 	}
 	else
 #endif
 	{
-	    RenderEdgeStepBig (l);
-	    RenderEdgeStepBig (r);
+	    RENDER_EDGE_STEP_BIG (l);
+	    RENDER_EDGE_STEP_BIG (r);
 	    y += STEP_Y_BIG(N_BITS);
 	    line += stride;
 	}
diff --git a/pixman/pixman-edge.c b/pixman/pixman-edge.c
index 43e9604..f6a580e 100644
--- a/pixman/pixman-edge.c
+++ b/pixman/pixman-edge.c
@@ -32,7 +32,7 @@
 /*
  * Step across a small sample grid gap
  */
-#define RenderEdgeStepSmall(edge) { \
+#define RENDER_EDGE_STEP_SMALL(edge) { \
     edge->x += edge->stepx_small;   \
     edge->e += edge->dx_small;	    \
     if (edge->e > 0)		    \
@@ -45,7 +45,7 @@
 /*
  * Step across a large sample grid gap
  */
-#define RenderEdgeStepBig(edge) {   \
+#define RENDER_EDGE_STEP_BIG(edge) {   \
     edge->x += edge->stepx_big;	    \
     edge->e += edge->dx_big;	    \
     if (edge->e > 0)		    \
@@ -66,35 +66,35 @@
  */
 
 #define N_BITS	4
-#define rasterizeEdges	fbRasterizeEdges4
+#define RASTERIZE_EDGES	rasterize_edges_4
 
 #ifndef WORDS_BIG_ENDIAN
-#define Shift4(o)	((o) << 2)
+#define SHIFT_4(o)	((o) << 2)
 #else
-#define Shift4(o)	((1-(o)) << 2)
+#define SHIFT_4(o)	((1-(o)) << 2)
 #endif
 
-#define Get4(x,o)	(((x) >> Shift4(o)) & 0xf)
-#define Put4(x,o,v)	(((x) & ~(0xf << Shift4(o))) | (((v) & 0xf) << Shift4(o)))
+#define GET_4(x,o)	(((x) >> SHIFT_4(o)) & 0xf)
+#define PUT_4(x,o,v)	(((x) & ~(0xf << SHIFT_4(o))) | (((v) & 0xf) << SHIFT_4(o)))
 
-#define DefineAlpha(line,x)			     \
+#define DEFINE_ALPHA(line,x)			     \
     uint8_t   *__ap = (uint8_t *) line + ((x) >> 1); \
     int	    __ao = (x) & 1
 
-#define StepAlpha	((__ap += __ao), (__ao ^= 1))
+#define STEP_ALPHA	((__ap += __ao), (__ao ^= 1))
 
-#define AddAlpha(a) {							\
+#define ADD_ALPHA(a) {							\
 	uint8_t   __o = READ(image, __ap);				\
-	uint8_t   __a = (a) + Get4(__o, __ao);				\
-	WRITE(image, __ap, Put4 (__o, __ao, __a | (0 - ((__a) >> 4))));	\
+	uint8_t   __a = (a) + GET_4(__o, __ao);				\
+	WRITE(image, __ap, PUT_4 (__o, __ao, __a | (0 - ((__a) >> 4))));	\
     }
 
 #include "pixman-edge-imp.h"
 
-#undef AddAlpha
-#undef StepAlpha
-#undef DefineAlpha
-#undef rasterizeEdges
+#undef ADD_ALPHA
+#undef STEP_ALPHA
+#undef DEFINE_ALPHA
+#undef RASTERIZE_EDGES
 #undef N_BITS
 
 
@@ -103,11 +103,11 @@
  */
 
 #define N_BITS 1
-#define rasterizeEdges	fbRasterizeEdges1
+#define RASTERIZE_EDGES	rasterize_edges_1
 
 #include "pixman-edge-imp.h"
 
-#undef rasterizeEdges
+#undef RASTERIZE_EDGES
 #undef N_BITS
 
 /*
@@ -121,7 +121,7 @@ clip255 (int x)
     return x;
 }
 
-#define add_saturate_8(buf,val,length)				\
+#define ADD_SATURATE_8(buf,val,length)				\
     do {							\
 	int i__ = (length);					\
 	uint8_t *buf__ = (buf);					\
@@ -146,7 +146,7 @@ clip255 (int x)
  *                   fill_start       fill_end
  */
 static void
-fbRasterizeEdges8 (pixman_image_t       *image,
+rasterize_edges_8 (pixman_image_t       *image,
 		   pixman_edge_t	*l,
 		   pixman_edge_t	*r,
 		   pixman_fixed_t	t,
@@ -220,7 +220,7 @@ fbRasterizeEdges8 (pixman_image_t       *image,
 			if (lxi >= fill_end || rxi < fill_start)
 			{
 			    /* We're beyond what we saved, just fill it */
-			    add_saturate_8 (ap + fill_start,
+			    ADD_SATURATE_8 (ap + fill_start,
 					    fill_size * N_X_FRAC(8),
 					    fill_end - fill_start);
 			    fill_start = lxi;
@@ -232,28 +232,28 @@ fbRasterizeEdges8 (pixman_image_t       *image,
 			    /* Update fill_start */
 			    if (lxi > fill_start)
 			    {
-				add_saturate_8 (ap + fill_start,
+				ADD_SATURATE_8 (ap + fill_start,
 						fill_size * N_X_FRAC(8),
 						lxi - fill_start);
 				fill_start = lxi;
 			    }
 			    else if (lxi < fill_start)
 			    {
-				add_saturate_8 (ap + lxi, N_X_FRAC(8),
+				ADD_SATURATE_8 (ap + lxi, N_X_FRAC(8),
 						fill_start - lxi);
 			    }
 
 			    /* Update fill_end */
 			    if (rxi < fill_end)
 			    {
-				add_saturate_8 (ap + rxi,
+				ADD_SATURATE_8 (ap + rxi,
 						fill_size * N_X_FRAC(8),
 						fill_end - rxi);
 				fill_end = rxi;
 			    }
 			    else if (fill_end < rxi)
 			    {
-				add_saturate_8 (ap + fill_end,
+				ADD_SATURATE_8 (ap + fill_end,
 						N_X_FRAC(8),
 						rxi - fill_end);
 			    }
@@ -263,7 +263,7 @@ fbRasterizeEdges8 (pixman_image_t       *image,
 		}
 		else
 		{
-		    add_saturate_8 (ap + lxi, N_X_FRAC(8), rxi - lxi);
+		    ADD_SATURATE_8 (ap + lxi, N_X_FRAC(8), rxi - lxi);
 		}
 
 		WRITE(image, ap + rxi, clip255 (READ(image, ap + rxi) + rxs));
@@ -279,7 +279,7 @@ fbRasterizeEdges8 (pixman_image_t       *image,
 		}
 		else
 		{
-		    add_saturate_8 (ap + fill_start, fill_size * N_X_FRAC(8),
+		    ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC(8),
 				    fill_end - fill_start);
 		}
             }
@@ -288,14 +288,14 @@ fbRasterizeEdges8 (pixman_image_t       *image,
 
 	if (pixman_fixed_frac (y) != Y_FRAC_LAST(8))
 	{
-	    RenderEdgeStepSmall (l);
-	    RenderEdgeStepSmall (r);
+	    RENDER_EDGE_STEP_SMALL (l);
+	    RENDER_EDGE_STEP_SMALL (r);
 	    y += STEP_Y_SMALL(8);
 	}
 	else
 	{
-	    RenderEdgeStepBig (l);
-	    RenderEdgeStepBig (r);
+	    RENDER_EDGE_STEP_BIG (l);
+	    RENDER_EDGE_STEP_BIG (r);
 	    y += STEP_Y_BIG(8);
             if (fill_start != fill_end)
             {
@@ -305,7 +305,7 @@ fbRasterizeEdges8 (pixman_image_t       *image,
 		}
 		else
 		{
-		    add_saturate_8 (ap + fill_start, fill_size * N_X_FRAC(8),
+		    ADD_SATURATE_8 (ap + fill_start, fill_size * N_X_FRAC(8),
 				    fill_end - fill_start);
 		}
                 fill_start = fill_end = -1;
@@ -329,13 +329,13 @@ PIXMAN_RASTERIZE_EDGES (pixman_image_t *image,
     switch (PIXMAN_FORMAT_BPP (image->bits.format))
     {
     case 1:
-	fbRasterizeEdges1 (image, l, r, t, b);
+	rasterize_edges_1 (image, l, r, t, b);
 	break;
     case 4:
-	fbRasterizeEdges4 (image, l, r, t, b);
+	rasterize_edges_4 (image, l, r, t, b);
 	break;
     case 8:
-	fbRasterizeEdges8 (image, l, r, t, b);
+	rasterize_edges_8 (image, l, r, t, b);
 	break;
     }
 }
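
The GET_4/PUT_4/ADD_ALPHA macros renamed above pack two 4-bit coverage
samples into each byte of a 4-bpp alpha image and add coverage with
saturation.  A rough standalone sketch of that update (names and the
little-endian nibble order follow the non-WORDS_BIG_ENDIAN branch of
SHIFT_4; this is illustrative, not pixman's generated code):

#include <stdint.h>

/* Add 'a' units of coverage to one of the two 4-bit alpha samples in
 * 'byte'; 'odd' selects the nibble.  The (v | -(v >> 4)) step is the
 * same saturation trick ADD_ALPHA uses: any carry out of the low
 * nibble forces the stored value to 0xf. */
static uint8_t
add_alpha_4 (uint8_t byte, int odd, unsigned a)
{
    unsigned shift = (unsigned) odd << 2;
    unsigned v = ((byte >> shift) & 0xfu) + a;
    v |= 0u - (v >> 4);                        /* saturate at 0xf */
    return (uint8_t) ((byte & ~(0xfu << shift)) | ((v & 0xfu) << shift));
}

int
main (void)
{
    /* 0x9 plus 0xa of coverage saturates the low nibble to 0xf */
    return add_alpha_4 (0x09, 0, 0xa) == 0x0f ? 0 : 1;
}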
commit bcdf0861be346a8a4662376f4305474da9236163
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 08:02:45 2009 -0400

    Rename QuadwordCopy_neon to neon_quadword_copy
    
        s/QuadwordCopy_neon/neon_quadword_copy/g;

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 97baebb..c1a2cb4 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -1504,7 +1504,7 @@ pixman_fill_neon (uint32_t *bits,
 // TODO: is there a more generic way of doing this being introduced?
 #define NEON_SCANLINE_BUFFER_PIXELS (1024)
 
-static inline void QuadwordCopy_neon(
+static inline void neon_quadword_copy(
 	void* dst,
 	void* src,
 	uint32_t count,       // of quadwords
@@ -1802,18 +1802,18 @@ neon_CompositeOver_n_8_0565 (
 		// left edge, middle block, right edge
 		for( ; y--; maskLine += maskStride, alignedLine += dstStride, dstLine += dstStride) {
 			// We don't want to overrun the edges of the glyph, so realign the edge data into known buffers
-			QuadwordCopy_neon(glyphLine + copyOffset, maskLine, width >> 4, width & 0xF);
+			neon_quadword_copy(glyphLine + copyOffset, maskLine, width >> 4, width & 0xF);
 
 			// Uncached framebuffer access is really, really slow if we do it piecemeal.
 			// It should be much faster if we grab it all at once.
 			// One scanline should easily fit in L1 cache, so this should not waste RAM bandwidth.
-			QuadwordCopy_neon(scanLine, alignedLine, copyCount, copyTail);
+			neon_quadword_copy(scanLine, alignedLine, copyCount, copyTail);
 
 			// Apply the actual filter
 			SolidOver565_8pix_neon(src, scanLine + kernelOffset, glyphLine + kernelOffset, 8 * sizeof(*dstLine), 8, kernelCount);
 
 			// Copy the modified scanline back
-			QuadwordCopy_neon(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
+			neon_quadword_copy(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
 		}
 	}
 }
@@ -1956,13 +1956,13 @@ neon_CompositeOver_n_0565 (
 			// Uncached framebuffer access is really, really slow if we do it piecemeal.
 			// It should be much faster if we grab it all at once.
 			// One scanline should easily fit in L1 cache, so this should not waste RAM bandwidth.
-			QuadwordCopy_neon(scanLine, alignedLine, copyCount, copyTail);
+			neon_quadword_copy(scanLine, alignedLine, copyCount, copyTail);
 
 			// Apply the actual filter
 			PlainOver565_8pix_neon(src, scanLine + kernelOffset, 8 * sizeof(*dstLine), kernelCount);
 
 			// Copy the modified scanline back
-			QuadwordCopy_neon(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
+			neon_quadword_copy(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
 		}
 	}
 }
@@ -2118,13 +2118,13 @@ neon_CompositeOver_8888_0565 (
 			// Uncached framebuffer access is really, really slow if we do it piecemeal.
 			// It should be much faster if we grab it all at once.
 			// One scanline should easily fit in L1 cache, so this should not waste RAM bandwidth.
-			QuadwordCopy_neon(scanLine, alignedLine, copyCount, copyTail);
+			neon_quadword_copy(scanLine, alignedLine, copyCount, copyTail);
 
 			// Apply the actual filter
 			ARGB8_Over565_8pix_neon(srcLine, scanLine + kernelOffset, srcStride * sizeof(*srcLine), kernelCount);
 
 			// Copy the modified scanline back
-			QuadwordCopy_neon(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
+			neon_quadword_copy(dstLine, scanLine + copyOffset, width >> 3, (width & 7) * 2);
 		}
 	}
 }
@@ -2227,7 +2227,7 @@ pixman_blt_neon (
 		uint32_t offset         = byte_width % 16;
 
 		while(height--) {
-			QuadwordCopy_neon(dst_bytes, src_bytes, quadword_count, offset);
+			neon_quadword_copy(dst_bytes, src_bytes, quadword_count, offset);
 			src_bytes += src_stride_bytes;
 			dst_bytes += dst_stride_bytes;
 		}
commit a08548bd5275c69c1e7a7fd894a844ad6ad59638
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 08:00:56 2009 -0400

    Fix up the names in pixman_compute_composite_region()
    
        s/miClipPictureSrc/clip_source_image/g;
        s/miClipPictureReg/clip_general_image/g;

diff --git a/pixman/pixman-utils.c b/pixman/pixman-utils.c
index 42ce5ef..354ae15 100644
--- a/pixman/pixman-utils.c
+++ b/pixman/pixman-utils.c
@@ -36,7 +36,7 @@
 #define BOUND(v)	(int16_t) ((v) < INT16_MIN ? INT16_MIN : (v) > INT16_MAX ? INT16_MAX : (v))
 
 static inline pixman_bool_t
-miClipPictureReg (pixman_region32_t *	region,
+clip_general_image (pixman_region32_t *	region,
 		  pixman_region32_t *	clip,
 		  int		dx,
 		  int		dy)
@@ -80,7 +80,7 @@ miClipPictureReg (pixman_region32_t *	region,
 
 
 static inline pixman_bool_t
-miClipPictureSrc (pixman_region32_t *	region,
+clip_source_image (pixman_region32_t *	region,
 		  pixman_image_t *	picture,
 		  int		dx,
 		  int		dy)
@@ -93,7 +93,7 @@ miClipPictureSrc (pixman_region32_t *	region,
     if (!picture->common.clip_sources || !picture->common.client_clip)
 	return TRUE;
 
-    return miClipPictureReg (region,
+    return clip_general_image (region,
 			     &picture->common.clip_region,
 			     dx, dy);
 }
@@ -151,7 +151,7 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     
     if (dst_image->common.have_clip_region)
     {
-	if (!miClipPictureReg (region, &dst_image->common.clip_region, 0, 0))
+	if (!clip_general_image (region, &dst_image->common.clip_region, 0, 0))
 	{
 	    pixman_region32_fini (region);
 	    return FALSE;
@@ -160,7 +160,7 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     
     if (dst_image->common.alpha_map && dst_image->common.alpha_map->common.have_clip_region)
     {
-	if (!miClipPictureReg (region, &dst_image->common.alpha_map->common.clip_region,
+	if (!clip_general_image (region, &dst_image->common.alpha_map->common.clip_region,
 			       -dst_image->common.alpha_origin_x,
 			       -dst_image->common.alpha_origin_y))
 	{
@@ -172,7 +172,7 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     /* clip against src */
     if (src_image->common.have_clip_region)
     {
-	if (!miClipPictureSrc (region, src_image, dest_x - src_x, dest_y - src_y))
+	if (!clip_source_image (region, src_image, dest_x - src_x, dest_y - src_y))
 	{
 	    pixman_region32_fini (region);
 	    return FALSE;
@@ -180,7 +180,7 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     }
     if (src_image->common.alpha_map && src_image->common.alpha_map->common.have_clip_region)
     {
-	if (!miClipPictureSrc (region, (pixman_image_t *)src_image->common.alpha_map,
+	if (!clip_source_image (region, (pixman_image_t *)src_image->common.alpha_map,
 			       dest_x - (src_x - src_image->common.alpha_origin_x),
 			       dest_y - (src_y - src_image->common.alpha_origin_y)))
 	{
@@ -191,14 +191,14 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     /* clip against mask */
     if (mask_image && mask_image->common.have_clip_region)
     {
-	if (!miClipPictureSrc (region, mask_image, dest_x - mask_x, dest_y - mask_y))
+	if (!clip_source_image (region, mask_image, dest_x - mask_x, dest_y - mask_y))
 	{
 	    pixman_region32_fini (region);
 	    return FALSE;
 	}	
 	if (mask_image->common.alpha_map && mask_image->common.alpha_map->common.have_clip_region)
 	{
-	    if (!miClipPictureSrc (region, (pixman_image_t *)mask_image->common.alpha_map,
+	    if (!clip_source_image (region, (pixman_image_t *)mask_image->common.alpha_map,
 				   dest_x - (mask_x - mask_image->common.alpha_origin_x),
 				   dest_y - (mask_y - mask_image->common.alpha_origin_y)))
 	    {
commit e27b2a1fcc890d3abf272cc27fa2c0a2e8d7ab09
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 07:59:10 2009 -0400

    Fix some more pFoo names
    
    s/([^a-z])pReg/$1region/g;
    s/([^a-z])pY/$1y_line/g;
    s/([^a-z])pU/$1u_line/g;
    s/([^a-z])pV/$1v_line/g;
    s/([^a-z])p([A-Z])/$1\l$2/g;

diff --git a/pixman/pixman-access.c b/pixman/pixman-access.c
index 217a02d..47f4c52 100644
--- a/pixman/pixman-access.c
+++ b/pixman/pixman-access.c
@@ -804,18 +804,18 @@ fetch_scanline_yv12 (pixman_image_t *image, int x, int line, int width, uint32_t
 	      const uint32_t *mask, uint32_t mask_bits)
 {
     YV12_SETUP(image);
-    uint8_t *pY = YV12_Y (line);
-    uint8_t *pU = YV12_U (line);
-    uint8_t *pV = YV12_V (line);
+    uint8_t *y_line = YV12_Y (line);
+    uint8_t *u_line = YV12_U (line);
+    uint8_t *v_line = YV12_V (line);
     int16_t y, u, v;
     int32_t r, g, b;
     int   i;
 
     for (i = 0; i < width; i++)
     {
-	y = pY[x + i] - 16;
-	u = pU[(x + i) >> 1] - 128;
-	v = pV[(x + i) >> 1] - 128;
+	y = y_line[x + i] - 16;
+	u = u_line[(x + i) >> 1] - 128;
+	v = v_line[(x + i) >> 1] - 128;
 
 	/* R = 1.164(Y - 16) + 1.596(V - 128) */
 	r = 0x012b27 * y + 0x019a2e * v;
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index 2cf1713..e9e5071 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -1487,7 +1487,7 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 
 #if 0
     /* FIXME */
-    assert (src_image->pDrawable == mask_image->pDrawable);
+    assert (src_image->drawable == mask_image->drawable);
 #endif
 
     while (height--)
@@ -2126,7 +2126,7 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
 #if 0
     /* FIXME */
-    assert (src_image->pDrawable == mask_image->pDrawable);
+    assert (src_image->drawable == mask_image->drawable);
 #endif
 
     while (height--)
@@ -2248,7 +2248,7 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 
 #if 0
     /* FIXME */
-    assert (src_image->pDrawable == mask_image->pDrawable);
+    assert (src_image->drawable == mask_image->drawable);
 #endif
 
     while (height--)
diff --git a/pixman/pixman-region.c b/pixman/pixman-region.c
index 30db82a..52ce83e 100644
--- a/pixman/pixman-region.c
+++ b/pixman/pixman-region.c
@@ -75,7 +75,7 @@ static region_data_type_t *pixman_region_emptyData = (region_data_type_t *)&PREF
 static region_data_type_t *pixman_brokendata = (region_data_type_t *)&PREFIX(_brokendata_);
 
 static pixman_bool_t
-pixman_break (region_type_t *pReg);
+pixman_break (region_type_t *region);
 
 /*
  * The functions in this file implement the Region abstraction used extensively
@@ -171,13 +171,13 @@ allocData(size_t n)
 
 #define freeData(reg) if ((reg)->data && (reg)->data->size) free((reg)->data)
 
-#define RECTALLOC_BAIL(pReg,n,bail) \
-if (!(pReg)->data || (((pReg)->data->numRects + (n)) > (pReg)->data->size)) \
-    if (!pixman_rect_alloc(pReg, n)) { goto bail; }
+#define RECTALLOC_BAIL(region,n,bail) \
+if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)) \
+    if (!pixman_rect_alloc(region, n)) { goto bail; }
 
-#define RECTALLOC(pReg,n) \
-if (!(pReg)->data || (((pReg)->data->numRects + (n)) > (pReg)->data->size)) \
-    if (!pixman_rect_alloc(pReg, n)) { return FALSE; }
+#define RECTALLOC(region,n) \
+if (!(region)->data || (((region)->data->numRects + (n)) > (region)->data->size)) \
+    if (!pixman_rect_alloc(region, n)) { return FALSE; }
 
 #define ADDRECT(next_rect,nx1,ny1,nx2,ny2)	\
 {						\
@@ -188,17 +188,17 @@ if (!(pReg)->data || (((pReg)->data->numRects + (n)) > (pReg)->data->size)) \
     next_rect++;				\
 }
 
-#define NEWRECT(pReg,next_rect,nx1,ny1,nx2,ny2)			\
+#define NEWRECT(region,next_rect,nx1,ny1,nx2,ny2)			\
 {									\
-    if (!(pReg)->data || ((pReg)->data->numRects == (pReg)->data->size))\
+    if (!(region)->data || ((region)->data->numRects == (region)->data->size))\
     {									\
-	if (!pixman_rect_alloc(pReg, 1))					\
+	if (!pixman_rect_alloc(region, 1))					\
 	    return FALSE;						\
-	next_rect = PIXREGION_TOP(pReg);					\
+	next_rect = PIXREGION_TOP(region);					\
     }									\
     ADDRECT(next_rect,nx1,ny1,nx2,ny2);					\
-    pReg->data->numRects++;						\
-    assert(pReg->data->numRects<=pReg->data->size);			\
+    region->data->numRects++;						\
+    assert(region->data->numRects<=region->data->size);			\
 }
 
 #define DOWNSIZE(reg,numRects)						\
@@ -422,8 +422,8 @@ pixman_coalesce (
     int	    	  	prevStart,  	/* Index of start of previous band   */
     int	    	  	curStart)   	/* Index of start of current band    */
 {
-    box_type_t *	pPrevBox;   	/* Current box in previous band	     */
-    box_type_t *	pCurBox;    	/* Current box in current band       */
+    box_type_t *	prevBox;   	/* Current box in previous band	     */
+    box_type_t *	curBox;    	/* Current box in current band       */
     int  	numRects;	/* Number rectangles in both bands   */
     int	y2;		/* Bottom of current band	     */
     /*
@@ -438,9 +438,9 @@ pixman_coalesce (
      * The bands may only be coalesced if the bottom of the previous
      * matches the top scanline of the current.
      */
-    pPrevBox = PIXREGION_BOX(region, prevStart);
-    pCurBox = PIXREGION_BOX(region, curStart);
-    if (pPrevBox->y2 != pCurBox->y1) return curStart;
+    prevBox = PIXREGION_BOX(region, prevStart);
+    curBox = PIXREGION_BOX(region, curStart);
+    if (prevBox->y2 != curBox->y1) return curStart;
 
     /*
      * Make sure the bands have boxes in the same places. This
@@ -448,14 +448,14 @@ pixman_coalesce (
      * cover the most area possible. I.e. two boxes in a band must
      * have some horizontal space between them.
      */
-    y2 = pCurBox->y2;
+    y2 = curBox->y2;
 
     do {
-	if ((pPrevBox->x1 != pCurBox->x1) || (pPrevBox->x2 != pCurBox->x2)) {
+	if ((prevBox->x1 != curBox->x1) || (prevBox->x2 != curBox->x2)) {
 	    return (curStart);
 	}
-	pPrevBox++;
-	pCurBox++;
+	prevBox++;
+	curBox++;
 	numRects--;
     } while (numRects);
 
@@ -466,8 +466,8 @@ pixman_coalesce (
     numRects = curStart - prevStart;
     region->data->numRects -= numRects;
     do {
-	pPrevBox--;
-	pPrevBox->y2 = y2;
+	prevBox--;
+	prevBox->y2 = y2;
 	numRects--;
     } while (numRects);
     return prevStart;
@@ -560,7 +560,7 @@ pixman_region_appendNonO (
  *
  * Side Effects:
  *	The new region is overwritten.
- *	pOverlap set to TRUE if overlapFunc ever returns TRUE.
+ *	overlap set to TRUE if overlapFunc ever returns TRUE.
  *
  * Notes:
  *	The idea behind this function is to view the two regions as sets.
@@ -585,7 +585,7 @@ typedef pixman_bool_t (*OverlapProcPtr)(
     box_type_t *r2End,
     int    	 y1,
     int    	 y2,
-    int		 *pOverlap);
+    int		 *overlap);
 
 static pixman_bool_t
 pixman_op(
@@ -598,7 +598,7 @@ pixman_op(
 					    /* in region 1 ? */
     int	    appendNon2,		    /* Append non-overlapping bands  */
 					    /* in region 2 ? */
-    int	    *pOverlap)
+    int	    *overlap)
 {
     box_type_t * r1;			    /* Pointer into first region     */
     box_type_t * r2;			    /* Pointer into 2d region	     */
@@ -754,7 +754,7 @@ pixman_op(
 				 r1, r1BandEnd,
 				 r2, r2BandEnd,
 				 ytop, ybot,
-				 pOverlap))
+				 overlap))
 		goto bail;
 	    Coalesce(newReg, prevBand, curBand);
 	}
@@ -909,7 +909,7 @@ pixman_region_intersectO (region_type_t *region,
 			  box_type_t    *r2End,
 			  int    	     y1,
 			  int    	     y2,
-			  int		    *pOverlap)
+			  int		    *overlap)
 {
     int  	x1;
     int  	x2;
@@ -1011,7 +1011,7 @@ PREFIX(_intersect) (region_type_t * 	newReg,
 {								\
     if (r->x1 <= x2) {						\
 	/* Merge with current rectangle */			\
-	if (r->x1 < x2) *pOverlap = TRUE;				\
+	if (r->x1 < x2) *overlap = TRUE;				\
 	if (x2 < r->x2) x2 = r->x2;				\
     } else {							\
 	/* Add current rectangle, start new one */		\
@@ -1037,7 +1037,7 @@ PREFIX(_intersect) (region_type_t * 	newReg,
  *
  * Side Effects:
  *	region is overwritten.
- *	pOverlap is set to TRUE if any boxes overlap.
+ *	overlap is set to TRUE if any boxes overlap.
  *
  *-----------------------------------------------------------------------
  */
@@ -1050,7 +1050,7 @@ pixman_region_unionO (
     box_type_t *r2End,
     int	  y1,
     int	  y2,
-    int		  *pOverlap)
+    int		  *overlap)
 {
     box_type_t *     next_rect;
     int        x1;     /* left and right side of current union */
@@ -1285,7 +1285,7 @@ QuickSortRects(
  *
  * Side Effects:
  *      The passed-in ``region'' may be modified.
- *	pOverlap set to TRUE if any retangles overlapped,
+ *	overlap set to TRUE if any retangles overlapped,
  *      else FALSE;
  *
  * Strategy:
@@ -1307,7 +1307,7 @@ QuickSortRects(
 
 static pixman_bool_t
 validate (region_type_t * badreg,
-	  int *pOverlap)
+	  int *overlap)
 {
     /* Descriptor for regions under construction  in Step 2. */
     typedef struct {
@@ -1331,7 +1331,7 @@ validate (region_type_t * badreg,
     region_type_t *  hreg;       /* ri[j_half].reg			    */
     pixman_bool_t ret = TRUE;
 
-    *pOverlap = FALSE;
+    *overlap = FALSE;
     if (!badreg->data)
     {
 	good(badreg);
@@ -1400,7 +1400,7 @@ validate (region_type_t * badreg,
 		if (box->x1 <= riBox->x2)
 		{
 		    /* Merge it with riBox */
-		    if (box->x1 < riBox->x2) *pOverlap = TRUE;
+		    if (box->x1 < riBox->x2) *overlap = TRUE;
 		    if (box->x2 > riBox->x2) riBox->x2 = box->x2;
 		}
 		else
@@ -1485,7 +1485,7 @@ NextRect: ;
 	{
 	    reg = &ri[j].reg;
 	    hreg = &ri[j+half].reg;
-	    if (!pixman_op(reg, reg, hreg, pixman_region_unionO, TRUE, TRUE, pOverlap))
+	    if (!pixman_op(reg, reg, hreg, pixman_region_unionO, TRUE, TRUE, overlap))
 		ret = FALSE;
 	    if (hreg->extents.x1 < reg->extents.x1)
 		reg->extents.x1 = hreg->extents.x1;
@@ -1543,7 +1543,7 @@ pixman_region_subtractO (
     box_type_t *  	  	r2End,
     int  	y1,
     int  	y2,
-    int		*pOverlap)
+    int		*overlap)
 {
     box_type_t *	next_rect;
     int  	x1;
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index 4508471..02d4511 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -3115,7 +3115,7 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
      * I copy the code from MMX one and keep the fixme.
      * If it's a problem there, probably is a problem here.
      */
-    assert (src_image->pDrawable == mask_image->pDrawable);
+    assert (src_image->drawable == mask_image->drawable);
 #endif
 
     while (height--)
@@ -3794,7 +3794,7 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
      * I copy the code from MMX one and keep the fixme.
      * If it's a problem there, probably is a problem here.
      */
-    assert (src_image->pDrawable == mask_image->pDrawable);
+    assert (src_image->drawable == mask_image->drawable);
 #endif
 
     while (height--)
@@ -3928,7 +3928,7 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
      * I copy the code from MMX one and keep the fixme.
      * If it's a problem there, probably is a problem here.
      */
-    assert (src_image->pDrawable == mask_image->pDrawable);
+    assert (src_image->drawable == mask_image->drawable);
 #endif
 
     while (height--)
diff --git a/pixman/pixman-utils.c b/pixman/pixman-utils.c
index 8949284..42ce5ef 100644
--- a/pixman/pixman-utils.c
+++ b/pixman/pixman-utils.c
@@ -37,32 +37,32 @@
 
 static inline pixman_bool_t
 miClipPictureReg (pixman_region32_t *	region,
-		  pixman_region32_t *	pClip,
+		  pixman_region32_t *	clip,
 		  int		dx,
 		  int		dy)
 {
     if (pixman_region32_n_rects(region) == 1 &&
-	pixman_region32_n_rects(pClip) == 1)
+	pixman_region32_n_rects(clip) == 1)
     {
-	pixman_box32_t *  pRbox = pixman_region32_rectangles(region, NULL);
-	pixman_box32_t *  pCbox = pixman_region32_rectangles(pClip, NULL);
+	pixman_box32_t *  rbox = pixman_region32_rectangles(region, NULL);
+	pixman_box32_t *  cbox = pixman_region32_rectangles(clip, NULL);
 	int	v;
 	
-	if (pRbox->x1 < (v = pCbox->x1 + dx))
-	    pRbox->x1 = BOUND(v);
-	if (pRbox->x2 > (v = pCbox->x2 + dx))
-	    pRbox->x2 = BOUND(v);
-	if (pRbox->y1 < (v = pCbox->y1 + dy))
-	    pRbox->y1 = BOUND(v);
-	if (pRbox->y2 > (v = pCbox->y2 + dy))
-	    pRbox->y2 = BOUND(v);
-	if (pRbox->x1 >= pRbox->x2 ||
-	    pRbox->y1 >= pRbox->y2)
+	if (rbox->x1 < (v = cbox->x1 + dx))
+	    rbox->x1 = BOUND(v);
+	if (rbox->x2 > (v = cbox->x2 + dx))
+	    rbox->x2 = BOUND(v);
+	if (rbox->y1 < (v = cbox->y1 + dy))
+	    rbox->y1 = BOUND(v);
+	if (rbox->y2 > (v = cbox->y2 + dy))
+	    rbox->y2 = BOUND(v);
+	if (rbox->x1 >= rbox->x2 ||
+	    rbox->y1 >= rbox->y2)
 	{
 	    pixman_region32_init (region);
 	}
     }
-    else if (!pixman_region32_not_empty (pClip))
+    else if (!pixman_region32_not_empty (clip))
     {
 	return FALSE;
     }
@@ -70,7 +70,7 @@ miClipPictureReg (pixman_region32_t *	region,
     {
 	if (dx || dy)
 	    pixman_region32_translate (region, -dx, -dy);
-	if (!pixman_region32_intersect (region, region, pClip))
+	if (!pixman_region32_intersect (region, region, clip))
 	    return FALSE;
 	if (dx || dy)
 	    pixman_region32_translate(region, dx, dy);
@@ -81,7 +81,7 @@ miClipPictureReg (pixman_region32_t *	region,
 
 static inline pixman_bool_t
 miClipPictureSrc (pixman_region32_t *	region,
-		  pixman_image_t *	pPicture,
+		  pixman_image_t *	picture,
 		  int		dx,
 		  int		dy)
 {
@@ -90,11 +90,11 @@ miClipPictureSrc (pixman_region32_t *	region,
      * the clip was not set by a client, then it is a hierarchy
      * clip and those should always be ignored for sources).
      */
-    if (!pPicture->common.clip_sources || !pPicture->common.client_clip)
+    if (!picture->common.clip_sources || !picture->common.client_clip)
 	return TRUE;
 
     return miClipPictureReg (region,
-			     &pPicture->common.clip_region,
+			     &picture->common.clip_region,
 			     dx, dy);
 }
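
As an aside on the miClipPictureReg fast path above: when both regions hold a single box, the intersection reduces to clamping the four edges against the clip box translated by (dx, dy) and checking whether anything is left. A minimal standalone sketch of that idea follows — the box_t type and clip_box_1x1 helper are hypothetical, not pixman API, and the BOUND() clamping to 16-bit coordinates that the real code applies is left out.

    #include <stdbool.h>

    typedef struct { int x1, y1, x2, y2; } box_t;

    /* Clip *r against *clip translated by (dx, dy).  Returns false when
     * the intersection is empty; the real code instead reinitializes the
     * region and keeps going.
     */
    static bool
    clip_box_1x1 (box_t *r, const box_t *clip, int dx, int dy)
    {
        if (r->x1 < clip->x1 + dx) r->x1 = clip->x1 + dx;
        if (r->x2 > clip->x2 + dx) r->x2 = clip->x2 + dx;
        if (r->y1 < clip->y1 + dy) r->y1 = clip->y1 + dy;
        if (r->y2 > clip->y2 + dy) r->y2 = clip->y2 + dy;

        return r->x1 < r->x2 && r->y1 < r->y2;
    }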
 
commit 006f21b02b23e1865c0e35d0f9b97af63f52a469
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 07:54:10 2009 -0400

    Fix the names of some common parameters
    
        s/xDst/dest_x/g;
        s/yDst/dest_y/g;
        s/xMask/mask_x/g;
        s/yMask/mask_y/g;
        s/xSrc/src_x/g;
        s/ySrc/src_y/g;
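
These, too, are purely mechanical renames: after them every compositing fast path shares the same thirteen-argument layout that appears throughout the diffs below, with per-image coordinates spelled <role>_x / <role>_y to match the src_image / mask_image / dst_image parameters. A standalone sketch of that layout is shown here; the typedefs are stand-ins for this example only and the function itself is hypothetical, not part of pixman.

    #include <stdint.h>

    /* Stand-in declarations so the sketch compiles on its own; the real
     * types live in pixman's headers.
     */
    typedef struct pixman_implementation pixman_implementation_t;
    typedef struct pixman_image          pixman_image_t;
    typedef int                          pixman_op_t;

    static void
    example_composite_noop (pixman_implementation_t *imp,
                            pixman_op_t              op,
                            pixman_image_t          *src_image,
                            pixman_image_t          *mask_image,
                            pixman_image_t          *dst_image,
                            int32_t                  src_x,
                            int32_t                  src_y,
                            int32_t                  mask_x,
                            int32_t                  mask_y,
                            int32_t                  dest_x,
                            int32_t                  dest_y,
                            int32_t                  width,
                            int32_t                  height)
    {
        /* Nothing is composited; the point is only the parameter naming
         * (dest_x/dest_y rather than xDst/yDst, and so on).
         */
        (void) imp;       (void) op;
        (void) src_image; (void) mask_image; (void) dst_image;
        (void) src_x;     (void) src_y;
        (void) mask_x;    (void) mask_y;
        (void) dest_x;    (void) dest_y;
        (void) width;     (void) height;
    }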

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index dcc0495..97baebb 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -130,12 +130,12 @@ neon_CompositeAdd_8000_8000 (
                                 pixman_image_t * src_image,
                                 pixman_image_t * mask_image,
                                 pixman_image_t * dst_image,
-                                int32_t      xSrc,
-                                int32_t      ySrc,
-                                int32_t      xMask,
-                                int32_t      yMask,
-                                int32_t      xDst,
-                                int32_t      yDst,
+                                int32_t      src_x,
+                                int32_t      src_y,
+                                int32_t      mask_x,
+                                int32_t      mask_y,
+                                int32_t      dest_x,
+                                int32_t      dest_y,
                                 int32_t      width,
                                 int32_t      height)
 {
@@ -144,8 +144,8 @@ neon_CompositeAdd_8000_8000 (
     int dstStride, srcStride;
     uint16_t    w;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     if (width>=8)
     {
@@ -280,12 +280,12 @@ neon_composite_over_8888_8888 (
 			 pixman_image_t * src_image,
 			 pixman_image_t * mask_image,
 			 pixman_image_t * dst_image,
-			 int32_t      xSrc,
-			 int32_t      ySrc,
-			 int32_t      xMask,
-			 int32_t      yMask,
-			 int32_t      xDst,
-			 int32_t      yDst,
+			 int32_t      src_x,
+			 int32_t      src_y,
+			 int32_t      mask_x,
+			 int32_t      mask_y,
+			 int32_t      dest_x,
+			 int32_t      dest_y,
 			 int32_t      width,
 			 int32_t      height)
 {
@@ -294,8 +294,8 @@ neon_composite_over_8888_8888 (
     int	dstStride, srcStride;
     uint32_t	w;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     if (width>=8)
     {
@@ -441,12 +441,12 @@ neon_composite_over_8888_n_8888 (
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t	xSrc,
-			       int32_t	ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t	src_x,
+			       int32_t	src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t      width,
 			       int32_t      height)
 {
@@ -457,8 +457,8 @@ neon_composite_over_8888_n_8888 (
     uint32_t	w;
     uint8x8_t mask_alpha;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask_alpha = vdup_n_u8((mask) >> 24);
@@ -638,12 +638,12 @@ neon_CompositeOver_n_8_8888 (
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t      xSrc,
-			       int32_t      ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t      src_x,
+			       int32_t      src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t      width,
 			       int32_t      height)
 {
@@ -670,8 +670,8 @@ neon_CompositeOver_n_8_8888 (
     sval8.val[2]=vdup_lane_u8(sval2,2);
     sval8.val[3]=vdup_lane_u8(sval2,3);
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     if (width>=8)
     {
@@ -845,12 +845,12 @@ neon_CompositeAdd_8888_8_8 (
                             pixman_image_t * src_image,
                             pixman_image_t * mask_image,
                             pixman_image_t * dst_image,
-                            int32_t      xSrc,
-                            int32_t      ySrc,
-                            int32_t      xMask,
-                            int32_t      yMask,
-                            int32_t      xDst,
-                            int32_t      yDst,
+                            int32_t      src_x,
+                            int32_t      src_y,
+                            int32_t      mask_x,
+                            int32_t      mask_y,
+                            int32_t      dest_x,
+                            int32_t      dest_y,
                             int32_t      width,
                             int32_t      height)
 {
@@ -861,8 +861,8 @@ neon_CompositeAdd_8888_8_8 (
     uint32_t    src;
     uint8x8_t   sa;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = vdup_n_u8((src) >> 24);
 
@@ -964,12 +964,12 @@ neon_CompositeSrc_16_16 (
 	pixman_image_t * src_image,
 	pixman_image_t * mask_image,
 	pixman_image_t * dst_image,
-	int32_t      xSrc,
-	int32_t      ySrc,
-	int32_t      xMask,
-	int32_t      yMask,
-	int32_t      xDst,
-	int32_t      yDst,
+	int32_t      src_x,
+	int32_t      src_y,
+	int32_t      mask_x,
+	int32_t      mask_y,
+	int32_t      dest_x,
+	int32_t      dest_y,
 	int32_t      width,
 	int32_t      height)
 {
@@ -980,8 +980,8 @@ neon_CompositeSrc_16_16 (
 		return;
 
 	/* We simply copy 16-bit-aligned pixels from one place to another. */
-	fbComposeGetStart (src_image, xSrc, ySrc, uint16_t, srcStride, srcLine, 1);
-	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (src_image, src_x, src_y, uint16_t, srcStride, srcLine, 1);
+	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
 	/* Preload the first input scanline */
 	{
@@ -1091,12 +1091,12 @@ neon_CompositeSrc_24_16 (
 	pixman_image_t * src_image,
 	pixman_image_t * mask_image,
 	pixman_image_t * dst_image,
-	int32_t      xSrc,
-	int32_t      ySrc,
-	int32_t      xMask,
-	int32_t      yMask,
-	int32_t      xDst,
-	int32_t      yDst,
+	int32_t      src_x,
+	int32_t      src_y,
+	int32_t      mask_x,
+	int32_t      mask_y,
+	int32_t      dest_x,
+	int32_t      dest_y,
 	int32_t      width,
 	int32_t      height)
 {
@@ -1108,8 +1108,8 @@ neon_CompositeSrc_24_16 (
 		return;
 
 	/* We simply copy pixels from one place to another, assuming that the source's alpha is opaque. */
-	fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
 	/* Preload the first input scanline */
 	{
@@ -1718,12 +1718,12 @@ neon_CompositeOver_n_8_0565 (
 	pixman_image_t * src_image,
 	pixman_image_t * mask_image,
 	pixman_image_t * dst_image,
-	int32_t      xSrc,
-	int32_t      ySrc,
-	int32_t      xMask,
-	int32_t      yMask,
-	int32_t      xDst,
-	int32_t      yDst,
+	int32_t      src_x,
+	int32_t      src_y,
+	int32_t      mask_x,
+	int32_t      mask_y,
+	int32_t      dest_x,
+	int32_t      dest_y,
 	int32_t      width,
 	int32_t      height)
 {
@@ -1748,14 +1748,14 @@ neon_CompositeOver_n_8_0565 (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_n_8_0565(impl, op, src_image, mask_image, dst_image, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_n_8_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
 											  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-	fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
@@ -1873,12 +1873,12 @@ neon_CompositeOver_n_0565 (
 	pixman_image_t * src_image,
 	pixman_image_t * mask_image,
 	pixman_image_t * dst_image,
-	int32_t      xSrc,
-	int32_t      ySrc,
-	int32_t      xMask,
-	int32_t      yMask,
-	int32_t      xDst,
-	int32_t      yDst,
+	int32_t      src_x,
+	int32_t      src_y,
+	int32_t      mask_x,
+	int32_t      mask_y,
+	int32_t      dest_x,
+	int32_t      dest_y,
 	int32_t      width,
 	int32_t      height)
 {
@@ -1902,13 +1902,13 @@ neon_CompositeOver_n_0565 (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_n_0565(impl, op, src_image, mask_image, dst_image, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_n_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
 										(x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
@@ -2016,12 +2016,12 @@ neon_CompositeOver_8888_0565 (
 	pixman_image_t * src_image,
 	pixman_image_t * mask_image,
 	pixman_image_t * dst_image,
-	int32_t      xSrc,
-	int32_t      ySrc,
-	int32_t      xMask,
-	int32_t      yMask,
-	int32_t      xDst,
-	int32_t      yDst,
+	int32_t      src_x,
+	int32_t      src_y,
+	int32_t      mask_x,
+	int32_t      mask_y,
+	int32_t      dest_x,
+	int32_t      dest_y,
 	int32_t      width,
 	int32_t      height)
 {
@@ -2038,14 +2038,14 @@ neon_CompositeOver_8888_0565 (
 		// split the blit, so we can use a fixed-size scanline buffer
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_8888_0565(impl, op, src_image, mask_image, dst_image, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_8888_0565(impl, op, src_image, mask_image, dst_image, src_x+x, src_y, mask_x+x, mask_y, dest_x+x, dest_y,
 										  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-	fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+	fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
diff --git a/pixman/pixman-arm-simd.c b/pixman/pixman-arm-simd.c
index 9d04b7c..7a5b345 100644
--- a/pixman/pixman-arm-simd.c
+++ b/pixman/pixman-arm-simd.c
@@ -36,12 +36,12 @@ arm_CompositeAdd_8000_8000 (
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
 				pixman_image_t * dst_image,
-				int32_t      xSrc,
-				int32_t      ySrc,
-				int32_t      xMask,
-				int32_t      yMask,
-				int32_t      xDst,
-				int32_t      yDst,
+				int32_t      src_x,
+				int32_t      src_y,
+				int32_t      mask_x,
+				int32_t      mask_y,
+				int32_t      dest_x,
+				int32_t      dest_y,
 				int32_t      width,
 				int32_t      height)
 {
@@ -51,8 +51,8 @@ arm_CompositeAdd_8000_8000 (
     uint16_t	w;
     uint8_t	s, d;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -106,12 +106,12 @@ arm_composite_over_8888_8888 (
 			 pixman_image_t * src_image,
 			 pixman_image_t * mask_image,
 			 pixman_image_t * dst_image,
-			 int32_t      xSrc,
-			 int32_t      ySrc,
-			 int32_t      xMask,
-			 int32_t      yMask,
-			 int32_t      xDst,
-			 int32_t      yDst,
+			 int32_t      src_x,
+			 int32_t      src_y,
+			 int32_t      mask_x,
+			 int32_t      mask_y,
+			 int32_t      dest_x,
+			 int32_t      dest_y,
 			 int32_t      width,
 			 int32_t      height)
 {
@@ -123,8 +123,8 @@ arm_composite_over_8888_8888 (
     uint32_t upper_component_mask = 0xff00ff00;
     uint32_t alpha_mask = 0xff;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -199,12 +199,12 @@ arm_composite_over_8888_n_8888 (
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t	xSrc,
-			       int32_t	ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t	src_x,
+			       int32_t	src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t      width,
 			       int32_t      height)
 {
@@ -216,8 +216,8 @@ arm_composite_over_8888_n_8888 (
     uint32_t component_half = 0x800080;
     uint32_t alpha_mask = 0xff;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = (mask) >> 24;
@@ -308,12 +308,12 @@ arm_CompositeOver_n_8_8888 (
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t      xSrc,
-			       int32_t      ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t      src_x,
+			       int32_t      src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t      width,
 			       int32_t      height)
 {
@@ -336,8 +336,8 @@ arm_CompositeOver_n_8_8888 (
     uint32_t src_hi = (src >> 8) & component_mask;
     uint32_t src_lo = src & component_mask;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index ea2d1b2..1bdb323 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -105,12 +105,12 @@ fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
 			     pixman_image_t * dst_image,
-			     int32_t      xSrc,
-			     int32_t      ySrc,
-			     int32_t      xMask,
-			     int32_t      yMask,
-			     int32_t      xDst,
-			     int32_t      yDst,
+			     int32_t      src_x,
+			     int32_t      src_y,
+			     int32_t      mask_x,
+			     int32_t      mask_y,
+			     int32_t      dest_x,
+			     int32_t      dest_y,
 			     int32_t     width,
 			     int32_t     height)
 {
@@ -122,9 +122,9 @@ fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     uint32_t s, d;
     uint16_t w;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -163,12 +163,12 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			      pixman_image_t    *iSrc,
 			      pixman_image_t    *iMask,
 			      pixman_image_t    *iDst,
-			      int32_t      xSrc,
-			      int32_t      ySrc,
-			      int32_t      xMask,
-			      int32_t      yMask,
-			      int32_t      xDst,
-			      int32_t      yDst,
+			      int32_t      src_x,
+			      int32_t      src_y,
+			      int32_t      mask_x,
+			      int32_t      mask_y,
+			      int32_t      dest_x,
+			      int32_t      dest_y,
 			      int32_t     width,
 			      int32_t     height)
 {
@@ -183,8 +183,8 @@ fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 
     srca = src >> 24;
 
-    fbComposeGetStart (iDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (iMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (iMask, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     if (srca == 0xff) {
 	while (height--)
@@ -245,12 +245,12 @@ fast_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      pixman_image_t  *iSrc,
 		      pixman_image_t  *iMask,
 		      pixman_image_t  *iDst,
-		      int32_t          xSrc,
-		      int32_t          ySrc,
-		      int32_t          xMask,
-		      int32_t          yMask,
-		      int32_t          xDst,
-		      int32_t          yDst,
+		      int32_t          src_x,
+		      int32_t          src_y,
+		      int32_t          mask_x,
+		      int32_t          mask_y,
+		      int32_t          dest_x,
+		      int32_t          dest_y,
 		      int32_t         width,
 		      int32_t         height)
 {
@@ -261,8 +261,8 @@ fast_CompositeIn_8_8 (pixman_implementation_t *imp,
     uint8_t	s;
     uint16_t	t;
 
-    fbComposeGetStart (iSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (iDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (iSrc, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (iDst, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -294,12 +294,12 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t      xSrc,
-			       int32_t      ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t      src_x,
+			       int32_t      src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -315,8 +315,8 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -352,12 +352,12 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				   pixman_image_t * src_image,
 				   pixman_image_t * mask_image,
 				   pixman_image_t * dst_image,
-				   int32_t      xSrc,
-				   int32_t      ySrc,
-				   int32_t      xMask,
-				   int32_t      yMask,
-				   int32_t      xDst,
-				   int32_t      yDst,
+				   int32_t      src_x,
+				   int32_t      src_y,
+				   int32_t      mask_x,
+				   int32_t      mask_y,
+				   int32_t      dest_x,
+				   int32_t      dest_y,
 				   int32_t     width,
 				   int32_t     height)
 {
@@ -373,8 +373,8 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -417,12 +417,12 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t      xSrc,
-			       int32_t      ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t      src_x,
+			       int32_t      src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -439,8 +439,8 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 3);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -480,12 +480,12 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -502,8 +502,8 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -544,12 +544,12 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				   pixman_image_t * src_image,
 				   pixman_image_t * mask_image,
 				   pixman_image_t * dst_image,
-				   int32_t      xSrc,
-				   int32_t      ySrc,
-				   int32_t      xMask,
-				   int32_t      yMask,
-				   int32_t      xDst,
-				   int32_t      yDst,
+				   int32_t      src_x,
+				   int32_t      src_y,
+				   int32_t      mask_x,
+				   int32_t      mask_y,
+				   int32_t      dest_x,
+				   int32_t      dest_y,
 				   int32_t     width,
 				   int32_t     height)
 {
@@ -569,8 +569,8 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 
     src16 = CONVERT_8888_TO_0565(src);
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -619,12 +619,12 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
 			 pixman_image_t * src_image,
 			 pixman_image_t * mask_image,
 			 pixman_image_t * dst_image,
-			 int32_t      xSrc,
-			 int32_t      ySrc,
-			 int32_t      xMask,
-			 int32_t      yMask,
-			 int32_t      xDst,
-			 int32_t      yDst,
+			 int32_t      src_x,
+			 int32_t      src_y,
+			 int32_t      mask_x,
+			 int32_t      mask_y,
+			 int32_t      dest_x,
+			 int32_t      dest_y,
 			 int32_t     width,
 			 int32_t     height)
 {
@@ -634,8 +634,8 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
     uint8_t	a;
     uint16_t	w;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -664,12 +664,12 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 			 pixman_image_t * src_image,
 			 pixman_image_t * mask_image,
 			 pixman_image_t * dst_image,
-			 int32_t      xSrc,
-			 int32_t      ySrc,
-			 int32_t      xMask,
-			 int32_t      yMask,
-			 int32_t      xDst,
-			 int32_t      yDst,
+			 int32_t      src_x,
+			 int32_t      src_y,
+			 int32_t      mask_x,
+			 int32_t      mask_y,
+			 int32_t      dest_x,
+			 int32_t      dest_y,
 			 int32_t     width,
 			 int32_t     height)
 {
@@ -680,8 +680,8 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 3);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 3);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -715,12 +715,12 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 			 pixman_image_t * src_image,
 			 pixman_image_t * mask_image,
 			 pixman_image_t * dst_image,
-			 int32_t      xSrc,
-			 int32_t      ySrc,
-			 int32_t      xMask,
-			 int32_t      yMask,
-			 int32_t      xDst,
-			 int32_t      yDst,
+			 int32_t      src_x,
+			 int32_t      src_y,
+			 int32_t      mask_x,
+			 int32_t      mask_y,
+			 int32_t      dest_x,
+			 int32_t      dest_y,
 			 int32_t     width,
 			 int32_t     height)
 {
@@ -731,8 +731,8 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -768,12 +768,12 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
                           pixman_image_t * src_image,
                           pixman_image_t * mask_image,
                           pixman_image_t * dst_image,
-                          int32_t      xSrc,
-                          int32_t      ySrc,
-                          int32_t      xMask,
-                          int32_t      yMask,
-                          int32_t      xDst,
-                          int32_t      yDst,
+                          int32_t      src_x,
+                          int32_t      src_y,
+                          int32_t      mask_x,
+                          int32_t      mask_y,
+                          int32_t      dest_x,
+                          int32_t      dest_y,
                           int32_t     width,
                           int32_t     height)
 {
@@ -782,8 +782,8 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -808,12 +808,12 @@ fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
 			     pixman_image_t * dst_image,
-			     int32_t      xSrc,
-			     int32_t      ySrc,
-			     int32_t      xMask,
-			     int32_t      yMask,
-			     int32_t      xDst,
-			     int32_t      yDst,
+			     int32_t      src_x,
+			     int32_t      src_y,
+			     int32_t      mask_x,
+			     int32_t      mask_y,
+			     int32_t      dest_x,
+			     int32_t      dest_y,
 			     int32_t     width,
 			     int32_t     height)
 {
@@ -824,8 +824,8 @@ fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
     uint8_t	s, d;
     uint16_t	t;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -859,12 +859,12 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
 			     pixman_image_t * dst_image,
-			     int32_t      xSrc,
-			     int32_t      ySrc,
-			     int32_t      xMask,
-			     int32_t      yMask,
-			     int32_t      xDst,
-			     int32_t      yDst,
+			     int32_t      src_x,
+			     int32_t      src_y,
+			     int32_t      mask_x,
+			     int32_t      mask_y,
+			     int32_t      dest_x,
+			     int32_t      dest_y,
 			     int32_t     width,
 			     int32_t     height)
 {
@@ -874,8 +874,8 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
     uint16_t	w;
     uint32_t	s, d;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -909,12 +909,12 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
-			    int32_t      xSrc,
-			    int32_t      ySrc,
-			    int32_t      xMask,
-			    int32_t      yMask,
-			    int32_t      xDst,
-			    int32_t      yDst,
+			    int32_t      src_x,
+			    int32_t      src_y,
+			    int32_t      mask_x,
+			    int32_t      mask_y,
+			    int32_t      dest_x,
+			    int32_t      dest_y,
 			    int32_t     width,
 			    int32_t     height)
 {
@@ -925,8 +925,8 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     uint32_t	src;
     uint8_t	sa;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
     src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = (src >> 24);
 
@@ -966,12 +966,12 @@ fast_CompositeSolidFill (pixman_implementation_t *imp,
 		      pixman_image_t * src_image,
 		      pixman_image_t * mask_image,
 		      pixman_image_t * dst_image,
-		      int32_t      xSrc,
-		      int32_t      ySrc,
-		      int32_t      xMask,
-		      int32_t      yMask,
-		      int32_t      xDst,
-		      int32_t      yDst,
+		      int32_t      src_x,
+		      int32_t      src_y,
+		      int32_t      mask_x,
+		      int32_t      mask_y,
+		      int32_t      dest_x,
+		      int32_t      dest_y,
 		      int32_t     width,
 		      int32_t     height)
 {
@@ -987,7 +987,7 @@ fast_CompositeSolidFill (pixman_implementation_t *imp,
 
     pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
 		 PIXMAN_FORMAT_BPP (dst_image->bits.format),
-		 xDst, yDst,
+		 dest_x, dest_y,
 		 width, height,
 		 src);
 }
@@ -998,12 +998,12 @@ fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
 			  pixman_image_t * src_image,
 			  pixman_image_t * mask_image,
 			  pixman_image_t * dst_image,
-			  int32_t      xSrc,
-			  int32_t      ySrc,
-			  int32_t      xMask,
-			  int32_t      yMask,
-			  int32_t      xDst,
-			  int32_t      yDst,
+			  int32_t      src_x,
+			  int32_t      src_y,
+			  int32_t      mask_x,
+			  int32_t      mask_y,
+			  int32_t      dest_x,
+			  int32_t      dest_y,
 			  int32_t     width,
 			  int32_t     height)
 {
@@ -1012,8 +1012,8 @@ fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
     int		 dstStride, srcStride;
     uint32_t	 n_bytes = width * sizeof (uint32_t);
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, src, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dst, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, src, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
 
     while (height--)
     {
@@ -1079,12 +1079,12 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
 			    pixman_image_t *src_image,
 			    pixman_image_t *mask_image,
 			    pixman_image_t *dst_image,
-			    int32_t         xSrc,
-			    int32_t         ySrc,
-			    int32_t         xMask,
-			    int32_t         yMask,
-			    int32_t         xDst,
-			    int32_t         yDst,
+			    int32_t         src_x,
+			    int32_t         src_y,
+			    int32_t         mask_x,
+			    int32_t         mask_y,
+			    int32_t         dest_x,
+			    int32_t         dest_y,
 			    int32_t        width,
 			    int32_t        height)
 {
@@ -1094,14 +1094,14 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
     int             i, j;
     pixman_vector_t v;
     
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dst, 1);
-    /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dst, 1);
+    /* pass in 0 instead of src_x and src_y because src_x and src_y need to be
      * transformed from destination space to source space */
     fbComposeGetStart (src_image, 0, 0, uint32_t, srcStride, src, 1);
     
     /* reference point is the center of the pixel */
-    v.vector[0] = pixman_int_to_fixed(xSrc) + pixman_fixed_1 / 2;
-    v.vector[1] = pixman_int_to_fixed(ySrc) + pixman_fixed_1 / 2;
+    v.vector[0] = pixman_int_to_fixed(src_x) + pixman_fixed_1 / 2;
+    v.vector[1] = pixman_int_to_fixed(src_y) + pixman_fixed_1 / 2;
     v.vector[2] = pixman_fixed_1;
     
     if (!pixman_transform_point_3d (src_image->common.transform, &v))
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index fd688e9..2cf1713 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -924,12 +924,12 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
-			    int32_t	xSrc,
-			    int32_t	ySrc,
-			    int32_t	xMask,
-			    int32_t	yMask,
-			    int32_t	xDst,
-			    int32_t	yDst,
+			    int32_t	src_x,
+			    int32_t	src_y,
+			    int32_t	mask_x,
+			    int32_t	mask_y,
+			    int32_t	dest_x,
+			    int32_t	dest_y,
 			    int32_t	width,
 			    int32_t	height)
 {
@@ -946,7 +946,7 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1003,12 +1003,12 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
-			    int32_t	xSrc,
-			    int32_t	ySrc,
-			    int32_t	xMask,
-			    int32_t	yMask,
-			    int32_t	xDst,
-			    int32_t	yDst,
+			    int32_t	src_x,
+			    int32_t	src_y,
+			    int32_t	mask_x,
+			    int32_t	mask_y,
+			    int32_t	dest_x,
+			    int32_t	dest_y,
 			    int32_t	width,
 			    int32_t	height)
 {
@@ -1025,7 +1025,7 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1089,12 +1089,12 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
 				      pixman_image_t * dst_image,
-				      int32_t	xSrc,
-				      int32_t	ySrc,
-				      int32_t	xMask,
-				      int32_t	yMask,
-				      int32_t	xDst,
-				      int32_t	yDst,
+				      int32_t	src_x,
+				      int32_t	src_y,
+				      int32_t	mask_x,
+				      int32_t	mask_y,
+				      int32_t	dest_x,
+				      int32_t	dest_y,
 				      int32_t	width,
 				      int32_t	height)
 {
@@ -1112,8 +1112,8 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     vsrc = load8888(src);
     vsrca = expand_alpha(vsrc);
@@ -1193,12 +1193,12 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t	xSrc,
-			       int32_t	ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t	src_x,
+			       int32_t	src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -1212,8 +1212,8 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
@@ -1278,12 +1278,12 @@ mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t	xSrc,
-			       int32_t	ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t	src_x,
+			       int32_t	src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -1297,8 +1297,8 @@ mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
@@ -1413,12 +1413,12 @@ mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
 			     pixman_image_t * dst_image,
-			     int32_t	xSrc,
-			     int32_t	ySrc,
-			     int32_t      xMask,
-			     int32_t      yMask,
-			     int32_t      xDst,
-			     int32_t      yDst,
+			     int32_t	src_x,
+			     int32_t	src_y,
+			     int32_t      mask_x,
+			     int32_t      mask_y,
+			     int32_t      dest_x,
+			     int32_t      dest_y,
 			     int32_t     width,
 			     int32_t     height)
 {
@@ -1431,8 +1431,8 @@ mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -1466,12 +1466,12 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
 			     pixman_image_t * dst_image,
-			     int32_t      xSrc,
-			     int32_t      ySrc,
-			     int32_t      xMask,
-			     int32_t      yMask,
-			     int32_t      xDst,
-			     int32_t      yDst,
+			     int32_t      src_x,
+			     int32_t      src_y,
+			     int32_t      mask_x,
+			     int32_t      mask_y,
+			     int32_t      dest_x,
+			     int32_t      dest_y,
 			     int32_t     width,
 			     int32_t     height)
 {
@@ -1482,8 +1482,8 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
@@ -1568,12 +1568,12 @@ mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -1595,8 +1595,8 @@ mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1840,12 +1840,12 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				     pixman_image_t * src_image,
 				     pixman_image_t * mask_image,
 				     pixman_image_t * dst_image,
-				     int32_t      xSrc,
-				     int32_t      ySrc,
-				     int32_t      xMask,
-				     int32_t      yMask,
-				     int32_t      xDst,
-				     int32_t      yDst,
+				     int32_t      src_x,
+				     int32_t      src_y,
+				     int32_t      mask_x,
+				     int32_t      mask_y,
+				     int32_t      dest_x,
+				     int32_t      dest_y,
 				     int32_t     width,
 				     int32_t     height)
 {
@@ -1865,14 +1865,14 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
     if (srca == 0)
     {
 	pixman_fill_mmx (dst_image->bits.bits, dst_image->bits.rowstride, PIXMAN_FORMAT_BPP (dst_image->bits.format),
-			 xDst, yDst, width, height, 0);
+			 dest_x, dest_y, width, height, 0);
 	return;
     }
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1972,12 +1972,12 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -1997,8 +1997,8 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -2105,12 +2105,12 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -2121,8 +2121,8 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
@@ -2227,12 +2227,12 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -2243,8 +2243,8 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
@@ -2327,12 +2327,12 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
 				      pixman_image_t * dst_image,
-				      int32_t      xSrc,
-				      int32_t      ySrc,
-				      int32_t      xMask,
-				      int32_t      yMask,
-				      int32_t      xDst,
-				      int32_t      yDst,
+				      int32_t      src_x,
+				      int32_t      src_y,
+				      int32_t      mask_x,
+				      int32_t      mask_y,
+				      int32_t      dest_x,
+				      int32_t      dest_y,
 				      int32_t     width,
 				      int32_t     height)
 {
@@ -2350,8 +2350,8 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -2435,12 +2435,12 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			pixman_image_t * src_image,
 			pixman_image_t * mask_image,
 			pixman_image_t * dst_image,
-			int32_t      xSrc,
-			int32_t      ySrc,
-			int32_t      xMask,
-			int32_t      yMask,
-			int32_t      xDst,
-			int32_t      yDst,
+			int32_t      src_x,
+			int32_t      src_y,
+			int32_t      mask_x,
+			int32_t      mask_y,
+			int32_t      dest_x,
+			int32_t      dest_y,
 			int32_t     width,
 			int32_t     height)
 {
@@ -2452,8 +2452,8 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -2519,12 +2519,12 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      pixman_image_t * src_image,
 		      pixman_image_t * mask_image,
 		      pixman_image_t * dst_image,
-		      int32_t      xSrc,
-		      int32_t      ySrc,
-		      int32_t      xMask,
-		      int32_t      yMask,
-		      int32_t      xDst,
-		      int32_t      yDst,
+		      int32_t      src_x,
+		      int32_t      src_y,
+		      int32_t      mask_x,
+		      int32_t      mask_y,
+		      int32_t      dest_x,
+		      int32_t      dest_y,
 		      int32_t     width,
 		      int32_t     height)
 {
@@ -2533,8 +2533,8 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
     int	srcStride, dstStride;
     uint16_t	w;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -2584,12 +2584,12 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t      xSrc,
-			       int32_t      ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t      src_x,
+			       int32_t      src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -2601,8 +2601,8 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -2663,12 +2663,12 @@ mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
 				pixman_image_t * dst_image,
-				int32_t      xSrc,
-				int32_t      ySrc,
-				int32_t      xMask,
-				int32_t      yMask,
-				int32_t      xDst,
-				int32_t      yDst,
+				int32_t      src_x,
+				int32_t      src_y,
+				int32_t      mask_x,
+				int32_t      mask_y,
+				int32_t      dest_x,
+				int32_t      dest_y,
 				int32_t     width,
 				int32_t     height)
 {
@@ -2681,8 +2681,8 @@ mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -2736,12 +2736,12 @@ mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				pixman_image_t *	src_image,
 				pixman_image_t *	mask_image,
 				pixman_image_t *	 dst_image,
-				int32_t		 xSrc,
-				int32_t      ySrc,
-				int32_t      xMask,
-				int32_t      yMask,
-				int32_t      xDst,
-				int32_t      yDst,
+				int32_t		 src_x,
+				int32_t      src_y,
+				int32_t      mask_x,
+				int32_t      mask_y,
+				int32_t      dest_x,
+				int32_t      dest_y,
 				int32_t     width,
 				int32_t     height)
 {
@@ -2753,8 +2753,8 @@ mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -2935,12 +2935,12 @@ mmx_CompositeCopyArea (pixman_implementation_t *imp,
 			pixman_image_t *	src_image,
 			pixman_image_t *	mask_image,
 			pixman_image_t *	dst_image,
-			int32_t		xSrc,
-			int32_t		ySrc,
-			int32_t		xMask,
-			int32_t		yMask,
-			int32_t		xDst,
-			int32_t		yDst,
+			int32_t		src_x,
+			int32_t		src_y,
+			int32_t		mask_x,
+			int32_t		mask_y,
+			int32_t		dest_x,
+			int32_t		dest_y,
 			int32_t		width,
 			int32_t		height)
 {
@@ -2950,7 +2950,7 @@ mmx_CompositeCopyArea (pixman_implementation_t *imp,
 		    dst_image->bits.rowstride,
 		    PIXMAN_FORMAT_BPP (src_image->bits.format),
 		    PIXMAN_FORMAT_BPP (dst_image->bits.format),
-		    xSrc, ySrc, xDst, yDst, width, height);
+		    src_x, src_y, dest_x, dest_y, width, height);
 }
 
 static void
@@ -2959,12 +2959,12 @@ mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
 				pixman_image_t * dst_image,
-				int32_t      xSrc,
-				int32_t      ySrc,
-				int32_t      xMask,
-				int32_t      yMask,
-				int32_t      xDst,
-				int32_t      yDst,
+				int32_t      src_x,
+				int32_t      src_y,
+				int32_t      mask_x,
+				int32_t      mask_y,
+				int32_t      dest_x,
+				int32_t      dest_y,
 				int32_t     width,
 				int32_t     height)
 {
@@ -2974,9 +2974,9 @@ mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     int		 srcStride, maskStride, dstStride;
     uint16_t w;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman-private.h b/pixman/pixman-private.h
index 5339881..1c238df 100644
--- a/pixman/pixman-private.h
+++ b/pixman/pixman-private.h
@@ -565,12 +565,12 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int16_t xSrc,
-			       int16_t ySrc,
-			       int16_t xMask,
-			       int16_t yMask,
-			       int16_t xDst,
-			       int16_t yDst,
+			       int16_t src_x,
+			       int16_t src_y,
+			       int16_t mask_x,
+			       int16_t mask_y,
+			       int16_t dest_x,
+			       int16_t dest_y,
 			       uint16_t width,
 			       uint16_t height,
 			       pixman_composite_func_t compositeRect);
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index 38e3011..4508471 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -42,13 +42,13 @@
  * Locals
  */
 
-static __m64 xMask0080;
-static __m64 xMask00ff;
-static __m64 xMask0101;
-static __m64 xMaskAlpha;
+static __m64 mask_x0080;
+static __m64 mask_x00ff;
+static __m64 mask_x0101;
+static __m64 mask_xAlpha;
 
-static __m64 xMask565rgb;
-static __m64 xMask565Unpack;
+static __m64 mask_x565rgb;
+static __m64 mask_x565Unpack;
 
 static __m128i Mask0080;
 static __m128i Mask00ff;
@@ -380,23 +380,23 @@ static force_inline __m64
 pixMultiply_1x64 (__m64 data, __m64 alpha)
 {
     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (data, alpha),
-                                          xMask0080),
-                           xMask0101);
+                                          mask_x0080),
+                           mask_x0101);
 }
 
 static force_inline __m64
 pixAddMultiply_1x64 (__m64* src, __m64* alphaDst, __m64* dst, __m64* alphaSrc)
 {
     return _mm_mulhi_pu16 (_mm_adds_pu16 (_mm_adds_pu16 (_mm_mullo_pi16 (*src, *alphaDst),
-                                                         xMask0080),
+                                                         mask_x0080),
                                           _mm_mullo_pi16 (*dst, *alphaSrc)),
-                           xMask0101);
+                           mask_x0101);
 }
 
 static force_inline __m64
 negate_1x64 (__m64 data)
 {
-    return _mm_xor_si64 (data, xMask00ff);
+    return _mm_xor_si64 (data, mask_x00ff);
 }
 
 static force_inline __m64
@@ -425,7 +425,7 @@ overRevNonPre_1x64 (__m64 src, __m64 dst)
     __m64 alpha = expandAlpha_1x64 (src);
 
     return over_1x64 (pixMultiply_1x64 (invertColors_1x64 (src),
-                                        _mm_or_si64 (alpha, xMaskAlpha)),
+                                        _mm_or_si64 (alpha, mask_xAlpha)),
                       alpha,
                       dst);
 }
@@ -463,8 +463,8 @@ expand565_16_1x64 (uint16_t pixel)
 
     p = _mm_or_si64 (t1, p);
     p = _mm_or_si64 (t2, p);
-    p = _mm_and_si64 (p, xMask565rgb);
-    p = _mm_mullo_pi16 (p, xMask565Unpack);
+    p = _mm_and_si64 (p, mask_x565rgb);
+    p = _mm_mullo_pi16 (p, mask_x565Unpack);
 
     return _mm_srli_pi16 (p, 8);
 }
@@ -2502,12 +2502,12 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
-			    int32_t	xSrc,
-			    int32_t	ySrc,
-			    int32_t	xMask,
-			    int32_t	yMask,
-			    int32_t	xDst,
-			    int32_t	yDst,
+			    int32_t	src_x,
+			    int32_t	src_y,
+			    int32_t	mask_x,
+			    int32_t	mask_y,
+			    int32_t	dest_x,
+			    int32_t	dest_y,
 			    int32_t	width,
 			    int32_t	height)
 {
@@ -2523,7 +2523,7 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2589,12 +2589,12 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
-			    int32_t	xSrc,
-			    int32_t	ySrc,
-			    int32_t	xMask,
-			    int32_t	yMask,
-			    int32_t	xDst,
-			    int32_t	yDst,
+			    int32_t	src_x,
+			    int32_t	src_y,
+			    int32_t	mask_x,
+			    int32_t	mask_y,
+			    int32_t	dest_x,
+			    int32_t	dest_y,
 			    int32_t	width,
 			    int32_t	height)
 {
@@ -2610,7 +2610,7 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
     if (src == 0)
         return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2679,12 +2679,12 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
 				      pixman_image_t * dst_image,
-				      int32_t	xSrc,
-				      int32_t	ySrc,
-				      int32_t	xMask,
-				      int32_t	yMask,
-				      int32_t	xDst,
-				      int32_t	yDst,
+				      int32_t	src_x,
+				      int32_t	src_y,
+				      int32_t	mask_x,
+				      int32_t	mask_y,
+				      int32_t	dest_x,
+				      int32_t	dest_y,
 				      int32_t	width,
 				      int32_t	height)
 {
@@ -2698,19 +2698,19 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
 
-    __m64 mmxSrc, mmxAlpha, mmxMask, mmxDst;
+    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmdest_x;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     xmmSrc = _mm_unpacklo_epi8 (createMask_2x32_128 (src, src), _mm_setzero_si128 ());
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmxSrc   = _mm_movepi64_pi64 (xmmSrc);
+    mmsrc_x   = _mm_movepi64_pi64 (xmmSrc);
     mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
 
     while (height--)
@@ -2733,13 +2733,13 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *pd;
-                mmxMask = unpack_32_1x64 (m);
-                mmxDst = unpack_32_1x64 (d);
+                mmmask_x = unpack_32_1x64 (m);
+                mmdest_x = unpack_32_1x64 (d);
 
-                *pd = pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *pd = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                  &mmxAlpha,
-                                                 &mmxMask,
-                                                 &mmxDst));
+                                                 &mmmask_x,
+                                                 &mmdest_x));
             }
 
             pd++;
@@ -2785,13 +2785,13 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *pd;
-                mmxMask = unpack_32_1x64 (m);
-                mmxDst = unpack_32_1x64 (d);
+                mmmask_x = unpack_32_1x64 (m);
+                mmdest_x = unpack_32_1x64 (d);
 
-                *pd = pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *pd = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                  &mmxAlpha,
-                                                 &mmxMask,
-                                                 &mmxDst));
+                                                 &mmmask_x,
+                                                 &mmdest_x));
             }
 
             pd++;
@@ -2813,12 +2813,12 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t	xSrc,
-			       int32_t	ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t	src_x,
+			       int32_t	src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -2833,8 +2833,8 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmAlphaLo, xmmAlphaHi;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     xmmMask = createMask_16_128 (mask >> 24);
@@ -2926,12 +2926,12 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t	xSrc,
-			       int32_t	ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t	src_x,
+			       int32_t	src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -2945,8 +2945,8 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
     mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     xmmMask = createMask_16_128 (mask >> 24);
@@ -3039,12 +3039,12 @@ sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
 			     pixman_image_t * dst_image,
-			     int32_t	xSrc,
-			     int32_t	ySrc,
-			     int32_t      xMask,
-			     int32_t      yMask,
-			     int32_t      xDst,
-			     int32_t      yDst,
+			     int32_t	src_x,
+			     int32_t	src_y,
+			     int32_t      mask_x,
+			     int32_t      mask_y,
+			     int32_t      dest_x,
+			     int32_t      dest_y,
 			     int32_t     width,
 			     int32_t     height)
 {
@@ -3052,8 +3052,8 @@ sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
     uint32_t	*dstLine, *dst;
     uint32_t	*srcLine, *src;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     dst = dstLine;
     src = srcLine;
@@ -3088,12 +3088,12 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 			     pixman_image_t * src_image,
 			     pixman_image_t * mask_image,
 			     pixman_image_t * dst_image,
-			     int32_t      xSrc,
-			     int32_t      ySrc,
-			     int32_t      xMask,
-			     int32_t      yMask,
-			     int32_t      xDst,
-			     int32_t      yDst,
+			     int32_t      src_x,
+			     int32_t      src_y,
+			     int32_t      mask_x,
+			     int32_t      mask_y,
+			     int32_t      dest_x,
+			     int32_t      dest_y,
 			     int32_t     width,
 			     int32_t     height)
 {
@@ -3106,8 +3106,8 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -3202,12 +3202,12 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -3222,7 +3222,7 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
 
-    __m64 mmxSrc, mmxAlpha, mmxMask, mmxDest;
+    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmxDest;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -3230,13 +3230,13 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     xmmDef = createMask_2x32_128 (src, src);
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmxSrc   = _mm_movepi64_pi64 (xmmSrc);
+    mmsrc_x   = _mm_movepi64_pi64 (xmmSrc);
     mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
 
     while (height--)
@@ -3258,12 +3258,12 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmxMask = expandPixel_8_1x64 (m);
+                mmmask_x = expandPixel_8_1x64 (m);
                 mmxDest = unpack_32_1x64 (d);
 
-                *dst = pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *dst = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                   &mmxAlpha,
-                                                  &mmxMask,
+                                                  &mmmask_x,
                                                   &mmxDest));
             }
 
@@ -3316,12 +3316,12 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmxMask = expandPixel_8_1x64 (m);
+                mmmask_x = expandPixel_8_1x64 (m);
                 mmxDest = unpack_32_1x64 (d);
 
-                *dst = pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *dst = pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                   &mmxAlpha,
-                                                  &mmxMask,
+                                                  &mmmask_x,
                                                   &mmxDest));
             }
 
@@ -3480,12 +3480,12 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				     pixman_image_t * src_image,
 				     pixman_image_t * mask_image,
 				     pixman_image_t * dst_image,
-				     int32_t      xSrc,
-				     int32_t      ySrc,
-				     int32_t      xMask,
-				     int32_t      yMask,
-				     int32_t      xDst,
-				     int32_t      yDst,
+				     int32_t      src_x,
+				     int32_t      src_y,
+				     int32_t      mask_x,
+				     int32_t      mask_y,
+				     int32_t      dest_x,
+				     int32_t      dest_y,
 				     int32_t     width,
 				     int32_t     height)
 {
@@ -3506,12 +3506,12 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
     {
         pixmanFillsse2 (dst_image->bits.bits, dst_image->bits.rowstride,
                         PIXMAN_FORMAT_BPP (dst_image->bits.format),
-                        xDst, yDst, width, height, 0);
+                        dest_x, dest_y, width, height, 0);
         return;
     }
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     xmmDef = createMask_2x32_128 (src, src);
     xmmSrc = expandPixel_32_1x128 (src);
@@ -3616,12 +3616,12 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -3631,7 +3631,7 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     int	dstStride, maskStride;
     uint16_t	w;
     uint32_t m;
-    __m64 mmxSrc, mmxAlpha, mmxMask, mmxDest;
+    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmxDest;
 
     __m128i xmmSrc, xmmAlpha;
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
@@ -3643,12 +3643,12 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmxSrc = _mm_movepi64_pi64 (xmmSrc);
+    mmsrc_x = _mm_movepi64_pi64 (xmmSrc);
     mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
 
     while (height--)
@@ -3670,12 +3670,12 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmxMask = expandAlphaRev_1x64 (unpack_32_1x64 (m));
+                mmmask_x = expandAlphaRev_1x64 (unpack_32_1x64 (m));
                 mmxDest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                                  &mmxAlpha,
-                                                                 &mmxMask,
+                                                                 &mmmask_x,
                                                                  &mmxDest)));
             }
 
@@ -3739,12 +3739,12 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmxMask = expandAlphaRev_1x64 (unpack_32_1x64 (m));
+                mmmask_x = expandAlphaRev_1x64 (unpack_32_1x64 (m));
                 mmxDest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                                  &mmxAlpha,
-                                                                 &mmxMask,
+                                                                 &mmmask_x,
                                                                  &mmxDest)));
             }
 
@@ -3766,12 +3766,12 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -3785,8 +3785,8 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -3901,12 +3901,12 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  pixman_image_t * src_image,
 				  pixman_image_t * mask_image,
 				  pixman_image_t * dst_image,
-				  int32_t      xSrc,
-				  int32_t      ySrc,
-				  int32_t      xMask,
-				  int32_t      yMask,
-				  int32_t      xDst,
-				  int32_t      yDst,
+				  int32_t      src_x,
+				  int32_t      src_y,
+				  int32_t      mask_x,
+				  int32_t      mask_y,
+				  int32_t      dest_x,
+				  int32_t      dest_y,
 				  int32_t     width,
 				  int32_t     height)
 {
@@ -3919,8 +3919,8 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
     __m128i xmmSrcLo, xmmSrcHi;
     __m128i xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -4016,12 +4016,12 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				      pixman_image_t * src_image,
 				      pixman_image_t * mask_image,
 				      pixman_image_t * dst_image,
-				      int32_t      xSrc,
-				      int32_t      ySrc,
-				      int32_t      xMask,
-				      int32_t      yMask,
-				      int32_t      xDst,
-				      int32_t      yDst,
+				      int32_t      src_x,
+				      int32_t      src_y,
+				      int32_t      mask_x,
+				      int32_t      mask_y,
+				      int32_t      dest_x,
+				      int32_t      dest_y,
 				      int32_t     width,
 				      int32_t     height)
 {
@@ -4036,19 +4036,19 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    __m64 mmxSrc, mmxAlpha, mmxMask, mmxDest;
+    __m64 mmsrc_x, mmxAlpha, mmmask_x, mmxDest;
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
         return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint32_t, maskStride, maskLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
-    mmxSrc = _mm_movepi64_pi64 (xmmSrc);
+    mmsrc_x = _mm_movepi64_pi64 (xmmSrc);
     mmxAlpha = _mm_movepi64_pi64 (xmmAlpha);
 
     while (height--)
@@ -4070,12 +4070,12 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmxMask = unpack_32_1x64 (m);
+                mmmask_x = unpack_32_1x64 (m);
                 mmxDest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                                  &mmxAlpha,
-                                                                 &mmxMask,
+                                                                 &mmmask_x,
                                                                  &mmxDest)));
             }
 
@@ -4136,12 +4136,12 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
             if (m)
             {
                 d = *dst;
-                mmxMask = unpack_32_1x64 (m);
+                mmmask_x = unpack_32_1x64 (m);
                 mmxDest = expand565_16_1x64 (d);
 
-                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmxSrc,
+                *dst = pack565_32_16 (pack_1x64_32 (inOver_1x64 (&mmsrc_x,
                                                                  &mmxAlpha,
-                                                                 &mmxMask,
+                                                                 &mmmask_x,
                                                                  &mmxDest)));
             }
 
@@ -4164,12 +4164,12 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			pixman_image_t * src_image,
 			pixman_image_t * mask_image,
 			pixman_image_t * dst_image,
-			int32_t      xSrc,
-			int32_t      ySrc,
-			int32_t      xMask,
-			int32_t      yMask,
-			int32_t      xDst,
-			int32_t      yDst,
+			int32_t      src_x,
+			int32_t      src_y,
+			int32_t      mask_x,
+			int32_t      mask_y,
+			int32_t      dest_x,
+			int32_t      dest_y,
 			int32_t     width,
 			int32_t     height)
 {
@@ -4184,8 +4184,8 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -4267,12 +4267,12 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      pixman_image_t * src_image,
 		      pixman_image_t * mask_image,
 		      pixman_image_t * dst_image,
-		      int32_t      xSrc,
-		      int32_t      ySrc,
-		      int32_t      xMask,
-		      int32_t      yMask,
-		      int32_t      xDst,
-		      int32_t      yDst,
+		      int32_t      src_x,
+		      int32_t      src_y,
+		      int32_t      mask_x,
+		      int32_t      mask_y,
+		      int32_t      dest_x,
+		      int32_t      dest_y,
 		      int32_t     width,
 		      int32_t     height)
 {
@@ -4285,8 +4285,8 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -4357,12 +4357,12 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int32_t      xSrc,
-			       int32_t      ySrc,
-			       int32_t      xMask,
-			       int32_t      yMask,
-			       int32_t      xDst,
-			       int32_t      yDst,
+			       int32_t      src_x,
+			       int32_t      src_y,
+			       int32_t      mask_x,
+			       int32_t      mask_y,
+			       int32_t      dest_x,
+			       int32_t      dest_y,
 			       int32_t     width,
 			       int32_t     height)
 {
@@ -4378,8 +4378,8 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
 
     src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
@@ -4463,12 +4463,12 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
 				pixman_image_t * dst_image,
-				int32_t      xSrc,
-				int32_t      ySrc,
-				int32_t      xMask,
-				int32_t      yMask,
-				int32_t      xDst,
-				int32_t      yDst,
+				int32_t      src_x,
+				int32_t      src_y,
+				int32_t      mask_x,
+				int32_t      mask_y,
+				int32_t      dest_x,
+				int32_t      dest_y,
 				int32_t     width,
 				int32_t     height)
 {
@@ -4478,8 +4478,8 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
     uint16_t	w;
     uint16_t	t;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -4530,12 +4530,12 @@ sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				pixman_image_t *	src_image,
 				pixman_image_t *	mask_image,
 				pixman_image_t *	 dst_image,
-				int32_t		 xSrc,
-				int32_t      ySrc,
-				int32_t      xMask,
-				int32_t      yMask,
-				int32_t      xDst,
-				int32_t      yDst,
+				int32_t		 src_x,
+				int32_t      src_y,
+				int32_t      mask_x,
+				int32_t      mask_y,
+				int32_t      dest_x,
+				int32_t      dest_y,
 				int32_t     width,
 				int32_t     height)
 {
@@ -4543,8 +4543,8 @@ sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
     uint32_t	*srcLine, *src;
     int	dstStride, srcStride;
 
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -4708,12 +4708,12 @@ sse2_CompositeCopyArea (pixman_implementation_t *imp,
 			pixman_image_t *	src_image,
 			pixman_image_t *	mask_image,
 			pixman_image_t *	dst_image,
-			int32_t		xSrc,
-			int32_t		ySrc,
-			int32_t		xMask,
-			int32_t		yMask,
-			int32_t		xDst,
-			int32_t		yDst,
+			int32_t		src_x,
+			int32_t		src_y,
+			int32_t		mask_x,
+			int32_t		mask_y,
+			int32_t		dest_x,
+			int32_t		dest_y,
 			int32_t		width,
 			int32_t		height)
 {
@@ -4723,7 +4723,7 @@ sse2_CompositeCopyArea (pixman_implementation_t *imp,
 		    dst_image->bits.rowstride,
 		    PIXMAN_FORMAT_BPP (src_image->bits.format),
 		    PIXMAN_FORMAT_BPP (dst_image->bits.format),
-		    xSrc, ySrc, xDst, yDst, width, height);
+		    src_x, src_y, dest_x, dest_y, width, height);
 }
 
 #if 0
@@ -4734,12 +4734,12 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				pixman_image_t * src_image,
 				pixman_image_t * mask_image,
 				pixman_image_t * dst_image,
-				int32_t      xSrc,
-				int32_t      ySrc,
-				int32_t      xMask,
-				int32_t      yMask,
-				int32_t      xDst,
-				int32_t      yDst,
+				int32_t      src_x,
+				int32_t      src_y,
+				int32_t      mask_x,
+				int32_t      mask_y,
+				int32_t      dest_x,
+				int32_t      dest_y,
 				int32_t     width,
 				int32_t     height)
 {
@@ -4754,9 +4754,9 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, mask_x, mask_y, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (src_image, src_x, src_y, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -4785,7 +4785,7 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
             if (m != 0xff)
             {
                 ms = inOver_1x64 (ms,
-                                  xMask00ff,
+                                  mask_x00ff,
                                   expandAlphaRev_1x64 (unpack_32_1x64 (m)),
                                   unpack_32_1x64 (d));
             }
@@ -4853,7 +4853,7 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
                     d = *dst;
 
                     *dst = pack_1x64_32 (inOver_1x64 (unpack_32_1x64 (s),
-                                                      xMask00ff,
+                                                      mask_x00ff,
                                                       expandAlphaRev_1x64 (unpack_32_1x64 (m)),
                                                       unpack_32_1x64 (d)));
                 }
@@ -5075,13 +5075,13 @@ _pixman_implementation_create_sse2 (void)
     MaskAlpha = createMask_2x32_128 (0x00ff0000, 0x00000000);
     
     /* MMX constants */
-    xMask565rgb = createMask_2x32_64 (0x000001f0, 0x003f001f);
-    xMask565Unpack = createMask_2x32_64 (0x00000084, 0x04100840);
+    mask_x565rgb = createMask_2x32_64 (0x000001f0, 0x003f001f);
+    mask_x565Unpack = createMask_2x32_64 (0x00000084, 0x04100840);
     
-    xMask0080 = createMask_16_64 (0x0080);
-    xMask00ff = createMask_16_64 (0x00ff);
-    xMask0101 = createMask_16_64 (0x0101);
-    xMaskAlpha = createMask_2x32_64 (0x00ff0000, 0x00000000);
+    mask_x0080 = createMask_16_64 (0x0080);
+    mask_x00ff = createMask_16_64 (0x00ff);
+    mask_x0101 = createMask_16_64 (0x0101);
+    mask_xAlpha = createMask_2x32_64 (0x00ff0000, 0x00000000);
 
     _mm_empty();
 
diff --git a/pixman/pixman-utils.c b/pixman/pixman-utils.c
index 43fd074..8949284 100644
--- a/pixman/pixman-utils.c
+++ b/pixman/pixman-utils.c
@@ -107,22 +107,22 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
 				   pixman_image_t *	src_image,
 				   pixman_image_t *	mask_image,
 				   pixman_image_t *	dst_image,
-				   int16_t		xSrc,
-				   int16_t		ySrc,
-				   int16_t		xMask,
-				   int16_t		yMask,
-				   int16_t		xDst,
-				   int16_t		yDst,
+				   int16_t		src_x,
+				   int16_t		src_y,
+				   int16_t		mask_x,
+				   int16_t		mask_y,
+				   int16_t		dest_x,
+				   int16_t		dest_y,
 				   uint16_t		width,
 				   uint16_t		height)
 {
     int		v;
     
-    region->extents.x1 = xDst;
-    v = xDst + width;
+    region->extents.x1 = dest_x;
+    v = dest_x + width;
     region->extents.x2 = BOUND(v);
-    region->extents.y1 = yDst;
-    v = yDst + height;
+    region->extents.y1 = dest_y;
+    v = dest_y + height;
     region->extents.y2 = BOUND(v);
 
     region->extents.x1 = MAX (region->extents.x1, 0);
@@ -172,7 +172,7 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     /* clip against src */
     if (src_image->common.have_clip_region)
     {
-	if (!miClipPictureSrc (region, src_image, xDst - xSrc, yDst - ySrc))
+	if (!miClipPictureSrc (region, src_image, dest_x - src_x, dest_y - src_y))
 	{
 	    pixman_region32_fini (region);
 	    return FALSE;
@@ -181,8 +181,8 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     if (src_image->common.alpha_map && src_image->common.alpha_map->common.have_clip_region)
     {
 	if (!miClipPictureSrc (region, (pixman_image_t *)src_image->common.alpha_map,
-			       xDst - (xSrc - src_image->common.alpha_origin_x),
-			       yDst - (ySrc - src_image->common.alpha_origin_y)))
+			       dest_x - (src_x - src_image->common.alpha_origin_x),
+			       dest_y - (src_y - src_image->common.alpha_origin_y)))
 	{
 	    pixman_region32_fini (region);
 	    return FALSE;
@@ -191,7 +191,7 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
     /* clip against mask */
     if (mask_image && mask_image->common.have_clip_region)
     {
-	if (!miClipPictureSrc (region, mask_image, xDst - xMask, yDst - yMask))
+	if (!miClipPictureSrc (region, mask_image, dest_x - mask_x, dest_y - mask_y))
 	{
 	    pixman_region32_fini (region);
 	    return FALSE;
@@ -199,8 +199,8 @@ pixman_compute_composite_region32 (pixman_region32_t *	region,
 	if (mask_image->common.alpha_map && mask_image->common.alpha_map->common.have_clip_region)
 	{
 	    if (!miClipPictureSrc (region, (pixman_image_t *)mask_image->common.alpha_map,
-				   xDst - (xMask - mask_image->common.alpha_origin_x),
-				   yDst - (yMask - mask_image->common.alpha_origin_y)))
+				   dest_x - (mask_x - mask_image->common.alpha_origin_x),
+				   dest_y - (mask_y - mask_image->common.alpha_origin_y)))
 	    {
 		pixman_region32_fini (region);
 		return FALSE;
@@ -216,12 +216,12 @@ pixman_compute_composite_region (pixman_region16_t *	region,
 				 pixman_image_t *	src_image,
 				 pixman_image_t *	mask_image,
 				 pixman_image_t *	dst_image,
-				 int16_t		xSrc,
-				 int16_t		ySrc,
-				 int16_t		xMask,
-				 int16_t		yMask,
-				 int16_t		xDst,
-				 int16_t		yDst,
+				 int16_t		src_x,
+				 int16_t		src_y,
+				 int16_t		mask_x,
+				 int16_t		mask_y,
+				 int16_t		dest_x,
+				 int16_t		dest_y,
 				 uint16_t	width,
 				 uint16_t	height)
 {
@@ -231,7 +231,7 @@ pixman_compute_composite_region (pixman_region16_t *	region,
     pixman_region32_init (&r32);
     
     retval = pixman_compute_composite_region32 (&r32, src_image, mask_image, dst_image,
-						xSrc, ySrc, xMask, yMask, xDst, yDst,
+						src_x, src_y, mask_x, mask_y, dest_x, dest_y,
 						width, height);
 
     if (retval)
@@ -378,12 +378,12 @@ walk_region_internal (pixman_implementation_t *imp,
 		      pixman_image_t * src_image,
 		      pixman_image_t * mask_image,
 		      pixman_image_t * dst_image,
-		      int16_t xSrc,
-		      int16_t ySrc,
-		      int16_t xMask,
-		      int16_t yMask,
-		      int16_t xDst,
-		      int16_t yDst,
+		      int16_t src_x,
+		      int16_t src_y,
+		      int16_t mask_x,
+		      int16_t mask_y,
+		      int16_t dest_x,
+		      int16_t dest_y,
 		      uint16_t width,
 		      uint16_t height,
 		      pixman_bool_t srcRepeat,
@@ -400,15 +400,15 @@ walk_region_internal (pixman_implementation_t *imp,
     while (n--)
     {
 	h = pbox->y2 - pbox->y1;
-	y_src = pbox->y1 - yDst + ySrc;
-	y_msk = pbox->y1 - yDst + yMask;
+	y_src = pbox->y1 - dest_y + src_y;
+	y_msk = pbox->y1 - dest_y + mask_y;
 	y_dst = pbox->y1;
 	while (h)
 	{
 	    h_this = h;
 	    w = pbox->x2 - pbox->x1;
-	    x_src = pbox->x1 - xDst + xSrc;
-	    x_msk = pbox->x1 - xDst + xMask;
+	    x_src = pbox->x1 - dest_x + src_x;
+	    x_msk = pbox->x1 - dest_x + mask_x;
 	    x_dst = pbox->x1;
 	    
 	    if (maskRepeat)
@@ -462,12 +462,12 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
 			       pixman_image_t * src_image,
 			       pixman_image_t * mask_image,
 			       pixman_image_t * dst_image,
-			       int16_t xSrc,
-			       int16_t ySrc,
-			       int16_t xMask,
-			       int16_t yMask,
-			       int16_t xDst,
-			       int16_t yDst,
+			       int16_t src_x,
+			       int16_t src_y,
+			       int16_t mask_x,
+			       int16_t mask_y,
+			       int16_t dest_x,
+			       int16_t dest_y,
 			       uint16_t width,
 			       uint16_t height,
 			       pixman_composite_func_t compositeRect)
@@ -477,11 +477,11 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
     pixman_region32_init (&region);
 
     if (pixman_compute_composite_region32 (
-	    &region, src_image, mask_image, dst_image, xSrc, ySrc, xMask, yMask, xDst, yDst, width, height))
+	    &region, src_image, mask_image, dst_image, src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height))
     {
 	walk_region_internal (imp, op,
 			      src_image, mask_image, dst_image,
-			      xSrc, ySrc, xMask, yMask, xDst, yDst,
+			      src_x, src_y, mask_x, mask_y, dest_x, dest_y,
 			      width, height, FALSE, FALSE,
 			      &region,
 			      compositeRect);
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 532c0d4..21f80c7 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -1486,12 +1486,12 @@ vmx_CompositeOver_n_8888 (pixman_operator_t	op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
-			    int16_t	xSrc,
-			    int16_t	ySrc,
-			    int16_t	xMask,
-			    int16_t	yMask,
-			    int16_t	xDst,
-			    int16_t	yDst,
+			    int16_t	src_x,
+			    int16_t	src_y,
+			    int16_t	mask_x,
+			    int16_t	mask_y,
+			    int16_t	dest_x,
+			    int16_t	dest_y,
 			    uint16_t	width,
 			    uint16_t	height)
 {
@@ -1504,7 +1504,7 @@ vmx_CompositeOver_n_8888 (pixman_operator_t	op,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -1519,12 +1519,12 @@ vmx_CompositeOver_n_0565 (pixman_operator_t	op,
 			    pixman_image_t * src_image,
 			    pixman_image_t * mask_image,
 			    pixman_image_t * dst_image,
-			    int16_t	xSrc,
-			    int16_t	ySrc,
-			    int16_t	xMask,
-			    int16_t	yMask,
-			    int16_t	xDst,
-			    int16_t	yDst,
+			    int16_t	src_x,
+			    int16_t	src_y,
+			    int16_t	mask_x,
+			    int16_t	mask_y,
+			    int16_t	dest_x,
+			    int16_t	dest_y,
 			    uint16_t	width,
 			    uint16_t	height)
 {
@@ -1538,7 +1538,7 @@ vmx_CompositeOver_n_0565 (pixman_operator_t	op,
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, dest_x, dest_y, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman.h b/pixman/pixman.h
index fa8c03f..58dfd42 100644
--- a/pixman/pixman.h
+++ b/pixman/pixman.h
@@ -816,12 +816,12 @@ pixman_bool_t pixman_compute_composite_region (pixman_region16_t *region,
 					       pixman_image_t    *src_image,
 					       pixman_image_t    *mask_image,
 					       pixman_image_t    *dst_image,
-					       int16_t            xSrc,
-					       int16_t            ySrc,
-					       int16_t            xMask,
-					       int16_t            yMask,
-					       int16_t            xDst,
-					       int16_t            yDst,
+					       int16_t            src_x,
+					       int16_t            src_y,
+					       int16_t            mask_x,
+					       int16_t            mask_y,
+					       int16_t            dest_x,
+					       int16_t            dest_y,
 					       uint16_t           width,
 					       uint16_t           height);
 void          pixman_image_composite          (pixman_op_t        op,
commit d2a4281376786fc7f31f7367807c7caa8a99d414
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 07:46:11 2009 -0400

    Various simple renamings
    
        s/CvtR8G8B8toY15/CONVERT_RGB24_TO_Y15/g;
        s/cvt8888to0565/CONVERT_8888_TO_0565/g;
        s/cvt0565to0888/CONVERT_0565_TO_0888/g;
        s/miIndexToEnt15/RGB16_TO_ENTRY/g;
        s/miIndexToEnt24/RGB24_TO_ENTRY/g;
        s/miIndexToEntY24/RGB24_TO_ENTRY_Y/g;
        s/miCvtR8G8B8to15/CONVERT_RGB24_TO_RGB15/g;
        s/is_same/IS_SAME/g;
        s/is_zero/IS_ZERO/g;
        s/is_int([ (])/IS_INT$1/g;
        s/is_one/IS_ONE/g;
        s/is_unit/IS_UNIT/g;
        s/Fetch4/FETCH_4/g;
        s/Store4/STORE_4/g;
        s/Fetch8/FETCH_8/g;
        s/Store8/STORE_8/g;
        s/Fetch24/fetch_24/g;
        s/Store24/store_24/g;
        s/_64_generic/64_generic/g;
        s/64_generic/_generic_64/g;
        s/32_generic_lossy/_generic_lossy_32/g;
        s/PdfSeparableBlendMode/PDF_SEPARABLE_BLEND_MODE/g;
        s/PdfNonSeparableBlendMode/PDF_NON_SEPARABLE_BLEND_MODE/g;
        s/([^_])HSL/$1Hsl/g;
        s/Blend/blend_/g;
        s/FbScrLeft/SCREEN_SHIFT_LEFT/g;
        s/FbScrRigth/SCREEN_SHIFT_RIGHT/g;
        s/FbLeftMask/LEFT_MASK/g;
        s/FbRightMask/RIGHT_MASK/g;
        s/Splita/SPLIT_A/g;
        s/Split/SPLIT/g;
        s/MMX_Extensions/MMX_EXTENSIONS/g;
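
A minimal sketch of how Perl substitutions like these can be applied in place
across a source file; the perl invocation and the file argument are assumptions
for illustration, only the rules themselves are taken from the list above:

    # hypothetical invocation; note the rule order: Splita is rewritten
    # before the bare Split rule, otherwise "Splita" would become "SPLITa"
    perl -p -i -e '
        s/Fetch4/FETCH_4/g;  s/Store4/STORE_4/g;
        s/Splita/SPLIT_A/g;  s/Split/SPLIT/g;
    ' pixman/pixman-access.c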

diff --git a/pixman/pixman-access.c b/pixman/pixman-access.c
index eacb19c..217a02d 100644
--- a/pixman/pixman-access.c
+++ b/pixman/pixman-access.c
@@ -35,16 +35,16 @@
 #include "pixman-private.h"
 #include "pixman-accessor.h"
 
-#define CvtR8G8B8toY15(s)       (((((s) >> 16) & 0xff) * 153 + \
+#define CONVERT_RGB24_TO_Y15(s)       (((((s) >> 16) & 0xff) * 153 + \
                                   (((s) >>  8) & 0xff) * 301 +		\
                                   (((s)      ) & 0xff) * 58) >> 2)
-#define miCvtR8G8B8to15(s) ((((s) >> 3) & 0x001f) |  \
+#define CONVERT_RGB24_TO_RGB15(s) ((((s) >> 3) & 0x001f) |  \
 			    (((s) >> 6) & 0x03e0) |  \
 			    (((s) >> 9) & 0x7c00))
-#define miIndexToEnt15(mif,rgb15) ((mif)->ent[rgb15])
-#define miIndexToEnt24(mif,rgb24) miIndexToEnt15(mif,miCvtR8G8B8to15(rgb24))
+#define RGB16_TO_ENTRY(mif,rgb15) ((mif)->ent[rgb15])
+#define RGB24_TO_ENTRY(mif,rgb24) RGB16_TO_ENTRY(mif,CONVERT_RGB24_TO_RGB15(rgb24))
 
-#define miIndexToEntY24(mif,rgb24) ((mif)->ent[CvtR8G8B8toY15(rgb24)])
+#define RGB24_TO_ENTRY_Y(mif,rgb24) ((mif)->ent[CONVERT_RGB24_TO_Y15(rgb24)])
 
 /*
  * YV12 setup and access macros
@@ -621,11 +621,11 @@ fetch_scanline_x4a4 (pixman_image_t *image, int x, int y, int width, uint32_t *b
     }
 }
 
-#define Fetch8(img,l,o)    (READ(img, (uint8_t *)(l) + ((o) >> 2)))
+#define FETCH_8(img,l,o)    (READ(img, (uint8_t *)(l) + ((o) >> 2)))
 #ifdef WORDS_BIGENDIAN
-#define Fetch4(img,l,o)    ((o) & 2 ? Fetch8(img,l,o) & 0xf : Fetch8(img,l,o) >> 4)
+#define FETCH_4(img,l,o)    ((o) & 2 ? FETCH_8(img,l,o) & 0xf : FETCH_8(img,l,o) >> 4)
 #else
-#define Fetch4(img,l,o)    ((o) & 2 ? Fetch8(img,l,o) >> 4 : Fetch8(img,l,o) & 0xf)
+#define FETCH_4(img,l,o)    ((o) & 2 ? FETCH_8(img,l,o) >> 4 : FETCH_8(img,l,o) & 0xf)
 #endif
 
 static void
@@ -635,7 +635,7 @@ fetch_scanline_a4 (pixman_image_t *image, int x, int y, int width, uint32_t *buf
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
     int i;
     for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(image, bits, i + x);
+	uint32_t  p = FETCH_4(image, bits, i + x);
 
 	p |= p << 4;
 	*buffer++ = p << 24;
@@ -650,7 +650,7 @@ fetch_scanline_r1g2b1 (pixman_image_t *image, int x, int y, int width, uint32_t
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
     int i;
     for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(image, bits, i + x);
+	uint32_t  p = FETCH_4(image, bits, i + x);
 
 	r = ((p & 0x8) * 0xff) << 13;
 	g = ((p & 0x6) * 0x55) << 7;
@@ -667,7 +667,7 @@ fetch_scanline_b1g2r1 (pixman_image_t *image, int x, int y, int width, uint32_t
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
     int i;
     for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(image, bits, i + x);
+	uint32_t  p = FETCH_4(image, bits, i + x);
 
 	b = ((p & 0x8) * 0xff) >> 3;
 	g = ((p & 0x6) * 0x55) << 7;
@@ -684,7 +684,7 @@ fetch_scanline_a1r1g1b1 (pixman_image_t *image, int x, int y, int width, uint32_
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
     int i;
     for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(image, bits, i + x);
+	uint32_t  p = FETCH_4(image, bits, i + x);
 
 	a = ((p & 0x8) * 0xff) << 21;
 	r = ((p & 0x4) * 0xff) << 14;
@@ -702,7 +702,7 @@ fetch_scanline_a1b1g1r1 (pixman_image_t *image, int x, int y, int width, uint32_
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
     int i;
     for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(image, bits, i + x);
+	uint32_t  p = FETCH_4(image, bits, i + x);
 
 	a = ((p & 0x8) * 0xff) << 21;
 	r = ((p & 0x4) * 0xff) >> 3;
@@ -720,7 +720,7 @@ fetch_scanline_c4 (pixman_image_t *image, int x, int y, int width, uint32_t *buf
     const pixman_indexed_t * indexed = image->bits.indexed;
     int i;
     for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4 (image, bits, i + x);
+	uint32_t  p = FETCH_4 (image, bits, i + x);
 
 	*buffer++ = indexed->rgba[p];
     }
@@ -1690,7 +1690,7 @@ fetch_pixels_a4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 	else
 	{
 	    uint32_t *bits = pict->bits + line*pict->rowstride;
-	    uint32_t  pixel = Fetch4 (pict, bits, offset);
+	    uint32_t  pixel = FETCH_4 (pict, bits, offset);
 	    
 	    pixel |= pixel << 4;
 	    buffer[i] = pixel << 24;
@@ -1716,7 +1716,7 @@ fetch_pixels_r1g2b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 	{
 	    uint32_t  r,g,b;
 	    uint32_t *bits = pict->bits + line*pict->rowstride;
-	    uint32_t  pixel = Fetch4 (pict, bits, offset);
+	    uint32_t  pixel = FETCH_4 (pict, bits, offset);
 	    
 	    r = ((pixel & 0x8) * 0xff) << 13;
 	    g = ((pixel & 0x6) * 0x55) << 7;
@@ -1744,7 +1744,7 @@ fetch_pixels_b1g2r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 	{
 	    uint32_t  r,g,b;
 	    uint32_t *bits = pict->bits + line*pict->rowstride;
-	    uint32_t  pixel = Fetch4 (pict, bits, offset);
+	    uint32_t  pixel = FETCH_4 (pict, bits, offset);
 	    
 	    b = ((pixel & 0x8) * 0xff) >> 3;
 	    g = ((pixel & 0x6) * 0x55) << 7;
@@ -1772,7 +1772,7 @@ fetch_pixels_a1r1g1b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 	{
 	    uint32_t  a,r,g,b;
 	    uint32_t *bits = pict->bits + line*pict->rowstride;
-	    uint32_t  pixel = Fetch4 (pict, bits, offset);
+	    uint32_t  pixel = FETCH_4 (pict, bits, offset);
 	    
 	    a = ((pixel & 0x8) * 0xff) << 21;
 	    r = ((pixel & 0x4) * 0xff) << 14;
@@ -1801,7 +1801,7 @@ fetch_pixels_a1b1g1r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 	{
 	    uint32_t  a,r,g,b;
 	    uint32_t *bits = pict->bits + line*pict->rowstride;
-	    uint32_t  pixel = Fetch4 (pict, bits, offset);
+	    uint32_t  pixel = FETCH_4 (pict, bits, offset);
 	    
 	    a = ((pixel & 0x8) * 0xff) << 21;
 	    r = ((pixel & 0x4) * 0xff) >> 3;
@@ -1829,7 +1829,7 @@ fetch_pixels_c4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 	else
 	{
 	    uint32_t *bits = pict->bits + line*pict->rowstride;
-	    uint32_t  pixel = Fetch4 (pict, bits, offset);
+	    uint32_t  pixel = FETCH_4 (pict, bits, offset);
 	    const pixman_indexed_t * indexed = pict->indexed;
 	    
 	    buffer[i] = indexed->rgba[pixel];
@@ -1981,8 +1981,8 @@ fetch_pixels_yv12 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 
 /*********************************** Store ************************************/
 
-#define Splita(v)	uint32_t	a = ((v) >> 24), r = ((v) >> 16) & 0xff, g = ((v) >> 8) & 0xff, b = (v) & 0xff
-#define Split(v)	uint32_t	r = ((v) >> 16) & 0xff, g = ((v) >> 8) & 0xff, b = (v) & 0xff
+#define SPLIT_A(v)	uint32_t	a = ((v) >> 24), r = ((v) >> 16) & 0xff, g = ((v) >> 8) & 0xff, b = (v) & 0xff
+#define SPLIT(v)	uint32_t	r = ((v) >> 16) & 0xff, g = ((v) >> 8) & 0xff, b = (v) & 0xff
 
 static void
 store_scanline_a2r10g10b10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
@@ -2207,7 +2207,7 @@ store_scanline_b5g6r5 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Split(values[i]);
+	SPLIT(values[i]);
 	WRITE(image, pixel++, ((b << 8) & 0xf800) |
 	      ((g << 3) & 0x07e0) |
 	      ((r >> 3)         ));
@@ -2224,7 +2224,7 @@ store_scanline_a1r5g5b5 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	WRITE(image, pixel++, ((a << 8) & 0x8000) |
 	      ((r << 7) & 0x7c00) |
 	      ((g << 2) & 0x03e0) |
@@ -2242,7 +2242,7 @@ store_scanline_x1r5g5b5 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Split(values[i]);
+	SPLIT(values[i]);
 	WRITE(image, pixel++, ((r << 7) & 0x7c00) |
 	      ((g << 2) & 0x03e0) |
 	      ((b >> 3)         ));
@@ -2259,7 +2259,7 @@ store_scanline_a1b5g5r5 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	WRITE(image, pixel++, ((a << 8) & 0x8000) |
 	      ((b << 7) & 0x7c00) |
 	      ((g << 2) & 0x03e0) |
@@ -2277,7 +2277,7 @@ store_scanline_x1b5g5r5 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Split(values[i]);
+	SPLIT(values[i]);
 	WRITE(image, pixel++, ((b << 7) & 0x7c00) |
 	      ((g << 2) & 0x03e0) |
 	      ((r >> 3)         ));
@@ -2294,7 +2294,7 @@ store_scanline_a4r4g4b4 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	WRITE(image, pixel++, ((a << 8) & 0xf000) |
 	      ((r << 4) & 0x0f00) |
 	      ((g     ) & 0x00f0) |
@@ -2312,7 +2312,7 @@ store_scanline_x4r4g4b4 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Split(values[i]);
+	SPLIT(values[i]);
 	WRITE(image, pixel++, ((r << 4) & 0x0f00) |
 	      ((g     ) & 0x00f0) |
 	      ((b >> 4)         ));
@@ -2329,7 +2329,7 @@ store_scanline_a4b4g4r4 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	WRITE(image, pixel++, ((a << 8) & 0xf000) |
 	      ((b << 4) & 0x0f00) |
 	      ((g     ) & 0x00f0) |
@@ -2347,7 +2347,7 @@ store_scanline_x4b4g4r4 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Split(values[i]);
+	SPLIT(values[i]);
 	WRITE(image, pixel++, ((b << 4) & 0x0f00) |
 	      ((g     ) & 0x00f0) |
 	      ((r >> 4)         ));
@@ -2378,7 +2378,7 @@ store_scanline_r3g3b2 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Split(values[i]);
+	SPLIT(values[i]);
 	WRITE(image, pixel++,
 	      ((r     ) & 0xe0) |
 	      ((g >> 3) & 0x1c) |
@@ -2396,7 +2396,7 @@ store_scanline_b2g3r3 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Split(values[i]);
+	SPLIT(values[i]);
 	WRITE(image, pixel++,
 	      ((b     ) & 0xc0) |
 	      ((g >> 2) & 0x38) |
@@ -2414,7 +2414,7 @@ store_scanline_a2r2g2b2 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	WRITE(image, pixel++, ((a     ) & 0xc0) |
 	      ((r >> 2) & 0x30) |
 	      ((g >> 4) & 0x0c) |
@@ -2432,7 +2432,7 @@ store_scanline_a2b2g2r2 (bits_image_t *image,
     int i;
 
     for (i = 0; i < width; ++i) {
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	*(pixel++) =  ((a     ) & 0xc0) |
 	    ((b >> 2) & 0x30) |
 	    ((g >> 4) & 0x0c) |
@@ -2451,7 +2451,7 @@ store_scanline_c8 (bits_image_t *image,
     int i;
     
     for (i = 0; i < width; ++i) {
-	WRITE(image, pixel++, miIndexToEnt24(indexed,values[i]));
+	WRITE(image, pixel++, RGB24_TO_ENTRY(indexed,values[i]));
     }
 }
 
@@ -2469,15 +2469,15 @@ store_scanline_x4a4 (bits_image_t *image,
     }
 }
 
-#define Store8(img,l,o,v)  (WRITE(img, (uint8_t *)(l) + ((o) >> 3), (v)))
+#define STORE_8(img,l,o,v)  (WRITE(img, (uint8_t *)(l) + ((o) >> 3), (v)))
 #ifdef WORDS_BIGENDIAN
-#define Store4(img,l,o,v)  Store8(img,l,o,((o) & 4 ?				\
-				   (Fetch8(img,l,o) & 0xf0) | (v) :		\
-				   (Fetch8(img,l,o) & 0x0f) | ((v) << 4)))
+#define STORE_4(img,l,o,v)  STORE_8(img,l,o,((o) & 4 ?				\
+				   (FETCH_8(img,l,o) & 0xf0) | (v) :		\
+				   (FETCH_8(img,l,o) & 0x0f) | ((v) << 4)))
 #else
-#define Store4(img,l,o,v)  Store8(img,l,o,((o) & 4 ?			       \
-				   (Fetch8(img,l,o) & 0x0f) | ((v) << 4) : \
-				   (Fetch8(img,l,o) & 0xf0) | (v)))
+#define STORE_4(img,l,o,v)  STORE_8(img,l,o,((o) & 4 ?			       \
+				   (FETCH_8(img,l,o) & 0x0f) | ((v) << 4) : \
+				   (FETCH_8(img,l,o) & 0xf0) | (v)))
 #endif
 
 static void
@@ -2489,7 +2489,7 @@ store_scanline_a4 (bits_image_t *image,
     int i;
     
     for (i = 0; i < width; ++i) {
-	Store4(image, bits, i + x, values[i]>>28);
+	STORE_4(image, bits, i + x, values[i]>>28);
     }
 }
 
@@ -2504,11 +2504,11 @@ store_scanline_r1g2b1 (bits_image_t *image,
     for (i = 0; i < width; ++i) {
 	uint32_t  pixel;
 
-	Split(values[i]);
+	SPLIT(values[i]);
 	pixel = (((r >> 4) & 0x8) |
 		 ((g >> 5) & 0x6) |
 		 ((b >> 7)      ));
-	Store4(image, bits, i + x, pixel);
+	STORE_4(image, bits, i + x, pixel);
     }
 }
 
@@ -2523,11 +2523,11 @@ store_scanline_b1g2r1 (bits_image_t *image,
     for (i = 0; i < width; ++i) {
 	uint32_t  pixel;
 
-	Split(values[i]);
+	SPLIT(values[i]);
 	pixel = (((b >> 4) & 0x8) |
 		 ((g >> 5) & 0x6) |
 		 ((r >> 7)      ));
-	Store4(image, bits, i + x, pixel);
+	STORE_4(image, bits, i + x, pixel);
     }
 }
 
@@ -2541,12 +2541,12 @@ store_scanline_a1r1g1b1 (bits_image_t *image,
     
     for (i = 0; i < width; ++i) {
 	uint32_t  pixel;
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	pixel = (((a >> 4) & 0x8) |
 		 ((r >> 5) & 0x4) |
 		 ((g >> 6) & 0x2) |
 		 ((b >> 7)      ));
-	Store4(image, bits, i + x, pixel);
+	STORE_4(image, bits, i + x, pixel);
     }
 }
 
@@ -2560,12 +2560,12 @@ store_scanline_a1b1g1r1 (bits_image_t *image,
     
     for (i = 0; i < width; ++i) {
 	uint32_t  pixel;
-	Splita(values[i]);
+	SPLIT_A(values[i]);
 	pixel = (((a >> 4) & 0x8) |
 		 ((b >> 5) & 0x4) |
 		 ((g >> 6) & 0x2) |
 		 ((r >> 7)      ));
-	Store4(image, bits, i + x, pixel);
+	STORE_4(image, bits, i + x, pixel);
     }
 }
 
@@ -2581,8 +2581,8 @@ store_scanline_c4 (bits_image_t *image,
     for (i = 0; i < width; ++i) {
 	uint32_t  pixel;
 
-	pixel = miIndexToEnt24(indexed, values[i]);
-	Store4(image, bits, i + x, pixel);
+	pixel = RGB24_TO_ENTRY(indexed, values[i]);
+	STORE_4(image, bits, i + x, pixel);
     }
 }
 
@@ -2626,7 +2626,7 @@ store_scanline_g1 (bits_image_t *image,
 #else
 	mask = 1 << ((i + x) & 0x1f);
 #endif
-	v = miIndexToEntY24 (indexed, values[i]) ? mask : 0;
+	v = RGB24_TO_ENTRY_Y (indexed, values[i]) ? mask : 0;
 	WRITE(image, pixel, (READ(image, pixel) & ~mask) | v);
     }
 }
@@ -2636,7 +2636,7 @@ store_scanline_g1 (bits_image_t *image,
  * store proc. Despite the type, this function expects a uint64_t buffer.
  */
 static void
-store_scanline64_generic (bits_image_t *image, int x, int y, int width, const uint32_t *values)
+store_scanline_generic_64 (bits_image_t *image, int x, int y, int width, const uint32_t *values)
 {
     uint32_t *argb8Pixels;
 
@@ -2658,7 +2658,7 @@ store_scanline64_generic (bits_image_t *image, int x, int y, int width, const ui
 
 /* Despite the type, this function expects both buffer and mask to be uint64_t */
 static void
-fetch_scanline64_generic (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_generic_64 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		   const uint32_t *mask, uint32_t mask_bits)
 {
     /* Fetch the pixels into the first half of buffer and then expand them in
@@ -2671,7 +2671,7 @@ fetch_scanline64_generic (pixman_image_t *image, int x, int y, int width, uint32
 
 /* Despite the type, this function expects a uint64_t *buffer */
 static void
-fetch_pixels64_generic (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_generic_64 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     pict->fetch_pixels_raw_32 (pict, buffer, n_pixels);
     
@@ -2685,7 +2685,7 @@ fetch_pixels64_generic (bits_image_t *pict, uint32_t *buffer, int n_pixels)
  * WARNING: This function loses precision!
  */
 static void
-fetch_pixels32_generic_lossy (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_generic_lossy_32 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     /* Since buffer contains n_pixels coordinate pairs, it also has enough room for
      * n_pixels 64 bit pixels.
@@ -2709,9 +2709,9 @@ typedef struct
 #define FORMAT_INFO(format)						\
     {									\
 	PIXMAN_##format,						\
-	    fetch_scanline_##format, fetch_scanline64_generic,			\
-	    fetch_pixels_##format, fetch_pixels64_generic,		\
-	    store_scanline_##format, store_scanline64_generic				\
+	    fetch_scanline_##format, fetch_scanline_generic_64,			\
+	    fetch_pixels_##format, fetch_pixels_generic_64,		\
+	    store_scanline_##format, store_scanline_generic_64				\
     }
 
 static const format_info_t accessors[] =
@@ -2786,33 +2786,33 @@ static const format_info_t accessors[] =
 
     { PIXMAN_a2r10g10b10,
       NULL, fetch_scanline_a2r10g10b10,
-      fetch_pixels32_generic_lossy, fetch_pixels_a2r10g10b10_64,
+      fetch_pixels_generic_lossy_32, fetch_pixels_a2r10g10b10_64,
       NULL, store_scanline_a2r10g10b10 },
 
     { PIXMAN_x2r10g10b10,
       NULL, fetch_scanline_x2r10g10b10,
-      fetch_pixels32_generic_lossy, fetch_pixels_x2r10g10b10_64,
+      fetch_pixels_generic_lossy_32, fetch_pixels_x2r10g10b10_64,
       NULL, store_scanline_x2r10g10b10 },
 
     { PIXMAN_a2b10g10r10,
       NULL, fetch_scanline_a2b10g10r10,
-      fetch_pixels32_generic_lossy, fetch_pixels_a2b10g10r10_64,
+      fetch_pixels_generic_lossy_32, fetch_pixels_a2b10g10r10_64,
       NULL, store_scanline_a2b10g10r10 },
 
     { PIXMAN_x2b10g10r10,
       NULL, fetch_scanline_x2b10g10r10,
-      fetch_pixels32_generic_lossy, fetch_pixels_x2b10g10r10_64,
+      fetch_pixels_generic_lossy_32, fetch_pixels_x2b10g10r10_64,
       NULL, store_scanline_x2b10g10r10 },
 
 /* YUV formats */
     { PIXMAN_yuy2,
-      fetch_scanline_yuy2, fetch_scanline64_generic,
-      fetch_pixels_yuy2, fetch_pixels64_generic,
+      fetch_scanline_yuy2, fetch_scanline_generic_64,
+      fetch_pixels_yuy2, fetch_pixels_generic_64,
       NULL, NULL },
 
     { PIXMAN_yv12,
-      fetch_scanline_yv12, fetch_scanline64_generic,
-      fetch_pixels_yv12, fetch_pixels64_generic,
+      fetch_scanline_yv12, fetch_scanline_generic_64,
+      fetch_pixels_yv12, fetch_pixels_generic_64,
       NULL, NULL },
     
     { PIXMAN_null },
diff --git a/pixman/pixman-bits-image.c b/pixman/pixman-bits-image.c
index 3b65a01..3a54a52 100644
--- a/pixman/pixman-bits-image.c
+++ b/pixman/pixman-bits-image.c
@@ -741,7 +741,7 @@ bits_image_property_changed (pixman_image_t *image)
     if (bits->common.alpha_map)
     {
 	image->common.get_scanline_64 =
-	    _pixman_image_get_scanline_64_generic;
+	    _pixman_image_get_scanline_generic_64;
 	image->common.get_scanline_32 =
 	    bits_image_fetch_transformed;
     }
@@ -763,7 +763,7 @@ bits_image_property_changed (pixman_image_t *image)
     else
     {
 	image->common.get_scanline_64 =
-	    _pixman_image_get_scanline_64_generic;
+	    _pixman_image_get_scanline_generic_64;
 	image->common.get_scanline_32 =
 	    bits_image_fetch_transformed;
     }
diff --git a/pixman/pixman-combine.c.template b/pixman/pixman-combine.c.template
index 72b4a67..1b43610 100644
--- a/pixman/pixman-combine.c.template
+++ b/pixman/pixman-combine.c.template
@@ -352,8 +352,8 @@ fbCombineSaturateU (pixman_implementation_t *imp, pixman_op_t op,
  * no released draft exists that shows this, as the formulas have not been
  * updated yet after the release of ISO 32000.
  *
- * The default implementation here uses the PdfSeparableBlendMode and 
- * PdfNonSeparableBlendMode macros, which take the blend function as an 
+ * The default implementation here uses the PDF_SEPARABLE_BLEND_MODE and 
+ * PDF_NON_SEPARABLE_BLEND_MODE macros, which take the blend function as an 
  * argument. Note that this implementation operates on premultiplied colors,
  * while the PDF specification does not. Therefore the code uses the formula
  * ar.Cra = (1 – as) . Dca + (1 – ad) . Sca + B(Dca, ad, Sca, as)
@@ -405,7 +405,7 @@ fbCombineMultiplyC (pixman_implementation_t *imp, pixman_op_t op,
     }
 }
 
-#define PdfSeparableBlendMode(name)		    \
+#define PDF_SEPARABLE_BLEND_MODE(name)		    \
 static void					    \
 fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
                         comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
@@ -425,9 +425,9 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op, \
 						    \
 	*(dest + i) = result +			    \
 	    (DivOne (sa * da) << A_SHIFT) +	    \
-	    (Blend ## name (Red (d), da, Red (s), sa) << R_SHIFT) + \
-	    (Blend ## name (Green (d), da, Green (s), sa) << G_SHIFT) + \
-	    (Blend ## name (Blue (d), da, Blue (s), sa)); \
+	    (blend_ ## name (Red (d), da, Red (s), sa) << R_SHIFT) + \
+	    (blend_ ## name (Green (d), da, Green (s), sa) << G_SHIFT) + \
+	    (blend_ ## name (Blue (d), da, Blue (s), sa)); \
     }						    \
 }						    \
 						    \
@@ -451,9 +451,9 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
 						    \
 	result +=				    \
 	    (DivOne (Alpha (m) * da) << A_SHIFT) +				\
-	    (Blend ## name (Red (d), da, Red (s), Red (m)) << R_SHIFT) +	\
-	    (Blend ## name (Green (d), da, Green (s), Green (m)) << G_SHIFT) +	\
-	    (Blend ## name (Blue (d), da, Blue (s), Blue (m)));			\
+	    (blend_ ## name (Red (d), da, Red (s), Red (m)) << R_SHIFT) +	\
+	    (blend_ ## name (Green (d), da, Green (s), Green (m)) << G_SHIFT) +	\
+	    (blend_ ## name (Blue (d), da, Blue (s), Blue (m)));			\
 						    \
 	*(dest + i) = result;			    \
     }						    \
@@ -465,12 +465,12 @@ fbCombine ## name ## C (pixman_implementation_t *imp, pixman_op_t op, \
  */
 
 static inline comp4_t
-BlendScreen (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_Screen (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
   return DivOne (sca * da + dca * sa - sca * dca);
 }
 
-PdfSeparableBlendMode (Screen)
+PDF_SEPARABLE_BLEND_MODE (Screen)
 
 /*
  * Overlay
@@ -482,7 +482,7 @@ PdfSeparableBlendMode (Screen)
  */
 
 static inline comp4_t
-BlendOverlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_Overlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t rca;
 
@@ -493,7 +493,7 @@ BlendOverlay (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     return DivOne (rca);
 }
 
-PdfSeparableBlendMode (Overlay)
+PDF_SEPARABLE_BLEND_MODE (Overlay)
 
 /*
  * Darken
@@ -501,7 +501,7 @@ PdfSeparableBlendMode (Overlay)
  */
 
 static inline comp4_t
-BlendDarken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_Darken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t s, d;
     
@@ -510,7 +510,7 @@ BlendDarken (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     return DivOne (s > d ? d : s);
 }
 
-PdfSeparableBlendMode (Darken)
+PDF_SEPARABLE_BLEND_MODE (Darken)
 
 /*
  * Lighten
@@ -518,7 +518,7 @@ PdfSeparableBlendMode (Darken)
  */
 
 static inline comp4_t
-BlendLighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_Lighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t s, d;
     
@@ -527,7 +527,7 @@ BlendLighten (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     return DivOne (s > d ? s : d);
 }
 
-PdfSeparableBlendMode (Lighten)
+PDF_SEPARABLE_BLEND_MODE (Lighten)
 
 /*
  * Color dodge
@@ -539,7 +539,7 @@ PdfSeparableBlendMode (Lighten)
  */ 
 
 static inline comp4_t
-BlendColorDodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_ColorDodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (sca >= sa) {
 	return DivOne (sa * da);
@@ -549,7 +549,7 @@ BlendColorDodge (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     }
 }
 
-PdfSeparableBlendMode (ColorDodge)
+PDF_SEPARABLE_BLEND_MODE (ColorDodge)
 
 /*
  * Color burn
@@ -561,7 +561,7 @@ PdfSeparableBlendMode (ColorDodge)
  */
 
 static inline comp4_t
-BlendColorBurn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_ColorBurn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (sca == 0) {
 	return 0;
@@ -572,7 +572,7 @@ BlendColorBurn (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
     }
 }
 
-PdfSeparableBlendMode (ColorBurn)
+PDF_SEPARABLE_BLEND_MODE (ColorBurn)
 
 /*
  * Hard light
@@ -583,7 +583,7 @@ PdfSeparableBlendMode (ColorBurn)
  *     Sa.Da - 2.(Da - Dca).(Sa - Sca)
  */
 static inline comp4_t
-BlendHardLight (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_HardLight (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     if (2 * sca < sa)
 	return DivOne (2 * sca * dca);
@@ -591,7 +591,7 @@ BlendHardLight (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 	return DivOne (sa * da - 2 * (da - dca) * (sa - sca));
 }
 
-PdfSeparableBlendMode (HardLight)
+PDF_SEPARABLE_BLEND_MODE (HardLight)
 
 /*
  * Soft light
@@ -605,7 +605,7 @@ PdfSeparableBlendMode (HardLight)
  */
 
 static inline comp4_t
-BlendSoftLight (comp4_t dca_org, comp4_t da_org, comp4_t sca_org, comp4_t sa_org)
+blend_SoftLight (comp4_t dca_org, comp4_t da_org, comp4_t sca_org, comp4_t sa_org)
 {
     double dca = dca_org * (1.0 / MASK);
     double da = da_org * (1.0 / MASK);
@@ -628,7 +628,7 @@ BlendSoftLight (comp4_t dca_org, comp4_t da_org, comp4_t sca_org, comp4_t sa_org
     return rca * MASK + 0.5;
 }
 
-PdfSeparableBlendMode (SoftLight)
+PDF_SEPARABLE_BLEND_MODE (SoftLight)
 
 /*
  * Difference
@@ -636,7 +636,7 @@ PdfSeparableBlendMode (SoftLight)
  */
 
 static inline comp4_t
-BlendDifference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_Difference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     comp4_t dcasa = dca * sa;
     comp4_t scada = sca * da;
@@ -647,7 +647,7 @@ BlendDifference (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 	return DivOne (scada - dcasa);
 }
 
-PdfSeparableBlendMode (Difference)
+PDF_SEPARABLE_BLEND_MODE (Difference)
 
 /*
  * Exclusion
@@ -655,21 +655,21 @@ PdfSeparableBlendMode (Difference)
  */
 
 /* This can be made faster by writing it directly and not using
- * PdfSeparableBlendMode, but that's a performance optimization */
+ * PDF_SEPARABLE_BLEND_MODE, but that's a performance optimization */
 
 static inline comp4_t
-BlendExclusion (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
+blend_Exclusion (comp4_t dca, comp4_t da, comp4_t sca, comp4_t sa)
 {
     return DivOne (sca * da + dca * sa - 2 * dca * sca);
 }
 
-PdfSeparableBlendMode (Exclusion)
+PDF_SEPARABLE_BLEND_MODE (Exclusion)
 
-#undef PdfSeparableBlendMode
+#undef PDF_SEPARABLE_BLEND_MODE
 
 /*
  * PDF nonseperable blend modes are implemented using the following functions
- * to operate in HSL space, with Cmax, Cmid, Cmin referring to the max, mid 
+ * to operate in Hsl space, with Cmax, Cmid, Cmin referring to the max, mid 
  * and min value of the red, green and blue components.
  * 
  * Lum (C) = 0.3 × Cred + 0.59 × Cgreen + 0.11 × Cblue
@@ -779,7 +779,7 @@ PdfSeparableBlendMode (Exclusion)
 #define Lum(c) ((c[0] * 30 + c[1] * 59 + c[2] * 11) / 100)
 #define Sat(c) (Max (c) - Min (c))
 
-#define PdfNonSeparableBlendMode(name)					\
+#define PDF_NON_SEPARABLE_BLEND_MODE(name)					\
 static void								\
 fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
 			comp4_t *dest, const comp4_t *src, const comp4_t *mask, int width) \
@@ -803,7 +803,7 @@ fbCombine ## name ## U (pixman_implementation_t *imp, pixman_op_t op,	\
 	sc[1] = Green (s);						\
 	dc[2] = Blue (d);						\
 	sc[2] = Blue (s);						\
-	Blend ## name (c, dc, da, sc, sa);				\
+	blend_ ## name (c, dc, da, sc, sa);				\
 									\
 	*(dest + i) = result +						\
 	    (DivOne (sa * da) << A_SHIFT) +				\
@@ -902,7 +902,7 @@ SetSat (comp4_t dest[3], comp4_t src[3], comp4_t sat)
  * B(Cb, Cs) = SetLum (SetSat (Cs, Sat (Cb)), Lum (Cb))
  */
 static inline void
-BlendHSLHue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_HslHue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = sc[0] * da;
     c[1] = sc[1] * da;
@@ -911,14 +911,14 @@ BlendHSLHue (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
     SetLum (c, c, sa * da, Lum (dc) * sa);
 }
 
-PdfNonSeparableBlendMode (HSLHue)
+PDF_NON_SEPARABLE_BLEND_MODE (HslHue)
 
 /*
  * Saturation:
  * B(Cb, Cs) = SetLum (SetSat (Cb, Sat (Cs)), Lum (Cb))
  */
 static inline void
-BlendHSLSaturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_HslSaturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = dc[0] * sa;
     c[1] = dc[1] * sa;
@@ -927,14 +927,14 @@ BlendHSLSaturation (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp
     SetLum (c, c, sa * da, Lum (dc) * sa);
 }
 
-PdfNonSeparableBlendMode (HSLSaturation)
+PDF_NON_SEPARABLE_BLEND_MODE (HslSaturation)
 
 /*
  * Color:
  * B(Cb, Cs) = SetLum (Cs, Lum (Cb))
  */
 static inline void
-BlendHSLColor (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_HslColor (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = sc[0] * da;
     c[1] = sc[1] * da;
@@ -942,14 +942,14 @@ BlendHSLColor (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t s
     SetLum (c, c, sa * da, Lum (dc) * sa);
 }
 
-PdfNonSeparableBlendMode (HSLColor)
+PDF_NON_SEPARABLE_BLEND_MODE (HslColor)
 
 /*
  * Luminosity:
  * B(Cb, Cs) = SetLum (Cb, Lum (Cs))
  */
 static inline void
-BlendHSLLuminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
+blend_HslLuminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp4_t sa)
 {
     c[0] = dc[0] * sa;
     c[1] = dc[1] * sa;
@@ -957,13 +957,13 @@ BlendHSLLuminosity (comp4_t c[3], comp4_t dc[3], comp4_t da, comp4_t sc[3], comp
     SetLum (c, c, sa * da, Lum (sc) * da);
 }
 
-PdfNonSeparableBlendMode (HSLLuminosity)
+PDF_NON_SEPARABLE_BLEND_MODE (HslLuminosity)
 
 #undef Sat
 #undef Lum
 #undef Max
 #undef Min
-#undef PdfNonSeparableBlendMode
+#undef PDF_NON_SEPARABLE_BLEND_MODE
 
 /* Overlay
  *
@@ -1951,10 +1951,10 @@ _pixman_setup_combiner_functions_width (pixman_implementation_t *imp)
     imp->combine_width[PIXMAN_OP_SOFT_LIGHT] = fbCombineSoftLightU;
     imp->combine_width[PIXMAN_OP_DIFFERENCE] = fbCombineDifferenceU;
     imp->combine_width[PIXMAN_OP_EXCLUSION] = fbCombineExclusionU;
-    imp->combine_width[PIXMAN_OP_HSL_HUE] = fbCombineHSLHueU;
-    imp->combine_width[PIXMAN_OP_HSL_SATURATION] = fbCombineHSLSaturationU;
-    imp->combine_width[PIXMAN_OP_HSL_COLOR] = fbCombineHSLColorU;
-    imp->combine_width[PIXMAN_OP_HSL_LUMINOSITY] = fbCombineHSLLuminosityU;
+    imp->combine_width[PIXMAN_OP_HSL_HUE] = fbCombineHslHueU;
+    imp->combine_width[PIXMAN_OP_HSL_SATURATION] = fbCombineHslSaturationU;
+    imp->combine_width[PIXMAN_OP_HSL_COLOR] = fbCombineHslColorU;
+    imp->combine_width[PIXMAN_OP_HSL_LUMINOSITY] = fbCombineHslLuminosityU;
 
     /* Component alpha combiners */
     imp->combine_width_ca[PIXMAN_OP_CLEAR] = fbCombineClearC;
diff --git a/pixman/pixman-conical-gradient.c b/pixman/pixman-conical-gradient.c
index c73be56..cff58d4 100644
--- a/pixman/pixman-conical-gradient.c
+++ b/pixman/pixman-conical-gradient.c
@@ -123,7 +123,7 @@ static void
 conical_gradient_property_changed (pixman_image_t *image)
 {
     image->common.get_scanline_32 = conical_gradient_get_scanline_32;
-    image->common.get_scanline_64 = _pixman_image_get_scanline_64_generic;
+    image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
 }
 
 PIXMAN_EXPORT pixman_image_t *
diff --git a/pixman/pixman-cpu.c b/pixman/pixman-cpu.c
index c569f0b..a61dd91 100644
--- a/pixman/pixman-cpu.c
+++ b/pixman/pixman-cpu.c
@@ -292,7 +292,7 @@ pixman_have_arm_neon (void)
 enum CPUFeatures {
     NoFeatures = 0,
     MMX = 0x1,
-    MMX_Extensions = 0x2,
+    MMX_EXTENSIONS = 0x2,
     SSE = 0x6,
     SSE2 = 0x8,
     CMOV = 0x10
@@ -309,7 +309,7 @@ static unsigned int detectCPUFeatures(void) {
         if (result & AV_386_MMX)
             features |= MMX;
         if (result & AV_386_AMD_MMX)
-            features |= MMX_Extensions;
+            features |= MMX_EXTENSIONS;
         if (result & AV_386_SSE)
             features |= SSE;
         if (result & AV_386_SSE2)
@@ -453,7 +453,7 @@ static unsigned int detectCPUFeatures(void) {
             }
 #endif
             if (result & (1<<22))
-                features |= MMX_Extensions;
+                features |= MMX_EXTENSIONS;
         }
     }
 #endif /* HAVE_GETISAX */
@@ -470,7 +470,7 @@ pixman_have_mmx (void)
     if (!initialized)
     {
         unsigned int features = detectCPUFeatures();
-	mmx_present = (features & (MMX|MMX_Extensions)) == (MMX|MMX_Extensions);
+	mmx_present = (features & (MMX|MMX_EXTENSIONS)) == (MMX|MMX_EXTENSIONS);
         initialized = TRUE;
     }
 
@@ -487,7 +487,7 @@ pixman_have_sse2 (void)
     if (!initialized)
     {
         unsigned int features = detectCPUFeatures();
-        sse2_present = (features & (MMX|MMX_Extensions|SSE|SSE2)) == (MMX|MMX_Extensions|SSE|SSE2);
+        sse2_present = (features & (MMX|MMX_EXTENSIONS|SSE|SSE2)) == (MMX|MMX_EXTENSIONS|SSE|SSE2);
         initialized = TRUE;
     }
 
diff --git a/pixman/pixman-edge-imp.h b/pixman/pixman-edge-imp.h
index 79826f2..1687c0b 100644
--- a/pixman/pixman-edge-imp.h
+++ b/pixman/pixman-edge-imp.h
@@ -80,24 +80,24 @@ rasterizeEdges (pixman_image_t  *image,
 	    {
 
 #ifdef WORDS_BIGENDIAN
-#   define FbScrLeft(x,n)	((x) << (n))
+#   define SCREEN_SHIFT_LEFT(x,n)	((x) << (n))
 #   define FbScrRight(x,n)	((x) >> (n))
 #else
-#   define FbScrLeft(x,n)	((x) >> (n))
+#   define SCREEN_SHIFT_LEFT(x,n)	((x) >> (n))
 #   define FbScrRight(x,n)	((x) << (n))
 #endif
 
-#define FbLeftMask(x)							\
+#define LEFT_MASK(x)							\
 		(((x) & 0x1f) ?						\
 		 FbScrRight (0xffffffff, (x) & 0x1f) : 0)
-#define FbRightMask(x)							\
+#define RIGHT_MASK(x)							\
 		(((32 - (x)) & 0x1f) ?					\
-		 FbScrLeft (0xffffffff, (32 - (x)) & 0x1f) : 0)
+		 SCREEN_SHIFT_LEFT (0xffffffff, (32 - (x)) & 0x1f) : 0)
 		
 #define FbMaskBits(x,w,l,n,r) {						\
 		    n = (w);						\
-		    r = FbRightMask ((x) + n);				\
-		    l = FbLeftMask (x);					\
+		    r = RIGHT_MASK ((x) + n);				\
+		    l = LEFT_MASK (x);					\
 		    if (l) {						\
 			n -= 32 - ((x) & 0x1f);				\
 			if (n < 0) {					\
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index a956c24..ea2d1b2 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -29,7 +29,7 @@
 #include "pixman-combine32.h"
 
 static force_inline uint32_t
-Fetch24 (uint8_t *a)
+fetch_24 (uint8_t *a)
 {
     if (((unsigned long)a) & 1)
     {
@@ -50,7 +50,7 @@ Fetch24 (uint8_t *a)
 }
 
 static force_inline void
-Store24 (uint8_t *a, uint32_t v)
+store_24 (uint8_t *a, uint32_t v)
 {
     if (((unsigned long)a) & 1)
     {
@@ -459,15 +459,15 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 		    d = src;
 		else
 		{
-		    d = Fetch24(dst);
+		    d = fetch_24(dst);
 		    d = fbOver (src, d);
 		}
-		Store24(dst, d);
+		store_24(dst, d);
 	    }
 	    else if (m)
 	    {
-		d = fbOver (fbIn(src,m), Fetch24(dst));
-		Store24(dst, d);
+		d = fbOver (fbIn(src,m), fetch_24(dst));
+		store_24(dst, d);
 	    }
 	    dst += 3;
 	}
@@ -523,15 +523,15 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 		else
 		{
 		    d = *dst;
-		    d = fbOver (src, cvt0565to0888(d));
+		    d = fbOver (src, CONVERT_0565_TO_0888(d));
 		}
-		*dst = cvt8888to0565(d);
+		*dst = CONVERT_8888_TO_0565(d);
 	    }
 	    else if (m)
 	    {
 		d = *dst;
-		d = fbOver (fbIn(src,m), cvt0565to0888(d));
-		*dst = cvt8888to0565(d);
+		d = fbOver (fbIn(src,m), CONVERT_0565_TO_0888(d));
+		*dst = CONVERT_8888_TO_0565(d);
 	    }
 	    dst++;
 	}
@@ -567,7 +567,7 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    src16 = cvt8888to0565(src);
+    src16 = CONVERT_8888_TO_0565(src);
 
     fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
     fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
@@ -592,21 +592,21 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 		else
 		{
 		    d = *dst;
-		    d = fbOver (src, cvt0565to0888(d));
-		    *dst = cvt8888to0565(d);
+		    d = fbOver (src, CONVERT_0565_TO_0888(d));
+		    *dst = CONVERT_8888_TO_0565(d);
 		}
 	    }
 	    else if (ma)
 	    {
 		d = *dst;
-		d = cvt0565to0888(d);
+		d = CONVERT_0565_TO_0888(d);
 
 		FbByteMulC (src, ma);
 		FbByteMul (ma, srca);
 		ma = ~ma;
 		FbByteMulAddC (d, ma, src);
 		
-		*dst = cvt8888to0565(d);
+		*dst = CONVERT_8888_TO_0565(d);
 	    }
 	    dst++;
 	}
@@ -700,9 +700,9 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 		if (a == 0xff)
 		    d = s;
 		else
-		    d = fbOver (s, Fetch24(dst));
+		    d = fbOver (s, fetch_24(dst));
 
-		Store24(dst, d);
+		store_24(dst, d);
 	    }
 	    dst += 3;
 	}
@@ -753,9 +753,9 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 		else
 		{
 		    d = *dst;
-		    d = fbOver (s, cvt0565to0888(d));
+		    d = fbOver (s, CONVERT_0565_TO_0888(d));
 		}
-		*dst = cvt8888to0565(d);
+		*dst = CONVERT_8888_TO_0565(d);
 	    }
 	    dst++;
 	}
@@ -796,7 +796,7 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
 	while (w--)
 	{
 	    s = *src++;
-	    *dst = cvt8888to0565(s);
+	    *dst = CONVERT_8888_TO_0565(s);
 	    dst++;
 	}
     }
@@ -983,7 +983,7 @@ fast_CompositeSolidFill (pixman_implementation_t *imp,
 	src = src >> 24;
     else if (dst_image->bits.format == PIXMAN_r5g6b5 ||
 	     dst_image->bits.format == PIXMAN_b5g6r5)
-	src = cvt8888to0565 (src);
+	src = CONVERT_8888_TO_0565 (src);
 
     pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
 		 PIXMAN_FORMAT_BPP (dst_image->bits.format),
diff --git a/pixman/pixman-image.c b/pixman/pixman-image.c
index 32b83d3..bc1bbc7 100644
--- a/pixman/pixman-image.c
+++ b/pixman/pixman-image.c
@@ -63,7 +63,7 @@ _pixman_init_gradient (gradient_t     *gradient,
  * depth, but that's a project for the future.
  */
 void
-_pixman_image_get_scanline_64_generic (pixman_image_t * pict, int x, int y,
+_pixman_image_get_scanline_generic_64 (pixman_image_t * pict, int x, int y,
 				       int width, uint32_t *buffer,
 				       const uint32_t *mask, uint32_t maskBits)
 {
diff --git a/pixman/pixman-linear-gradient.c b/pixman/pixman-linear-gradient.c
index f01c62c..f5c9a5a 100644
--- a/pixman/pixman-linear-gradient.c
+++ b/pixman/pixman-linear-gradient.c
@@ -220,7 +220,7 @@ static void
 linear_gradient_property_changed (pixman_image_t *image)
 {
     image->common.get_scanline_32 = linear_gradient_get_scanline_32;
-    image->common.get_scanline_64 = _pixman_image_get_scanline_64_generic;
+    image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
 }
 
 PIXMAN_EXPORT pixman_image_t *
diff --git a/pixman/pixman-matrix.c b/pixman/pixman-matrix.c
index 79dae8d..dda6214 100644
--- a/pixman/pixman-matrix.c
+++ b/pixman/pixman-matrix.c
@@ -301,58 +301,58 @@ within_epsilon(pixman_fixed_t a, pixman_fixed_t b, pixman_fixed_t epsilon)
 
 #define epsilon	(pixman_fixed_t) (2)
 
-#define is_same(a,b) (within_epsilon(a, b, epsilon))
-#define is_zero(a)   (within_epsilon(a, 0, epsilon))
-#define is_one(a)    (within_epsilon(a, F(1), epsilon))
-#define is_unit(a)   (within_epsilon(a, F( 1), epsilon) || \
+#define IS_SAME(a,b) (within_epsilon(a, b, epsilon))
+#define IS_ZERO(a)   (within_epsilon(a, 0, epsilon))
+#define IS_ONE(a)    (within_epsilon(a, F(1), epsilon))
+#define IS_UNIT(a)   (within_epsilon(a, F( 1), epsilon) || \
 		      within_epsilon(a, F(-1), epsilon) || \
-		      is_zero(a))
-#define is_int(a)    (is_zero(pixman_fixed_frac(a)))
+		      IS_ZERO(a))
+#define IS_INT(a)    (IS_ZERO(pixman_fixed_frac(a)))
 
 PIXMAN_EXPORT pixman_bool_t
 pixman_transform_is_identity(const struct pixman_transform *t)
 {
-	return ( is_same(t->matrix[0][0], t->matrix[1][1]) &&
-		 is_same(t->matrix[0][0], t->matrix[2][2]) &&
-		!is_zero(t->matrix[0][0]) &&
-		 is_zero(t->matrix[0][1]) &&
-		 is_zero(t->matrix[0][2]) &&
-		 is_zero(t->matrix[1][0]) &&
-		 is_zero(t->matrix[1][2]) &&
-		 is_zero(t->matrix[2][0]) &&
-		 is_zero(t->matrix[2][1]));
+	return ( IS_SAME(t->matrix[0][0], t->matrix[1][1]) &&
+		 IS_SAME(t->matrix[0][0], t->matrix[2][2]) &&
+		!IS_ZERO(t->matrix[0][0]) &&
+		 IS_ZERO(t->matrix[0][1]) &&
+		 IS_ZERO(t->matrix[0][2]) &&
+		 IS_ZERO(t->matrix[1][0]) &&
+		 IS_ZERO(t->matrix[1][2]) &&
+		 IS_ZERO(t->matrix[2][0]) &&
+		 IS_ZERO(t->matrix[2][1]));
 }
 
 PIXMAN_EXPORT pixman_bool_t
 pixman_transform_is_scale(const struct pixman_transform *t)
 {
-	return (!is_zero(t->matrix[0][0]) &&
-		 is_zero(t->matrix[0][1]) &&
-		 is_zero(t->matrix[0][2]) &&
+	return (!IS_ZERO(t->matrix[0][0]) &&
+		 IS_ZERO(t->matrix[0][1]) &&
+		 IS_ZERO(t->matrix[0][2]) &&
 
-		 is_zero(t->matrix[1][0]) &&
-		!is_zero(t->matrix[1][1]) &&
-		 is_zero(t->matrix[1][2]) &&
+		 IS_ZERO(t->matrix[1][0]) &&
+		!IS_ZERO(t->matrix[1][1]) &&
+		 IS_ZERO(t->matrix[1][2]) &&
 
-		 is_zero(t->matrix[2][0]) &&
-		 is_zero(t->matrix[2][1]) &&
-		!is_zero(t->matrix[2][2]));
+		 IS_ZERO(t->matrix[2][0]) &&
+		 IS_ZERO(t->matrix[2][1]) &&
+		!IS_ZERO(t->matrix[2][2]));
 }
 
 PIXMAN_EXPORT pixman_bool_t
 pixman_transform_is_int_translate(const struct pixman_transform *t)
 {
-	return (is_one (t->matrix[0][0]) &&
-		is_zero(t->matrix[0][1]) &&
-		is_int (t->matrix[0][2]) &&
+	return (IS_ONE (t->matrix[0][0]) &&
+		IS_ZERO(t->matrix[0][1]) &&
+		IS_INT (t->matrix[0][2]) &&
 
-		is_zero(t->matrix[1][0]) &&
-		is_one (t->matrix[1][1]) &&
-		is_int (t->matrix[1][2]) &&
+		IS_ZERO(t->matrix[1][0]) &&
+		IS_ONE (t->matrix[1][1]) &&
+		IS_INT (t->matrix[1][2]) &&
 
-		is_zero(t->matrix[2][0]) &&
-		is_zero(t->matrix[2][1]) &&
-		is_one (t->matrix[2][2]));
+		IS_ZERO(t->matrix[2][0]) &&
+		IS_ZERO(t->matrix[2][1]) &&
+		IS_ONE (t->matrix[2][2]));
 }
 
 PIXMAN_EXPORT pixman_bool_t
diff --git a/pixman/pixman-private.h b/pixman/pixman-private.h
index f051552..5339881 100644
--- a/pixman/pixman-private.h
+++ b/pixman/pixman-private.h
@@ -196,7 +196,7 @@ void
 _pixman_bits_image_setup_raw_accessors (bits_image_t   *image);
 
 void
-_pixman_image_get_scanline_64_generic  (pixman_image_t *pict,
+_pixman_image_get_scanline_generic_64  (pixman_image_t *pict,
 					int             x,
 					int             y,
 					int             width,
@@ -621,10 +621,10 @@ pixman_region16_copy_from_region32 (pixman_region16_t *dst,
 
 /* Conversion between 8888 and 0565 */
 
-#define cvt8888to0565(s)    ((((s) >> 3) & 0x001f) | \
+#define CONVERT_8888_TO_0565(s)    ((((s) >> 3) & 0x001f) | \
 			     (((s) >> 5) & 0x07e0) | \
 			     (((s) >> 8) & 0xf800))
-#define cvt0565to0888(s)    (((((s) << 3) & 0xf8) | (((s) >> 2) & 0x7)) | \
+#define CONVERT_0565_TO_0888(s)    (((((s) << 3) & 0xf8) | (((s) >> 2) & 0x7)) | \
 			     ((((s) << 5) & 0xfc00) | (((s) >> 1) & 0x300)) | \
 			     ((((s) << 8) & 0xf80000) | (((s) << 3) & 0x70000)))
 
diff --git a/pixman/pixman-radial-gradient.c b/pixman/pixman-radial-gradient.c
index 00fdae0..ced8213 100644
--- a/pixman/pixman-radial-gradient.c
+++ b/pixman/pixman-radial-gradient.c
@@ -273,7 +273,7 @@ static void
 radial_gradient_property_changed (pixman_image_t *image)
 {
     image->common.get_scanline_32 = radial_gradient_get_scanline_32;
-    image->common.get_scanline_64 = _pixman_image_get_scanline_64_generic;
+    image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
 }
 
 PIXMAN_EXPORT pixman_image_t *
diff --git a/pixman/pixman-solid-fill.c b/pixman/pixman-solid-fill.c
index 0801267..1359fcd 100644
--- a/pixman/pixman-solid-fill.c
+++ b/pixman/pixman-solid-fill.c
@@ -51,7 +51,7 @@ static void
 solid_fill_property_changed (pixman_image_t *image)
 {
     image->common.get_scanline_32 = solid_fill_get_scanline_32;
-    image->common.get_scanline_64 = _pixman_image_get_scanline_64_generic;
+    image->common.get_scanline_64 = _pixman_image_get_scanline_generic_64;
 }
 
 static uint32_t
diff --git a/pixman/pixman.c b/pixman/pixman.c
index 0c984f7..0bd5e0b 100644
--- a/pixman/pixman.c
+++ b/pixman/pixman.c
@@ -223,7 +223,7 @@ color_to_pixel (pixman_color_t *color,
 	c = c >> 24;
     else if (format == PIXMAN_r5g6b5 ||
 	     format == PIXMAN_b5g6r5)
-	c = cvt8888to0565 (c);
+	c = CONVERT_8888_TO_0565 (c);
 
 #if 0
     printf ("color: %x %x %x %x\n", color->alpha, color->red, color->green, color->blue);
commit 1c5774bf6d39e7b349c03866c96811ee1754c9d7
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 07:35:40 2009 -0400

    Get rid of pFoo names.
    
        s/([^o])pSrc/$1src_image/g;
        s/([^o])pDst/$1dst_image/g;
        s/([^o])pMask/$1mask_image/g;
        s/pRegion/region/g;
        s/pNextRect/next_rect/g;
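
A similar hypothetical sketch for the patterns above: the ([^o]) group matches
the character before pSrc/pDst/pMask (any character except 'o') and $1 writes it
back unchanged, so only the identifier itself is renamed. The invocation and the
file arguments are illustrative assumptions:

    # hypothetical invocation applying two of the rules listed above
    perl -p -i -e 's/([^o])pSrc/$1src_image/g; s/([^o])pDst/$1dst_image/g;' \
        pixman/pixman-arm-neon.c pixman/pixman-arm-simd.c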

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 6b14750..dcc0495 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -127,9 +127,9 @@ static void
 neon_CompositeAdd_8000_8000 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
-                                pixman_image_t * pSrc,
-                                pixman_image_t * pMask,
-                                pixman_image_t * pDst,
+                                pixman_image_t * src_image,
+                                pixman_image_t * mask_image,
+                                pixman_image_t * dst_image,
                                 int32_t      xSrc,
                                 int32_t      ySrc,
                                 int32_t      xMask,
@@ -144,8 +144,8 @@ neon_CompositeAdd_8000_8000 (
     int dstStride, srcStride;
     uint16_t    w;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
 
     if (width>=8)
     {
@@ -277,9 +277,9 @@ static void
 neon_composite_over_8888_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
-			 pixman_image_t * pSrc,
-			 pixman_image_t * pMask,
-			 pixman_image_t * pDst,
+			 pixman_image_t * src_image,
+			 pixman_image_t * mask_image,
+			 pixman_image_t * dst_image,
 			 int32_t      xSrc,
 			 int32_t      ySrc,
 			 int32_t      xMask,
@@ -294,8 +294,8 @@ neon_composite_over_8888_8888 (
     int	dstStride, srcStride;
     uint32_t	w;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     if (width>=8)
     {
@@ -438,9 +438,9 @@ static void
 neon_composite_over_8888_n_8888 (
                                pixman_implementation_t * impl,
                                pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t	xSrc,
 			       int32_t	ySrc,
 			       int32_t      xMask,
@@ -457,10 +457,10 @@ neon_composite_over_8888_n_8888 (
     uint32_t	w;
     uint8x8_t mask_alpha;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
-    mask = _pixman_image_get_solid (pMask, pDst->bits.format);
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask_alpha = vdup_n_u8((mask) >> 24);
 
     if (width>=8)
@@ -635,9 +635,9 @@ static void
 neon_CompositeOver_n_8_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t      op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t      xSrc,
 			       int32_t      ySrc,
 			       int32_t      xMask,
@@ -657,7 +657,7 @@ neon_CompositeOver_n_8_8888 (
     uint8x8_t    mask_selector=vreinterpret_u8_u64(vcreate_u64(0x0101010100000000ULL));
     uint8x8_t    alpha_selector=vreinterpret_u8_u64(vcreate_u64(0x0707070703030303ULL));
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     // bail out if fully transparent
     srca = src >> 24;
@@ -670,8 +670,8 @@ neon_CompositeOver_n_8_8888 (
     sval8.val[2]=vdup_lane_u8(sval2,2);
     sval8.val[3]=vdup_lane_u8(sval2,3);
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     if (width>=8)
     {
@@ -842,9 +842,9 @@ static void
 neon_CompositeAdd_8888_8_8 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
-                            pixman_image_t * pSrc,
-                            pixman_image_t * pMask,
-                            pixman_image_t * pDst,
+                            pixman_image_t * src_image,
+                            pixman_image_t * mask_image,
+                            pixman_image_t * dst_image,
                             int32_t      xSrc,
                             int32_t      ySrc,
                             int32_t      xMask,
@@ -861,9 +861,9 @@ neon_CompositeAdd_8888_8_8 (
     uint32_t    src;
     uint8x8_t   sa;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    src = _pixman_image_get_solid (pSrc, pDst->bits.format);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = vdup_n_u8((src) >> 24);
 
     if (width>=8)
@@ -961,9 +961,9 @@ static void
 neon_CompositeSrc_16_16 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
-	pixman_image_t * pSrc,
-	pixman_image_t * pMask,
-	pixman_image_t * pDst,
+	pixman_image_t * src_image,
+	pixman_image_t * mask_image,
+	pixman_image_t * dst_image,
 	int32_t      xSrc,
 	int32_t      ySrc,
 	int32_t      xMask,
@@ -980,8 +980,8 @@ neon_CompositeSrc_16_16 (
 		return;
 
 	/* We simply copy 16-bit-aligned pixels from one place to another. */
-	fbComposeGetStart (pSrc, xSrc, ySrc, uint16_t, srcStride, srcLine, 1);
-	fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (src_image, xSrc, ySrc, uint16_t, srcStride, srcLine, 1);
+	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
 	/* Preload the first input scanline */
 	{
@@ -1088,9 +1088,9 @@ static void
 neon_CompositeSrc_24_16 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
-	pixman_image_t * pSrc,
-	pixman_image_t * pMask,
-	pixman_image_t * pDst,
+	pixman_image_t * src_image,
+	pixman_image_t * mask_image,
+	pixman_image_t * dst_image,
 	int32_t      xSrc,
 	int32_t      ySrc,
 	int32_t      xMask,
@@ -1108,8 +1108,8 @@ neon_CompositeSrc_24_16 (
 		return;
 
 	/* We simply copy pixels from one place to another, assuming that the source's alpha is opaque. */
-	fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-	fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
 	/* Preload the first input scanline */
 	{
@@ -1715,9 +1715,9 @@ static void
 neon_CompositeOver_n_8_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
-	pixman_image_t * pSrc,
-	pixman_image_t * pMask,
-	pixman_image_t * pDst,
+	pixman_image_t * src_image,
+	pixman_image_t * mask_image,
+	pixman_image_t * dst_image,
 	int32_t      xSrc,
 	int32_t      ySrc,
 	int32_t      xMask,
@@ -1734,7 +1734,7 @@ neon_CompositeOver_n_8_0565 (
 	uint32_t     kernelCount, copyCount, copyTail;
 	uint8_t      kernelOffset, copyOffset;
 
-	src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+	src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
 	// bail out if fully transparent or degenerate
 	srca = src >> 24;
@@ -1748,14 +1748,14 @@ neon_CompositeOver_n_8_0565 (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_n_8_0565(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_n_8_0565(impl, op, src_image, mask_image, dst_image, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
 											  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-	fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
@@ -1870,9 +1870,9 @@ static void
 neon_CompositeOver_n_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
-	pixman_image_t * pSrc,
-	pixman_image_t * pMask,
-	pixman_image_t * pDst,
+	pixman_image_t * src_image,
+	pixman_image_t * mask_image,
+	pixman_image_t * dst_image,
 	int32_t      xSrc,
 	int32_t      ySrc,
 	int32_t      xMask,
@@ -1888,7 +1888,7 @@ neon_CompositeOver_n_0565 (
 	uint32_t     kernelCount, copyCount, copyTail;
 	uint8_t      kernelOffset, copyOffset;
 
-	src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+	src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
 	// bail out if fully transparent
 	srca = src >> 24;
@@ -1902,13 +1902,13 @@ neon_CompositeOver_n_0565 (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_n_0565(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_n_0565(impl, op, src_image, mask_image, dst_image, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
 										(x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
@@ -2013,9 +2013,9 @@ static void
 neon_CompositeOver_8888_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
-	pixman_image_t * pSrc,
-	pixman_image_t * pMask,
-	pixman_image_t * pDst,
+	pixman_image_t * src_image,
+	pixman_image_t * mask_image,
+	pixman_image_t * dst_image,
 	int32_t      xSrc,
 	int32_t      ySrc,
 	int32_t      xMask,
@@ -2038,14 +2038,14 @@ neon_CompositeOver_8888_0565 (
 		// split the blit, so we can use a fixed-size scanline buffer
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			neon_CompositeOver_8888_0565(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_8888_0565(impl, op, src_image, mask_image, dst_image, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
 										  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
 	}
 
-	fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-	fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+	fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+	fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
 	// keep within minimum number of aligned quadwords on width
 	// while also keeping the minimum number of columns to process
diff --git a/pixman/pixman-arm-simd.c b/pixman/pixman-arm-simd.c
index 9897e86..9d04b7c 100644
--- a/pixman/pixman-arm-simd.c
+++ b/pixman/pixman-arm-simd.c
@@ -33,9 +33,9 @@ static void
 arm_CompositeAdd_8000_8000 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
-				pixman_image_t * pSrc,
-				pixman_image_t * pMask,
-				pixman_image_t * pDst,
+				pixman_image_t * src_image,
+				pixman_image_t * mask_image,
+				pixman_image_t * dst_image,
 				int32_t      xSrc,
 				int32_t      ySrc,
 				int32_t      xMask,
@@ -51,8 +51,8 @@ arm_CompositeAdd_8000_8000 (
     uint16_t	w;
     uint8_t	s, d;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -103,9 +103,9 @@ static void
 arm_composite_over_8888_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
-			 pixman_image_t * pSrc,
-			 pixman_image_t * pMask,
-			 pixman_image_t * pDst,
+			 pixman_image_t * src_image,
+			 pixman_image_t * mask_image,
+			 pixman_image_t * dst_image,
 			 int32_t      xSrc,
 			 int32_t      ySrc,
 			 int32_t      xMask,
@@ -123,8 +123,8 @@ arm_composite_over_8888_8888 (
     uint32_t upper_component_mask = 0xff00ff00;
     uint32_t alpha_mask = 0xff;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -196,9 +196,9 @@ static void
 arm_composite_over_8888_n_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t	xSrc,
 			       int32_t	ySrc,
 			       int32_t      xMask,
@@ -216,10 +216,10 @@ arm_composite_over_8888_n_8888 (
     uint32_t component_half = 0x800080;
     uint32_t alpha_mask = 0xff;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
-    mask = _pixman_image_get_solid (pMask, pDst->bits.format);
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = (mask) >> 24;
 
     while (height--)
@@ -305,9 +305,9 @@ static void
 arm_CompositeOver_n_8_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t      op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t      xSrc,
 			       int32_t      ySrc,
 			       int32_t      xMask,
@@ -323,7 +323,7 @@ arm_CompositeOver_n_8_8888 (
     int		 dstStride, maskStride;
     uint16_t	 w;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     // bail out if fully transparent
     srca = src >> 24;
@@ -336,8 +336,8 @@ arm_CompositeOver_n_8_8888 (
     uint32_t src_hi = (src >> 8) & component_mask;
     uint32_t src_lo = src & component_mask;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
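
arm_composite_over_8888_8888 above blends two 8-bit channels per multiply using the component masks and the 0x800080 rounding constant. A plain-C sketch of that OVER step for one premultiplied a8r8g8b8 pixel; this is a generic reconstruction of the technique, not the exact ARM code:

    #include <stdint.h>

    /* Porter-Duff OVER for one premultiplied a8r8g8b8 pixel, handling the
     * (r,b) and (a,g) channel pairs in parallel with an approximate
     * divide-by-255: x/255 ~= (x + 0x80 + ((x + 0x80) >> 8)) >> 8. */
    uint32_t over_8888_8888 (uint32_t src, uint32_t dst)
    {
        uint32_t ia = 255 - (src >> 24);    /* inverse source alpha */
        uint32_t rb, ag;

        rb = (dst & 0x00ff00ff) * ia + 0x00800080;
        rb = ((rb + ((rb >> 8) & 0x00ff00ff)) >> 8) & 0x00ff00ff;

        ag = ((dst >> 8) & 0x00ff00ff) * ia + 0x00800080;
        ag = (ag + ((ag >> 8) & 0x00ff00ff)) & 0xff00ff00;

        return src + (rb | ag);
    }
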
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index 1ed7805..a956c24 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -102,9 +102,9 @@ fbIn (uint32_t x, uint8_t y)
 static void
 fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 			     pixman_op_t      op,
-			     pixman_image_t * pSrc,
-			     pixman_image_t * pMask,
-			     pixman_image_t * pDst,
+			     pixman_image_t * src_image,
+			     pixman_image_t * mask_image,
+			     pixman_image_t * dst_image,
 			     int32_t      xSrc,
 			     int32_t      ySrc,
 			     int32_t      xMask,
@@ -122,9 +122,9 @@ fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     uint32_t s, d;
     uint16_t w;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -291,9 +291,9 @@ fast_CompositeIn_8_8 (pixman_implementation_t *imp,
 static void
 fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 			       pixman_op_t      op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t      xSrc,
 			       int32_t      ySrc,
 			       int32_t      xMask,
@@ -309,14 +309,14 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
     int		 dstStride, maskStride;
     uint16_t	 w;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -349,9 +349,9 @@ fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 static void
 fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				   pixman_op_t op,
-				   pixman_image_t * pSrc,
-				   pixman_image_t * pMask,
-				   pixman_image_t * pDst,
+				   pixman_image_t * src_image,
+				   pixman_image_t * mask_image,
+				   pixman_image_t * dst_image,
 				   int32_t      xSrc,
 				   int32_t      ySrc,
 				   int32_t      xMask,
@@ -367,14 +367,14 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
     int	dstStride, maskStride;
     uint16_t	w;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -414,9 +414,9 @@ fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 static void
 fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t      xSrc,
 			       int32_t      ySrc,
 			       int32_t      xMask,
@@ -433,14 +433,14 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
     int	dstStride, maskStride;
     uint16_t	w;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 3);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 3);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -477,9 +477,9 @@ fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 static void
 fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 			       pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -496,14 +496,14 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     int	dstStride, maskStride;
     uint16_t	w;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -541,9 +541,9 @@ fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 static void
 fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				   pixman_op_t op,
-				   pixman_image_t * pSrc,
-				   pixman_image_t * pMask,
-				   pixman_image_t * pDst,
+				   pixman_image_t * src_image,
+				   pixman_image_t * mask_image,
+				   pixman_image_t * dst_image,
 				   int32_t      xSrc,
 				   int32_t      ySrc,
 				   int32_t      xMask,
@@ -561,7 +561,7 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
     int	dstStride, maskStride;
     uint16_t	w;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
@@ -569,8 +569,8 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 
     src16 = cvt8888to0565(src);
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
 
     while (height--)
     {
@@ -616,9 +616,9 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 static void
 fast_composite_over_8888_8888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
-			 pixman_image_t * pSrc,
-			 pixman_image_t * pMask,
-			 pixman_image_t * pDst,
+			 pixman_image_t * src_image,
+			 pixman_image_t * mask_image,
+			 pixman_image_t * dst_image,
 			 int32_t      xSrc,
 			 int32_t      ySrc,
 			 int32_t      xMask,
@@ -634,8 +634,8 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
     uint8_t	a;
     uint16_t	w;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -661,9 +661,9 @@ fast_composite_over_8888_8888 (pixman_implementation_t *imp,
 static void
 fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
-			 pixman_image_t * pSrc,
-			 pixman_image_t * pMask,
-			 pixman_image_t * pDst,
+			 pixman_image_t * src_image,
+			 pixman_image_t * mask_image,
+			 pixman_image_t * dst_image,
 			 int32_t      xSrc,
 			 int32_t      ySrc,
 			 int32_t      xMask,
@@ -680,8 +680,8 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 3);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 3);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -712,9 +712,9 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 static void
 fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 			  pixman_op_t op,
-			 pixman_image_t * pSrc,
-			 pixman_image_t * pMask,
-			 pixman_image_t * pDst,
+			 pixman_image_t * src_image,
+			 pixman_image_t * mask_image,
+			 pixman_image_t * dst_image,
 			 int32_t      xSrc,
 			 int32_t      ySrc,
 			 int32_t      xMask,
@@ -731,8 +731,8 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -765,9 +765,9 @@ fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 static void
 fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
 			  pixman_op_t op,
-                          pixman_image_t * pSrc,
-                          pixman_image_t * pMask,
-                          pixman_image_t * pDst,
+                          pixman_image_t * src_image,
+                          pixman_image_t * mask_image,
+                          pixman_image_t * dst_image,
                           int32_t      xSrc,
                           int32_t      ySrc,
                           int32_t      xMask,
@@ -782,8 +782,8 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
     int	dstStride, srcStride;
     uint16_t	w;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -805,9 +805,9 @@ fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
 static void
 fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 			     pixman_op_t	op,
-			     pixman_image_t * pSrc,
-			     pixman_image_t * pMask,
-			     pixman_image_t * pDst,
+			     pixman_image_t * src_image,
+			     pixman_image_t * mask_image,
+			     pixman_image_t * dst_image,
 			     int32_t      xSrc,
 			     int32_t      ySrc,
 			     int32_t      xMask,
@@ -824,8 +824,8 @@ fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
     uint8_t	s, d;
     uint16_t	t;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -856,9 +856,9 @@ fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 static void
 fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 			     pixman_op_t	op,
-			     pixman_image_t * pSrc,
-			     pixman_image_t * pMask,
-			     pixman_image_t * pDst,
+			     pixman_image_t * src_image,
+			     pixman_image_t * mask_image,
+			     pixman_image_t * dst_image,
 			     int32_t      xSrc,
 			     int32_t      ySrc,
 			     int32_t      xMask,
@@ -874,8 +874,8 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
     uint16_t	w;
     uint32_t	s, d;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -906,9 +906,9 @@ fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 static void
 fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			    pixman_op_t op,
-			    pixman_image_t * pSrc,
-			    pixman_image_t * pMask,
-			    pixman_image_t * pDst,
+			    pixman_image_t * src_image,
+			    pixman_image_t * mask_image,
+			    pixman_image_t * dst_image,
 			    int32_t      xSrc,
 			    int32_t      ySrc,
 			    int32_t      xMask,
@@ -925,9 +925,9 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     uint32_t	src;
     uint8_t	sa;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    src = _pixman_image_get_solid (pSrc, pDst->bits.format);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
     sa = (src >> 24);
 
     while (height--)
@@ -963,9 +963,9 @@ fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 static void
 fast_CompositeSolidFill (pixman_implementation_t *imp,
 		      pixman_op_t op,
-		      pixman_image_t * pSrc,
-		      pixman_image_t * pMask,
-		      pixman_image_t * pDst,
+		      pixman_image_t * src_image,
+		      pixman_image_t * mask_image,
+		      pixman_image_t * dst_image,
 		      int32_t      xSrc,
 		      int32_t      ySrc,
 		      int32_t      xMask,
@@ -977,16 +977,16 @@ fast_CompositeSolidFill (pixman_implementation_t *imp,
 {
     uint32_t	src;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
-    if (pDst->bits.format == PIXMAN_a8)
+    if (dst_image->bits.format == PIXMAN_a8)
 	src = src >> 24;
-    else if (pDst->bits.format == PIXMAN_r5g6b5 ||
-	     pDst->bits.format == PIXMAN_b5g6r5)
+    else if (dst_image->bits.format == PIXMAN_r5g6b5 ||
+	     dst_image->bits.format == PIXMAN_b5g6r5)
 	src = cvt8888to0565 (src);
 
-    pixman_fill (pDst->bits.bits, pDst->bits.rowstride,
-		 PIXMAN_FORMAT_BPP (pDst->bits.format),
+    pixman_fill (dst_image->bits.bits, dst_image->bits.rowstride,
+		 PIXMAN_FORMAT_BPP (dst_image->bits.format),
 		 xDst, yDst,
 		 width, height,
 		 src);
@@ -995,9 +995,9 @@ fast_CompositeSolidFill (pixman_implementation_t *imp,
 static void
 fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
-			  pixman_image_t * pSrc,
-			  pixman_image_t * pMask,
-			  pixman_image_t * pDst,
+			  pixman_image_t * src_image,
+			  pixman_image_t * mask_image,
+			  pixman_image_t * dst_image,
 			  int32_t      xSrc,
 			  int32_t      ySrc,
 			  int32_t      xMask,
@@ -1012,8 +1012,8 @@ fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
     int		 dstStride, srcStride;
     uint32_t	 n_bytes = width * sizeof (uint32_t);
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, src, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dst, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, src, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dst, 1);
 
     while (height--)
     {
@@ -1076,9 +1076,9 @@ static const pixman_fast_path_t c_fast_paths[] =
 static void
 fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
 			    pixman_op_t     op,
-			    pixman_image_t *pSrc,
-			    pixman_image_t *pMask,
-			    pixman_image_t *pDst,
+			    pixman_image_t *src_image,
+			    pixman_image_t *mask_image,
+			    pixman_image_t *dst_image,
 			    int32_t         xSrc,
 			    int32_t         ySrc,
 			    int32_t         xMask,
@@ -1094,17 +1094,17 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
     int             i, j;
     pixman_vector_t v;
     
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dst, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dst, 1);
     /* pass in 0 instead of xSrc and ySrc because xSrc and ySrc need to be
      * transformed from destination space to source space */
-    fbComposeGetStart (pSrc, 0, 0, uint32_t, srcStride, src, 1);
+    fbComposeGetStart (src_image, 0, 0, uint32_t, srcStride, src, 1);
     
     /* reference point is the center of the pixel */
     v.vector[0] = pixman_int_to_fixed(xSrc) + pixman_fixed_1 / 2;
     v.vector[1] = pixman_int_to_fixed(ySrc) + pixman_fixed_1 / 2;
     v.vector[2] = pixman_fixed_1;
     
-    if (!pixman_transform_point_3d (pSrc->common.transform, &v))
+    if (!pixman_transform_point_3d (src_image->common.transform, &v))
         return;
     
     /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */
@@ -1122,32 +1122,32 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
             y = vy >> 16;
 	    
             /* apply the repeat function */
-            switch (pSrc->common.repeat) {
+            switch (src_image->common.repeat) {
 	    case PIXMAN_REPEAT_NORMAL:
-		x = MOD (x, pSrc->bits.width);
-		y = MOD (y, pSrc->bits.height);
+		x = MOD (x, src_image->bits.width);
+		y = MOD (y, src_image->bits.height);
 		inside_bounds = TRUE;
 		break;
 		
 	    case PIXMAN_REPEAT_PAD:
-		x = CLIP (x, 0, pSrc->bits.width-1);
-		y = CLIP (y, 0, pSrc->bits.height-1);
+		x = CLIP (x, 0, src_image->bits.width-1);
+		y = CLIP (y, 0, src_image->bits.height-1);
 		inside_bounds = TRUE;
 		break;
 		
 	    case PIXMAN_REPEAT_REFLECT:
-		x = MOD (x, pSrc->bits.width * 2);
-		if (x >= pSrc->bits.width)
-		    x = pSrc->bits.width * 2 - x - 1;
-		y = MOD (y, pSrc->bits.height * 2);
-		if (y >= pSrc->bits.height)
-		    y = pSrc->bits.height * 2 - y - 1;
+		x = MOD (x, src_image->bits.width * 2);
+		if (x >= src_image->bits.width)
+		    x = src_image->bits.width * 2 - x - 1;
+		y = MOD (y, src_image->bits.height * 2);
+		if (y >= src_image->bits.height)
+		    y = src_image->bits.height * 2 - y - 1;
 		inside_bounds = TRUE;
 		break;
 		
 	    case PIXMAN_REPEAT_NONE:
 	    default:
-		inside_bounds = (x >= 0 && x < pSrc->bits.width && y >= 0 && y < pSrc->bits.height);
+		inside_bounds = (x >= 0 && x < src_image->bits.width && y >= 0 && y < src_image->bits.height);
 		break;
             }
 	    
@@ -1161,11 +1161,11 @@ fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
 	    
             /* adjust the x location by a unit vector in the x direction:
              * this is equivalent to transforming x+1 of the destination point to source space */
-            vx += pSrc->common.transform->matrix[0][0];
+            vx += src_image->common.transform->matrix[0][0];
         }
         /* adjust the y location by a unit vector in the y direction
          * this is equivalent to transforming y+1 of the destination point to source space */
-        v.vector[1] += pSrc->common.transform->matrix[1][1];
+        v.vector[1] += src_image->common.transform->matrix[1][1];
         dst += dstStride;
     }
 }
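
fast_CompositeSrcScaleNearest above applies the repeat function per sample to pull out-of-range source coordinates back into the image. A one-axis sketch of that mapping, with the MOD/CLIP macros spelled out inline rather than using pixman's helpers:

    typedef enum { REPEAT_NONE, REPEAT_NORMAL, REPEAT_PAD, REPEAT_REFLECT } repeat_t;

    /* Map a sample coordinate into [0, size) according to the repeat mode.
     * Sets *inside to 0 only when REPEAT_NONE leaves the sample outside. */
    int repeat_coord (int x, int size, repeat_t repeat, int *inside)
    {
        *inside = 1;

        switch (repeat)
        {
        case REPEAT_NORMAL:                     /* tile */
            x %= size;
            if (x < 0) x += size;
            break;

        case REPEAT_PAD:                        /* clamp to the nearest edge */
            x = x < 0 ? 0 : (x >= size ? size - 1 : x);
            break;

        case REPEAT_REFLECT:                    /* mirror every other tile */
            x %= 2 * size;
            if (x < 0) x += 2 * size;
            if (x >= size) x = 2 * size - x - 1;
            break;

        case REPEAT_NONE:
        default:
            *inside = (x >= 0 && x < size);
            break;
        }

        return x;
    }
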
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index d500321..fd688e9 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -921,9 +921,9 @@ mmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 static void
 mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			    pixman_op_t op,
-			    pixman_image_t * pSrc,
-			    pixman_image_t * pMask,
-			    pixman_image_t * pDst,
+			    pixman_image_t * src_image,
+			    pixman_image_t * mask_image,
+			    pixman_image_t * dst_image,
 			    int32_t	xSrc,
 			    int32_t	ySrc,
 			    int32_t	xMask,
@@ -941,12 +941,12 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1000,9 +1000,9 @@ mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
 static void
 mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			    pixman_op_t op,
-			    pixman_image_t * pSrc,
-			    pixman_image_t * pMask,
-			    pixman_image_t * pDst,
+			    pixman_image_t * src_image,
+			    pixman_image_t * mask_image,
+			    pixman_image_t * dst_image,
 			    int32_t	xSrc,
 			    int32_t	ySrc,
 			    int32_t	xMask,
@@ -1020,12 +1020,12 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1086,9 +1086,9 @@ mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 static void
 mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				      pixman_op_t op,
-				      pixman_image_t * pSrc,
-				      pixman_image_t * pMask,
-				      pixman_image_t * pDst,
+				      pixman_image_t * src_image,
+				      pixman_image_t * mask_image,
+				      pixman_image_t * dst_image,
 				      int32_t	xSrc,
 				      int32_t	ySrc,
 				      int32_t	xMask,
@@ -1106,14 +1106,14 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
 
     vsrc = load8888(src);
     vsrca = expand_alpha(vsrc);
@@ -1190,9 +1190,9 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 static void
 mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t	xSrc,
 			       int32_t	ySrc,
 			       int32_t      xMask,
@@ -1212,10 +1212,10 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
-    mask = _pixman_image_get_solid (pMask, pDst->bits.format);
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
     vmask = load8888 (mask);
     srca = MC(4x00ff);
@@ -1275,9 +1275,9 @@ mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 static void
 mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t	xSrc,
 			       int32_t	ySrc,
 			       int32_t      xMask,
@@ -1297,9 +1297,9 @@ mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    mask = _pixman_image_get_solid (pMask, pDst->bits.format);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     mask = mask | mask >> 8 | mask >> 16 | mask >> 24;
     vmask = load8888 (mask);
@@ -1410,9 +1410,9 @@ mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 static void
 mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 			     pixman_op_t op,
-			     pixman_image_t * pSrc,
-			     pixman_image_t * pMask,
-			     pixman_image_t * pDst,
+			     pixman_image_t * src_image,
+			     pixman_image_t * mask_image,
+			     pixman_image_t * dst_image,
 			     int32_t	xSrc,
 			     int32_t	ySrc,
 			     int32_t      xMask,
@@ -1431,8 +1431,8 @@ mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -1463,9 +1463,9 @@ mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 static void
 mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 			     pixman_op_t op,
-			     pixman_image_t * pSrc,
-			     pixman_image_t * pMask,
-			     pixman_image_t * pDst,
+			     pixman_image_t * src_image,
+			     pixman_image_t * mask_image,
+			     pixman_image_t * dst_image,
 			     int32_t      xSrc,
 			     int32_t      ySrc,
 			     int32_t      xMask,
@@ -1482,12 +1482,12 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
-    assert (pSrc->pDrawable == pMask->pDrawable);
+    assert (src_image->pDrawable == mask_image->pDrawable);
 #endif
 
     while (height--)
@@ -1565,9 +1565,9 @@ mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 static void
 mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -1587,7 +1587,7 @@ mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (srca == 0)
@@ -1595,8 +1595,8 @@ mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1837,9 +1837,9 @@ pixman_fill_mmx (uint32_t *bits,
 static void
 mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				     pixman_op_t op,
-				     pixman_image_t * pSrc,
-				     pixman_image_t * pMask,
-				     pixman_image_t * pDst,
+				     pixman_image_t * src_image,
+				     pixman_image_t * mask_image,
+				     pixman_image_t * dst_image,
 				     int32_t      xSrc,
 				     int32_t      ySrc,
 				     int32_t      xMask,
@@ -1859,20 +1859,20 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (srca == 0)
     {
-	pixman_fill_mmx (pDst->bits.bits, pDst->bits.rowstride, PIXMAN_FORMAT_BPP (pDst->bits.format),
+	pixman_fill_mmx (dst_image->bits.bits, dst_image->bits.rowstride, PIXMAN_FORMAT_BPP (dst_image->bits.format),
 			 xDst, yDst, width, height, 0);
 	return;
     }
 
     srcsrc = (uint64_t)src << 32 | src;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -1969,9 +1969,9 @@ mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 static void
 mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -1991,14 +1991,14 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -2102,9 +2102,9 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 static void
 mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				  pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -2121,12 +2121,12 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
-    assert (pSrc->pDrawable == pMask->pDrawable);
+    assert (src_image->pDrawable == mask_image->pDrawable);
 #endif
 
     while (height--)
@@ -2224,9 +2224,9 @@ mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 static void
 mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -2243,12 +2243,12 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME */
-    assert (pSrc->pDrawable == pMask->pDrawable);
+    assert (src_image->pDrawable == mask_image->pDrawable);
 #endif
 
     while (height--)
@@ -2324,9 +2324,9 @@ mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 static void
 mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				      pixman_op_t op,
-				      pixman_image_t * pSrc,
-				      pixman_image_t * pMask,
-				      pixman_image_t * pDst,
+				      pixman_image_t * src_image,
+				      pixman_image_t * mask_image,
+				      pixman_image_t * dst_image,
 				      int32_t      xSrc,
 				      int32_t      ySrc,
 				      int32_t      xMask,
@@ -2344,14 +2344,14 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (srca == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
 
     vsrc = load8888 (src);
     vsrca = expand_alpha (vsrc);
@@ -2432,9 +2432,9 @@ mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 static void
 mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			pixman_op_t op,
-			pixman_image_t * pSrc,
-			pixman_image_t * pMask,
-			pixman_image_t * pDst,
+			pixman_image_t * src_image,
+			pixman_image_t * mask_image,
+			pixman_image_t * dst_image,
 			int32_t      xSrc,
 			int32_t      ySrc,
 			int32_t      xMask,
@@ -2452,10 +2452,10 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     sa = src >> 24;
     if (sa == 0)
@@ -2472,8 +2472,8 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 	maskLine += maskStride;
 	w = width;
 
-	if ((((unsigned long)pDst & 3) == 0) &&
-	    (((unsigned long)pSrc & 3) == 0))
+	if ((((unsigned long)dst_image & 3) == 0) &&
+	    (((unsigned long)src_image & 3) == 0))
 	{
 	    while (w >= 4)
 	    {
@@ -2516,9 +2516,9 @@ mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 static void
 mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      pixman_op_t op,
-		      pixman_image_t * pSrc,
-		      pixman_image_t * pMask,
-		      pixman_image_t * pDst,
+		      pixman_image_t * src_image,
+		      pixman_image_t * mask_image,
+		      pixman_image_t * dst_image,
 		      int32_t      xSrc,
 		      int32_t      ySrc,
 		      int32_t      xMask,
@@ -2533,8 +2533,8 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
     int	srcStride, dstStride;
     uint16_t	w;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -2544,8 +2544,8 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 	srcLine += srcStride;
 	w = width;
 
-	if ((((unsigned long)pDst & 3) == 0) &&
-	    (((unsigned long)pSrc & 3) == 0))
+	if ((((unsigned long)dst_image & 3) == 0) &&
+	    (((unsigned long)src_image & 3) == 0))
 	{
 	    while (w >= 4)
 	    {
@@ -2581,9 +2581,9 @@ mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 static void
 mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			       pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t      xSrc,
 			       int32_t      ySrc,
 			       int32_t      xMask,
@@ -2601,10 +2601,10 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     uint8_t	sa;
     __m64	vsrc, vsrca;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     sa = src >> 24;
     if (sa == 0)
@@ -2621,8 +2621,8 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 	maskLine += maskStride;
 	w = width;
 
-	if ((((unsigned long)pMask & 3) == 0) &&
-	    (((unsigned long)pDst  & 3) == 0))
+	if ((((unsigned long)mask_image & 3) == 0) &&
+	    (((unsigned long)dst_image  & 3) == 0))
 	{
 	    while (w >= 4)
 	    {
@@ -2660,9 +2660,9 @@ mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 static void
 mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				pixman_op_t op,
-				pixman_image_t * pSrc,
-				pixman_image_t * pMask,
-				pixman_image_t * pDst,
+				pixman_image_t * src_image,
+				pixman_image_t * mask_image,
+				pixman_image_t * dst_image,
 				int32_t      xSrc,
 				int32_t      ySrc,
 				int32_t      xMask,
@@ -2681,8 +2681,8 @@ mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -2733,9 +2733,9 @@ mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 static void
 mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				pixman_op_t 	op,
-				pixman_image_t *	pSrc,
-				pixman_image_t *	pMask,
-				pixman_image_t *	 pDst,
+				pixman_image_t *	src_image,
+				pixman_image_t *	mask_image,
+				pixman_image_t *	 dst_image,
 				int32_t		 xSrc,
 				int32_t      ySrc,
 				int32_t      xMask,
@@ -2753,8 +2753,8 @@ mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 
     CHECKPOINT();
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -2932,9 +2932,9 @@ pixman_blt_mmx (uint32_t *src_bits,
 static void
 mmx_CompositeCopyArea (pixman_implementation_t *imp,
 			pixman_op_t       op,
-			pixman_image_t *	pSrc,
-			pixman_image_t *	pMask,
-			pixman_image_t *	pDst,
+			pixman_image_t *	src_image,
+			pixman_image_t *	mask_image,
+			pixman_image_t *	dst_image,
 			int32_t		xSrc,
 			int32_t		ySrc,
 			int32_t		xMask,
@@ -2944,21 +2944,21 @@ mmx_CompositeCopyArea (pixman_implementation_t *imp,
 			int32_t		width,
 			int32_t		height)
 {
-    pixman_blt_mmx (pSrc->bits.bits,
-		    pDst->bits.bits,
-		    pSrc->bits.rowstride,
-		    pDst->bits.rowstride,
-		    PIXMAN_FORMAT_BPP (pSrc->bits.format),
-		    PIXMAN_FORMAT_BPP (pDst->bits.format),
+    pixman_blt_mmx (src_image->bits.bits,
+		    dst_image->bits.bits,
+		    src_image->bits.rowstride,
+		    dst_image->bits.rowstride,
+		    PIXMAN_FORMAT_BPP (src_image->bits.format),
+		    PIXMAN_FORMAT_BPP (dst_image->bits.format),
 		    xSrc, ySrc, xDst, yDst, width, height);
 }
 
 static void
 mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				pixman_op_t      op,
-				pixman_image_t * pSrc,
-				pixman_image_t * pMask,
-				pixman_image_t * pDst,
+				pixman_image_t * src_image,
+				pixman_image_t * mask_image,
+				pixman_image_t * dst_image,
 				int32_t      xSrc,
 				int32_t      ySrc,
 				int32_t      xMask,
@@ -2974,9 +2974,9 @@ mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     int		 srcStride, maskStride, dstStride;
     uint16_t w;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman-private.h b/pixman/pixman-private.h
index 54d6563..f051552 100644
--- a/pixman/pixman-private.h
+++ b/pixman/pixman-private.h
@@ -562,9 +562,9 @@ _pixman_run_fast_path (const pixman_fast_path_t *paths,
 void
 _pixman_walk_composite_region (pixman_implementation_t *imp,
 			       pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int16_t xSrc,
 			       int16_t ySrc,
 			       int16_t xMask,
diff --git a/pixman/pixman-region.c b/pixman/pixman-region.c
index 0e1791c..30db82a 100644
--- a/pixman/pixman-region.c
+++ b/pixman/pixman-region.c
@@ -179,24 +179,24 @@ if (!(pReg)->data || (((pReg)->data->numRects + (n)) > (pReg)->data->size)) \
 if (!(pReg)->data || (((pReg)->data->numRects + (n)) > (pReg)->data->size)) \
     if (!pixman_rect_alloc(pReg, n)) { return FALSE; }
 
-#define ADDRECT(pNextRect,nx1,ny1,nx2,ny2)	\
+#define ADDRECT(next_rect,nx1,ny1,nx2,ny2)	\
 {						\
-    pNextRect->x1 = nx1;			\
-    pNextRect->y1 = ny1;			\
-    pNextRect->x2 = nx2;			\
-    pNextRect->y2 = ny2;			\
-    pNextRect++;				\
+    next_rect->x1 = nx1;			\
+    next_rect->y1 = ny1;			\
+    next_rect->x2 = nx2;			\
+    next_rect->y2 = ny2;			\
+    next_rect++;				\
 }
 
-#define NEWRECT(pReg,pNextRect,nx1,ny1,nx2,ny2)			\
+#define NEWRECT(pReg,next_rect,nx1,ny1,nx2,ny2)			\
 {									\
     if (!(pReg)->data || ((pReg)->data->numRects == (pReg)->data->size))\
     {									\
 	if (!pixman_rect_alloc(pReg, 1))					\
 	    return FALSE;						\
-	pNextRect = PIXREGION_TOP(pReg);					\
+	next_rect = PIXREGION_TOP(pReg);					\
     }									\
-    ADDRECT(pNextRect,nx1,ny1,nx2,ny2);					\
+    ADDRECT(next_rect,nx1,ny1,nx2,ny2);					\
     pReg->data->numRects++;						\
     assert(pReg->data->numRects<=pReg->data->size);			\
 }
@@ -507,7 +507,7 @@ pixman_region_appendNonO (
     int  	y1,
     int  	y2)
 {
-    box_type_t *	pNextRect;
+    box_type_t *	next_rect;
     int	newRects;
 
     newRects = rEnd - r;
@@ -517,11 +517,11 @@ pixman_region_appendNonO (
 
     /* Make sure we have enough space for all rectangles to be added */
     RECTALLOC(region, newRects);
-    pNextRect = PIXREGION_TOP(region);
+    next_rect = PIXREGION_TOP(region);
     region->data->numRects += newRects;
     do {
 	assert(r->x1 < r->x2);
-	ADDRECT(pNextRect, r->x1, y1, r->x2, y2);
+	ADDRECT(next_rect, r->x1, y1, r->x2, y2);
 	r++;
     } while (r != rEnd);
 
@@ -913,9 +913,9 @@ pixman_region_intersectO (region_type_t *region,
 {
     int  	x1;
     int  	x2;
-    box_type_t *	pNextRect;
+    box_type_t *	next_rect;
 
-    pNextRect = PIXREGION_TOP(region);
+    next_rect = PIXREGION_TOP(region);
 
     assert(y1 < y2);
     assert(r1 != r1End && r2 != r2End);
@@ -929,7 +929,7 @@ pixman_region_intersectO (region_type_t *region,
 	 * overlap to the new region.
 	 */
 	if (x1 < x2)
-	    NEWRECT(region, pNextRect, x1, y1, x2, y2);
+	    NEWRECT(region, next_rect, x1, y1, x2, y2);
 
 	/*
 	 * Advance the pointer(s) with the leftmost right side, since the next
@@ -1015,7 +1015,7 @@ PREFIX(_intersect) (region_type_t * 	newReg,
 	if (x2 < r->x2) x2 = r->x2;				\
     } else {							\
 	/* Add current rectangle, start new one */		\
-	NEWRECT(region, pNextRect, x1, y1, x2, y2);		\
+	NEWRECT(region, next_rect, x1, y1, x2, y2);		\
 	x1 = r->x1;						\
 	x2 = r->x2;						\
     }								\
@@ -1052,14 +1052,14 @@ pixman_region_unionO (
     int	  y2,
     int		  *pOverlap)
 {
-    box_type_t *     pNextRect;
+    box_type_t *     next_rect;
     int        x1;     /* left and right side of current union */
     int        x2;
 
     assert (y1 < y2);
     assert(r1 != r1End && r2 != r2End);
 
-    pNextRect = PIXREGION_TOP(region);
+    next_rect = PIXREGION_TOP(region);
 
     /* Start off current rectangle */
     if (r1->x1 < r2->x1)
@@ -1096,7 +1096,7 @@ pixman_region_unionO (
     }
 
     /* Add current rectangle */
-    NEWRECT(region, pNextRect, x1, y1, x2, y2);
+    NEWRECT(region, next_rect, x1, y1, x2, y2);
 
     return TRUE;
 }
@@ -1545,7 +1545,7 @@ pixman_region_subtractO (
     int  	y2,
     int		*pOverlap)
 {
-    box_type_t *	pNextRect;
+    box_type_t *	next_rect;
     int  	x1;
 
     x1 = r1->x1;
@@ -1553,7 +1553,7 @@ pixman_region_subtractO (
     assert(y1<y2);
     assert(r1 != r1End && r2 != r2End);
 
-    pNextRect = PIXREGION_TOP(region);
+    next_rect = PIXREGION_TOP(region);
 
     do
     {
@@ -1596,7 +1596,7 @@ pixman_region_subtractO (
 	     * part of minuend to region and skip to next subtrahend.
 	     */
 	    assert(x1<r2->x1);
-	    NEWRECT(region, pNextRect, x1, y1, r2->x1, y2);
+	    NEWRECT(region, next_rect, x1, y1, r2->x1, y2);
 
 	    x1 = r2->x2;
 	    if (x1 >= r1->x2)
@@ -1622,7 +1622,7 @@ pixman_region_subtractO (
 	     * Minuend used up: add any remaining piece before advancing.
 	     */
 	    if (r1->x2 > x1)
-		NEWRECT(region, pNextRect, x1, y1, r1->x2, y2);
+		NEWRECT(region, next_rect, x1, y1, r1->x2, y2);
 	    r1++;
 	    if (r1 != r1End)
 		x1 = r1->x1;
@@ -1635,7 +1635,7 @@ pixman_region_subtractO (
     while (r1 != r1End)
     {
 	assert(x1<r1->x2);
-	NEWRECT(region, pNextRect, x1, y1, r1->x2, y2);
+	NEWRECT(region, next_rect, x1, y1, r1->x2, y2);
 	r1++;
 	if (r1 != r1End)
 	    x1 = r1->x1;
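
The ADDRECT/NEWRECT macros renamed above implement append-with-growth over the region's rectangle array. A rough functional equivalent with the macro plumbing removed; the struct layout and the doubling growth policy are simplifications for illustration, not pixman's actual pixman_rect_alloc behaviour:

    #include <stdlib.h>

    typedef struct { int x1, y1, x2, y2; } box_t;

    typedef struct {
        box_t *rects;       /* storage reached through PIXREGION_TOP() in pixman */
        int    num_rects;
        int    size;
    } region_t;

    /* Grow the rectangle array if it is full, then append one box --
     * the same job NEWRECT/ADDRECT do in pixman-region.c. */
    int region_append_rect (region_t *reg, int x1, int y1, int x2, int y2)
    {
        if (reg->num_rects == reg->size)
        {
            int    new_size  = reg->size ? reg->size * 2 : 16;
            box_t *new_rects = realloc (reg->rects, new_size * sizeof (box_t));

            if (!new_rects)
                return 0;

            reg->rects = new_rects;
            reg->size  = new_size;
        }

        reg->rects[reg->num_rects].x1 = x1;
        reg->rects[reg->num_rects].y1 = y1;
        reg->rects[reg->num_rects].x2 = x2;
        reg->rects[reg->num_rects].y2 = y2;
        reg->num_rects++;

        return 1;
    }
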
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index b160763..38e3011 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -2499,9 +2499,9 @@ sse2CombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 static void
 sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			     pixman_op_t op,
-			    pixman_image_t * pSrc,
-			    pixman_image_t * pMask,
-			    pixman_image_t * pDst,
+			    pixman_image_t * src_image,
+			    pixman_image_t * mask_image,
+			    pixman_image_t * dst_image,
 			    int32_t	xSrc,
 			    int32_t	ySrc,
 			    int32_t	xMask,
@@ -2518,12 +2518,12 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmAlpha;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2586,9 +2586,9 @@ sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
 static void
 sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			     pixman_op_t op,
-			    pixman_image_t * pSrc,
-			    pixman_image_t * pMask,
-			    pixman_image_t * pDst,
+			    pixman_image_t * src_image,
+			    pixman_image_t * mask_image,
+			    pixman_image_t * dst_image,
 			    int32_t	xSrc,
 			    int32_t	ySrc,
 			    int32_t	xMask,
@@ -2605,12 +2605,12 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmAlpha;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
         return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2676,9 +2676,9 @@ sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
 static void
 sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				       pixman_op_t op,
-				      pixman_image_t * pSrc,
-				      pixman_image_t * pMask,
-				      pixman_image_t * pDst,
+				      pixman_image_t * src_image,
+				      pixman_image_t * mask_image,
+				      pixman_image_t * dst_image,
 				      int32_t	xSrc,
 				      int32_t	ySrc,
 				      int32_t	xMask,
@@ -2700,13 +2700,13 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 
     __m64 mmxSrc, mmxAlpha, mmxMask, mmxDst;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
 
     xmmSrc = _mm_unpacklo_epi8 (createMask_2x32_128 (src, src), _mm_setzero_si128 ());
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -2810,9 +2810,9 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 static void
 sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 				pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t	xSrc,
 			       int32_t	ySrc,
 			       int32_t      xMask,
@@ -2833,9 +2833,9 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmAlphaLo, xmmAlphaHi;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    mask = _pixman_image_get_solid (pMask, pDst->bits.format);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     xmmMask = createMask_16_128 (mask >> 24);
 
@@ -2923,9 +2923,9 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 static void
 sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 				pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t	xSrc,
 			       int32_t	ySrc,
 			       int32_t      xMask,
@@ -2945,9 +2945,9 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    mask = _pixman_image_get_solid (pMask, pDst->bits.format);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
 
     xmmMask = createMask_16_128 (mask >> 24);
     xmmAlpha = Mask00ff;
@@ -3036,9 +3036,9 @@ sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 static void
 sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
 			      pixman_op_t op,
-			     pixman_image_t * pSrc,
-			     pixman_image_t * pMask,
-			     pixman_image_t * pDst,
+			     pixman_image_t * src_image,
+			     pixman_image_t * mask_image,
+			     pixman_image_t * dst_image,
 			     int32_t	xSrc,
 			     int32_t	ySrc,
 			     int32_t      xMask,
@@ -3052,8 +3052,8 @@ sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
     uint32_t	*dstLine, *dst;
     uint32_t	*srcLine, *src;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     dst = dstLine;
     src = srcLine;
@@ -3085,9 +3085,9 @@ fast_composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
 static void
 sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 			      pixman_op_t op,
-			     pixman_image_t * pSrc,
-			     pixman_image_t * pMask,
-			     pixman_image_t * pDst,
+			     pixman_image_t * src_image,
+			     pixman_image_t * mask_image,
+			     pixman_image_t * dst_image,
 			     int32_t      xSrc,
 			     int32_t      ySrc,
 			     int32_t      xMask,
@@ -3106,8 +3106,8 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -3115,7 +3115,7 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
      * I copy the code from MMX one and keep the fixme.
      * If it's a problem there, probably is a problem here.
      */
-    assert (pSrc->pDrawable == pMask->pDrawable);
+    assert (src_image->pDrawable == mask_image->pDrawable);
 #endif
 
     while (height--)
@@ -3199,9 +3199,9 @@ sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 static void
 sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				   pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -3224,14 +3224,14 @@ sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 
     __m64 mmxSrc, mmxAlpha, mmxMask, mmxDest;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     xmmDef = createMask_2x32_128 (src, src);
     xmmSrc = expandPixel_32_1x128 (src);
@@ -3477,9 +3477,9 @@ pixmanFillsse2 (uint32_t *bits,
 static void
 sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				      pixman_op_t op,
-				     pixman_image_t * pSrc,
-				     pixman_image_t * pMask,
-				     pixman_image_t * pDst,
+				     pixman_image_t * src_image,
+				     pixman_image_t * mask_image,
+				     pixman_image_t * dst_image,
 				     int32_t      xSrc,
 				     int32_t      ySrc,
 				     int32_t      xMask,
@@ -3499,19 +3499,19 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmDef;
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
     {
-        pixmanFillsse2 (pDst->bits.bits, pDst->bits.rowstride,
-                        PIXMAN_FORMAT_BPP (pDst->bits.format),
+        pixmanFillsse2 (dst_image->bits.bits, dst_image->bits.rowstride,
+                        PIXMAN_FORMAT_BPP (dst_image->bits.format),
                         xDst, yDst, width, height, 0);
         return;
     }
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     xmmDef = createMask_2x32_128 (src, src);
     xmmSrc = expandPixel_32_1x128 (src);
@@ -3613,9 +3613,9 @@ sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 static void
 sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				   pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -3637,14 +3637,14 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     srca = src >> 24;
     if (src == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -3763,9 +3763,9 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 static void
 sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				   pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -3785,8 +3785,8 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDst0, xmmDst1, xmmDst2, xmmDst3;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -3794,7 +3794,7 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
      * I copy the code from MMX one and keep the fixme.
      * If it's a problem there, probably is a problem here.
      */
-    assert (pSrc->pDrawable == pMask->pDrawable);
+    assert (src_image->pDrawable == mask_image->pDrawable);
 #endif
 
     while (height--)
@@ -3898,9 +3898,9 @@ sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 static void
 sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				   pixman_op_t op,
-				  pixman_image_t * pSrc,
-				  pixman_image_t * pMask,
-				  pixman_image_t * pDst,
+				  pixman_image_t * src_image,
+				  pixman_image_t * mask_image,
+				  pixman_image_t * dst_image,
 				  int32_t      xSrc,
 				  int32_t      ySrc,
 				  int32_t      xMask,
@@ -3919,8 +3919,8 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
     __m128i xmmSrcLo, xmmSrcHi;
     __m128i xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
 #if 0
     /* FIXME
@@ -3928,7 +3928,7 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
      * I copy the code from MMX one and keep the fixme.
      * If it's a problem there, probably is a problem here.
      */
-    assert (pSrc->pDrawable == pMask->pDrawable);
+    assert (src_image->pDrawable == mask_image->pDrawable);
 #endif
 
     while (height--)
@@ -4013,9 +4013,9 @@ sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 static void
 sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				       pixman_op_t op,
-				      pixman_image_t * pSrc,
-				      pixman_image_t * pMask,
-				      pixman_image_t * pDst,
+				      pixman_image_t * src_image,
+				      pixman_image_t * mask_image,
+				      pixman_image_t * dst_image,
 				      int32_t      xSrc,
 				      int32_t      ySrc,
 				      int32_t      xMask,
@@ -4038,13 +4038,13 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 
     __m64 mmxSrc, mmxAlpha, mmxMask, mmxDest;
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     if (src == 0)
         return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint32_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint32_t, maskStride, maskLine, 1);
 
     xmmSrc = expandPixel_32_1x128 (src);
     xmmAlpha = expandAlpha_1x128 (xmmSrc);
@@ -4161,9 +4161,9 @@ sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 static void
 sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			 pixman_op_t op,
-			pixman_image_t * pSrc,
-			pixman_image_t * pMask,
-			pixman_image_t * pDst,
+			pixman_image_t * src_image,
+			pixman_image_t * mask_image,
+			pixman_image_t * dst_image,
 			int32_t      xSrc,
 			int32_t      ySrc,
 			int32_t      xMask,
@@ -4184,10 +4184,10 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     sa = src >> 24;
     if (sa == 0)
@@ -4264,9 +4264,9 @@ sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 static void
 sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
 		       pixman_op_t op,
-		      pixman_image_t * pSrc,
-		      pixman_image_t * pMask,
-		      pixman_image_t * pDst,
+		      pixman_image_t * src_image,
+		      pixman_image_t * mask_image,
+		      pixman_image_t * dst_image,
 		      int32_t      xSrc,
 		      int32_t      ySrc,
 		      int32_t      xMask,
@@ -4285,8 +4285,8 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
     __m128i xmmSrc, xmmSrcLo, xmmSrcHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
 
     while (height--)
     {
@@ -4354,9 +4354,9 @@ sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
 static void
 sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 				pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int32_t      xSrc,
 			       int32_t      ySrc,
 			       int32_t      xMask,
@@ -4378,10 +4378,10 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
     __m128i xmmDst, xmmDstLo, xmmDstHi;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
 
-    src = _pixman_image_get_solid(pSrc, pDst->bits.format);
+    src = _pixman_image_get_solid(src_image, dst_image->bits.format);
 
     sa = src >> 24;
     if (sa == 0)
@@ -4460,9 +4460,9 @@ sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 static void
 sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				 pixman_op_t op,
-				pixman_image_t * pSrc,
-				pixman_image_t * pMask,
-				pixman_image_t * pDst,
+				pixman_image_t * src_image,
+				pixman_image_t * mask_image,
+				pixman_image_t * dst_image,
 				int32_t      xSrc,
 				int32_t      ySrc,
 				int32_t      xMask,
@@ -4478,8 +4478,8 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
     uint16_t	w;
     uint16_t	t;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint8_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -4527,9 +4527,9 @@ sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 static void
 sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				 pixman_op_t 	op,
-				pixman_image_t *	pSrc,
-				pixman_image_t *	pMask,
-				pixman_image_t *	 pDst,
+				pixman_image_t *	src_image,
+				pixman_image_t *	mask_image,
+				pixman_image_t *	 dst_image,
 				int32_t		 xSrc,
 				int32_t      ySrc,
 				int32_t      xMask,
@@ -4543,8 +4543,8 @@ sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
     uint32_t	*srcLine, *src;
     int	dstStride, srcStride;
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -4705,9 +4705,9 @@ pixmanBltsse2 (uint32_t *src_bits,
 static void
 sse2_CompositeCopyArea (pixman_implementation_t *imp,
 			 pixman_op_t       op,
-			pixman_image_t *	pSrc,
-			pixman_image_t *	pMask,
-			pixman_image_t *	pDst,
+			pixman_image_t *	src_image,
+			pixman_image_t *	mask_image,
+			pixman_image_t *	dst_image,
 			int32_t		xSrc,
 			int32_t		ySrc,
 			int32_t		xMask,
@@ -4717,12 +4717,12 @@ sse2_CompositeCopyArea (pixman_implementation_t *imp,
 			int32_t		width,
 			int32_t		height)
 {
-    pixmanBltsse2 (pSrc->bits.bits,
-		    pDst->bits.bits,
-		    pSrc->bits.rowstride,
-		    pDst->bits.rowstride,
-		    PIXMAN_FORMAT_BPP (pSrc->bits.format),
-		    PIXMAN_FORMAT_BPP (pDst->bits.format),
+    pixmanBltsse2 (src_image->bits.bits,
+		    dst_image->bits.bits,
+		    src_image->bits.rowstride,
+		    dst_image->bits.rowstride,
+		    PIXMAN_FORMAT_BPP (src_image->bits.format),
+		    PIXMAN_FORMAT_BPP (dst_image->bits.format),
 		    xSrc, ySrc, xDst, yDst, width, height);
 }
 
@@ -4731,9 +4731,9 @@ sse2_CompositeCopyArea (pixman_implementation_t *imp,
 void
 sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				 pixman_op_t      op,
-				pixman_image_t * pSrc,
-				pixman_image_t * pMask,
-				pixman_image_t * pDst,
+				pixman_image_t * src_image,
+				pixman_image_t * mask_image,
+				pixman_image_t * dst_image,
 				int32_t      xSrc,
 				int32_t      ySrc,
 				int32_t      xMask,
@@ -4754,9 +4754,9 @@ sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
     __m128i xmmDst, xmmDstLo, xmmDstHi;
     __m128i xmmMask, xmmMaskLo, xmmMaskHi;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (mask_image, xMask, yMask, uint8_t, maskStride, maskLine, 1);
+    fbComposeGetStart (src_image, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman-utils.c b/pixman/pixman-utils.c
index b7f3af9..43fd074 100644
--- a/pixman/pixman-utils.c
+++ b/pixman/pixman-utils.c
@@ -36,15 +36,15 @@
 #define BOUND(v)	(int16_t) ((v) < INT16_MIN ? INT16_MIN : (v) > INT16_MAX ? INT16_MAX : (v))
 
 static inline pixman_bool_t
-miClipPictureReg (pixman_region32_t *	pRegion,
+miClipPictureReg (pixman_region32_t *	region,
 		  pixman_region32_t *	pClip,
 		  int		dx,
 		  int		dy)
 {
-    if (pixman_region32_n_rects(pRegion) == 1 &&
+    if (pixman_region32_n_rects(region) == 1 &&
 	pixman_region32_n_rects(pClip) == 1)
     {
-	pixman_box32_t *  pRbox = pixman_region32_rectangles(pRegion, NULL);
+	pixman_box32_t *  pRbox = pixman_region32_rectangles(region, NULL);
 	pixman_box32_t *  pCbox = pixman_region32_rectangles(pClip, NULL);
 	int	v;
 	
@@ -59,7 +59,7 @@ miClipPictureReg (pixman_region32_t *	pRegion,
 	if (pRbox->x1 >= pRbox->x2 ||
 	    pRbox->y1 >= pRbox->y2)
 	{
-	    pixman_region32_init (pRegion);
+	    pixman_region32_init (region);
 	}
     }
     else if (!pixman_region32_not_empty (pClip))
@@ -69,18 +69,18 @@ miClipPictureReg (pixman_region32_t *	pRegion,
     else
     {
 	if (dx || dy)
-	    pixman_region32_translate (pRegion, -dx, -dy);
-	if (!pixman_region32_intersect (pRegion, pRegion, pClip))
+	    pixman_region32_translate (region, -dx, -dy);
+	if (!pixman_region32_intersect (region, region, pClip))
 	    return FALSE;
 	if (dx || dy)
-	    pixman_region32_translate(pRegion, dx, dy);
+	    pixman_region32_translate(region, dx, dy);
     }
-    return pixman_region32_not_empty(pRegion);
+    return pixman_region32_not_empty(region);
 }
 
 
 static inline pixman_bool_t
-miClipPictureSrc (pixman_region32_t *	pRegion,
+miClipPictureSrc (pixman_region32_t *	region,
 		  pixman_image_t *	pPicture,
 		  int		dx,
 		  int		dy)
@@ -93,7 +93,7 @@ miClipPictureSrc (pixman_region32_t *	pRegion,
     if (!pPicture->common.clip_sources || !pPicture->common.client_clip)
 	return TRUE;
 
-    return miClipPictureReg (pRegion,
+    return miClipPictureReg (region,
 			     &pPicture->common.clip_region,
 			     dx, dy);
 }
@@ -103,10 +103,10 @@ miClipPictureSrc (pixman_region32_t *	pRegion,
  * an allocation failure, but rendering ignores those anyways.
  */
 static pixman_bool_t
-pixman_compute_composite_region32 (pixman_region32_t *	pRegion,
-				   pixman_image_t *	pSrc,
-				   pixman_image_t *	pMask,
-				   pixman_image_t *	pDst,
+pixman_compute_composite_region32 (pixman_region32_t *	region,
+				   pixman_image_t *	src_image,
+				   pixman_image_t *	mask_image,
+				   pixman_image_t *	dst_image,
 				   int16_t		xSrc,
 				   int16_t		ySrc,
 				   int16_t		xMask,
@@ -118,91 +118,91 @@ pixman_compute_composite_region32 (pixman_region32_t *	pRegion,
 {
     int		v;
     
-    pRegion->extents.x1 = xDst;
+    region->extents.x1 = xDst;
     v = xDst + width;
-    pRegion->extents.x2 = BOUND(v);
-    pRegion->extents.y1 = yDst;
+    region->extents.x2 = BOUND(v);
+    region->extents.y1 = yDst;
     v = yDst + height;
-    pRegion->extents.y2 = BOUND(v);
+    region->extents.y2 = BOUND(v);
 
-    pRegion->extents.x1 = MAX (pRegion->extents.x1, 0);
-    pRegion->extents.y1 = MAX (pRegion->extents.y1, 0);
+    region->extents.x1 = MAX (region->extents.x1, 0);
+    region->extents.y1 = MAX (region->extents.y1, 0);
     
     /* Some X servers rely on an old bug, where pixman would just believe the
      * set clip_region and not clip against the destination geometry. So, 
      * since only X servers set "source clip", we don't clip against
      * destination geometry when that is set.
      */
-    if (!pDst->common.clip_sources)
+    if (!dst_image->common.clip_sources)
     {
-	pRegion->extents.x2 = MIN (pRegion->extents.x2, pDst->bits.width);
-	pRegion->extents.y2 = MIN (pRegion->extents.y2, pDst->bits.height);
+	region->extents.x2 = MIN (region->extents.x2, dst_image->bits.width);
+	region->extents.y2 = MIN (region->extents.y2, dst_image->bits.height);
     }
     
-    pRegion->data = 0;
+    region->data = 0;
     
     /* Check for empty operation */
-    if (pRegion->extents.x1 >= pRegion->extents.x2 ||
-	pRegion->extents.y1 >= pRegion->extents.y2)
+    if (region->extents.x1 >= region->extents.x2 ||
+	region->extents.y1 >= region->extents.y2)
     {
-	pixman_region32_init (pRegion);
+	pixman_region32_init (region);
 	return FALSE;
     }
     
-    if (pDst->common.have_clip_region)
+    if (dst_image->common.have_clip_region)
     {
-	if (!miClipPictureReg (pRegion, &pDst->common.clip_region, 0, 0))
+	if (!miClipPictureReg (region, &dst_image->common.clip_region, 0, 0))
 	{
-	    pixman_region32_fini (pRegion);
+	    pixman_region32_fini (region);
 	    return FALSE;
 	}
     }
     
-    if (pDst->common.alpha_map && pDst->common.alpha_map->common.have_clip_region)
+    if (dst_image->common.alpha_map && dst_image->common.alpha_map->common.have_clip_region)
     {
-	if (!miClipPictureReg (pRegion, &pDst->common.alpha_map->common.clip_region,
-			       -pDst->common.alpha_origin_x,
-			       -pDst->common.alpha_origin_y))
+	if (!miClipPictureReg (region, &dst_image->common.alpha_map->common.clip_region,
+			       -dst_image->common.alpha_origin_x,
+			       -dst_image->common.alpha_origin_y))
 	{
-	    pixman_region32_fini (pRegion);
+	    pixman_region32_fini (region);
 	    return FALSE;
 	}
     }
     
     /* clip against src */
-    if (pSrc->common.have_clip_region)
+    if (src_image->common.have_clip_region)
     {
-	if (!miClipPictureSrc (pRegion, pSrc, xDst - xSrc, yDst - ySrc))
+	if (!miClipPictureSrc (region, src_image, xDst - xSrc, yDst - ySrc))
 	{
-	    pixman_region32_fini (pRegion);
+	    pixman_region32_fini (region);
 	    return FALSE;
 	}
     }
-    if (pSrc->common.alpha_map && pSrc->common.alpha_map->common.have_clip_region)
+    if (src_image->common.alpha_map && src_image->common.alpha_map->common.have_clip_region)
     {
-	if (!miClipPictureSrc (pRegion, (pixman_image_t *)pSrc->common.alpha_map,
-			       xDst - (xSrc - pSrc->common.alpha_origin_x),
-			       yDst - (ySrc - pSrc->common.alpha_origin_y)))
+	if (!miClipPictureSrc (region, (pixman_image_t *)src_image->common.alpha_map,
+			       xDst - (xSrc - src_image->common.alpha_origin_x),
+			       yDst - (ySrc - src_image->common.alpha_origin_y)))
 	{
-	    pixman_region32_fini (pRegion);
+	    pixman_region32_fini (region);
 	    return FALSE;
 	}
     }
     /* clip against mask */
-    if (pMask && pMask->common.have_clip_region)
+    if (mask_image && mask_image->common.have_clip_region)
     {
-	if (!miClipPictureSrc (pRegion, pMask, xDst - xMask, yDst - yMask))
+	if (!miClipPictureSrc (region, mask_image, xDst - xMask, yDst - yMask))
 	{
-	    pixman_region32_fini (pRegion);
+	    pixman_region32_fini (region);
 	    return FALSE;
 	}	
-	if (pMask->common.alpha_map && pMask->common.alpha_map->common.have_clip_region)
+	if (mask_image->common.alpha_map && mask_image->common.alpha_map->common.have_clip_region)
 	{
-	    if (!miClipPictureSrc (pRegion, (pixman_image_t *)pMask->common.alpha_map,
-				   xDst - (xMask - pMask->common.alpha_origin_x),
-				   yDst - (yMask - pMask->common.alpha_origin_y)))
+	    if (!miClipPictureSrc (region, (pixman_image_t *)mask_image->common.alpha_map,
+				   xDst - (xMask - mask_image->common.alpha_origin_x),
+				   yDst - (yMask - mask_image->common.alpha_origin_y)))
 	    {
-		pixman_region32_fini (pRegion);
+		pixman_region32_fini (region);
 		return FALSE;
 	    }
 	}
@@ -212,10 +212,10 @@ pixman_compute_composite_region32 (pixman_region32_t *	pRegion,
 }
 
 PIXMAN_EXPORT pixman_bool_t
-pixman_compute_composite_region (pixman_region16_t *	pRegion,
-				 pixman_image_t *	pSrc,
-				 pixman_image_t *	pMask,
-				 pixman_image_t *	pDst,
+pixman_compute_composite_region (pixman_region16_t *	region,
+				 pixman_image_t *	src_image,
+				 pixman_image_t *	mask_image,
+				 pixman_image_t *	dst_image,
 				 int16_t		xSrc,
 				 int16_t		ySrc,
 				 int16_t		xMask,
@@ -230,13 +230,13 @@ pixman_compute_composite_region (pixman_region16_t *	pRegion,
 
     pixman_region32_init (&r32);
     
-    retval = pixman_compute_composite_region32 (&r32, pSrc, pMask, pDst,
+    retval = pixman_compute_composite_region32 (&r32, src_image, mask_image, dst_image,
 						xSrc, ySrc, xMask, yMask, xDst, yDst,
 						width, height);
 
     if (retval)
     {
-	if (!pixman_region16_copy_from_region32 (pRegion, &r32))
+	if (!pixman_region16_copy_from_region32 (region, &r32))
 	    retval = FALSE;
     }
     
@@ -375,9 +375,9 @@ pixman_contract(uint32_t *dst, const uint64_t *src, int width)
 static void
 walk_region_internal (pixman_implementation_t *imp,
 		      pixman_op_t op,
-		      pixman_image_t * pSrc,
-		      pixman_image_t * pMask,
-		      pixman_image_t * pDst,
+		      pixman_image_t * src_image,
+		      pixman_image_t * mask_image,
+		      pixman_image_t * dst_image,
 		      int16_t xSrc,
 		      int16_t ySrc,
 		      int16_t xMask,
@@ -413,33 +413,33 @@ walk_region_internal (pixman_implementation_t *imp,
 	    
 	    if (maskRepeat)
 	    {
-		y_msk = MOD (y_msk, pMask->bits.height);
-		if (h_this > pMask->bits.height - y_msk)
-		    h_this = pMask->bits.height - y_msk;
+		y_msk = MOD (y_msk, mask_image->bits.height);
+		if (h_this > mask_image->bits.height - y_msk)
+		    h_this = mask_image->bits.height - y_msk;
 	    }
 	    if (srcRepeat)
 	    {
-		y_src = MOD (y_src, pSrc->bits.height);
-		if (h_this > pSrc->bits.height - y_src)
-		    h_this = pSrc->bits.height - y_src;
+		y_src = MOD (y_src, src_image->bits.height);
+		if (h_this > src_image->bits.height - y_src)
+		    h_this = src_image->bits.height - y_src;
 	    }
 	    while (w)
 	    {
 		w_this = w;
 		if (maskRepeat)
 		{
-		    x_msk = MOD (x_msk, pMask->bits.width);
-		    if (w_this > pMask->bits.width - x_msk)
-			w_this = pMask->bits.width - x_msk;
+		    x_msk = MOD (x_msk, mask_image->bits.width);
+		    if (w_this > mask_image->bits.width - x_msk)
+			w_this = mask_image->bits.width - x_msk;
 		}
 		if (srcRepeat)
 		{
-		    x_src = MOD (x_src, pSrc->bits.width);
-		    if (w_this > pSrc->bits.width - x_src)
-			w_this = pSrc->bits.width - x_src;
+		    x_src = MOD (x_src, src_image->bits.width);
+		    if (w_this > src_image->bits.width - x_src)
+			w_this = src_image->bits.width - x_src;
 		}
 		(*compositeRect) (imp,
-				  op, pSrc, pMask, pDst,
+				  op, src_image, mask_image, dst_image,
 				  x_src, y_src, x_msk, y_msk, x_dst, y_dst,
 				  w_this, h_this);
 		w -= w_this;
@@ -459,9 +459,9 @@ walk_region_internal (pixman_implementation_t *imp,
 void
 _pixman_walk_composite_region (pixman_implementation_t *imp,
 			       pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
+			       pixman_image_t * src_image,
+			       pixman_image_t * mask_image,
+			       pixman_image_t * dst_image,
 			       int16_t xSrc,
 			       int16_t ySrc,
 			       int16_t xMask,
@@ -477,10 +477,10 @@ _pixman_walk_composite_region (pixman_implementation_t *imp,
     pixman_region32_init (&region);
 
     if (pixman_compute_composite_region32 (
-	    &region, pSrc, pMask, pDst, xSrc, ySrc, xMask, yMask, xDst, yDst, width, height))
+	    &region, src_image, mask_image, dst_image, xSrc, ySrc, xMask, yMask, xDst, yDst, width, height))
     {
 	walk_region_internal (imp, op,
-			      pSrc, pMask, pDst,
+			      src_image, mask_image, dst_image,
 			      xSrc, ySrc, xMask, yMask, xDst, yDst,
 			      width, height, FALSE, FALSE,
 			      &region,
@@ -511,9 +511,9 @@ mask_is_solid (pixman_image_t *mask)
 static const pixman_fast_path_t *
 get_fast_path (const pixman_fast_path_t *fast_paths,
 	       pixman_op_t         op,
-	       pixman_image_t     *pSrc,
-	       pixman_image_t     *pMask,
-	       pixman_image_t     *pDst,
+	       pixman_image_t     *src_image,
+	       pixman_image_t     *mask_image,
+	       pixman_image_t     *dst_image,
 	       pixman_bool_t       is_pixbuf)
 {
     const pixman_fast_path_t *info;
@@ -526,8 +526,8 @@ get_fast_path (const pixman_fast_path_t *fast_paths,
 	if (info->op != op)
 	    continue;
 
-	if ((info->src_format == PIXMAN_solid && _pixman_image_is_solid (pSrc)) ||
-	    (pSrc->type == BITS && info->src_format == pSrc->bits.format))
+	if ((info->src_format == PIXMAN_solid && _pixman_image_is_solid (src_image)) ||
+	    (src_image->type == BITS && info->src_format == src_image->bits.format))
 	{
 	    valid_src = TRUE;
 	}
@@ -535,20 +535,20 @@ get_fast_path (const pixman_fast_path_t *fast_paths,
 	if (!valid_src)
 	    continue;
 
-	if ((info->mask_format == PIXMAN_null && !pMask) ||
-	    (pMask && pMask->type == BITS && info->mask_format == pMask->bits.format))
+	if ((info->mask_format == PIXMAN_null && !mask_image) ||
+	    (mask_image && mask_image->type == BITS && info->mask_format == mask_image->bits.format))
 	{
 	    valid_mask = TRUE;
 
 	    if (info->flags & NEED_SOLID_MASK)
 	    {
-		if (!pMask || !mask_is_solid (pMask))
+		if (!mask_image || !mask_is_solid (mask_image))
 		    valid_mask = FALSE;
 	    }
 
 	    if (info->flags & NEED_COMPONENT_ALPHA)
 	    {
-		if (!pMask || !pMask->common.component_alpha)
+		if (!mask_image || !mask_image->common.component_alpha)
 		    valid_mask = FALSE;
 	    }
 	}
@@ -556,7 +556,7 @@ get_fast_path (const pixman_fast_path_t *fast_paths,
 	if (!valid_mask)
 	    continue;
 	
-	if (info->dest_format != pDst->bits.format)
+	if (info->dest_format != dst_image->bits.format)
 	    continue;
 
 	if ((info->flags & NEED_PIXBUF) && !is_pixbuf)
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index f6a1afe..532c0d4 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -1483,9 +1483,9 @@ vmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 #if 0
 void
 vmx_CompositeOver_n_8888 (pixman_operator_t	op,
-			    pixman_image_t * pSrc,
-			    pixman_image_t * pMask,
-			    pixman_image_t * pDst,
+			    pixman_image_t * src_image,
+			    pixman_image_t * mask_image,
+			    pixman_image_t * dst_image,
 			    int16_t	xSrc,
 			    int16_t	ySrc,
 			    int16_t	xMask,
@@ -1499,12 +1499,12 @@ vmx_CompositeOver_n_8888 (pixman_operator_t	op,
     uint32_t	*dstLine, *dst;
     int	dstStride;
 
-    _pixman_image_get_solid (pSrc, pDst, src);
+    _pixman_image_get_solid (src_image, dst_image, src);
 
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint32_t, dstStride, dstLine, 1);
 
     while (height--)
     {
@@ -1516,9 +1516,9 @@ vmx_CompositeOver_n_8888 (pixman_operator_t	op,
 
 void
 vmx_CompositeOver_n_0565 (pixman_operator_t	op,
-			    pixman_image_t * pSrc,
-			    pixman_image_t * pMask,
-			    pixman_image_t * pDst,
+			    pixman_image_t * src_image,
+			    pixman_image_t * mask_image,
+			    pixman_image_t * dst_image,
 			    int16_t	xSrc,
 			    int16_t	ySrc,
 			    int16_t	xMask,
@@ -1533,12 +1533,12 @@ vmx_CompositeOver_n_0565 (pixman_operator_t	op,
     uint16_t	w;
     int	dstStride;
 
-    _pixman_image_get_solid (pSrc, pDst, src);
+    _pixman_image_get_solid (src_image, dst_image, src);
 
     if (src >> 24 == 0)
 	return;
 
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
+    fbComposeGetStart (dst_image, xDst, yDst, uint16_t, dstStride, dstLine, 1);
 
     while (height--)
     {
diff --git a/pixman/pixman.c b/pixman/pixman.c
index d566972..0c984f7 100644
--- a/pixman/pixman.c
+++ b/pixman/pixman.c
@@ -76,17 +76,17 @@ pixman_operator_can_be_optimized(pixman_op_t op)
  * The output operator should be mathematically equivalent to the source.
  */
 static pixman_op_t
-pixman_optimize_operator(pixman_op_t op, pixman_image_t *pSrc, pixman_image_t *pMask, pixman_image_t *pDst )
+pixman_optimize_operator(pixman_op_t op, pixman_image_t *src_image, pixman_image_t *mask_image, pixman_image_t *dst_image )
 {
     pixman_bool_t is_source_opaque;
     pixman_bool_t is_dest_opaque;
     const optimized_operator_info_t *info = pixman_operator_can_be_optimized(op);
 
-    if(!info || pMask)
+    if(!info || mask_image)
         return op;
 
-    is_source_opaque = _pixman_image_is_opaque(pSrc);
-    is_dest_opaque = _pixman_image_is_opaque(pDst);
+    is_source_opaque = _pixman_image_is_opaque(src_image);
+    is_dest_opaque = _pixman_image_is_opaque(dst_image);
 
     if(is_source_opaque == FALSE && is_dest_opaque == FALSE)
         return op;
diff --git a/pixman/pixman.h b/pixman/pixman.h
index 289048e..fa8c03f 100644
--- a/pixman/pixman.h
+++ b/pixman/pixman.h
@@ -812,10 +812,10 @@ pixman_bool_t	pixman_image_fill_rectangles	     (pixman_op_t		    op,
 						      const pixman_rectangle16_t   *rects);
 
 /* Composite */
-pixman_bool_t pixman_compute_composite_region (pixman_region16_t *pRegion,
-					       pixman_image_t    *pSrc,
-					       pixman_image_t    *pMask,
-					       pixman_image_t    *pDst,
+pixman_bool_t pixman_compute_composite_region (pixman_region16_t *region,
+					       pixman_image_t    *src_image,
+					       pixman_image_t    *mask_image,
+					       pixman_image_t    *dst_image,
 					       int16_t            xSrc,
 					       int16_t            ySrc,
 					       int16_t            xMask,
commit e3489730c317061a2cd888b927d36bda0590a3f2
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Mon Jun 29 07:30:47 2009 -0400

    Change the name of some routines that were simply misnamed.
    
        s/Src_pixbuf/_over_pixbuf/g;
        s/Src_x888_n/_over_x888_n/g;
        s/CompositeSrc_8888_8888/composite_over_8888_8888/g;
        s/CompositeSrc_8888_0565/composite_over_8888_0565/g;
        s/CompositeSrc_8888_8_8888/composite_over_8888_n_8888/g;

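    (Aside, not part of the patch: the renames above reflect what these routines
    actually compute -- the Porter-Duff OVER operator on premultiplied pixels,
    not SRC, which would simply copy the source. As a minimal illustrative
    sketch only, not pixman's own code and with made-up helper names, a
    per-pixel a8r8g8b8 OVER blend looks roughly like this:

    #include <stdint.h>

    /* x * a / 255 with rounding, the usual 8-bit approximation */
    static uint8_t
    mul_un8 (uint8_t x, uint8_t a)
    {
        uint16_t t = (uint16_t)x * a + 0x80;
        return (uint8_t)((t + (t >> 8)) >> 8);
    }

    /* OVER for premultiplied ARGB32: dst = src + dst * (255 - src_alpha) / 255,
     * applied per channel; no overflow for valid premultiplied pixels. */
    static uint32_t
    over_8888_8888_pixel (uint32_t src, uint32_t dst)
    {
        uint8_t  ia = 255 - (uint8_t)(src >> 24);   /* inverse source alpha */
        uint32_t result = 0;
        int      shift;

        for (shift = 0; shift < 32; shift += 8)
        {
            uint8_t s = (uint8_t)(src >> shift);
            uint8_t d = (uint8_t)(dst >> shift);
            uint8_t c = (uint8_t)(s + mul_un8 (d, ia));

            result |= (uint32_t)c << shift;
        }
        return result;
    }

    A true SRC handler would just store src, so the old CompositeSrc_* names
    were misleading for these blending paths.)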
diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 1b7fafb..6b14750 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -274,7 +274,7 @@ neon_CompositeAdd_8000_8000 (
 
 
 static void
-neon_CompositeSrc_8888_8888 (
+neon_composite_over_8888_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 			 pixman_image_t * pSrc,
@@ -435,7 +435,7 @@ neon_CompositeSrc_8888_8888 (
 }
 
 static void
-neon_CompositeSrc_8888_8_8888 (
+neon_composite_over_8888_n_8888 (
                                pixman_implementation_t * impl,
                                pixman_op_t op,
 			       pixman_image_t * pSrc,
@@ -2149,12 +2149,12 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeOver_8888_0565,         0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeOver_8888_0565,         0 },
 #endif
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888,        NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_CompositeOver_n_8_8888,     0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_CompositeOver_n_8_8888,     0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_CompositeOver_n_8_8888,     0 },
diff --git a/pixman/pixman-arm-simd.c b/pixman/pixman-arm-simd.c
index d105bc5..9897e86 100644
--- a/pixman/pixman-arm-simd.c
+++ b/pixman/pixman-arm-simd.c
@@ -100,7 +100,7 @@ arm_CompositeAdd_8000_8000 (
 }
 
 static void
-arm_CompositeSrc_8888_8888 (
+arm_composite_over_8888_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 			 pixman_image_t * pSrc,
@@ -193,7 +193,7 @@ arm_CompositeSrc_8888_8888 (
 }
 
 static void
-arm_CompositeSrc_8888_8_8888 (
+arm_composite_over_8888_n_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 			       pixman_image_t * pSrc,
@@ -419,12 +419,12 @@ arm_CompositeOver_n_8_8888 (
 
 static const pixman_fast_path_t arm_simd_fast_path_array[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, arm_CompositeSrc_8888_8888,      0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, arm_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, arm_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, arm_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_CompositeSrc_8888_8_8888,    NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, arm_composite_over_8888_8888,      0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, arm_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, arm_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, arm_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888,    NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
 
     { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       arm_CompositeAdd_8000_8000,   0 },
 
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index cf2f39e..1ed7805 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -614,7 +614,7 @@ fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
+fast_composite_over_8888_8888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			 pixman_image_t * pSrc,
 			 pixman_image_t * pMask,
@@ -710,7 +710,7 @@ fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 }
 
 static void
-fast_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
+fast_composite_over_8888_0565 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			 pixman_image_t * pSrc,
 			 pixman_image_t * pMask,
@@ -1044,12 +1044,12 @@ static const pixman_fast_path_t c_fast_paths[] =
     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, fast_CompositeOver_x888_8_8888,       0 },
     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fast_CompositeOver_x888_8_8888,       0 },
     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, fast_CompositeOver_x888_8_8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fast_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, fast_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 fast_CompositeSrc_8888_0565,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, fast_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, fast_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fast_CompositeSrc_8888_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fast_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, fast_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 fast_composite_over_8888_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, fast_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, fast_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fast_composite_over_8888_0565,	   0 },
     { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, fast_CompositeAdd_8888_8888,   0 },
     { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, fast_CompositeAdd_8888_8888,   0 },
     { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fast_CompositeAdd_8000_8000,   0 },
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index f0dca40..d500321 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -1188,7 +1188,7 @@ mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeSrc_8888_8_8888 (pixman_implementation_t *imp,
+mmx_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -1273,7 +1273,7 @@ mmx_CompositeSrc_8888_8_8888 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeSrc_x888_n_8888 (pixman_implementation_t *imp,
+mmx_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -1408,7 +1408,7 @@ mmx_CompositeSrc_x888_n_8888 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
+mmx_composite_over_8888_8888 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -1461,7 +1461,7 @@ mmx_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
+mmx_composite_over_8888_0565 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -2100,7 +2100,7 @@ mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 }
 
 static void
-mmx_CompositeSrc_pixbuf_0565 (pixman_implementation_t *imp,
+mmx_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -2222,7 +2222,7 @@ mmx_CompositeSrc_pixbuf_0565 (pixman_implementation_t *imp,
 /* "8888RevNP" is GdkPixbuf's format: ABGR, non premultiplied */
 
 static void
-mmx_CompositeSrc_pixbuf_8888 (pixman_implementation_t *imp,
+mmx_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -3032,26 +3032,26 @@ static const pixman_fast_path_t mmx_fast_paths[] =
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeSrc_x888_n_8888,    NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeSrc_x888_n_8888,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_CompositeSrc_x888_n_8888,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_CompositeSrc_x888_n_8888,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeSrc_8888_8_8888,    NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_Composite_over_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_Composite_over_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_Composite_over_x888_n_8888,    NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_Composite_over_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_Composite_over_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_Composite_over_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_composite_over_8888_n_8888,    NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_composite_over_8888_n_8888,	   NEED_SOLID_MASK },
 #if 0
     /* FIXME: This code is commented out since it's apparently not actually faster than the generic code. */
     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, mmx_CompositeOver_x888_8_8888,   0 },
@@ -3065,12 +3065,12 @@ static const pixman_fast_path_t mmx_fast_paths[] =
     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, mmx_CompositeCopyArea,	   0 },
     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, mmx_CompositeCopyArea,	   0 },
 
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, mmx_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 mmx_CompositeSrc_8888_0565,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_CompositeSrc_8888_8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   mmx_CompositeSrc_8888_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, mmx_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 mmx_composite_over_8888_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_composite_over_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   mmx_composite_over_8888_0565,	   0 },
 
     { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_CompositeAdd_8888_8888,   0 },
     { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_CompositeAdd_8888_8888,   0 },
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index c782ffd..b160763 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -2804,11 +2804,11 @@ sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeSrc_8888_8_8888
+ * fast_composite_over_8888_n_8888
  */
 
 static void
-sse2_CompositeSrc_8888_8_8888 (pixman_implementation_t *imp,
+sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
 				pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -2918,10 +2918,10 @@ sse2_CompositeSrc_8888_8_8888 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeSrc_x888_n_8888
+ * fast_Composite_over_x888_n_8888
  */
 static void
-sse2_CompositeSrc_x888_n_8888 (pixman_implementation_t *imp,
+sse2_Composite_over_x888_n_8888 (pixman_implementation_t *imp,
 				pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -3031,10 +3031,10 @@ sse2_CompositeSrc_x888_n_8888 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeSrc_8888_8888
+ * fast_composite_over_8888_8888
  */
 static void
-sse2_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
+sse2_composite_over_8888_8888 (pixman_implementation_t *imp,
 			      pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -3069,10 +3069,10 @@ sse2_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeSrc_8888_0565
+ * fast_composite_over_8888_0565
  */
 static force_inline uint16_t
-fast_CompositeSrc_8888_0565pixel (uint32_t src, uint16_t dst)
+fast_composite_over_8888_0565pixel (uint32_t src, uint16_t dst)
 {
     __m64       ms;
 
@@ -3083,7 +3083,7 @@ fast_CompositeSrc_8888_0565pixel (uint32_t src, uint16_t dst)
 }
 
 static void
-sse2_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
+sse2_composite_over_8888_0565 (pixman_implementation_t *imp,
 			      pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -3138,7 +3138,7 @@ sse2_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
             s = *src++;
             d = *dst;
 
-            *dst++ = fast_CompositeSrc_8888_0565pixel (s, d);
+            *dst++ = fast_composite_over_8888_0565pixel (s, d);
             w--;
         }
 
@@ -3185,7 +3185,7 @@ sse2_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
             s = *src++;
             d = *dst;
 
-            *dst++ = fast_CompositeSrc_8888_0565pixel (s, d);
+            *dst++ = fast_composite_over_8888_0565pixel (s, d);
         }
     }
 
@@ -3757,11 +3757,11 @@ sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeSrc_pixbuf_0565
+ * fast_Composite_over_pixbuf_0565
  */
 
 static void
-sse2_CompositeSrc_pixbuf_0565 (pixman_implementation_t *imp,
+sse2_Composite_over_pixbuf_0565 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -3892,11 +3892,11 @@ sse2_CompositeSrc_pixbuf_0565 (pixman_implementation_t *imp,
 /* "8888RevNP" is GdkPixbuf's format: ABGR, non premultiplied */
 
 /* -------------------------------------------------------------------------------------------------
- * fast_CompositeSrc_pixbuf_8888
+ * fast_Composite_over_pixbuf_8888
  */
 
 static void
-sse2_CompositeSrc_pixbuf_8888 (pixman_implementation_t *imp,
+sse2_Composite_over_pixbuf_8888 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -4877,12 +4877,12 @@ static const pixman_fast_path_t sse2_fast_paths[] =
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8888,           0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8888,           0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_CompositeOver_n_0565,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_CompositeSrc_8888_8888,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_CompositeSrc_8888_0565,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_CompositeSrc_8888_0565,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_composite_over_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_composite_over_8888_0565,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_composite_over_8888_0565,          0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8_8888,     0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8_8888,     0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeOver_n_8_8888,     0 },
@@ -4894,32 +4894,32 @@ static const pixman_fast_path_t sse2_fast_paths[] =
     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeOver_x888_8_8888,       0 },
     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_x888_8_8888,       0 },
 #endif
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_Composite_over_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_composite_over_8888_n_8888,        NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_Composite_over_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_Composite_over_pixbuf_0565,     NEED_PIXBUF },
     { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeCopyArea,               0 },
     { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_CompositeCopyArea,               0 },
 
commit 90cac1115551c0fd70ace419179bcf2a30d6b1c2
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jun 28 21:06:01 2009 -0400

    Fix up names of compositing functions
    
        s/SrcAdd/Add/g;
        s/SolidMaskSrc/Src/g;
        s/SolidMaskIn/In/g;
        s/SolidMask/Over/g;
        s/Solid_n/Over_n/g;
        s/SrcIn/In/g;
    
        s/(fb)(Composite.*)sse2/sse2_$2/g;
        s/(fb)(Composite.*)mmx/mmx_$2/g;
        s/(fb)(Composite.*)neon/neon_$2/g;
        s/(fb)(Composite.*)arm/arm_$2/g;
        s/(fb)(Composite.*)vmx/vmx_$2/g;
        s/(fb)(Composite.*)/fast_$2/g;
    
        s/b8g8r8x8/f00bar/g;
        s/8888C/8888_ca/g;
        s/0565C/0565_ca/g;
        s/8888RevNPx/pixbuf_x_/g;
        s/8x0/8_x_0/g;
        s/00x8/00_x_8/g;
        s/8x8/8_x_8/g;
        s/8x8/8_x_8/g;
        s/nx8/n_x_8/g;
        s/24x16/24_x_16/g;
        s/16x16/16_x_16/g;
        s/8xx8/8_x_x8/g;
        s/8xn/8_x_n/g;
        s/nx0/n_x_0/g;
        s/_x_/_/g;
        s/f00bar/b8g8r8x8/;
    
        # Fix up NEON type names
        s/uint8_8/uint8x8/g;
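
The rules above are Perl-style substitutions. As a minimal sketch, and not the
exact script used for this commit, a handful of representative rules could be
applied in bulk along these lines (the file names and the rule subset shown
here are illustrative only):

    #!/usr/bin/perl -w
    # Sketch: apply a few representative renaming rules, in the spirit of
    # "perl -pi -e 's/.../.../g' FILES".
    # Usage:  perl rename-sketch.pl pixman/pixman-mmx.c pixman/pixman-sse2.c
    use strict;

    for my $file (@ARGV) {
        open my $in, '<', $file or die "read $file: $!";
        my @lines = <$in>;
        close $in;

        for (@lines) {
            s/SrcAdd/Add/g;                    # fbCompositeSrcAdd_... -> fbCompositeAdd_...
            s/SolidMask/Over/g;                # fbCompositeSolidMask_... -> fbCompositeOver_...
            s/(fb)(Composite\S*)mmx/mmx_$2/g;  # fbComposite...mmx -> mmx_Composite...
                                               # (".*" in the original rule; "\S*" here
                                               # keeps the match to a single token)
            s/8888C/8888_ca/g;                 # component-alpha suffix
        }

        open my $out, '>', $file or die "write $file: $!";
        print $out @lines;
        close $out;
    }

Applied in the order listed, the operator renames (SrcAdd, SolidMask, ...) run
before the fb*mmx/sse2/neon prefix rewrites, which is why the final names come
out as mmx_CompositeAdd_..., mmx_CompositeOver_... and so on in the hunks below.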

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index d389c9a..1b7fafb 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -124,7 +124,7 @@ static force_inline uint8x8x4_t neon8qadd(uint8x8x4_t x, uint8x8x4_t y)
 
 
 static void
-fbCompositeSrcAdd_8000x8000neon (
+neon_CompositeAdd_8000_8000 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
                                 pixman_image_t * pSrc,
@@ -274,7 +274,7 @@ fbCompositeSrcAdd_8000x8000neon (
 
 
 static void
-fbCompositeSrc_8888x8888neon (
+neon_CompositeSrc_8888_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 			 pixman_image_t * pSrc,
@@ -435,7 +435,7 @@ fbCompositeSrc_8888x8888neon (
 }
 
 static void
-fbCompositeSrc_8888x8x8888neon (
+neon_CompositeSrc_8888_8_8888 (
                                pixman_implementation_t * impl,
                                pixman_op_t op,
 			       pixman_image_t * pSrc,
@@ -632,7 +632,7 @@ fbCompositeSrc_8888x8x8888neon (
 
 
 static void
-fbCompositeSolidMask_nx8x8888neon (
+neon_CompositeOver_n_8_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t      op,
 			       pixman_image_t * pSrc,
@@ -839,7 +839,7 @@ fbCompositeSolidMask_nx8x8888neon (
 
 
 static void
-fbCompositeSrcAdd_8888x8x8neon (
+neon_CompositeAdd_8888_8_8 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
                             pixman_image_t * pSrc,
@@ -958,7 +958,7 @@ fbCompositeSrcAdd_8888x8x8neon (
 #ifdef USE_GCC_INLINE_ASM
 
 static void
-fbCompositeSrc_16x16neon (
+neon_CompositeSrc_16_16 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * pSrc,
@@ -1085,7 +1085,7 @@ fbCompositeSrc_16x16neon (
 #endif /* USE_GCC_INLINE_ASM */
 
 static void
-fbCompositeSrc_24x16neon (
+neon_CompositeSrc_24_16 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * pSrc,
@@ -1712,7 +1712,7 @@ static inline void SolidOver565_8pix_neon(
 }
 
 static void
-fbCompositeSolidMask_nx8x0565neon (
+neon_CompositeOver_n_8_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * pSrc,
@@ -1748,7 +1748,7 @@ fbCompositeSolidMask_nx8x0565neon (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			fbCompositeSolidMask_nx8x0565neon(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_n_8_0565(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
 											  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
@@ -1867,7 +1867,7 @@ static inline void PlainOver565_8pix_neon(
 }
 
 static void
-fbCompositeSolid_nx0565neon (
+neon_CompositeOver_n_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * pSrc,
@@ -1902,7 +1902,7 @@ fbCompositeSolid_nx0565neon (
 		// TODO: there must be a more elegant way of doing this.
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			fbCompositeSolid_nx0565neon(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_n_0565(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
 										(x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
@@ -2010,7 +2010,7 @@ static inline void ARGB8_Over565_8pix_neon(
 }
 
 static void
-fbCompositeOver_8888x0565neon (
+neon_CompositeOver_8888_0565 (
 	pixman_implementation_t * impl,
 	pixman_op_t op,
 	pixman_image_t * pSrc,
@@ -2038,7 +2038,7 @@ fbCompositeOver_8888x0565neon (
 		// split the blit, so we can use a fixed-size scanline buffer
 		int x;
 		for(x=0; x < width; x += NEON_SCANLINE_BUFFER_PIXELS) {
-			fbCompositeOver_8888x0565neon(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
+			neon_CompositeOver_8888_0565(impl, op, pSrc, pMask, pDst, xSrc+x, ySrc, xMask+x, yMask, xDst+x, yDst,
 										  (x+NEON_SCANLINE_BUFFER_PIXELS > width) ? width-x : NEON_SCANLINE_BUFFER_PIXELS, height);
 		}
 		return;
@@ -2133,32 +2133,32 @@ fbCompositeOver_8888x0565neon (
 
 static const pixman_fast_path_t arm_neon_fast_path_array[] = 
 {
-    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       fbCompositeSrcAdd_8888x8x8neon,        0 },
-    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       fbCompositeSrcAdd_8000x8000neon,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   fbCompositeSolidMask_nx8x0565neon,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   fbCompositeSolidMask_nx8x0565neon,     0 },
-    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSrc_24x16neon,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSrc_24x16neon,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_24x16neon,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_24x16neon,              0 },
+    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_CompositeAdd_8888_8_8,        0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_CompositeAdd_8000_8000,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_CompositeOver_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_CompositeOver_n_8_0565,     0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeSrc_24_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeSrc_24_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeSrc_24_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeSrc_24_16,              0 },
 #ifdef USE_GCC_INLINE_ASM
-    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSrc_16x16neon,              0 },
-    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_16x16neon,              0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSolid_nx0565neon,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSolid_nx0565neon,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeOver_8888x0565neon,         0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeOver_8888x0565neon,         0 },
+    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeSrc_16_16,              0 },
+    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeSrc_16_16,              0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeOver_n_0565,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeOver_n_0565,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_CompositeOver_8888_0565,         0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_CompositeOver_8888_0565,         0 },
 #endif
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8888neon,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8888neon,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8888neon,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8888neon,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8x8888neon,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8x8888neon,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8x8888neon,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8x8888neon,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8x8888neon,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8x8888neon,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_CompositeOver_n_8_8888,     0 },
     { PIXMAN_OP_NONE },
 };
 
diff --git a/pixman/pixman-arm-simd.c b/pixman/pixman-arm-simd.c
index fc9e1e5..d105bc5 100644
--- a/pixman/pixman-arm-simd.c
+++ b/pixman/pixman-arm-simd.c
@@ -30,7 +30,7 @@
 #include "pixman-private.h"
 
 static void
-fbCompositeSrcAdd_8000x8000arm (
+arm_CompositeAdd_8000_8000 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 				pixman_image_t * pSrc,
@@ -100,7 +100,7 @@ fbCompositeSrcAdd_8000x8000arm (
 }
 
 static void
-fbCompositeSrc_8888x8888arm (
+arm_CompositeSrc_8888_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 			 pixman_image_t * pSrc,
@@ -193,7 +193,7 @@ fbCompositeSrc_8888x8888arm (
 }
 
 static void
-fbCompositeSrc_8888x8x8888arm (
+arm_CompositeSrc_8888_8_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t op,
 			       pixman_image_t * pSrc,
@@ -302,7 +302,7 @@ fbCompositeSrc_8888x8x8888arm (
 }
 
 static void
-fbCompositeSolidMask_nx8x8888arm (
+arm_CompositeOver_n_8_8888 (
                             pixman_implementation_t * impl,
                             pixman_op_t      op,
 			       pixman_image_t * pSrc,
@@ -419,19 +419,19 @@ fbCompositeSolidMask_nx8x8888arm (
 
 static const pixman_fast_path_t arm_simd_fast_path_array[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8888arm,      0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8888arm,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8888arm,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8888arm,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8x8888arm,    NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8x8888arm,	   NEED_SOLID_MASK },
-
-    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fbCompositeSrcAdd_8000x8000arm,   0 },
-
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8x8888arm,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8x8888arm,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8x8888arm,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8x8888arm,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, arm_CompositeSrc_8888_8888,      0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, arm_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, arm_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, arm_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_CompositeSrc_8888_8_8888,    NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
+
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       arm_CompositeAdd_8000_8000,   0 },
+
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, arm_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, arm_CompositeOver_n_8_8888,     0 },
 
     { PIXMAN_OP_NONE },
 };
diff --git a/pixman/pixman-fast-path.c b/pixman/pixman-fast-path.c
index 8669ec2..cf2f39e 100644
--- a/pixman/pixman-fast-path.c
+++ b/pixman/pixman-fast-path.c
@@ -100,7 +100,7 @@ fbIn (uint32_t x, uint8_t y)
  *  opSRCxMASKxDST
  */
 static void
-fbCompositeOver_x888x8x8888 (pixman_implementation_t *imp,
+fast_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 			     pixman_op_t      op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -158,7 +158,7 @@ fbCompositeOver_x888x8x8888 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMaskIn_nx8x8 (pixman_implementation_t *imp,
+fast_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			      pixman_op_t      op,
 			      pixman_image_t    *iSrc,
 			      pixman_image_t    *iMask,
@@ -240,7 +240,7 @@ fbCompositeSolidMaskIn_nx8x8 (pixman_implementation_t *imp,
 
 
 static void
-fbCompositeSrcIn_8x8 (pixman_implementation_t *imp,
+fast_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      pixman_op_t      op,
 		      pixman_image_t  *iSrc,
 		      pixman_image_t  *iMask,
@@ -289,7 +289,7 @@ fbCompositeSrcIn_8x8 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8x8888 (pixman_implementation_t *imp,
+fast_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 			       pixman_op_t      op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -347,7 +347,7 @@ fbCompositeSolidMask_nx8x8888 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8888x8888C (pixman_implementation_t *imp,
+fast_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				   pixman_image_t * pSrc,
 				   pixman_image_t * pMask,
@@ -412,7 +412,7 @@ fbCompositeSolidMask_nx8888x8888C (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8x0888 (pixman_implementation_t *imp,
+fast_CompositeOver_n_8_0888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -475,7 +475,7 @@ fbCompositeSolidMask_nx8x0888 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8x0565 (pixman_implementation_t *imp,
+fast_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -539,7 +539,7 @@ fbCompositeSolidMask_nx8x0565 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8888x0565C (pixman_implementation_t *imp,
+fast_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				   pixman_image_t * pSrc,
 				   pixman_image_t * pMask,
@@ -614,7 +614,7 @@ fbCompositeSolidMask_nx8888x0565C (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888x8888 (pixman_implementation_t *imp,
+fast_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			 pixman_image_t * pSrc,
 			 pixman_image_t * pMask,
@@ -659,7 +659,7 @@ fbCompositeSrc_8888x8888 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888x0888 (pixman_implementation_t *imp,
+fast_CompositeSrc_8888_0888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			 pixman_image_t * pSrc,
 			 pixman_image_t * pMask,
@@ -710,7 +710,7 @@ fbCompositeSrc_8888x0888 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888x0565 (pixman_implementation_t *imp,
+fast_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			 pixman_image_t * pSrc,
 			 pixman_image_t * pMask,
@@ -763,7 +763,7 @@ fbCompositeSrc_8888x0565 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_x888x0565 (pixman_implementation_t *imp,
+fast_CompositeSrc_x888_0565 (pixman_implementation_t *imp,
 			  pixman_op_t op,
                           pixman_image_t * pSrc,
                           pixman_image_t * pMask,
@@ -803,7 +803,7 @@ fbCompositeSrc_x888x0565 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrcAdd_8000x8000 (pixman_implementation_t *imp,
+fast_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 			     pixman_op_t	op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -854,7 +854,7 @@ fbCompositeSrcAdd_8000x8000 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrcAdd_8888x8888 (pixman_implementation_t *imp,
+fast_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 			     pixman_op_t	op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -904,7 +904,7 @@ fbCompositeSrcAdd_8888x8888 (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrcAdd_8888x8x8 (pixman_implementation_t *imp,
+fast_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			    pixman_op_t op,
 			    pixman_image_t * pSrc,
 			    pixman_image_t * pMask,
@@ -961,7 +961,7 @@ fbCompositeSrcAdd_8888x8x8 (pixman_implementation_t *imp,
  */
 
 static void
-fbCompositeSolidFill (pixman_implementation_t *imp,
+fast_CompositeSolidFill (pixman_implementation_t *imp,
 		      pixman_op_t op,
 		      pixman_image_t * pSrc,
 		      pixman_image_t * pMask,
@@ -993,7 +993,7 @@ fbCompositeSolidFill (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888xx888 (pixman_implementation_t *imp,
+fast_CompositeSrc_8888_x888 (pixman_implementation_t *imp,
 			  pixman_op_t op,
 			  pixman_image_t * pSrc,
 			  pixman_image_t * pMask,
@@ -1026,55 +1026,55 @@ fbCompositeSrc_8888xx888 (pixman_implementation_t *imp,
 
 static const pixman_fast_path_t c_fast_paths[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   fbCompositeSolidMask_nx8x0565, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   fbCompositeSolidMask_nx8x0565, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r8g8b8,   fbCompositeSolidMask_nx8x0888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b8g8r8,   fbCompositeSolidMask_nx8x0888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8x8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8x8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8x8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8x8888, 0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8888x8888C, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8888x8888C, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fbCompositeSolidMask_nx8888x0565C, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8888x8888C, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8888x8888C, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fbCompositeSolidMask_nx8888x0565C, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, fbCompositeOver_x888x8x8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, fbCompositeOver_x888x8x8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fbCompositeOver_x888x8x8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, fbCompositeOver_x888x8x8888,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 fbCompositeSrc_8888x0565,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8888,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_8888x0565,	   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, fbCompositeSrcAdd_8888x8888,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, fbCompositeSrcAdd_8888x8888,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fbCompositeSrcAdd_8000x8000,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       fbCompositeSrcAdd_8888x8x8,    0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8b8g8r8, fbCompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8,       fbCompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSolidFill, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeSrc_8888xx888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeSrc_8888xx888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeSrc_8888xx888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeSrc_8888xx888, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSrc_x888x0565, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSrc_x888x0565, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_x888x0565, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_x888x0565, 0 },
-    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fbCompositeSrcIn_8x8,   0 },
-    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 fbCompositeSolidMaskIn_nx8x8, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   fast_CompositeOver_n_8_0565, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   fast_CompositeOver_n_8_0565, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r8g8b8,   fast_CompositeOver_n_8_0888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b8g8r8,   fast_CompositeOver_n_8_0888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fast_CompositeOver_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fast_CompositeOver_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fast_CompositeOver_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fast_CompositeOver_n_8_8888, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fast_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fast_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fast_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, fast_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, fast_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fast_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, fast_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fast_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, fast_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 fast_CompositeSrc_8888_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, fast_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, fast_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fast_CompositeSrc_8888_0565,	   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, fast_CompositeAdd_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, fast_CompositeAdd_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fast_CompositeAdd_8000_8000,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       fast_CompositeAdd_8888_8_8,    0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8r8g8b8, fast_CompositeSolidFill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8r8g8b8, fast_CompositeSolidFill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8b8g8r8, fast_CompositeSolidFill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_x8b8g8r8, fast_CompositeSolidFill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_a8,       fast_CompositeSolidFill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_null,     PIXMAN_r5g6b5,   fast_CompositeSolidFill, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fast_CompositeSrc_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fast_CompositeSrc_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fast_CompositeSrc_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fast_CompositeSrc_8888_x888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fast_CompositeSrc_x888_0565, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_r5g6b5,   fast_CompositeSrc_x888_0565, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fast_CompositeSrc_x888_0565, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_b5g6r5,   fast_CompositeSrc_x888_0565, 0 },
+    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fast_CompositeIn_8_8,   0 },
+    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 fast_CompositeIn_n_8_8, 0 },
     { PIXMAN_OP_NONE },
 };
 
 static void
-fbCompositeSrcScaleNearest (pixman_implementation_t *imp,
+fast_CompositeSrcScaleNearest (pixman_implementation_t *imp,
 			    pixman_op_t     op,
 			    pixman_image_t *pSrc,
 			    pixman_image_t *pMask,
@@ -1209,7 +1209,7 @@ fast_path_composite (pixman_implementation_t *imp,
 					   mask_x, mask_y,
 					   dest_x, dest_y,
 					   width, height,
-					   fbCompositeSrcScaleNearest);
+					   fast_CompositeSrcScaleNearest);
 	    return;
 	}
     }
diff --git a/pixman/pixman-mmx.c b/pixman/pixman-mmx.c
index 1cf2f17..f0dca40 100644
--- a/pixman/pixman-mmx.c
+++ b/pixman/pixman-mmx.c
@@ -919,7 +919,7 @@ mmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 /* ------------------ MMX code paths called from fbpict.c ----------------------- */
 
 static void
-fbCompositeSolid_nx8888mmx (pixman_implementation_t *imp,
+mmx_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			    pixman_op_t op,
 			    pixman_image_t * pSrc,
 			    pixman_image_t * pMask,
@@ -998,7 +998,7 @@ fbCompositeSolid_nx8888mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolid_nx0565mmx (pixman_implementation_t *imp,
+mmx_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			    pixman_op_t op,
 			    pixman_image_t * pSrc,
 			    pixman_image_t * pMask,
@@ -1084,7 +1084,7 @@ fbCompositeSolid_nx0565mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8888x8888Cmmx (pixman_implementation_t *imp,
+mmx_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				      pixman_op_t op,
 				      pixman_image_t * pSrc,
 				      pixman_image_t * pMask,
@@ -1188,7 +1188,7 @@ fbCompositeSolidMask_nx8888x8888Cmmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888x8x8888mmx (pixman_implementation_t *imp,
+mmx_CompositeSrc_8888_8_8888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -1273,7 +1273,7 @@ fbCompositeSrc_8888x8x8888mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_x888xnx8888mmx (pixman_implementation_t *imp,
+mmx_CompositeSrc_x888_n_8888 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -1408,7 +1408,7 @@ fbCompositeSrc_x888xnx8888mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888x8888mmx (pixman_implementation_t *imp,
+mmx_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -1461,7 +1461,7 @@ fbCompositeSrc_8888x8888mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888x0565mmx (pixman_implementation_t *imp,
+mmx_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -1563,7 +1563,7 @@ fbCompositeSrc_8888x0565mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8x8888mmx (pixman_implementation_t *imp,
+mmx_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -1835,7 +1835,7 @@ pixman_fill_mmx (uint32_t *bits,
 }
 
 static void
-fbCompositeSolidMaskSrc_nx8x8888mmx (pixman_implementation_t *imp,
+mmx_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				     pixman_op_t op,
 				     pixman_image_t * pSrc,
 				     pixman_image_t * pMask,
@@ -1967,7 +1967,7 @@ fbCompositeSolidMaskSrc_nx8x8888mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8x0565mmx (pixman_implementation_t *imp,
+mmx_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -2100,7 +2100,7 @@ fbCompositeSolidMask_nx8x0565mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrc_8888RevNPx0565mmx (pixman_implementation_t *imp,
+mmx_CompositeSrc_pixbuf_0565 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -2222,7 +2222,7 @@ fbCompositeSrc_8888RevNPx0565mmx (pixman_implementation_t *imp,
 /* "8888RevNP" is GdkPixbuf's format: ABGR, non premultiplied */
 
 static void
-fbCompositeSrc_8888RevNPx8888mmx (pixman_implementation_t *imp,
+mmx_CompositeSrc_pixbuf_8888 (pixman_implementation_t *imp,
 				  pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -2322,7 +2322,7 @@ fbCompositeSrc_8888RevNPx8888mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSolidMask_nx8888x0565Cmmx (pixman_implementation_t *imp,
+mmx_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				      pixman_op_t op,
 				      pixman_image_t * pSrc,
 				      pixman_image_t * pMask,
@@ -2430,7 +2430,7 @@ fbCompositeSolidMask_nx8888x0565Cmmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeIn_nx8x8mmx (pixman_implementation_t *imp,
+mmx_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			pixman_op_t op,
 			pixman_image_t * pSrc,
 			pixman_image_t * pMask,
@@ -2514,7 +2514,7 @@ fbCompositeIn_nx8x8mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeIn_8x8mmx (pixman_implementation_t *imp,
+mmx_CompositeIn_8_8 (pixman_implementation_t *imp,
 		      pixman_op_t op,
 		      pixman_image_t * pSrc,
 		      pixman_image_t * pMask,
@@ -2579,7 +2579,7 @@ fbCompositeIn_8x8mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrcAdd_8888x8x8mmx (pixman_implementation_t *imp,
+mmx_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 			       pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -2658,7 +2658,7 @@ fbCompositeSrcAdd_8888x8x8mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrcAdd_8000x8000mmx (pixman_implementation_t *imp,
+mmx_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				pixman_op_t op,
 				pixman_image_t * pSrc,
 				pixman_image_t * pMask,
@@ -2731,7 +2731,7 @@ fbCompositeSrcAdd_8000x8000mmx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeSrcAdd_8888x8888mmx (pixman_implementation_t *imp,
+mmx_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				pixman_op_t 	op,
 				pixman_image_t *	pSrc,
 				pixman_image_t *	pMask,
@@ -2930,7 +2930,7 @@ pixman_blt_mmx (uint32_t *src_bits,
 }
 
 static void
-fbCompositeCopyAreammx (pixman_implementation_t *imp,
+mmx_CompositeCopyArea (pixman_implementation_t *imp,
 			pixman_op_t       op,
 			pixman_image_t *	pSrc,
 			pixman_image_t *	pMask,
@@ -2954,7 +2954,7 @@ fbCompositeCopyAreammx (pixman_implementation_t *imp,
 }
 
 static void
-fbCompositeOver_x888x8x8888mmx (pixman_implementation_t *imp,
+mmx_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				pixman_op_t      op,
 				pixman_image_t * pSrc,
 				pixman_image_t * pMask,
@@ -3020,78 +3020,78 @@ fbCompositeOver_x888x8x8888mmx (pixman_implementation_t *imp,
 
 static const pixman_fast_path_t mmx_fast_paths[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   fbCompositeSolidMask_nx8x0565mmx,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   fbCompositeSolidMask_nx8x0565mmx,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8x8888mmx,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8x8888mmx,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8x8888mmx,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8x8888mmx,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8888x8888Cmmx, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8888x8888Cmmx, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fbCompositeSolidMask_nx8888x0565Cmmx, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8888x8888Cmmx, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8888x8888Cmmx, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fbCompositeSolidMask_nx8888x0565Cmmx, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fbCompositeSrc_8888RevNPx0565mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   fbCompositeSrc_8888RevNPx0565mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fbCompositeSrc_8888RevNPx8888mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   fbCompositeSrc_8888RevNPx0565mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fbCompositeSrc_8888RevNPx0565mmx, NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSrc_x888xnx8888mmx,    NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSrc_x888xnx8888mmx,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, fbCompositeSrc_x888xnx8888mmx,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fbCompositeSrc_x888xnx8888mmx,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8x8888mmx,    NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8x8888mmx,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8x8888mmx,	   NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8x8888mmx,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   mmx_CompositeOver_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   mmx_CompositeOver_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, mmx_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, mmx_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, mmx_CompositeSrc_pixbuf_8888, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   mmx_CompositeSrc_pixbuf_0565, NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeSrc_x888_n_8888,    NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeSrc_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_CompositeSrc_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_CompositeSrc_x888_n_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeSrc_8888_8_8888,    NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_a8b8g8r8, mmx_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_CompositeSrc_8888_8_8888,	   NEED_SOLID_MASK },
 #if 0
     /* FIXME: This code is commented out since it's apparently not actually faster than the generic code. */
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, fbCompositeOver_x888x8x8888mmx,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, fbCompositeOver_x888x8x8888mmx,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_x8b8g8r8, fbCompositeOver_x888x8x8888mmx,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_a8r8g8b8, fbCompositeOver_x888x8x8888mmx,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_x8r8g8b8, mmx_CompositeOver_x888_8_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,	PIXMAN_a8r8g8b8, mmx_CompositeOver_x888_8_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_x8b8g8r8, mmx_CompositeOver_x888_8_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8r8g8, PIXMAN_a8,	PIXMAN_a8r8g8b8, mmx_CompositeOver_x888_8_8888,   0 },
 #endif
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,	PIXMAN_a8r8g8b8, fbCompositeSolid_nx8888mmx,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeSolid_nx8888mmx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSolid_nx0565mmx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeCopyAreammx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeCopyAreammx,	   0 },
-
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8888mmx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8888mmx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 fbCompositeSrc_8888x0565mmx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8888mmx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8888mmx,	   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_8888x0565mmx,	   0 },
-
-    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, fbCompositeSrcAdd_8888x8888mmx,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, fbCompositeSrcAdd_8888x8888mmx,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fbCompositeSrcAdd_8000x8000mmx,   0 },
-    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       fbCompositeSrcAdd_8888x8x8mmx,    0 },
-
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSolidMaskSrc_nx8x8888mmx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSolidMaskSrc_nx8x8888mmx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSolidMaskSrc_nx8x8888mmx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSolidMaskSrc_nx8x8888mmx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, fbCompositeCopyAreammx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, fbCompositeCopyAreammx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, fbCompositeCopyAreammx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, fbCompositeCopyAreammx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, fbCompositeCopyAreammx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, fbCompositeCopyAreammx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeCopyAreammx, 0 },
-    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeCopyAreammx, 0 },    
-
-    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fbCompositeIn_8x8mmx,   0 },
-    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 fbCompositeIn_nx8x8mmx, 0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_CompositeOver_n_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, mmx_CompositeOver_n_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   mmx_CompositeOver_n_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, mmx_CompositeCopyArea,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, mmx_CompositeCopyArea,	   0 },
+
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, mmx_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,	PIXMAN_r5g6b5,	 mmx_CompositeSrc_8888_0565,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_CompositeSrc_8888_8888,	   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   mmx_CompositeSrc_8888_0565,	   0 },
+
+    { PIXMAN_OP_ADD, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_CompositeAdd_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_CompositeAdd_8888_8888,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       mmx_CompositeAdd_8000_8000,   0 },
+    { PIXMAN_OP_ADD, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       mmx_CompositeAdd_8888_8_8,    0 },
+
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, mmx_CompositeSrc_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, mmx_CompositeSrc_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, mmx_CompositeSrc_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, mmx_CompositeSrc_n_8_8888, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_a8r8g8b8, mmx_CompositeCopyArea, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_a8b8g8r8, mmx_CompositeCopyArea, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_CompositeCopyArea, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_CompositeCopyArea, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,	PIXMAN_x8r8g8b8, mmx_CompositeCopyArea, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, mmx_CompositeCopyArea, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   mmx_CompositeCopyArea, 0 },
+    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   mmx_CompositeCopyArea, 0 },    
+
+    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       mmx_CompositeIn_8_8,   0 },
+    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,	PIXMAN_a8,	 mmx_CompositeIn_n_8_8, 0 },
 
     { PIXMAN_OP_NONE },
 };
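
The table above keys each specialized routine on an (operator, source format, mask format,
destination format) tuple plus a flags word such as NEED_COMPONENT_ALPHA or NEED_SOLID_MASK.
As a rough sketch only, with simplified stand-in types rather than pixman's real dispatch
code, a request could be matched against such a table like this:

    /* Illustrative sketch only -- the struct layout and the lookup loop are
     * simplified stand-ins, not pixman's actual fast-path dispatch. */
    #include <stddef.h>
    #include <stdint.h>

    typedef void (*composite_func_t) (void);   /* placeholder signature */

    typedef struct {
        int              op;           /* e.g. PIXMAN_OP_OVER */
        uint32_t         src_format;   /* e.g. PIXMAN_a8r8g8b8 */
        uint32_t         mask_format;  /* PIXMAN_null when no mask is needed */
        uint32_t         dest_format;  /* e.g. PIXMAN_r5g6b5 */
        composite_func_t func;
        uint32_t         flags;        /* e.g. NEED_COMPONENT_ALPHA */
    } fast_path_entry_t;

    /* Walk the table until the terminating entry (op == op_none) and return
     * the first routine whose key matches the requested composite. */
    static composite_func_t
    lookup_fast_path (const fast_path_entry_t *table, int op_none,
                      int op, uint32_t src, uint32_t mask, uint32_t dest)
    {
        const fast_path_entry_t *e;

        for (e = table; e->op != op_none; ++e)
        {
            if (e->op == op          &&
                e->src_format == src &&
                e->mask_format == mask &&
                e->dest_format == dest)
            {
                return e->func;
            }
        }

        return NULL; /* fall back to the general path */
    }

The real lookup in pixman also takes the flags into account before selecting a routine;
the sketch only shows the key comparison and the PIXMAN_OP_NONE-style terminator.
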
diff --git a/pixman/pixman-sse2.c b/pixman/pixman-sse2.c
index 9f60b6a..c782ffd 100644
--- a/pixman/pixman-sse2.c
+++ b/pixman/pixman-sse2.c
@@ -2493,11 +2493,11 @@ sse2CombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSolid_nx8888
+ * fast_CompositeOver_n_8888
  */
 
 static void
-fbCompositeSolid_nx8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeOver_n_8888 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			    pixman_image_t * pSrc,
 			    pixman_image_t * pMask,
@@ -2581,10 +2581,10 @@ fbCompositeSolid_nx8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSolid_nx0565
+ * fast_CompositeOver_n_0565
  */
 static void
-fbCompositeSolid_nx0565sse2 (pixman_implementation_t *imp,
+sse2_CompositeOver_n_0565 (pixman_implementation_t *imp,
 			     pixman_op_t op,
 			    pixman_image_t * pSrc,
 			    pixman_image_t * pMask,
@@ -2670,11 +2670,11 @@ fbCompositeSolid_nx0565sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSolidMask_nx8888x8888C
+ * fast_CompositeOver_n_8888_8888_ca
  */
 
 static void
-fbCompositeSolidMask_nx8888x8888Csse2 (pixman_implementation_t *imp,
+sse2_CompositeOver_n_8888_8888_ca (pixman_implementation_t *imp,
 				       pixman_op_t op,
 				      pixman_image_t * pSrc,
 				      pixman_image_t * pMask,
@@ -2804,11 +2804,11 @@ fbCompositeSolidMask_nx8888x8888Csse2 (pixman_implementation_t *imp,
 
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrc_8888x8x8888
+ * fast_CompositeSrc_8888_8_8888
  */
 
 static void
-fbCompositeSrc_8888x8x8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeSrc_8888_8_8888 (pixman_implementation_t *imp,
 				pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -2918,10 +2918,10 @@ fbCompositeSrc_8888x8x8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrc_x888xnx8888
+ * fast_CompositeSrc_x888_n_8888
  */
 static void
-fbCompositeSrc_x888xnx8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeSrc_x888_n_8888 (pixman_implementation_t *imp,
 				pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -3031,10 +3031,10 @@ fbCompositeSrc_x888xnx8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrc_8888x8888
+ * fast_CompositeSrc_8888_8888
  */
 static void
-fbCompositeSrc_8888x8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeSrc_8888_8888 (pixman_implementation_t *imp,
 			      pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -3069,10 +3069,10 @@ fbCompositeSrc_8888x8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrc_8888x0565
+ * fast_CompositeSrc_8888_0565
  */
 static force_inline uint16_t
-fbCompositeSrc_8888x0565pixel (uint32_t src, uint16_t dst)
+fast_CompositeSrc_8888_0565pixel (uint32_t src, uint16_t dst)
 {
     __m64       ms;
 
@@ -3083,7 +3083,7 @@ fbCompositeSrc_8888x0565pixel (uint32_t src, uint16_t dst)
 }
 
 static void
-fbCompositeSrc_8888x0565sse2 (pixman_implementation_t *imp,
+sse2_CompositeSrc_8888_0565 (pixman_implementation_t *imp,
 			      pixman_op_t op,
 			     pixman_image_t * pSrc,
 			     pixman_image_t * pMask,
@@ -3138,7 +3138,7 @@ fbCompositeSrc_8888x0565sse2 (pixman_implementation_t *imp,
             s = *src++;
             d = *dst;
 
-            *dst++ = fbCompositeSrc_8888x0565pixel (s, d);
+            *dst++ = fast_CompositeSrc_8888_0565pixel (s, d);
             w--;
         }
 
@@ -3185,7 +3185,7 @@ fbCompositeSrc_8888x0565sse2 (pixman_implementation_t *imp,
             s = *src++;
             d = *dst;
 
-            *dst++ = fbCompositeSrc_8888x0565pixel (s, d);
+            *dst++ = fast_CompositeSrc_8888_0565pixel (s, d);
         }
     }
 
@@ -3193,11 +3193,11 @@ fbCompositeSrc_8888x0565sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSolidMask_nx8x8888
+ * fast_CompositeOver_n_8_8888
  */
 
 static void
-fbCompositeSolidMask_nx8x8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeOver_n_8_8888 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -3334,7 +3334,7 @@ fbCompositeSolidMask_nx8x8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSolidMask_nx8x8888
+ * fast_CompositeOver_n_8_8888
  */
 
 pixman_bool_t
@@ -3475,7 +3475,7 @@ pixmanFillsse2 (uint32_t *bits,
 }
 
 static void
-fbCompositeSolidMaskSrc_nx8x8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeSrc_n_8_8888 (pixman_implementation_t *imp,
 				      pixman_op_t op,
 				     pixman_image_t * pSrc,
 				     pixman_image_t * pMask,
@@ -3607,11 +3607,11 @@ fbCompositeSolidMaskSrc_nx8x8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSolidMask_nx8x0565
+ * fast_CompositeOver_n_8_0565
  */
 
 static void
-fbCompositeSolidMask_nx8x0565sse2 (pixman_implementation_t *imp,
+sse2_CompositeOver_n_8_0565 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -3757,11 +3757,11 @@ fbCompositeSolidMask_nx8x0565sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrc_8888RevNPx0565
+ * fast_CompositeSrc_pixbuf_0565
  */
 
 static void
-fbCompositeSrc_8888RevNPx0565sse2 (pixman_implementation_t *imp,
+sse2_CompositeSrc_pixbuf_0565 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -3892,11 +3892,11 @@ fbCompositeSrc_8888RevNPx0565sse2 (pixman_implementation_t *imp,
 /* "8888RevNP" is GdkPixbuf's format: ABGR, non premultiplied */
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrc_8888RevNPx8888
+ * fast_CompositeSrc_pixbuf_8888
  */
 
 static void
-fbCompositeSrc_8888RevNPx8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeSrc_pixbuf_8888 (pixman_implementation_t *imp,
 				   pixman_op_t op,
 				  pixman_image_t * pSrc,
 				  pixman_image_t * pMask,
@@ -4007,11 +4007,11 @@ fbCompositeSrc_8888RevNPx8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSolidMask_nx8888x0565C
+ * fast_CompositeOver_n_8888_0565_ca
  */
 
 static void
-fbCompositeSolidMask_nx8888x0565Csse2 (pixman_implementation_t *imp,
+sse2_CompositeOver_n_8888_0565_ca (pixman_implementation_t *imp,
 				       pixman_op_t op,
 				      pixman_image_t * pSrc,
 				      pixman_image_t * pMask,
@@ -4155,11 +4155,11 @@ fbCompositeSolidMask_nx8888x0565Csse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeIn_nx8x8
+ * fast_CompositeIn_n_8_8
  */
 
 static void
-fbCompositeIn_nx8x8sse2 (pixman_implementation_t *imp,
+sse2_CompositeIn_n_8_8 (pixman_implementation_t *imp,
 			 pixman_op_t op,
 			pixman_image_t * pSrc,
 			pixman_image_t * pMask,
@@ -4258,11 +4258,11 @@ fbCompositeIn_nx8x8sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeIn_8x8
+ * fast_CompositeIn_8_8
  */
 
 static void
-fbCompositeIn_8x8sse2 (pixman_implementation_t *imp,
+sse2_CompositeIn_8_8 (pixman_implementation_t *imp,
 		       pixman_op_t op,
 		      pixman_image_t * pSrc,
 		      pixman_image_t * pMask,
@@ -4348,11 +4348,11 @@ fbCompositeIn_8x8sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrcAdd_8888x8x8
+ * fast_CompositeAdd_8888_8_8
  */
 
 static void
-fbCompositeSrcAdd_8888x8x8sse2 (pixman_implementation_t *imp,
+sse2_CompositeAdd_8888_8_8 (pixman_implementation_t *imp,
 				pixman_op_t op,
 			       pixman_image_t * pSrc,
 			       pixman_image_t * pMask,
@@ -4454,11 +4454,11 @@ fbCompositeSrcAdd_8888x8x8sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrcAdd_8000x8000
+ * fast_CompositeAdd_8000_8000
  */
 
 static void
-fbCompositeSrcAdd_8000x8000sse2 (pixman_implementation_t *imp,
+sse2_CompositeAdd_8000_8000 (pixman_implementation_t *imp,
 				 pixman_op_t op,
 				pixman_image_t * pSrc,
 				pixman_image_t * pMask,
@@ -4522,10 +4522,10 @@ fbCompositeSrcAdd_8000x8000sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeSrcAdd_8888x8888
+ * fast_CompositeAdd_8888_8888
  */
 static void
-fbCompositeSrcAdd_8888x8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeAdd_8888_8888 (pixman_implementation_t *imp,
 				 pixman_op_t 	op,
 				pixman_image_t *	pSrc,
 				pixman_image_t *	pMask,
@@ -4560,7 +4560,7 @@ fbCompositeSrcAdd_8888x8888sse2 (pixman_implementation_t *imp,
 }
 
 /* -------------------------------------------------------------------------------------------------
- * fbCompositeCopyAreasse2
+ * sse2_CompositeCopyArea
  */
 
 static pixman_bool_t
@@ -4703,7 +4703,7 @@ pixmanBltsse2 (uint32_t *src_bits,
 }
 
 static void
-fbCompositeCopyAreasse2 (pixman_implementation_t *imp,
+sse2_CompositeCopyArea (pixman_implementation_t *imp,
 			 pixman_op_t       op,
 			pixman_image_t *	pSrc,
 			pixman_image_t *	pMask,
@@ -4729,7 +4729,7 @@ fbCompositeCopyAreasse2 (pixman_implementation_t *imp,
 #if 0
 /* This code is buggy in the MMX version, and the bug was carried over to the SSE2 version */
 void
-fbCompositeOver_x888x8x8888sse2 (pixman_implementation_t *imp,
+sse2_CompositeOver_x888_8_8888 (pixman_implementation_t *imp,
 				 pixman_op_t      op,
 				pixman_image_t * pSrc,
 				pixman_image_t * pMask,
@@ -4872,77 +4872,77 @@ fbCompositeOver_x888x8x8888sse2 (pixman_implementation_t *imp,
 
 static const pixman_fast_path_t sse2_fast_paths[] =
 {
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   fbCompositeSolidMask_nx8x0565sse2,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   fbCompositeSolidMask_nx8x0565sse2,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSolid_nx8888sse2,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeSolid_nx8888sse2,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSolid_nx0565sse2,           0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8888sse2,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8888sse2,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8888sse2,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8888sse2,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeSrc_8888x0565sse2,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeSrc_8888x0565sse2,          0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8x8888sse2,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8x8888sse2,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8x8888sse2,     0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8x8888sse2,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   sse2_CompositeOver_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   sse2_CompositeOver_n_8_0565,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8888,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8888,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_CompositeOver_n_0565,           0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_CompositeSrc_8888_8888,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   sse2_CompositeSrc_8888_0565,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   sse2_CompositeSrc_8888_0565,          0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeOver_n_8_8888,     0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeOver_n_8_8888,     0 },
 #if 0
     /* FIXME: This code is buggy in the MMX version, and the bug was carried over to the SSE2 version */
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeOver_x888x8x8888sse2,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeOver_x888x8x8888sse2,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeOver_x888x8x8888sse2,       0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeOver_x888x8x8888sse2,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeOver_x888_8_8888,       0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeOver_x888_8_8888,       0 },
 #endif
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSrc_x888xnx8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSrc_x888xnx8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSrc_x888xnx8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSrc_x888xnx8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSrc_8888x8x8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSrc_8888x8x8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSrc_8888x8x8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSrc_8888x8x8888sse2,        NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fbCompositeSolidMask_nx8888x8888Csse2, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fbCompositeSolidMask_nx8888x8888Csse2, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fbCompositeSolidMask_nx8888x8888Csse2, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fbCompositeSolidMask_nx8888x8888Csse2, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fbCompositeSolidMask_nx8888x0565Csse2, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fbCompositeSolidMask_nx8888x0565Csse2, NEED_COMPONENT_ALPHA },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, fbCompositeSrc_8888RevNPx8888sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   fbCompositeSrc_8888RevNPx0565sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   fbCompositeSrc_8888RevNPx0565sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   fbCompositeSrc_8888RevNPx0565sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   fbCompositeSrc_8888RevNPx0565sse2,     NEED_PIXBUF },
-    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeCopyAreasse2,               0 },
-    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeCopyAreasse2,               0 },
-
-    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       fbCompositeSrcAdd_8000x8000sse2,       0 },
-    { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeSrcAdd_8888x8888sse2,       0 },
-    { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, fbCompositeSrcAdd_8888x8888sse2,       0 },
-    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       fbCompositeSrcAdd_8888x8x8sse2,        0 },
-
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, fbCompositeSolidMaskSrc_nx8x8888sse2,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, fbCompositeSolidMaskSrc_nx8x8888sse2,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, fbCompositeSolidMaskSrc_nx8x8888sse2,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, fbCompositeSolidMaskSrc_nx8x8888sse2,  0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, fbCompositeCopyAreasse2,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, fbCompositeCopyAreasse2,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeCopyAreasse2,		0 },
-    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, fbCompositeCopyAreasse2,		0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, fbCompositeCopyAreasse2,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, fbCompositeCopyAreasse2,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   fbCompositeCopyAreasse2,               0 },
-    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   fbCompositeCopyAreasse2,               0 },
-
-    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       fbCompositeIn_8x8sse2,                 0 },
-    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       fbCompositeIn_nx8x8sse2,               0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeSrc_x888_n_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeSrc_8888_8_8888,        NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_CompositeOver_n_8888_8888_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_CompositeOver_n_8888_0565_ca, NEED_COMPONENT_ALPHA },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_a8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_x8r8g8b8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_a8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_x8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_x8b8g8r8, sse2_CompositeSrc_pixbuf_8888,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8r8g8b8, PIXMAN_r5g6b5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_a8b8g8r8, PIXMAN_r5g6b5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_b5g6r5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_a8b8g8r8, PIXMAN_b5g6r5,   sse2_CompositeSrc_pixbuf_0565,     NEED_PIXBUF },
+    { PIXMAN_OP_OVER, PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeCopyArea,               0 },
+    { PIXMAN_OP_OVER, PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_CompositeCopyArea,               0 },
+
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       sse2_CompositeAdd_8000_8000,       0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeAdd_8888_8888,       0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_CompositeAdd_8888_8888,       0 },
+    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       sse2_CompositeAdd_8888_8_8,        0 },
+
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8r8g8b8, sse2_CompositeSrc_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8r8g8b8, sse2_CompositeSrc_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8b8g8r8, sse2_CompositeSrc_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_solid,     PIXMAN_a8,       PIXMAN_x8b8g8r8, sse2_CompositeSrc_n_8_8888,  0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_a8r8g8b8, sse2_CompositeCopyArea,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,     PIXMAN_a8b8g8r8, sse2_CompositeCopyArea,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeCopyArea,		0 },
+    { PIXMAN_OP_SRC, PIXMAN_a8b8g8r8,  PIXMAN_null,	PIXMAN_x8b8g8r8, sse2_CompositeCopyArea,		0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8r8g8b8,  PIXMAN_null,     PIXMAN_x8r8g8b8, sse2_CompositeCopyArea,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_x8b8g8r8,  PIXMAN_null,     PIXMAN_x8b8g8r8, sse2_CompositeCopyArea,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_r5g6b5,    PIXMAN_null,     PIXMAN_r5g6b5,   sse2_CompositeCopyArea,               0 },
+    { PIXMAN_OP_SRC, PIXMAN_b5g6r5,    PIXMAN_null,     PIXMAN_b5g6r5,   sse2_CompositeCopyArea,               0 },
+
+    { PIXMAN_OP_IN,  PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       sse2_CompositeIn_8_8,                 0 },
+    { PIXMAN_OP_IN,  PIXMAN_solid,     PIXMAN_a8,       PIXMAN_a8,       sse2_CompositeIn_n_8_8,               0 },
 
     { PIXMAN_OP_NONE },
 };
diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index bc07c75..f6a1afe 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -1482,7 +1482,7 @@ vmxCombineAddC (pixman_implementation_t *imp, pixman_op_t op,
 
 #if 0
 void
-fbCompositeSolid_nx8888vmx (pixman_operator_t	op,
+vmx_CompositeOver_n_8888 (pixman_operator_t	op,
 			    pixman_image_t * pSrc,
 			    pixman_image_t * pMask,
 			    pixman_image_t * pDst,
@@ -1515,7 +1515,7 @@ fbCompositeSolid_nx8888vmx (pixman_operator_t	op,
 }
 
 void
-fbCompositeSolid_nx0565vmx (pixman_operator_t	op,
+vmx_CompositeOver_n_0565 (pixman_operator_t	op,
 			    pixman_image_t * pSrc,
 			    pixman_image_t * pMask,
 			    pixman_image_t * pDst,
commit e987661667ac5c650af1c3a2ba173558ff287e06
Author: Søren Sandmann Pedersen <sandmann at redhat.com>
Date:   Sun Jun 28 20:45:58 2009 -0400

    Rename fetchers.
    
    s/fbFetchPixel/fetch_pixels/g;
    s/fbFetch/fetch_scanline/g;
    s/fbStore/store_scanline/g;
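
    The order of the substitutions matters: fbFetchPixel has to be rewritten
    before the shorter fbFetch pattern, otherwise the pixel fetchers would come
    out as fetch_scanlinePixel_*. A minimal sketch of how the whole rename could
    be applied in one pass (the exact invocation and file list are assumptions,
    not part of the commit):

        # hypothetical batch rename; the file glob is assumed
        perl -pi -e 's/fbFetchPixel/fetch_pixels/g; s/fbFetch/fetch_scanline/g; s/fbStore/store_scanline/g;' pixman/*.c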

diff --git a/pixman/pixman-access.c b/pixman/pixman-access.c
index 060bd2c..eacb19c 100644
--- a/pixman/pixman-access.c
+++ b/pixman/pixman-access.c
@@ -80,7 +80,7 @@
 /*********************************** Fetch ************************************/
 
 static void
-fbFetch_a8r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a8r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -90,7 +90,7 @@ fbFetch_a8r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_x8r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_x8r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -102,7 +102,7 @@ fbFetch_x8r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_a8b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a8b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -117,7 +117,7 @@ fbFetch_a8b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_x8b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_x8b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -133,7 +133,7 @@ fbFetch_x8b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_b8g8r8a8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_b8g8r8a8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -149,7 +149,7 @@ fbFetch_b8g8r8a8 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_b8g8r8x8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_b8g8r8x8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -166,7 +166,7 @@ fbFetch_b8g8r8x8 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 
 /* Expects a uint64_t buffer */
 static void
-fbFetch_a2r10g10b10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
+fetch_scanline_a2r10g10b10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
 		     const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -196,7 +196,7 @@ fbFetch_a2r10g10b10 (pixman_image_t *image, int x, int y, int width, uint32_t *b
 
 /* Expects a uint64_t buffer */
 static void
-fbFetch_x2r10g10b10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
+fetch_scanline_x2r10g10b10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
 		     const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -221,8 +221,8 @@ fbFetch_x2r10g10b10 (pixman_image_t *image, int x, int y, int width, uint32_t *b
 
 /* Expects a uint64_t buffer */
 static void
-fbFetch_a2b10g10r10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
-		     const uint32_t *mask, uint32_t mask_bits)
+fetch_scanline_a2b10g10r10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
+			    const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
     const uint32_t *pixel = bits + x;
@@ -251,7 +251,7 @@ fbFetch_a2b10g10r10 (pixman_image_t *image, int x, int y, int width, uint32_t *b
 
 /* Expects a uint64_t buffer */
 static void
-fbFetch_x2b10g10r10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
+fetch_scanline_x2b10g10r10 (pixman_image_t *image, int x, int y, int width, uint32_t *b,
 		     const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -274,7 +274,7 @@ fbFetch_x2b10g10r10 (pixman_image_t *image, int x, int y, int width, uint32_t *b
 }
 
 static void
-fbFetch_r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -296,7 +296,7 @@ fbFetch_r8g8b8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -318,7 +318,7 @@ fbFetch_b8g8r8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_r5g6b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_r5g6b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -336,7 +336,7 @@ fbFetch_r5g6b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_b5g6r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_b5g6r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -353,7 +353,7 @@ fbFetch_b5g6r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_a1r5g5b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a1r5g5b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b, a;
@@ -372,7 +372,7 @@ fbFetch_a1r5g5b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_x1r5g5b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_x1r5g5b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -390,7 +390,7 @@ fbFetch_x1r5g5b5 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_a1b5g5r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a1b5g5r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b, a;
@@ -409,7 +409,7 @@ fbFetch_a1b5g5r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_x1b5g5r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_x1b5g5r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -427,7 +427,7 @@ fbFetch_x1b5g5r5 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_a4r4g4b4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a4r4g4b4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b, a;
@@ -446,7 +446,7 @@ fbFetch_a4r4g4b4 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_x4r4g4b4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_x4r4g4b4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -464,7 +464,7 @@ fbFetch_x4r4g4b4 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_a4b4g4r4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a4b4g4r4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b, a;
@@ -483,7 +483,7 @@ fbFetch_a4b4g4r4 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_x4b4g4r4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_x4b4g4r4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -501,7 +501,7 @@ fbFetch_x4b4g4r4 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_a8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 	    const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -513,7 +513,7 @@ fbFetch_a8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 }
 
 static void
-fbFetch_r3g3b2 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_r3g3b2 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -534,7 +534,7 @@ fbFetch_r3g3b2 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_b2g3r3 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_b2g3r3 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -557,7 +557,7 @@ fbFetch_b2g3r3 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_a2r2g2b2 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a2r2g2b2 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t   a,r,g,b;
@@ -576,7 +576,7 @@ fbFetch_a2r2g2b2 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_a2b2g2r2 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a2b2g2r2 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t   a,r,g,b;
@@ -595,7 +595,7 @@ fbFetch_a2b2g2r2 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_c8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_c8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 	    const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -609,7 +609,7 @@ fbFetch_c8 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 }
 
 static void
-fbFetch_x4a4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_x4a4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 	      const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -629,7 +629,7 @@ fbFetch_x4a4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 #endif
 
 static void
-fbFetch_a4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 	    const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -643,7 +643,7 @@ fbFetch_a4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 }
 
 static void
-fbFetch_r1g2b1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_r1g2b1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -660,7 +660,7 @@ fbFetch_r1g2b1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_b1g2r1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_b1g2r1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  r,g,b;
@@ -677,7 +677,7 @@ fbFetch_b1g2r1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer
 }
 
 static void
-fbFetch_a1r1g1b1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a1r1g1b1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  a,r,g,b;
@@ -695,7 +695,7 @@ fbFetch_a1r1g1b1 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_a1b1g1r1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a1b1g1r1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		  const uint32_t *mask, uint32_t mask_bits)
 {
     uint32_t  a,r,g,b;
@@ -713,7 +713,7 @@ fbFetch_a1b1g1r1 (pixman_image_t *image, int x, int y, int width, uint32_t *buff
 }
 
 static void
-fbFetch_c4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_c4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 	    const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -728,7 +728,7 @@ fbFetch_c4 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 
 
 static void
-fbFetch_a1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_a1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 	    const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -750,7 +750,7 @@ fbFetch_a1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 }
 
 static void
-fbFetch_g1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline_g1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 	    const uint32_t *mask, uint32_t mask_bits)
 {
     const uint32_t *bits = image->bits.bits + y*image->bits.rowstride;
@@ -770,7 +770,7 @@ fbFetch_g1 (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 }
 
 static void
-fbFetch_yuy2 (pixman_image_t *image, int x, int line, int width, uint32_t *buffer,
+fetch_scanline_yuy2 (pixman_image_t *image, int x, int line, int width, uint32_t *buffer,
 	      const uint32_t *mask, uint32_t mask_bits)
 {
     int16_t y, u, v;
@@ -800,7 +800,7 @@ fbFetch_yuy2 (pixman_image_t *image, int x, int line, int width, uint32_t *buffe
 }
 
 static void
-fbFetch_yv12 (pixman_image_t *image, int x, int line, int width, uint32_t *buffer,
+fetch_scanline_yv12 (pixman_image_t *image, int x, int line, int width, uint32_t *buffer,
 	      const uint32_t *mask, uint32_t mask_bits)
 {
     YV12_SETUP(image);
@@ -835,7 +835,7 @@ fbFetch_yv12 (pixman_image_t *image, int x, int line, int width, uint32_t *buffe
 
 /* Despite the type, expects a uint64_t buffer */
 static void
-fbFetchPixel_a2r10g10b10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
+fetch_pixels_a2r10g10b10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 {
     int i;
     uint64_t *buffer = (uint64_t *)b;
@@ -874,7 +874,7 @@ fbFetchPixel_a2r10g10b10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 
 /* Despite the type, this function expects a uint64_t buffer */
 static void
-fbFetchPixel_x2r10g10b10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
+fetch_pixels_x2r10g10b10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 {
     uint64_t *buffer = (uint64_t *)b;
     int i;
@@ -907,7 +907,7 @@ fbFetchPixel_x2r10g10b10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 
 /* Despite the type, expects a uint64_t buffer */
 static void
-fbFetchPixel_a2b10g10r10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
+fetch_pixels_a2b10g10r10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 {
     int i;
     uint64_t *buffer = (uint64_t *)b;
@@ -946,7 +946,7 @@ fbFetchPixel_a2b10g10r10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 
 /* Despite the type, this function expects a uint64_t buffer */
 static void
-fbFetchPixel_x2b10g10r10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
+fetch_pixels_x2b10g10r10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 {
     uint64_t *buffer = (uint64_t *)b;
     int i;
@@ -978,7 +978,7 @@ fbFetchPixel_x2b10g10r10_64 (bits_image_t *pict, uint32_t *b, int n_pixels)
 }
 
 static void
-fbFetchPixel_a8r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a8r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
     
@@ -1000,7 +1000,7 @@ fbFetchPixel_a8r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_x8r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_x8r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
     
@@ -1022,7 +1022,7 @@ fbFetchPixel_x8r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a8b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a8b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
     
@@ -1049,7 +1049,7 @@ fbFetchPixel_a8b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_x8b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_x8b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
     
@@ -1076,7 +1076,7 @@ fbFetchPixel_x8b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_b8g8r8a8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_b8g8r8a8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1103,7 +1103,7 @@ fbFetchPixel_b8g8r8a8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_b8g8r8x8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_b8g8r8x8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1130,7 +1130,7 @@ fbFetchPixel_b8g8r8x8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1163,7 +1163,7 @@ fbFetchPixel_r8g8b8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1196,7 +1196,7 @@ fbFetchPixel_b8g8r8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_r5g6b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_r5g6b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1224,7 +1224,7 @@ fbFetchPixel_r5g6b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_b5g6r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_b5g6r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1252,7 +1252,7 @@ fbFetchPixel_b5g6r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a1r5g5b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a1r5g5b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1281,7 +1281,7 @@ fbFetchPixel_a1r5g5b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_x1r5g5b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_x1r5g5b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1309,7 +1309,7 @@ fbFetchPixel_x1r5g5b5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a1b5g5r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a1b5g5r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1338,7 +1338,7 @@ fbFetchPixel_a1b5g5r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_x1b5g5r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_x1b5g5r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1366,7 +1366,7 @@ fbFetchPixel_x1b5g5r5 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a4r4g4b4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a4r4g4b4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1395,7 +1395,7 @@ fbFetchPixel_a4r4g4b4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_x4r4g4b4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_x4r4g4b4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1423,7 +1423,7 @@ fbFetchPixel_x4r4g4b4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a4b4g4r4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a4b4g4r4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1452,7 +1452,7 @@ fbFetchPixel_a4b4g4r4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_x4b4g4r4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_x4b4g4r4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1480,7 +1480,7 @@ fbFetchPixel_x4b4g4r4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1504,7 +1504,7 @@ fbFetchPixel_a8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_r3g3b2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_r3g3b2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1535,7 +1535,7 @@ fbFetchPixel_r3g3b2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_b2g3r3 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_b2g3r3 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1568,7 +1568,7 @@ fbFetchPixel_b2g3r3 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a2r2g2b2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a2r2g2b2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1597,7 +1597,7 @@ fbFetchPixel_a2r2g2b2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a2b2g2r2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a2b2g2r2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1626,7 +1626,7 @@ fbFetchPixel_a2b2g2r2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_c8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_c8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1650,7 +1650,7 @@ fbFetchPixel_c8 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_x4a4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_x4a4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1674,7 +1674,7 @@ fbFetchPixel_x4a4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1699,7 +1699,7 @@ fbFetchPixel_a4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_r1g2b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_r1g2b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1727,7 +1727,7 @@ fbFetchPixel_r1g2b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_b1g2r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_b1g2r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1755,7 +1755,7 @@ fbFetchPixel_b1g2r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a1r1g1b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a1r1g1b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1784,7 +1784,7 @@ fbFetchPixel_a1r1g1b1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_a1b1g1r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a1b1g1r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1813,7 +1813,7 @@ fbFetchPixel_a1b1g1r1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_c4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_c4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1839,7 +1839,7 @@ fbFetchPixel_c4 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 
 
 static void
-fbFetchPixel_a1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_a1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1872,7 +1872,7 @@ fbFetchPixel_a1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_g1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_g1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1903,7 +1903,7 @@ fbFetchPixel_g1 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_yuy2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_yuy2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1943,7 +1943,7 @@ fbFetchPixel_yuy2 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 }
 
 static void
-fbFetchPixel_yv12 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels_yv12 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     int i;
 
@@ -1985,7 +1985,7 @@ fbFetchPixel_yv12 (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 #define Split(v)	uint32_t	r = ((v) >> 16) & 0xff, g = ((v) >> 8) & 0xff, b = (v) & 0xff
 
 static void
-fbStore_a2r10g10b10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
+store_scanline_a2r10g10b10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
 {
     uint32_t *bits = image->bits + image->rowstride * y;
     uint32_t *pixel = bits + x;
@@ -2002,7 +2002,7 @@ fbStore_a2r10g10b10 (bits_image_t *image, int x, int y, int width, const uint32_
 }
 
 static void
-fbStore_x2r10g10b10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
+store_scanline_x2r10g10b10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
 {
     uint32_t *bits = image->bits + image->rowstride * y;
     uint64_t *values = (uint64_t *)v;
@@ -2018,7 +2018,7 @@ fbStore_x2r10g10b10 (bits_image_t *image, int x, int y, int width, const uint32_
 }
 
 static void
-fbStore_a2b10g10r10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
+store_scanline_a2b10g10r10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
 {
     uint32_t *bits = image->bits + image->rowstride * y;
     uint32_t *pixel = bits + x;
@@ -2035,7 +2035,7 @@ fbStore_a2b10g10r10 (bits_image_t *image, int x, int y, int width, const uint32_
 }
 
 static void
-fbStore_x2b10g10r10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
+store_scanline_x2b10g10r10 (bits_image_t *image, int x, int y, int width, const uint32_t *v)
 {
     uint32_t *bits = image->bits + image->rowstride * y;
     uint64_t *values = (uint64_t *)v;
@@ -2051,7 +2051,7 @@ fbStore_x2b10g10r10 (bits_image_t *image, int x, int y, int width, const uint32_
 }
 
 static void
-fbStore_a8r8g8b8 (bits_image_t *image,
+store_scanline_a8r8g8b8 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2061,7 +2061,7 @@ fbStore_a8r8g8b8 (bits_image_t *image,
 }
 
 static void
-fbStore_x8r8g8b8 (bits_image_t *image,
+store_scanline_x8r8g8b8 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2074,7 +2074,7 @@ fbStore_x8r8g8b8 (bits_image_t *image,
 }
 
 static void
-fbStore_a8b8g8r8 (bits_image_t *image,
+store_scanline_a8b8g8r8 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2087,7 +2087,7 @@ fbStore_a8b8g8r8 (bits_image_t *image,
 }
 
 static void
-fbStore_x8b8g8r8 (bits_image_t *image,
+store_scanline_x8b8g8r8 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2100,7 +2100,7 @@ fbStore_x8b8g8r8 (bits_image_t *image,
 }
 
 static void
-fbStore_b8g8r8a8 (bits_image_t *image,
+store_scanline_b8g8r8a8 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2117,7 +2117,7 @@ fbStore_b8g8r8a8 (bits_image_t *image,
 }
 
 static void
-fbStore_b8g8r8x8 (bits_image_t *image,
+store_scanline_b8g8r8x8 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2133,7 +2133,7 @@ fbStore_b8g8r8x8 (bits_image_t *image,
 }
 
 static void
-fbStore_r8g8b8 (bits_image_t *image,
+store_scanline_r8g8b8 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2157,7 +2157,7 @@ fbStore_r8g8b8 (bits_image_t *image,
 }
 
 static void
-fbStore_b8g8r8 (bits_image_t *image,
+store_scanline_b8g8r8 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2181,7 +2181,7 @@ fbStore_b8g8r8 (bits_image_t *image,
 }
 
 static void
-fbStore_r5g6b5 (bits_image_t *image,
+store_scanline_r5g6b5 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2198,7 +2198,7 @@ fbStore_r5g6b5 (bits_image_t *image,
 }
 
 static void
-fbStore_b5g6r5 (bits_image_t *image,
+store_scanline_b5g6r5 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2215,7 +2215,7 @@ fbStore_b5g6r5 (bits_image_t *image,
 }
 
 static void
-fbStore_a1r5g5b5 (bits_image_t *image,
+store_scanline_a1r5g5b5 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2233,7 +2233,7 @@ fbStore_a1r5g5b5 (bits_image_t *image,
 }
 
 static void
-fbStore_x1r5g5b5 (bits_image_t *image,
+store_scanline_x1r5g5b5 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2250,7 +2250,7 @@ fbStore_x1r5g5b5 (bits_image_t *image,
 }
 
 static void
-fbStore_a1b5g5r5 (bits_image_t *image,
+store_scanline_a1b5g5r5 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2268,7 +2268,7 @@ fbStore_a1b5g5r5 (bits_image_t *image,
 }
 
 static void
-fbStore_x1b5g5r5 (bits_image_t *image,
+store_scanline_x1b5g5r5 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2285,7 +2285,7 @@ fbStore_x1b5g5r5 (bits_image_t *image,
 }
 
 static void
-fbStore_a4r4g4b4 (bits_image_t *image,
+store_scanline_a4r4g4b4 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2303,7 +2303,7 @@ fbStore_a4r4g4b4 (bits_image_t *image,
 }
 
 static void
-fbStore_x4r4g4b4 (bits_image_t *image,
+store_scanline_x4r4g4b4 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2320,7 +2320,7 @@ fbStore_x4r4g4b4 (bits_image_t *image,
 }
 
 static void
-fbStore_a4b4g4r4 (bits_image_t *image,
+store_scanline_a4b4g4r4 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2338,7 +2338,7 @@ fbStore_a4b4g4r4 (bits_image_t *image,
 }
 
 static void
-fbStore_x4b4g4r4 (bits_image_t *image,
+store_scanline_x4b4g4r4 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2355,7 +2355,7 @@ fbStore_x4b4g4r4 (bits_image_t *image,
 }
 
 static void
-fbStore_a8 (bits_image_t *image,
+store_scanline_a8 (bits_image_t *image,
 	    int x, int y, int width,
 	    const uint32_t *values)
 {
@@ -2369,7 +2369,7 @@ fbStore_a8 (bits_image_t *image,
 }
 
 static void
-fbStore_r3g3b2 (bits_image_t *image,
+store_scanline_r3g3b2 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2387,7 +2387,7 @@ fbStore_r3g3b2 (bits_image_t *image,
 }
 
 static void
-fbStore_b2g3r3 (bits_image_t *image,
+store_scanline_b2g3r3 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2405,7 +2405,7 @@ fbStore_b2g3r3 (bits_image_t *image,
 }
 
 static void
-fbStore_a2r2g2b2 (bits_image_t *image,
+store_scanline_a2r2g2b2 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2423,7 +2423,7 @@ fbStore_a2r2g2b2 (bits_image_t *image,
 }
 
 static void
-fbStore_a2b2g2r2 (bits_image_t *image,
+store_scanline_a2b2g2r2 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2441,7 +2441,7 @@ fbStore_a2b2g2r2 (bits_image_t *image,
 }
 
 static void
-fbStore_c8 (bits_image_t *image,
+store_scanline_c8 (bits_image_t *image,
 	    int x, int y, int width,
 	    const uint32_t *values)
 {
@@ -2456,7 +2456,7 @@ fbStore_c8 (bits_image_t *image,
 }
 
 static void
-fbStore_x4a4 (bits_image_t *image,
+store_scanline_x4a4 (bits_image_t *image,
 	      int x, int y, int width,
 	      const uint32_t *values)
 {
@@ -2481,7 +2481,7 @@ fbStore_x4a4 (bits_image_t *image,
 #endif
 
 static void
-fbStore_a4 (bits_image_t *image,
+store_scanline_a4 (bits_image_t *image,
 	    int x, int y, int width,
 	    const uint32_t *values)
 {
@@ -2494,7 +2494,7 @@ fbStore_a4 (bits_image_t *image,
 }
 
 static void
-fbStore_r1g2b1 (bits_image_t *image,
+store_scanline_r1g2b1 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2513,7 +2513,7 @@ fbStore_r1g2b1 (bits_image_t *image,
 }
 
 static void
-fbStore_b1g2r1 (bits_image_t *image,
+store_scanline_b1g2r1 (bits_image_t *image,
 		int x, int y, int width,
 		const uint32_t *values)
 {
@@ -2532,7 +2532,7 @@ fbStore_b1g2r1 (bits_image_t *image,
 }
 
 static void
-fbStore_a1r1g1b1 (bits_image_t *image,
+store_scanline_a1r1g1b1 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2551,7 +2551,7 @@ fbStore_a1r1g1b1 (bits_image_t *image,
 }
 
 static void
-fbStore_a1b1g1r1 (bits_image_t *image,
+store_scanline_a1b1g1r1 (bits_image_t *image,
 		  int x, int y, int width,
 		  const uint32_t *values)
 {
@@ -2570,7 +2570,7 @@ fbStore_a1b1g1r1 (bits_image_t *image,
 }
 
 static void
-fbStore_c4 (bits_image_t *image,
+store_scanline_c4 (bits_image_t *image,
 	    int x, int y, int width,
 	    const uint32_t *values)
 {
@@ -2587,7 +2587,7 @@ fbStore_c4 (bits_image_t *image,
 }
 
 static void
-fbStore_a1 (bits_image_t *image,
+store_scanline_a1 (bits_image_t *image,
 	    int x, int y, int width,
 	    const uint32_t *values)
 {
@@ -2609,7 +2609,7 @@ fbStore_a1 (bits_image_t *image,
 }
 
 static void
-fbStore_g1 (bits_image_t *image,
+store_scanline_g1 (bits_image_t *image,
 	    int x, int y, int width,
 	    const uint32_t *values)
 {
@@ -2636,7 +2636,7 @@ fbStore_g1 (bits_image_t *image,
  * store proc. Despite the type, this function expects a uint64_t buffer.
  */
 static void
-fbStore64_generic (bits_image_t *image, int x, int y, int width, const uint32_t *values)
+store_scanline64_generic (bits_image_t *image, int x, int y, int width, const uint32_t *values)
 {
     uint32_t *argb8Pixels;
 
@@ -2658,7 +2658,7 @@ fbStore64_generic (bits_image_t *image, int x, int y, int width, const uint32_t
 
 /* Despite the type, this function expects both buffer and mask to be uint64_t */
 static void
-fbFetch64_generic (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
+fetch_scanline64_generic (pixman_image_t *image, int x, int y, int width, uint32_t *buffer,
 		   const uint32_t *mask, uint32_t mask_bits)
 {
     /* Fetch the pixels into the first half of buffer and then expand them in
@@ -2671,7 +2671,7 @@ fbFetch64_generic (pixman_image_t *image, int x, int y, int width, uint32_t *buf
 
 /* Despite the type, this function expects a uint64_t *buffer */
 static void
-fbFetchPixel64_generic (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels64_generic (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     pict->fetch_pixels_raw_32 (pict, buffer, n_pixels);
     
@@ -2685,7 +2685,7 @@ fbFetchPixel64_generic (bits_image_t *pict, uint32_t *buffer, int n_pixels)
  * WARNING: This function loses precision!
  */
 static void
-fbFetchPixel32_generic_lossy (bits_image_t *pict, uint32_t *buffer, int n_pixels)
+fetch_pixels32_generic_lossy (bits_image_t *pict, uint32_t *buffer, int n_pixels)
 {
     /* Since buffer contains n_pixels coordinate pairs, it also has enough room for
      * n_pixels 64 bit pixels.
@@ -2709,9 +2709,9 @@ typedef struct
 #define FORMAT_INFO(format)						\
     {									\
 	PIXMAN_##format,						\
-	    fbFetch_##format, fbFetch64_generic,			\
-	    fbFetchPixel_##format, fbFetchPixel64_generic,		\
-	    fbStore_##format, fbStore64_generic				\
+	    fetch_scanline_##format, fetch_scanline64_generic,			\
+	    fetch_pixels_##format, fetch_pixels64_generic,		\
+	    store_scanline_##format, store_scanline64_generic				\
     }
 
 static const format_info_t accessors[] =
@@ -2750,17 +2750,17 @@ static const format_info_t accessors[] =
     
     FORMAT_INFO (c8),
 
-#define fbFetch_g8 fbFetch_c8
-#define fbFetchPixel_g8 fbFetchPixel_c8
-#define fbStore_g8 fbStore_c8
+#define fetch_scanline_g8 fetch_scanline_c8
+#define fetch_pixels_g8 fetch_pixels_c8
+#define store_scanline_g8 store_scanline_c8
     FORMAT_INFO (g8),
-#define fbFetch_x4c4 fbFetch_c8
-#define fbFetchPixel_x4c4 fbFetchPixel_c8
-#define fbStore_x4c4 fbStore_c8
+#define fetch_scanline_x4c4 fetch_scanline_c8
+#define fetch_pixels_x4c4 fetch_pixels_c8
+#define store_scanline_x4c4 store_scanline_c8
     FORMAT_INFO (x4c4),
-#define fbFetch_x4g4 fbFetch_c8
-#define fbFetchPixel_x4g4 fbFetchPixel_c8
-#define fbStore_x4g4 fbStore_c8
+#define fetch_scanline_x4g4 fetch_scanline_c8
+#define fetch_pixels_x4g4 fetch_pixels_c8
+#define store_scanline_x4g4 store_scanline_c8
     FORMAT_INFO (x4g4),
     
     FORMAT_INFO (x4a4),
@@ -2773,9 +2773,9 @@ static const format_info_t accessors[] =
     FORMAT_INFO (a1b1g1r1),
     
     FORMAT_INFO (c4),
-#define fbFetch_g4 fbFetch_c4
-#define fbFetchPixel_g4 fbFetchPixel_c4
-#define fbStore_g4 fbStore_c4
+#define fetch_scanline_g4 fetch_scanline_c4
+#define fetch_pixels_g4 fetch_pixels_c4
+#define store_scanline_g4 store_scanline_c4
     FORMAT_INFO (g4),
     
 /* 1bpp formats */
@@ -2785,34 +2785,34 @@ static const format_info_t accessors[] =
 /* Wide formats */
 
     { PIXMAN_a2r10g10b10,
-      NULL, fbFetch_a2r10g10b10,
-      fbFetchPixel32_generic_lossy, fbFetchPixel_a2r10g10b10_64,
-      NULL, fbStore_a2r10g10b10 },
+      NULL, fetch_scanline_a2r10g10b10,
+      fetch_pixels32_generic_lossy, fetch_pixels_a2r10g10b10_64,
+      NULL, store_scanline_a2r10g10b10 },
 
     { PIXMAN_x2r10g10b10,
-      NULL, fbFetch_x2r10g10b10,
-      fbFetchPixel32_generic_lossy, fbFetchPixel_x2r10g10b10_64,
-      NULL, fbStore_x2r10g10b10 },
+      NULL, fetch_scanline_x2r10g10b10,
+      fetch_pixels32_generic_lossy, fetch_pixels_x2r10g10b10_64,
+      NULL, store_scanline_x2r10g10b10 },
 
     { PIXMAN_a2b10g10r10,
-      NULL, fbFetch_a2b10g10r10,
-      fbFetchPixel32_generic_lossy, fbFetchPixel_a2b10g10r10_64,
-      NULL, fbStore_a2b10g10r10 },
+      NULL, fetch_scanline_a2b10g10r10,
+      fetch_pixels32_generic_lossy, fetch_pixels_a2b10g10r10_64,
+      NULL, store_scanline_a2b10g10r10 },
 
     { PIXMAN_x2b10g10r10,
-      NULL, fbFetch_x2b10g10r10,
-      fbFetchPixel32_generic_lossy, fbFetchPixel_x2b10g10r10_64,
-      NULL, fbStore_x2b10g10r10 },
+      NULL, fetch_scanline_x2b10g10r10,
+      fetch_pixels32_generic_lossy, fetch_pixels_x2b10g10r10_64,
+      NULL, store_scanline_x2b10g10r10 },
 
 /* YUV formats */
     { PIXMAN_yuy2,
-      fbFetch_yuy2, fbFetch64_generic,
-      fbFetchPixel_yuy2, fbFetchPixel64_generic,
+      fetch_scanline_yuy2, fetch_scanline64_generic,
+      fetch_pixels_yuy2, fetch_pixels64_generic,
       NULL, NULL },
 
     { PIXMAN_yv12,
-      fbFetch_yv12, fbFetch64_generic,
-      fbFetchPixel_yv12, fbFetchPixel64_generic,
+      fetch_scanline_yv12, fetch_scanline64_generic,
+      fetch_pixels_yv12, fetch_pixels64_generic,
       NULL, NULL },
     
     { PIXMAN_null },
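
[Editor's note on the FORMAT_INFO hunk above: the rename only works because the
macro builds each accessor name by token pasting, so every per-format function
had to move to the same fetch_pixels_*/store_scanline_* scheme at once. Below is
a minimal, self-contained sketch of that pattern; it is not pixman code -- the
two-field struct, the accessor_fn_t type, and the a8-only table are illustrative
assumptions, not the real format_info_t layout.]

#include <stdio.h>

typedef void (*accessor_fn_t) (void);

/* Stand-ins for the per-format accessors; real pixman versions take
 * image/buffer arguments.
 */
static void fetch_pixels_a8 (void)   { puts ("fetch_pixels_a8"); }
static void store_scanline_a8 (void) { puts ("store_scanline_a8"); }

typedef struct
{
    accessor_fn_t fetch_pixels;
    accessor_fn_t store_scanline;
} format_info_t;

/* One macro argument selects the whole accessor family by name via
 * token pasting, mirroring FORMAT_INFO in pixman-access.c.
 */
#define FORMAT_INFO(format)					\
    { fetch_pixels_##format, store_scanline_##format }

static const format_info_t accessors[] =
{
    FORMAT_INFO (a8),
};

int
main (void)
{
    /* Dispatch through the table, as the generic code paths do. */
    accessors[0].fetch_pixels ();
    accessors[0].store_scanline ();
    return 0;
}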

