pixman: Branch 'master' - 20 commits

GitLab Mirror gitlab-mirror at kemper.freedesktop.org
Tue Dec 17 17:51:16 UTC 2024


 .clang-format       |   27 
 pixman/pixman-vmx.c | 1591 +++++++++++++++++++++++-----------------------------
 2 files changed, 740 insertions(+), 878 deletions(-)

New commits:
commit aa6d7161bdd6f0af1ac49d8f90c37da8f76ef0c0
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:33 2024 -0500

    vmx: Run clang-format
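    (The change below is mechanical: the commit adds the 27-line top-level
    .clang-format file counted in the diffstat above, not shown in this
    excerpt, and reruns the tool over pixman-vmx.c. A typical invocation
    for this kind of whole-file cleanup would be
    "clang-format -i pixman/pixman-vmx.c"; the exact command used is an
    assumption here, only its result is recorded in the diff.)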

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 6a25846..399cfcc 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -28,13 +28,13 @@
 #ifdef HAVE_CONFIG_H
 #include <pixman-config.h>
 #endif
-#include "pixman-private.h"
 #include "pixman-combine32.h"
 #include "pixman-inlines.h"
+#include "pixman-private.h"
 #include <altivec.h>
 
 static const vector unsigned char vzero = (const vector unsigned char){0};
-static vector unsigned char mask_ff000000;
+static vector unsigned char       mask_ff000000;
 
 static force_inline vector unsigned char
 splat_alpha (vector unsigned char pix)
@@ -177,41 +177,38 @@ in_over (vector unsigned char src,
 
 #ifdef WORDS_BIGENDIAN
 
-#define COMPUTE_SHIFT_MASK(source)					\
-    source ## _mask = vec_lvsl (0, source);
-
-#define COMPUTE_SHIFT_MASKS(dest, source)				\
-    source ## _mask = vec_lvsl (0, source);
-
-#define COMPUTE_SHIFT_MASKC(dest, source, mask)				\
-    mask ## _mask = vec_lvsl (0, mask);					\
-    source ## _mask = vec_lvsl (0, source);
-
-#define LOAD_VECTOR(source)				  \
-do							  \
-{							  \
-    vector unsigned char tmp1, tmp2;			  \
-    tmp1 = (typeof(tmp1))vec_ld (0, source);		  \
-    tmp2 = (typeof(tmp2))vec_ld (15, source);		  \
-    v ## source = (typeof(v ## source)) 		  \
-	vec_perm (tmp1, tmp2, source ## _mask);		  \
-} while (0)
-
-#define LOAD_VECTORS(dest, source)			  \
-do							  \
-{							  \
-    LOAD_VECTOR(source);				  \
-    v ## dest = (typeof(v ## dest))vec_ld (0, dest);	  \
-} while (0)
-
-#define LOAD_VECTORSC(dest, source, mask)		  \
-do							  \
-{							  \
-    LOAD_VECTORS(dest, source); 			  \
-    LOAD_VECTOR(mask);					  \
-} while (0)
-
-#define DECLARE_SRC_MASK_VAR vector unsigned char src_mask
+#define COMPUTE_SHIFT_MASK(source) source##_mask = vec_lvsl (0, source);
+
+#define COMPUTE_SHIFT_MASKS(dest, source) source##_mask = vec_lvsl (0, source);
+
+#define COMPUTE_SHIFT_MASKC(dest, source, mask)                                \
+    mask##_mask   = vec_lvsl (0, mask);                                        \
+    source##_mask = vec_lvsl (0, source);
+
+#define LOAD_VECTOR(source)                                                    \
+    do                                                                         \
+    {                                                                          \
+	vector unsigned char tmp1, tmp2;                                       \
+	tmp1      = (typeof (tmp1))vec_ld (0, source);                         \
+	tmp2      = (typeof (tmp2))vec_ld (15, source);                        \
+	v##source = (typeof (v##source))vec_perm (tmp1, tmp2, source##_mask);  \
+    } while (0)
+
+#define LOAD_VECTORS(dest, source)                                             \
+    do                                                                         \
+    {                                                                          \
+	LOAD_VECTOR (source);                                                  \
+	v##dest = (typeof (v##dest))vec_ld (0, dest);                          \
+    } while (0)
+
+#define LOAD_VECTORSC(dest, source, mask)                                      \
+    do                                                                         \
+    {                                                                          \
+	LOAD_VECTORS (dest, source);                                           \
+	LOAD_VECTOR (mask);                                                    \
+    } while (0)
+
+#define DECLARE_SRC_MASK_VAR  vector unsigned char src_mask
 #define DECLARE_MASK_MASK_VAR vector unsigned char mask_mask
 
 #else
@@ -227,40 +224,37 @@ do							  \
 
 #define COMPUTE_SHIFT_MASKC(dest, source, mask)
 
-# define LOAD_VECTOR(source)				\
-    v ## source = (typeof(v ## source))vec_xl(0, source);
+#define LOAD_VECTOR(source) v##source = (typeof (v##source))vec_xl (0, source);
 
-# define LOAD_VECTORS(dest, source)			\
-    LOAD_VECTOR(source);				\
-    LOAD_VECTOR(dest);					\
+#define LOAD_VECTORS(dest, source)                                             \
+    LOAD_VECTOR (source);                                                      \
+    LOAD_VECTOR (dest);
 
-# define LOAD_VECTORSC(dest, source, mask)		\
-    LOAD_VECTORS(dest, source); 			\
-    LOAD_VECTOR(mask);					\
+#define LOAD_VECTORSC(dest, source, mask)                                      \
+    LOAD_VECTORS (dest, source);                                               \
+    LOAD_VECTOR (mask);
 
 #define DECLARE_SRC_MASK_VAR
 #define DECLARE_MASK_MASK_VAR
 
 #endif /* WORDS_BIGENDIAN */
 
-#define LOAD_VECTORSM(dest, source, mask)				\
-    LOAD_VECTORSC (dest, source, mask); 				\
-    v ## source = pix_multiply (v ## source,				\
-                                splat_alpha (v ## mask));
+#define LOAD_VECTORSM(dest, source, mask)                                      \
+    LOAD_VECTORSC (dest, source, mask);                                        \
+    v##source = pix_multiply (v##source, splat_alpha (v##mask));
 
-#define STORE_VECTOR(dest)						\
-    vec_st ((vector unsigned int) v ## dest, 0, dest);
+#define STORE_VECTOR(dest) vec_st ((vector unsigned int)v##dest, 0, dest);
 
 /* load 4 pixels from a 16-byte boundary aligned address */
 static force_inline vector unsigned char
-load_128_aligned (const uint32_t* src)
+load_128_aligned (const uint32_t *src)
 {
-    return *((vector unsigned char *) src);
+    return *((vector unsigned char *)src);
 }
 
 /* load 4 pixels from a unaligned address */
 static force_inline vector unsigned char
-load_128_unaligned (const uint32_t* src)
+load_128_unaligned (const uint32_t *src)
 {
     vector unsigned char vsrc;
     DECLARE_SRC_MASK_VAR;
@@ -273,10 +267,9 @@ load_128_unaligned (const uint32_t* src)
 
 /* save 4 pixels on a 16-byte boundary aligned address */
 static force_inline void
-save_128_aligned (uint32_t* data,
-		  vector unsigned char vdata)
+save_128_aligned (uint32_t *data, vector unsigned char vdata)
 {
-    STORE_VECTOR(data)
+    STORE_VECTOR (data)
 }
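
For context, the big-endian LOAD_VECTOR macro above is the classic AltiVec
unaligned-load idiom: two aligned 16-byte loads that bracket the data, then a
permute that shifts the wanted bytes into one register. A standalone sketch of
the same idiom (not part of the patch; the helper name is made up):

    static vector unsigned char
    load_16_unaligned_sketch (const uint32_t *src)
    {
        /* vec_lvsl builds a permute map from the low 4 bits of src */
        vector unsigned char perm = vec_lvsl (0, src);
        vector unsigned char lo   = (vector unsigned char)vec_ld (0, src);
        vector unsigned char hi   = (vector unsigned char)vec_ld (15, src);

        /* pick the 16 bytes starting at src out of the two aligned loads */
        return vec_perm (lo, hi, perm);
    }

On little-endian targets the same load is a single vec_xl, which is why the
#else branch further down needs no shift masks at all.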
 
 static force_inline int
@@ -302,7 +295,7 @@ core_combine_over_u_pixel_vmx (uint32_t src, uint32_t dst)
 {
     uint32_t a;
 
-    a = ALPHA_8(src);
+    a = ALPHA_8 (src);
 
     if (a == 0xff)
     {
@@ -310,7 +303,7 @@ core_combine_over_u_pixel_vmx (uint32_t src, uint32_t dst)
     }
     else if (src)
     {
-	UN8x4_MUL_UN8_ADD_UN8x4(dst, (~a & MASK), src);
+	UN8x4_MUL_UN8_ADD_UN8x4 (dst, (~a & MASK), src);
     }
 
     return dst;
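
The scalar path above is Porter-Duff OVER on premultiplied ARGB: per channel,
result = src + dst * (255 - alpha(src)) / 255. A minimal standalone sketch of
the same arithmetic, assuming pixman's usual rounded division by 255 (add
0x80, then fold the high byte back in):

    static uint32_t
    over_scalar_sketch (uint32_t src, uint32_t dst)
    {
        uint32_t ia  = 255 - (src >> 24); /* inverse source alpha */
        uint32_t out = 0;

        for (int shift = 0; shift < 32; shift += 8)
        {
            uint32_t t = ((dst >> shift) & 0xff) * ia + 0x80;
            t = (t + (t >> 8)) >> 8;      /* (d * ia) / 255, rounded */
            t += (src >> shift) & 0xff;   /* add the premultiplied source */
            if (t > 0xff)
                t = 0xff;                 /* saturate, as the UN8x4 macros do */
            out |= t << shift;
        }
        return out;
    }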
@@ -322,44 +315,42 @@ combine1 (const uint32_t *ps, const uint32_t *pm)
     uint32_t s = *ps;
 
     if (pm)
-	UN8x4_MUL_UN8(s, ALPHA_8(*pm));
+	UN8x4_MUL_UN8 (s, ALPHA_8 (*pm));
 
     return s;
 }
 
 static force_inline vector unsigned char
-combine4 (const uint32_t* ps, const uint32_t* pm)
+combine4 (const uint32_t *ps, const uint32_t *pm)
 {
     vector unsigned char src, msk;
 
     if (pm)
     {
-	msk = load_128_unaligned(pm);
+	msk = load_128_unaligned (pm);
 
-	if (is_transparent(msk))
+	if (is_transparent (msk))
 	    return vzero;
     }
 
-    src = load_128_unaligned(ps);
+    src = load_128_unaligned (ps);
 
     if (pm)
-	src = pix_multiply(src, msk);
+	src = pix_multiply (src, msk);
 
     return src;
 }
 
 static void
-vmx_combine_over_u_no_mask (uint32_t *      dest,
-                            const uint32_t *src,
-                            int             width)
+vmx_combine_over_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t ia = ALPHA_8 (~s);
 
 	UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
@@ -386,8 +377,8 @@ vmx_combine_over_u_no_mask (uint32_t *      dest,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t ia = ALPHA_8 (~s);
 
 	UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s);
@@ -397,10 +388,10 @@ vmx_combine_over_u_no_mask (uint32_t *      dest,
 }
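
Every combiner in this file repeats the loop shape seen above: a scalar head
until dest reaches 16-byte alignment, a vector body four pixels at a time, and
a scalar tail indexed by width % 4. Schematically, with hypothetical
process_one()/process_four() standing in for the per-operator math:

    while (width && ((uintptr_t)dest & 15))
    {
        process_one (dest++, *src++);   /* unaligned head, pixel at a time */
        width--;
    }

    for (int n = width / 4; n > 0; n--)
    {
        process_four (dest, src);       /* LOAD_VECTORS ... STORE_VECTOR */
        dest += 4;
        src += 4;
    }

    for (int i = width % 4; --i >= 0;)
        process_one (&dest[i], src[i]); /* leftover tail, back to front */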
 
 static void
-vmx_combine_over_u_mask (uint32_t *      dest,
-                         const uint32_t *src,
-                         const uint32_t *mask,
-                         int             width)
+vmx_combine_over_u_mask (uint32_t       *dest,
+			 const uint32_t *src,
+			 const uint32_t *mask,
+			 int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -456,11 +447,11 @@ vmx_combine_over_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_over_u (pixman_implementation_t *imp,
-                    pixman_op_t              op,
-                    uint32_t *               dest,
-                    const uint32_t *         src,
-                    const uint32_t *         mask,
-                    int                      width)
+		    pixman_op_t              op,
+		    uint32_t                *dest,
+		    const uint32_t          *src,
+		    const uint32_t          *mask,
+		    int                      width)
 {
     if (mask)
 	vmx_combine_over_u_mask (dest, src, mask, width);
@@ -469,17 +460,17 @@ vmx_combine_over_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
-                                    const uint32_t *src,
-                                    int             width)
+vmx_combine_over_reverse_u_no_mask (uint32_t       *dest,
+				    const uint32_t *src,
+				    int             width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t ia = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
@@ -505,8 +496,8 @@ vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t ia = ALPHA_8 (~dest[i]);
 
 	UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d);
@@ -515,10 +506,10 @@ vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
 }
 
 static void
-vmx_combine_over_reverse_u_mask (uint32_t *      dest,
-                                 const uint32_t *src,
-                                 const uint32_t *mask,
-                                 int             width)
+vmx_combine_over_reverse_u_mask (uint32_t       *dest,
+				 const uint32_t *src,
+				 const uint32_t *mask,
+				 int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -526,9 +517,9 @@ vmx_combine_over_reverse_u_mask (uint32_t *      dest,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t m = ALPHA_8 (*mask++);
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t m  = ALPHA_8 (*mask++);
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t ia = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8 (s, m);
@@ -557,9 +548,9 @@ vmx_combine_over_reverse_u_mask (uint32_t *      dest,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t m = ALPHA_8 (mask[i]);
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t m  = ALPHA_8 (mask[i]);
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t ia = ALPHA_8 (~dest[i]);
 
 	UN8x4_MUL_UN8 (s, m);
@@ -571,11 +562,11 @@ vmx_combine_over_reverse_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_over_reverse_u (pixman_implementation_t *imp,
-                            pixman_op_t              op,
-                            uint32_t *               dest,
-                            const uint32_t *         src,
-                            const uint32_t *         mask,
-                            int                      width)
+			    pixman_op_t              op,
+			    uint32_t                *dest,
+			    const uint32_t          *src,
+			    const uint32_t          *mask,
+			    int                      width)
 {
     if (mask)
 	vmx_combine_over_reverse_u_mask (dest, src, mask, width);
@@ -584,9 +575,7 @@ vmx_combine_over_reverse_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_in_u_no_mask (uint32_t *      dest,
-                          const uint32_t *src,
-                          int             width)
+vmx_combine_in_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
@@ -627,10 +616,10 @@ vmx_combine_in_u_no_mask (uint32_t *      dest,
 }
 
 static void
-vmx_combine_in_u_mask (uint32_t *      dest,
-                       const uint32_t *src,
-                       const uint32_t *mask,
-                       int             width)
+vmx_combine_in_u_mask (uint32_t       *dest,
+		       const uint32_t *src,
+		       const uint32_t *mask,
+		       int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -680,11 +669,11 @@ vmx_combine_in_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_in_u (pixman_implementation_t *imp,
-                  pixman_op_t              op,
-                  uint32_t *               dest,
-                  const uint32_t *         src,
-                  const uint32_t *         mask,
-                  int                      width)
+		  pixman_op_t              op,
+		  uint32_t                *dest,
+		  const uint32_t          *src,
+		  const uint32_t          *mask,
+		  int                      width)
 {
     if (mask)
 	vmx_combine_in_u_mask (dest, src, mask, width);
@@ -693,9 +682,9 @@ vmx_combine_in_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_in_reverse_u_no_mask (uint32_t *      dest,
-                                  const uint32_t *src,
-                                  int             width)
+vmx_combine_in_reverse_u_no_mask (uint32_t       *dest,
+				  const uint32_t *src,
+				  int             width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
@@ -738,10 +727,10 @@ vmx_combine_in_reverse_u_no_mask (uint32_t *      dest,
 }
 
 static void
-vmx_combine_in_reverse_u_mask (uint32_t *      dest,
-                               const uint32_t *src,
-                               const uint32_t *mask,
-                               int             width)
+vmx_combine_in_reverse_u_mask (uint32_t       *dest,
+			       const uint32_t *src,
+			       const uint32_t *mask,
+			       int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -793,11 +782,11 @@ vmx_combine_in_reverse_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_in_reverse_u (pixman_implementation_t *imp,
-                          pixman_op_t              op,
-                          uint32_t *               dest,
-                          const uint32_t *         src,
-                          const uint32_t *         mask,
-                          int                      width)
+			  pixman_op_t              op,
+			  uint32_t                *dest,
+			  const uint32_t          *src,
+			  const uint32_t          *mask,
+			  int                      width)
 {
     if (mask)
 	vmx_combine_in_reverse_u_mask (dest, src, mask, width);
@@ -806,9 +795,7 @@ vmx_combine_in_reverse_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_out_u_no_mask (uint32_t *      dest,
-                           const uint32_t *src,
-                           int             width)
+vmx_combine_out_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
@@ -851,10 +838,10 @@ vmx_combine_out_u_no_mask (uint32_t *      dest,
 }
 
 static void
-vmx_combine_out_u_mask (uint32_t *      dest,
-                        const uint32_t *src,
-                        const uint32_t *mask,
-                        int             width)
+vmx_combine_out_u_mask (uint32_t       *dest,
+			const uint32_t *src,
+			const uint32_t *mask,
+			int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -904,11 +891,11 @@ vmx_combine_out_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_out_u (pixman_implementation_t *imp,
-                   pixman_op_t              op,
-                   uint32_t *               dest,
-                   const uint32_t *         src,
-                   const uint32_t *         mask,
-                   int                      width)
+		   pixman_op_t              op,
+		   uint32_t                *dest,
+		   const uint32_t          *src,
+		   const uint32_t          *mask,
+		   int                      width)
 {
     if (mask)
 	vmx_combine_out_u_mask (dest, src, mask, width);
@@ -917,9 +904,9 @@ vmx_combine_out_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_out_reverse_u_no_mask (uint32_t *      dest,
-                                   const uint32_t *src,
-                                   int             width)
+vmx_combine_out_reverse_u_no_mask (uint32_t       *dest,
+				   const uint32_t *src,
+				   int             width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
@@ -963,10 +950,10 @@ vmx_combine_out_reverse_u_no_mask (uint32_t *      dest,
 }
 
 static void
-vmx_combine_out_reverse_u_mask (uint32_t *      dest,
-                                const uint32_t *src,
-                                const uint32_t *mask,
-                                int             width)
+vmx_combine_out_reverse_u_mask (uint32_t       *dest,
+				const uint32_t *src,
+				const uint32_t *mask,
+				int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1018,11 +1005,11 @@ vmx_combine_out_reverse_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_out_reverse_u (pixman_implementation_t *imp,
-                           pixman_op_t              op,
-                           uint32_t *               dest,
-                           const uint32_t *         src,
-                           const uint32_t *         mask,
-                           int                      width)
+			   pixman_op_t              op,
+			   uint32_t                *dest,
+			   const uint32_t          *src,
+			   const uint32_t          *mask,
+			   int                      width)
 {
     if (mask)
 	vmx_combine_out_reverse_u_mask (dest, src, mask, width);
@@ -1031,17 +1018,15 @@ vmx_combine_out_reverse_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_atop_u_no_mask (uint32_t *      dest,
-                            const uint32_t *src,
-                            int             width)
+vmx_combine_atop_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t s      = *src++;
+	uint32_t d      = *dest;
 	uint32_t dest_a = ALPHA_8 (d);
 	uint32_t src_ia = ALPHA_8 (~s);
 
@@ -1058,8 +1043,8 @@ vmx_combine_atop_u_no_mask (uint32_t *      dest,
     {
 	LOAD_VECTORS (dest, src);
 
-	vdest = pix_add_mul (vsrc, splat_alpha (vdest),
-			     vdest, splat_alpha (negate (vsrc)));
+	vdest = pix_add_mul (vsrc, splat_alpha (vdest), vdest,
+			     splat_alpha (negate (vsrc)));
 
 	STORE_VECTOR (dest);
 
@@ -1069,8 +1054,8 @@ vmx_combine_atop_u_no_mask (uint32_t *      dest,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t s      = src[i];
+	uint32_t d      = dest[i];
 	uint32_t dest_a = ALPHA_8 (d);
 	uint32_t src_ia = ALPHA_8 (~s);
 
@@ -1081,10 +1066,10 @@ vmx_combine_atop_u_no_mask (uint32_t *      dest,
 }
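
The pix_add_mul call above evaluates both ATOP products in one pass. Per
channel, with alphas in [0, 255], it matches the scalar head and tail:

    dest = (src * alpha(dest) + dest * (255 - alpha(src))) / 255

The atop-reverse variant below swaps the roles of source and destination; the
surrounding loop structure is identical.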
 
 static void
-vmx_combine_atop_u_mask (uint32_t *      dest,
-                         const uint32_t *src,
-                         const uint32_t *mask,
-                         int             width)
+vmx_combine_atop_u_mask (uint32_t       *dest,
+			 const uint32_t *src,
+			 const uint32_t *mask,
+			 int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1092,9 +1077,9 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t m = ALPHA_8 (*mask++);
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t m      = ALPHA_8 (*mask++);
+	uint32_t s      = *src++;
+	uint32_t d      = *dest;
 	uint32_t dest_a = ALPHA_8 (d);
 	uint32_t src_ia;
 
@@ -1115,8 +1100,8 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
     {
 	LOAD_VECTORSM (dest, src, mask);
 
-	vdest = pix_add_mul (vsrc, splat_alpha (vdest),
-			     vdest, splat_alpha (negate (vsrc)));
+	vdest = pix_add_mul (vsrc, splat_alpha (vdest), vdest,
+			     splat_alpha (negate (vsrc)));
 
 	STORE_VECTOR (dest);
 
@@ -1127,9 +1112,9 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t m = ALPHA_8 (mask[i]);
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t m      = ALPHA_8 (mask[i]);
+	uint32_t s      = src[i];
+	uint32_t d      = dest[i];
 	uint32_t dest_a = ALPHA_8 (d);
 	uint32_t src_ia;
 
@@ -1145,11 +1130,11 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_atop_u (pixman_implementation_t *imp,
-                    pixman_op_t              op,
-                    uint32_t *               dest,
-                    const uint32_t *         src,
-                    const uint32_t *         mask,
-                    int                      width)
+		    pixman_op_t              op,
+		    uint32_t                *dest,
+		    const uint32_t          *src,
+		    const uint32_t          *mask,
+		    int                      width)
 {
     if (mask)
 	vmx_combine_atop_u_mask (dest, src, mask, width);
@@ -1158,18 +1143,18 @@ vmx_combine_atop_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
-                                    const uint32_t *src,
-                                    int             width)
+vmx_combine_atop_reverse_u_no_mask (uint32_t       *dest,
+				    const uint32_t *src,
+				    int             width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t s = *src++;
-	uint32_t d = *dest;
-	uint32_t src_a = ALPHA_8 (s);
+	uint32_t s       = *src++;
+	uint32_t d       = *dest;
+	uint32_t src_a   = ALPHA_8 (s);
 	uint32_t dest_ia = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);
@@ -1185,8 +1170,8 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
     {
 	LOAD_VECTORS (dest, src);
 
-	vdest = pix_add_mul (vdest, splat_alpha (vsrc),
-			     vsrc, splat_alpha (negate (vdest)));
+	vdest = pix_add_mul (vdest, splat_alpha (vsrc), vsrc,
+			     splat_alpha (negate (vdest)));
 
 	STORE_VECTOR (dest);
 
@@ -1196,9 +1181,9 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
-	uint32_t src_a = ALPHA_8 (s);
+	uint32_t s       = src[i];
+	uint32_t d       = dest[i];
+	uint32_t src_a   = ALPHA_8 (s);
 	uint32_t dest_ia = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a);
@@ -1208,10 +1193,10 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
 }
 
 static void
-vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
-                                 const uint32_t *src,
-                                 const uint32_t *mask,
-                                 int             width)
+vmx_combine_atop_reverse_u_mask (uint32_t       *dest,
+				 const uint32_t *src,
+				 const uint32_t *mask,
+				 int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1242,8 +1227,8 @@ vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
     {
 	LOAD_VECTORSM (dest, src, mask);
 
-	vdest = pix_add_mul (vdest, splat_alpha (vsrc),
-			     vsrc, splat_alpha (negate (vdest)));
+	vdest = pix_add_mul (vdest, splat_alpha (vsrc), vsrc,
+			     splat_alpha (negate (vdest)));
 
 	STORE_VECTOR (dest);
 
@@ -1272,11 +1257,11 @@ vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_atop_reverse_u (pixman_implementation_t *imp,
-                            pixman_op_t              op,
-                            uint32_t *               dest,
-                            const uint32_t *         src,
-                            const uint32_t *         mask,
-                            int                      width)
+			    pixman_op_t              op,
+			    uint32_t                *dest,
+			    const uint32_t          *src,
+			    const uint32_t          *mask,
+			    int                      width)
 {
     if (mask)
 	vmx_combine_atop_reverse_u_mask (dest, src, mask, width);
@@ -1285,18 +1270,16 @@ vmx_combine_atop_reverse_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_xor_u_no_mask (uint32_t *      dest,
-                           const uint32_t *src,
-                           int             width)
+vmx_combine_xor_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t s = *src++;
-	uint32_t d = *dest;
-	uint32_t src_ia = ALPHA_8 (~s);
+	uint32_t s       = *src++;
+	uint32_t d       = *dest;
+	uint32_t src_ia  = ALPHA_8 (~s);
 	uint32_t dest_ia = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);
@@ -1312,8 +1295,8 @@ vmx_combine_xor_u_no_mask (uint32_t *      dest,
     {
 	LOAD_VECTORS (dest, src);
 
-	vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)),
-			     vdest, splat_alpha (negate (vsrc)));
+	vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)), vdest,
+			     splat_alpha (negate (vsrc)));
 
 	STORE_VECTOR (dest);
 
@@ -1323,9 +1306,9 @@ vmx_combine_xor_u_no_mask (uint32_t *      dest,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
-	uint32_t src_ia = ALPHA_8 (~s);
+	uint32_t s       = src[i];
+	uint32_t d       = dest[i];
+	uint32_t src_ia  = ALPHA_8 (~s);
 	uint32_t dest_ia = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia);
@@ -1335,10 +1318,10 @@ vmx_combine_xor_u_no_mask (uint32_t *      dest,
 }
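
For XOR each layer survives only where the other is transparent; the
pix_add_mul above computes, per channel:

    dest = (src * (255 - alpha(dest)) + dest * (255 - alpha(src))) / 255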
 
 static void
-vmx_combine_xor_u_mask (uint32_t *      dest,
-                        const uint32_t *src,
-                        const uint32_t *mask,
-                        int             width)
+vmx_combine_xor_u_mask (uint32_t       *dest,
+			const uint32_t *src,
+			const uint32_t *mask,
+			int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1369,8 +1352,8 @@ vmx_combine_xor_u_mask (uint32_t *      dest,
     {
 	LOAD_VECTORSM (dest, src, mask);
 
-	vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)),
-			     vdest, splat_alpha (negate (vsrc)));
+	vdest = pix_add_mul (vsrc, splat_alpha (negate (vdest)), vdest,
+			     splat_alpha (negate (vsrc)));
 
 	STORE_VECTOR (dest);
 
@@ -1399,11 +1382,11 @@ vmx_combine_xor_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_xor_u (pixman_implementation_t *imp,
-                   pixman_op_t              op,
-                   uint32_t *               dest,
-                   const uint32_t *         src,
-                   const uint32_t *         mask,
-                   int                      width)
+		   pixman_op_t              op,
+		   uint32_t                *dest,
+		   const uint32_t          *src,
+		   const uint32_t          *mask,
+		   int                      width)
 {
     if (mask)
 	vmx_combine_xor_u_mask (dest, src, mask, width);
@@ -1412,9 +1395,7 @@ vmx_combine_xor_u (pixman_implementation_t *imp,
 }
 
 static void
-vmx_combine_add_u_no_mask (uint32_t *      dest,
-                           const uint32_t *src,
-                           int             width)
+vmx_combine_add_u_no_mask (uint32_t *dest, const uint32_t *src, int width)
 {
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
@@ -1456,10 +1437,10 @@ vmx_combine_add_u_no_mask (uint32_t *      dest,
 }
 
 static void
-vmx_combine_add_u_mask (uint32_t *      dest,
-                        const uint32_t *src,
-                        const uint32_t *mask,
-                        int             width)
+vmx_combine_add_u_mask (uint32_t       *dest,
+			const uint32_t *src,
+			const uint32_t *mask,
+			int             width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1509,11 +1490,11 @@ vmx_combine_add_u_mask (uint32_t *      dest,
 
 static void
 vmx_combine_add_u (pixman_implementation_t *imp,
-                   pixman_op_t              op,
-                   uint32_t *               dest,
-                   const uint32_t *         src,
-                   const uint32_t *         mask,
-                   int                      width)
+		   pixman_op_t              op,
+		   uint32_t                *dest,
+		   const uint32_t          *src,
+		   const uint32_t          *mask,
+		   int                      width)
 {
     if (mask)
 	vmx_combine_add_u_mask (dest, src, mask, width);
@@ -1523,11 +1504,11 @@ vmx_combine_add_u (pixman_implementation_t *imp,
 
 static void
 vmx_combine_src_ca (pixman_implementation_t *imp,
-                    pixman_op_t              op,
-                    uint32_t *               dest,
-                    const uint32_t *         src,
-                    const uint32_t *         mask,
-                    int                      width)
+		    pixman_op_t              op,
+		    uint32_t                *dest,
+		    const uint32_t          *src,
+		    const uint32_t          *mask,
+		    int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1573,11 +1554,11 @@ vmx_combine_src_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_over_ca (pixman_implementation_t *imp,
-                     pixman_op_t              op,
-                     uint32_t *               dest,
-                     const uint32_t *         src,
-                     const uint32_t *         mask,
-                     int                      width)
+		     pixman_op_t              op,
+		     uint32_t                *dest,
+		     const uint32_t          *src,
+		     const uint32_t          *mask,
+		     int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1585,9 +1566,9 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t a  = *mask++;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t sa = ALPHA_8 (s);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1616,9 +1597,9 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t a  = mask[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t sa = ALPHA_8 (s);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1631,11 +1612,11 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
-                             pixman_op_t              op,
-                             uint32_t *               dest,
-                             const uint32_t *         src,
-                             const uint32_t *         mask,
-                             int                      width)
+			     pixman_op_t              op,
+			     uint32_t                *dest,
+			     const uint32_t          *src,
+			     const uint32_t          *mask,
+			     int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1643,9 +1624,9 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t a   = *mask++;
+	uint32_t s   = *src++;
+	uint32_t d   = *dest;
 	uint32_t ida = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1673,9 +1654,9 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t a   = mask[i];
+	uint32_t s   = src[i];
+	uint32_t d   = dest[i];
 	uint32_t ida = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1687,11 +1668,11 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_in_ca (pixman_implementation_t *imp,
-                   pixman_op_t              op,
-                   uint32_t *               dest,
-                   const uint32_t *         src,
-                   const uint32_t *         mask,
-                   int                      width)
+		   pixman_op_t              op,
+		   uint32_t                *dest,
+		   const uint32_t          *src,
+		   const uint32_t          *mask,
+		   int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1699,8 +1680,8 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
+	uint32_t a  = *mask++;
+	uint32_t s  = *src++;
 	uint32_t da = ALPHA_8 (*dest);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1728,8 +1709,8 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
+	uint32_t a  = mask[i];
+	uint32_t s  = src[i];
 	uint32_t da = ALPHA_8 (dest[i]);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1741,11 +1722,11 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
-                           pixman_op_t              op,
-                           uint32_t *               dest,
-                           const uint32_t *         src,
-                           const uint32_t *         mask,
-                           int                      width)
+			   pixman_op_t              op,
+			   uint32_t                *dest,
+			   const uint32_t          *src,
+			   const uint32_t          *mask,
+			   int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1753,8 +1734,8 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t d = *dest;
+	uint32_t a  = *mask++;
+	uint32_t d  = *dest;
 	uint32_t sa = ALPHA_8 (*src++);
 
 	UN8x4_MUL_UN8 (a, sa);
@@ -1783,8 +1764,8 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t d = dest[i];
+	uint32_t a  = mask[i];
+	uint32_t d  = dest[i];
 	uint32_t sa = ALPHA_8 (src[i]);
 
 	UN8x4_MUL_UN8 (a, sa);
@@ -1796,11 +1777,11 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_out_ca (pixman_implementation_t *imp,
-                    pixman_op_t              op,
-                    uint32_t *               dest,
-                    const uint32_t *         src,
-                    const uint32_t *         mask,
-                    int                      width)
+		    pixman_op_t              op,
+		    uint32_t                *dest,
+		    const uint32_t          *src,
+		    const uint32_t          *mask,
+		    int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1808,9 +1789,9 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t a  = *mask++;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t da = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1827,8 +1808,8 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
     {
 	LOAD_VECTORSC (dest, src, mask);
 
-	vdest = pix_multiply (
-	    pix_multiply (vsrc, vmask), splat_alpha (negate (vdest)));
+	vdest = pix_multiply (pix_multiply (vsrc, vmask),
+			      splat_alpha (negate (vdest)));
 
 	STORE_VECTOR (dest);
 
@@ -1839,9 +1820,9 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t a  = mask[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t da = ALPHA_8 (~d);
 
 	UN8x4_MUL_UN8x4 (s, a);
@@ -1853,11 +1834,11 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
-                            pixman_op_t              op,
-                            uint32_t *               dest,
-                            const uint32_t *         src,
-                            const uint32_t *         mask,
-                            int                      width)
+			    pixman_op_t              op,
+			    uint32_t                *dest,
+			    const uint32_t          *src,
+			    const uint32_t          *mask,
+			    int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1865,9 +1846,9 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t a  = *mask++;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t sa = ALPHA_8 (s);
 
 	UN8x4_MUL_UN8 (a, sa);
@@ -1896,9 +1877,9 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t a  = mask[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t sa = ALPHA_8 (s);
 
 	UN8x4_MUL_UN8 (a, sa);
@@ -1910,11 +1891,11 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_atop_ca (pixman_implementation_t *imp,
-                     pixman_op_t              op,
-                     uint32_t *               dest,
-                     const uint32_t *         src,
-                     const uint32_t *         mask,
-                     int                      width)
+		     pixman_op_t              op,
+		     uint32_t                *dest,
+		     const uint32_t          *src,
+		     const uint32_t          *mask,
+		     int                      width)
 {
     vector unsigned char vdest, vsrc, vmask, vsrca;
     DECLARE_SRC_MASK_VAR;
@@ -1922,9 +1903,9 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t a  = *mask++;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t sa = ALPHA_8 (s);
 	uint32_t da = ALPHA_8 (d);
 
@@ -1945,11 +1926,10 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
 
 	vsrca = splat_alpha (vsrc);
 
-	vsrc = pix_multiply (vsrc, vmask);
+	vsrc  = pix_multiply (vsrc, vmask);
 	vmask = pix_multiply (vmask, vsrca);
 
-	vdest = pix_add_mul (vsrc, splat_alpha (vdest),
-			     negate (vmask), vdest);
+	vdest = pix_add_mul (vsrc, splat_alpha (vdest), negate (vmask), vdest);
 
 	STORE_VECTOR (dest);
 
@@ -1960,9 +1940,9 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t a  = mask[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t sa = ALPHA_8 (s);
 	uint32_t da = ALPHA_8 (d);
 
@@ -1976,11 +1956,11 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
-                             pixman_op_t              op,
-                             uint32_t *               dest,
-                             const uint32_t *         src,
-                             const uint32_t *         mask,
-                             int                      width)
+			     pixman_op_t              op,
+			     uint32_t                *dest,
+			     const uint32_t          *src,
+			     const uint32_t          *mask,
+			     int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -1988,9 +1968,9 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t a  = *mask++;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t sa = ALPHA_8 (s);
 	uint32_t da = ALPHA_8 (~d);
 
@@ -2009,8 +1989,7 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
     {
 	LOAD_VECTORSC (dest, src, mask);
 
-	vdest = pix_add_mul (vdest,
-			     pix_multiply (vmask, splat_alpha (vsrc)),
+	vdest = pix_add_mul (vdest, pix_multiply (vmask, splat_alpha (vsrc)),
 			     pix_multiply (vsrc, vmask),
 			     negate (splat_alpha (vdest)));
 
@@ -2023,9 +2002,9 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t a  = mask[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t sa = ALPHA_8 (s);
 	uint32_t da = ALPHA_8 (~d);
 
@@ -2039,11 +2018,11 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_xor_ca (pixman_implementation_t *imp,
-                    pixman_op_t              op,
-                    uint32_t *               dest,
-                    const uint32_t *         src,
-                    const uint32_t *         mask,
-                    int                      width)
+		    pixman_op_t              op,
+		    uint32_t                *dest,
+		    const uint32_t          *src,
+		    const uint32_t          *mask,
+		    int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -2051,9 +2030,9 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
 
     while (width && ((uintptr_t)dest & 15))
     {
-	uint32_t a = *mask++;
-	uint32_t s = *src++;
-	uint32_t d = *dest;
+	uint32_t a  = *mask++;
+	uint32_t s  = *src++;
+	uint32_t d  = *dest;
 	uint32_t sa = ALPHA_8 (s);
 	uint32_t da = ALPHA_8 (~d);
 
@@ -2072,10 +2051,9 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
     {
 	LOAD_VECTORSC (dest, src, mask);
 
-	vdest = pix_add_mul (vdest,
-			     negate (pix_multiply (vmask, splat_alpha (vsrc))),
-			     pix_multiply (vsrc, vmask),
-			     negate (splat_alpha (vdest)));
+	vdest = pix_add_mul (
+	    vdest, negate (pix_multiply (vmask, splat_alpha (vsrc))),
+	    pix_multiply (vsrc, vmask), negate (splat_alpha (vdest)));
 
 	STORE_VECTOR (dest);
 
@@ -2086,9 +2064,9 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
 
     for (int i = width % 4; --i >= 0;)
     {
-	uint32_t a = mask[i];
-	uint32_t s = src[i];
-	uint32_t d = dest[i];
+	uint32_t a  = mask[i];
+	uint32_t s  = src[i];
+	uint32_t d  = dest[i];
 	uint32_t sa = ALPHA_8 (s);
 	uint32_t da = ALPHA_8 (~d);
 
@@ -2102,11 +2080,11 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
 
 static void
 vmx_combine_add_ca (pixman_implementation_t *imp,
-                    pixman_op_t              op,
-                    uint32_t *               dest,
-                    const uint32_t *         src,
-                    const uint32_t *         mask,
-                    int                      width)
+		    pixman_op_t              op,
+		    uint32_t                *dest,
+		    const uint32_t          *src,
+		    const uint32_t          *mask,
+		    int                      width)
 {
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
@@ -2156,36 +2134,36 @@ vmx_combine_add_ca (pixman_implementation_t *imp,
 
 static void
 vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
-                              pixman_composite_info_t *info)
+			     pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
-    uint32_t src, srca;
+    uint32_t  src, srca;
     uint32_t *dst_line, *dst;
-    uint8_t *mask_line;
-    int dst_stride, mask_stride;
-    int32_t w;
-    uint32_t m, d, s, ia;
+    uint8_t  *mask_line;
+    int       dst_stride, mask_stride;
+    int32_t   w;
+    uint32_t  m, d, s, ia;
 
     vector unsigned char vsrc, valpha, vmask, vdst;
 
     src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
 
-    srca = ALPHA_8(src);
+    srca = ALPHA_8 (src);
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (
-	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (
-	mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride,
+			   dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride,
+			   mask_line, 1);
 
-    vsrc = (vector unsigned char)create_mask_32_128 (src);
-    valpha = splat_alpha(vsrc);
+    vsrc   = (vector unsigned char)create_mask_32_128 (src);
+    valpha = splat_alpha (vsrc);
 
     while (height--)
     {
 	const uint8_t *pm = mask_line;
-	dst = dst_line;
+	dst               = dst_line;
 	dst_line += dst_stride;
 	mask_line += mask_stride;
 	w = width;
@@ -2210,20 +2188,21 @@ vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
 
 	while (w >= 4)
 	{
-	    m = *((uint32_t*)pm);
+	    m = *((uint32_t *)pm);
 
 	    if (srca == 0xff && m == 0xffffffff)
 	    {
-		save_128_aligned(dst, vsrc);
+		save_128_aligned (dst, vsrc);
 	    }
 	    else if (m)
 	    {
-		vmask = splat_pixel((vector unsigned char)create_mask_32_128 (m));
+		vmask = splat_pixel (
+		    (vector unsigned char)create_mask_32_128 (m));
 
 		/* dst is 16-byte aligned */
 		vdst = in_over (vsrc, valpha, vmask, load_128_aligned (dst));
 
-		save_128_aligned(dst, vdst);
+		save_128_aligned (dst, vdst);
 	    }
 
 	    w -= 4;
@@ -2249,19 +2228,18 @@ vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
 	    dst++;
 	}
     }
-
 }
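
The in_over helper used in the vector body above chains two Porter-Duff steps,
restricting the solid source to the mask before compositing. Schematically (a
sketch of the relationship, not a quote of the helper, which is defined
earlier in this file):

    in_over (src, srca, mask, dst) == over (in (src, mask), in (srca, mask), dst)

The srca == 0xff && m == 0xffffffff special case skips all of that and stores
the solid source directly.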
 
 static pixman_bool_t
 vmx_fill (pixman_implementation_t *imp,
-           uint32_t *               bits,
-           int                      stride,
-           int                      bpp,
-           int                      x,
-           int                      y,
-           int                      width,
-           int                      height,
-           uint32_t		    filler)
+	  uint32_t                *bits,
+	  int                      stride,
+	  int                      bpp,
+	  int                      x,
+	  int                      y,
+	  int                      width,
+	  int                      height,
+	  uint32_t                 filler)
 {
     uint32_t byte_width;
     uint8_t *byte_line;
@@ -2270,31 +2248,31 @@ vmx_fill (pixman_implementation_t *imp,
 
     if (bpp == 8)
     {
-	uint8_t b;
+	uint8_t  b;
 	uint16_t w;
 
-	stride = stride * (int) sizeof (uint32_t) / 1;
-	byte_line = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
+	stride     = stride * (int)sizeof (uint32_t) / 1;
+	byte_line  = (uint8_t *)(((uint8_t *)bits) + stride * y + x);
 	byte_width = width;
 	stride *= 1;
 
-	b = filler & 0xff;
-	w = (b << 8) | b;
+	b      = filler & 0xff;
+	w      = (b << 8) | b;
 	filler = (w << 16) | w;
     }
     else if (bpp == 16)
     {
-	stride = stride * (int) sizeof (uint32_t) / 2;
-	byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
+	stride     = stride * (int)sizeof (uint32_t) / 2;
+	byte_line  = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
 	byte_width = 2 * width;
 	stride *= 2;
 
-        filler = (filler & 0xffff) * 0x00010001;
+	filler = (filler & 0xffff) * 0x00010001;
     }
     else if (bpp == 32)
     {
-	stride = stride * (int) sizeof (uint32_t) / 4;
-	byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
+	stride     = stride * (int)sizeof (uint32_t) / 4;
+	byte_line  = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
 	byte_width = 4 * width;
 	stride *= 4;
     }
@@ -2303,11 +2281,11 @@ vmx_fill (pixman_implementation_t *imp,
 	return FALSE;
     }
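
For the narrow formats the 32-bit filler is built by doubling the pattern. A
worked example for bpp == 8 (the values are illustrative):

    uint8_t  b = 0xAB;                      /* low byte of the filler */
    uint16_t w = (b << 8) | b;              /* 0xABAB */
    uint32_t f = ((uint32_t)w << 16) | w;   /* 0xABABABAB */

create_mask_32_128 () then appears to broadcast that 32-bit value across the
128-bit vector, so each 16-byte store below writes four, eight, or sixteen
pixels for bpp 32, 16, and 8 respectively.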
 
-    vfiller = create_mask_32_128(filler);
+    vfiller = create_mask_32_128 (filler);
 
     while (height--)
     {
-	int w;
+	int      w;
 	uint8_t *d = byte_line;
 	byte_line += stride;
 	w = byte_width;
@@ -2336,14 +2314,14 @@ vmx_fill (pixman_implementation_t *imp,
 
 	while (w >= 128)
 	{
-	    vec_st(vfiller, 0, (uint32_t *) d);
-	    vec_st(vfiller, 0, (uint32_t *) d + 4);
-	    vec_st(vfiller, 0, (uint32_t *) d + 8);
-	    vec_st(vfiller, 0, (uint32_t *) d + 12);
-	    vec_st(vfiller, 0, (uint32_t *) d + 16);
-	    vec_st(vfiller, 0, (uint32_t *) d + 20);
-	    vec_st(vfiller, 0, (uint32_t *) d + 24);
-	    vec_st(vfiller, 0, (uint32_t *) d + 28);
+	    vec_st (vfiller, 0, (uint32_t *)d);
+	    vec_st (vfiller, 0, (uint32_t *)d + 4);
+	    vec_st (vfiller, 0, (uint32_t *)d + 8);
+	    vec_st (vfiller, 0, (uint32_t *)d + 12);
+	    vec_st (vfiller, 0, (uint32_t *)d + 16);
+	    vec_st (vfiller, 0, (uint32_t *)d + 20);
+	    vec_st (vfiller, 0, (uint32_t *)d + 24);
+	    vec_st (vfiller, 0, (uint32_t *)d + 28);
 
 	    d += 128;
 	    w -= 128;
@@ -2351,10 +2329,10 @@ vmx_fill (pixman_implementation_t *imp,
 
 	if (w >= 64)
 	{
-	    vec_st(vfiller, 0, (uint32_t *) d);
-	    vec_st(vfiller, 0, (uint32_t *) d + 4);
-	    vec_st(vfiller, 0, (uint32_t *) d + 8);
-	    vec_st(vfiller, 0, (uint32_t *) d + 12);
+	    vec_st (vfiller, 0, (uint32_t *)d);
+	    vec_st (vfiller, 0, (uint32_t *)d + 4);
+	    vec_st (vfiller, 0, (uint32_t *)d + 8);
+	    vec_st (vfiller, 0, (uint32_t *)d + 12);
 
 	    d += 64;
 	    w -= 64;
@@ -2362,8 +2340,8 @@ vmx_fill (pixman_implementation_t *imp,
 
 	if (w >= 32)
 	{
-	    vec_st(vfiller, 0, (uint32_t *) d);
-	    vec_st(vfiller, 0, (uint32_t *) d + 4);
+	    vec_st (vfiller, 0, (uint32_t *)d);
+	    vec_st (vfiller, 0, (uint32_t *)d + 4);
 
 	    d += 32;
 	    w -= 32;
@@ -2371,7 +2349,7 @@ vmx_fill (pixman_implementation_t *imp,
 
 	if (w >= 16)
 	{
-	    vec_st(vfiller, 0, (uint32_t *) d);
+	    vec_st (vfiller, 0, (uint32_t *)d);
 
 	    d += 16;
 	    w -= 16;
@@ -2405,18 +2383,18 @@ vmx_fill (pixman_implementation_t *imp,
 
 static void
 vmx_composite_src_x888_8888 (pixman_implementation_t *imp,
-			      pixman_composite_info_t *info)
+			     pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
-    uint32_t    *dst_line, *dst;
-    uint32_t    *src_line, *src;
-    int32_t w;
-    int dst_stride, src_stride;
+    uint32_t *dst_line, *dst;
+    uint32_t *src_line, *src;
+    int32_t   w;
+    int       dst_stride, src_stride;
 
-    PIXMAN_IMAGE_GET_LINE (
-	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (
-	src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride,
+			   dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride,
+			   src_line, 1);
 
     while (height--)
     {
@@ -2461,12 +2439,12 @@ vmx_composite_src_x888_8888 (pixman_implementation_t *imp,
 
 static void
 vmx_composite_over_n_8888 (pixman_implementation_t *imp,
-                           pixman_composite_info_t *info)
+			   pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
     uint32_t *dst_line, *dst;
-    uint32_t src, ia;
-    int      w, dst_stride;
+    uint32_t  src, ia;
+    int       w, dst_stride;
 
     vector unsigned char vdst, vsrc, via;
 
@@ -2475,12 +2453,12 @@ vmx_composite_over_n_8888 (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (
-	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride,
+			   dst_line, 1);
 
     vsrc = (vector unsigned char)create_mask_32_128 (src);
-    via = negate (splat_alpha (vsrc));
-    ia = ALPHA_8 (~src);
+    via  = negate (splat_alpha (vsrc));
+    ia   = ALPHA_8 (~src);
 
     while (height--)
     {
@@ -2514,40 +2492,40 @@ vmx_composite_over_n_8888 (pixman_implementation_t *imp,
 
 static void
 vmx_composite_over_8888_8888 (pixman_implementation_t *imp,
-                               pixman_composite_info_t *info)
+			      pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
-    int dst_stride, src_stride;
-    uint32_t    *dst_line, *dst;
-    uint32_t    *src_line, *src;
+    int       dst_stride, src_stride;
+    uint32_t *dst_line, *dst;
+    uint32_t *src_line, *src;
 
-    PIXMAN_IMAGE_GET_LINE (
-    dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (
-    src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride,
+			   dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride,
+			   src_line, 1);
 
     dst = dst_line;
     src = src_line;
 
     while (height--)
     {
-        vmx_combine_over_u (imp, op, dst, src, NULL, width);
+	vmx_combine_over_u (imp, op, dst, src, NULL, width);
 
-        dst += dst_stride;
-        src += src_stride;
+	dst += dst_stride;
+	src += src_stride;
     }
 }
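
Note that this fast path does no vector work of its own: each scanline is
handed whole to vmx_combine_over_u (), so it inherits the head/body/tail
alignment handling shown earlier.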
 
 static void
 vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
-                                    pixman_composite_info_t *info)
+				   pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
-    uint32_t src, ia;
-    uint32_t    *dst_line, d;
-    uint32_t    *mask_line, m;
-    uint32_t pack_cmp;
-    int dst_stride, mask_stride;
+    uint32_t  src, ia;
+    uint32_t *dst_line, d;
+    uint32_t *mask_line, m;
+    uint32_t  pack_cmp;
+    int       dst_stride, mask_stride;
 
     vector unsigned char vsrc, valpha, vmask, vdest;
 
@@ -2556,21 +2534,21 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
     if (src == 0)
 	return;
 
-    PIXMAN_IMAGE_GET_LINE (
-	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (
-	mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride,
+			   dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride,
+			   mask_line, 1);
 
-    vsrc = (vector unsigned char)create_mask_32_128 (src);
-    valpha = splat_alpha(vsrc);
-    ia = ALPHA_8 (src);
+    vsrc   = (vector unsigned char)create_mask_32_128 (src);
+    valpha = splat_alpha (vsrc);
+    ia     = ALPHA_8 (src);
 
     while (height--)
     {
-	int w = width;
+	int             w  = width;
 	const uint32_t *pm = (uint32_t *)mask_line;
-	uint32_t *pd = (uint32_t *)dst_line;
-	uint32_t s;
+	uint32_t       *pd = (uint32_t *)dst_line;
+	uint32_t        s;
 
 	dst_line += dst_stride;
 	mask_line += mask_stride;
@@ -2599,7 +2577,7 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 	    /* pm is NOT necessarily 16-byte aligned */
 	    vmask = load_128_unaligned (pm);
 
-	    pack_cmp = vec_all_eq(vmask, vzero);
+	    pack_cmp = vec_all_eq (vmask, vzero);
 
 	    /* if all bits in mask are zero, pack_cmp is not 0 */
 	    if (pack_cmp == 0)
@@ -2607,7 +2585,7 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 		/* pd is 16-byte aligned */
 		vdest = in_over (vsrc, valpha, vmask, load_128_aligned (pd));
 
-		save_128_aligned(pd, vdest);
+		save_128_aligned (pd, vdest);
 	    }
 
 	    pd += 4;
@@ -2638,19 +2616,19 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 
 static void
 vmx_composite_add_8_8 (pixman_implementation_t *imp,
-            pixman_composite_info_t *info)
+		       pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
-    uint8_t     *dst_line, *dst;
-    uint8_t     *src_line, *src;
-    int dst_stride, src_stride;
-    int32_t w;
+    uint8_t *dst_line, *dst;
+    uint8_t *src_line, *src;
+    int      dst_stride, src_stride;
+    int32_t  w;
     uint16_t t;
 
-    PIXMAN_IMAGE_GET_LINE (
-    src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
-    PIXMAN_IMAGE_GET_LINE (
-    dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride,
+			   src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride,
+			   dst_line, 1);
 
     while (height--)
     {
@@ -2664,13 +2642,13 @@ vmx_composite_add_8_8 (pixman_implementation_t *imp,
 	/* Small head */
 	while (w && (uintptr_t)dst & 3)
 	{
-	    t = (*dst) + (*src++);
+	    t      = (*dst) + (*src++);
 	    *dst++ = t | (0 - (t >> 8));
 	    w--;
 	}
 
-	vmx_combine_add_u (imp, op,
-		    (uint32_t*)dst, (uint32_t*)src, NULL, w >> 2);
+	vmx_combine_add_u (imp, op, (uint32_t *)dst, (uint32_t *)src, NULL,
+			   w >> 2);
 
 	/* Small tail */
 	dst += w & 0xfffc;
@@ -2680,7 +2658,7 @@ vmx_composite_add_8_8 (pixman_implementation_t *imp,
 
 	while (w)
 	{
-	    t = (*dst) + (*src++);
+	    t      = (*dst) + (*src++);
 	    *dst++ = t | (0 - (t >> 8));
 	    w--;
 	}
@@ -2689,17 +2667,17 @@ vmx_composite_add_8_8 (pixman_implementation_t *imp,
 
 static void
 vmx_composite_add_8888_8888 (pixman_implementation_t *imp,
-                              pixman_composite_info_t *info)
+			     pixman_composite_info_t *info)
 {
     PIXMAN_COMPOSITE_ARGS (info);
-    uint32_t    *dst_line, *dst;
-    uint32_t    *src_line, *src;
-    int dst_stride, src_stride;
+    uint32_t *dst_line, *dst;
+    uint32_t *src_line, *src;
+    int       dst_stride, src_stride;
 
-    PIXMAN_IMAGE_GET_LINE (
-	src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-    PIXMAN_IMAGE_GET_LINE (
-	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride,
+			   src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride,
+			   dst_line, 1);
 
     while (height--)
     {
@@ -2713,16 +2691,16 @@ vmx_composite_add_8888_8888 (pixman_implementation_t *imp,
 }
 
 static force_inline void
-scaled_nearest_scanline_vmx_8888_8888_OVER (uint32_t*       pd,
-                                            const uint32_t* ps,
-                                            int32_t         w,
-                                            pixman_fixed_t  vx,
-                                            pixman_fixed_t  unit_x,
-                                            pixman_fixed_t  src_width_fixed,
-                                            pixman_bool_t   fully_transparent_src)
+scaled_nearest_scanline_vmx_8888_8888_OVER (uint32_t       *pd,
+					    const uint32_t *ps,
+					    int32_t         w,
+					    pixman_fixed_t  vx,
+					    pixman_fixed_t  unit_x,
+					    pixman_fixed_t  src_width_fixed,
+					    pixman_bool_t fully_transparent_src)
 {
-    uint32_t s, d;
-    const uint32_t* pm = NULL;
+    uint32_t        s, d;
+    const uint32_t *pm = NULL;
 
     vector unsigned char vsrc, vdst;
 
@@ -2773,7 +2751,7 @@ scaled_nearest_scanline_vmx_8888_8888_OVER (uint32_t*       pd,
 	}
 	else if (!is_zero (vsrc))
 	{
-	    vdst = over(vsrc, splat_alpha(vsrc), load_128_aligned (pd));
+	    vdst = over (vsrc, splat_alpha (vsrc), load_128_aligned (pd));
 
 	    save_128_aligned (pd, vdst);
 	}
@@ -2852,7 +2830,7 @@ static const pixman_fast_path_t vmx_fast_paths[] =
 static uint32_t *
 vmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
 {
-    int w = iter->width;
+    int       w   = iter->width;
     uint32_t *dst = iter->buffer;
     uint32_t *src = (uint32_t *)iter->bits;
 
@@ -2866,7 +2844,8 @@ vmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
 
     while (w >= 4)
     {
-	save_128_aligned(dst, vec_or(load_128_unaligned(src), mask_ff000000));
+	save_128_aligned (dst,
+			  vec_or (load_128_unaligned (src), mask_ff000000));
 
 	dst += 4;
 	src += 4;
@@ -2885,9 +2864,9 @@ vmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
 static uint32_t *
 vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
 {
-    int w = iter->width;
+    int       w   = iter->width;
     uint32_t *dst = iter->buffer;
-    uint8_t *src = iter->bits;
+    uint8_t  *src = iter->bits;
 
     vector unsigned char vmx0, vmx1, vmx2, vmx3, vmx4, vmx5, vmx6;
 
@@ -2895,22 +2874,22 @@ vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
 
     while (w && (((uintptr_t)dst) & 15))
     {
-        *dst++ = *(src++) << 24;
-        w--;
+	*dst++ = *(src++) << 24;
+	w--;
     }
 
     while (w >= 16)
     {
-	vmx0 = load_128_unaligned((uint32_t *) src);
+	vmx0 = load_128_unaligned ((uint32_t *)src);
 
-	unpack_128_2x128(vzero, vmx0, &vmx1, &vmx2);
-	unpack_128_2x128(vzero, vmx1, &vmx3, &vmx4);
-	unpack_128_2x128(vzero, vmx2, &vmx5, &vmx6);
+	unpack_128_2x128 (vzero, vmx0, &vmx1, &vmx2);
+	unpack_128_2x128 (vzero, vmx1, &vmx3, &vmx4);
+	unpack_128_2x128 (vzero, vmx2, &vmx5, &vmx6);
 
-	save_128_aligned(dst, vmx6);
-	save_128_aligned((dst +  4), vmx5);
-	save_128_aligned((dst +  8), vmx4);
-	save_128_aligned((dst + 12), vmx3);
+	save_128_aligned (dst, vmx6);
+	save_128_aligned ((dst + 4), vmx5);
+	save_128_aligned ((dst + 8), vmx4);
+	save_128_aligned ((dst + 12), vmx3);
 
 	dst += 16;
 	src += 16;
@@ -2926,8 +2905,8 @@ vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
     return iter->buffer;
 }
 
-#define IMAGE_FLAGS							\
-    (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM |		\
+#define IMAGE_FLAGS                                                            \
+    (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM |                       \
      FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST)
 
 /* clang-format off */
@@ -2946,36 +2925,37 @@ static const pixman_iter_info_t vmx_iters[] =
 pixman_implementation_t *
 _pixman_implementation_create_vmx (pixman_implementation_t *fallback)
 {
-    pixman_implementation_t *imp = _pixman_implementation_create (fallback, vmx_fast_paths);
+    pixman_implementation_t *imp = _pixman_implementation_create (
+	fallback, vmx_fast_paths);
 
     /* VMX constants */
     mask_ff000000 = (vector unsigned char)create_mask_32_128 (0xff000000);
 
     /* Set up function pointers */
 
-    imp->combine_32[PIXMAN_OP_OVER] = vmx_combine_over_u;
+    imp->combine_32[PIXMAN_OP_OVER]         = vmx_combine_over_u;
     imp->combine_32[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_u;
-    imp->combine_32[PIXMAN_OP_IN] = vmx_combine_in_u;
-    imp->combine_32[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_u;
-    imp->combine_32[PIXMAN_OP_OUT] = vmx_combine_out_u;
-    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_u;
-    imp->combine_32[PIXMAN_OP_ATOP] = vmx_combine_atop_u;
+    imp->combine_32[PIXMAN_OP_IN]           = vmx_combine_in_u;
+    imp->combine_32[PIXMAN_OP_IN_REVERSE]   = vmx_combine_in_reverse_u;
+    imp->combine_32[PIXMAN_OP_OUT]          = vmx_combine_out_u;
+    imp->combine_32[PIXMAN_OP_OUT_REVERSE]  = vmx_combine_out_reverse_u;
+    imp->combine_32[PIXMAN_OP_ATOP]         = vmx_combine_atop_u;
     imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_u;
-    imp->combine_32[PIXMAN_OP_XOR] = vmx_combine_xor_u;
+    imp->combine_32[PIXMAN_OP_XOR]          = vmx_combine_xor_u;
 
     imp->combine_32[PIXMAN_OP_ADD] = vmx_combine_add_u;
 
-    imp->combine_32_ca[PIXMAN_OP_SRC] = vmx_combine_src_ca;
-    imp->combine_32_ca[PIXMAN_OP_OVER] = vmx_combine_over_ca;
+    imp->combine_32_ca[PIXMAN_OP_SRC]          = vmx_combine_src_ca;
+    imp->combine_32_ca[PIXMAN_OP_OVER]         = vmx_combine_over_ca;
     imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = vmx_combine_over_reverse_ca;
-    imp->combine_32_ca[PIXMAN_OP_IN] = vmx_combine_in_ca;
-    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = vmx_combine_in_reverse_ca;
-    imp->combine_32_ca[PIXMAN_OP_OUT] = vmx_combine_out_ca;
-    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = vmx_combine_out_reverse_ca;
-    imp->combine_32_ca[PIXMAN_OP_ATOP] = vmx_combine_atop_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN]           = vmx_combine_in_ca;
+    imp->combine_32_ca[PIXMAN_OP_IN_REVERSE]   = vmx_combine_in_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT]          = vmx_combine_out_ca;
+    imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE]  = vmx_combine_out_reverse_ca;
+    imp->combine_32_ca[PIXMAN_OP_ATOP]         = vmx_combine_atop_ca;
     imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = vmx_combine_atop_reverse_ca;
-    imp->combine_32_ca[PIXMAN_OP_XOR] = vmx_combine_xor_ca;
-    imp->combine_32_ca[PIXMAN_OP_ADD] = vmx_combine_add_ca;
+    imp->combine_32_ca[PIXMAN_OP_XOR]          = vmx_combine_xor_ca;
+    imp->combine_32_ca[PIXMAN_OP_ADD]          = vmx_combine_add_ca;
 
     imp->fill = vmx_fill;
 
commit d09af9f36873ffdf5e5bd5c1b29a5b0273528c8c
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:29 2024 -0500

    vmx: Disable clang-format around some data structure declarations

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index a0034c2..6a25846 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -2800,6 +2800,7 @@ scaled_nearest_scanline_vmx_8888_8888_OVER (uint32_t*       pd,
     }
 }
 
+/* clang-format off */
 FAST_NEAREST_MAINLOOP (vmx_8888_8888_cover_OVER,
 		       scaled_nearest_scanline_vmx_8888_8888_OVER,
 		       uint32_t, uint32_t, COVER)
@@ -2846,6 +2847,7 @@ static const pixman_fast_path_t vmx_fast_paths[] =
 
     {   PIXMAN_OP_NONE	},
 };
+/* clang-format on */
 
 static uint32_t *
 vmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
@@ -2928,6 +2930,7 @@ vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
     (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM |		\
      FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST)
 
+/* clang-format off */
 static const pixman_iter_info_t vmx_iters[] =
 {
     { PIXMAN_x8r8g8b8, IMAGE_FLAGS, ITER_NARROW,
@@ -2938,6 +2941,7 @@ static const pixman_iter_info_t vmx_iters[] =
     },
     { PIXMAN_null },
 };
+/* clang-format on */
 
 pixman_implementation_t *
 _pixman_implementation_create_vmx (pixman_implementation_t *fallback)
commit fe3ef3a9121be734b8247e518037bb76ba4b0855
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:28 2024 -0500

    vmx: Declare iterator variable in for loop
    
    Reduces churn when running clang-format.
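
    A minimal before/after sketch (an illustration, not taken from the
    patch); the loop-scoped form requires C99 or later:

        /* before: counter declared at function scope */
        int i;
        for (i = width / 4; i > 0; i--) { /* ... */ }

        /* after: counter scoped to the loop (C99) */
        for (int i = width / 4; i > 0; i--) { /* ... */ }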

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 30f4f19..a0034c2 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -353,7 +353,6 @@ vmx_combine_over_u_no_mask (uint32_t *      dest,
                             const uint32_t *src,
                             int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -372,7 +371,7 @@ vmx_combine_over_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 
 	LOAD_VECTORS (dest, src);
@@ -385,7 +384,7 @@ vmx_combine_over_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t d = dest[i];
@@ -403,7 +402,6 @@ vmx_combine_over_u_mask (uint32_t *      dest,
                          const uint32_t *mask,
                          int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -427,7 +425,7 @@ vmx_combine_over_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -440,7 +438,7 @@ vmx_combine_over_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -475,7 +473,6 @@ vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
                                     const uint32_t *src,
                                     int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -493,7 +490,7 @@ vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 
 	LOAD_VECTORS (dest, src);
@@ -506,7 +503,7 @@ vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t d = dest[i];
@@ -523,7 +520,6 @@ vmx_combine_over_reverse_u_mask (uint32_t *      dest,
                                  const uint32_t *mask,
                                  int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -545,7 +541,7 @@ vmx_combine_over_reverse_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 
 	LOAD_VECTORSM (dest, src, mask);
@@ -559,7 +555,7 @@ vmx_combine_over_reverse_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -592,7 +588,6 @@ vmx_combine_in_u_no_mask (uint32_t *      dest,
                           const uint32_t *src,
                           int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -609,7 +604,7 @@ vmx_combine_in_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORS (dest, src);
 
@@ -621,7 +616,7 @@ vmx_combine_in_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t a = ALPHA_8 (dest[i]);
@@ -637,7 +632,6 @@ vmx_combine_in_u_mask (uint32_t *      dest,
                        const uint32_t *mask,
                        int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -658,7 +652,7 @@ vmx_combine_in_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -671,7 +665,7 @@ vmx_combine_in_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -703,7 +697,6 @@ vmx_combine_in_reverse_u_no_mask (uint32_t *      dest,
                                   const uint32_t *src,
                                   int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -721,7 +714,7 @@ vmx_combine_in_reverse_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORS (dest, src);
 
@@ -733,7 +726,7 @@ vmx_combine_in_reverse_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t d = dest[i];
 	uint32_t a = ALPHA_8 (src[i]);
@@ -750,7 +743,6 @@ vmx_combine_in_reverse_u_mask (uint32_t *      dest,
                                const uint32_t *mask,
                                int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -772,7 +764,7 @@ vmx_combine_in_reverse_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -785,7 +777,7 @@ vmx_combine_in_reverse_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t d = dest[i];
@@ -818,7 +810,6 @@ vmx_combine_out_u_no_mask (uint32_t *      dest,
                            const uint32_t *src,
                            int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -836,7 +827,7 @@ vmx_combine_out_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORS (dest, src);
 
@@ -848,7 +839,7 @@ vmx_combine_out_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t a = ALPHA_8 (~dest[i]);
@@ -865,7 +856,6 @@ vmx_combine_out_u_mask (uint32_t *      dest,
                         const uint32_t *mask,
                         int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -886,7 +876,7 @@ vmx_combine_out_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -899,7 +889,7 @@ vmx_combine_out_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -931,7 +921,6 @@ vmx_combine_out_reverse_u_no_mask (uint32_t *      dest,
                                    const uint32_t *src,
                                    int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -949,7 +938,7 @@ vmx_combine_out_reverse_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 
 	LOAD_VECTORS (dest, src);
@@ -962,7 +951,7 @@ vmx_combine_out_reverse_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t d = dest[i];
 	uint32_t a = ALPHA_8 (~src[i]);
@@ -979,7 +968,6 @@ vmx_combine_out_reverse_u_mask (uint32_t *      dest,
                                 const uint32_t *mask,
                                 int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1001,7 +989,7 @@ vmx_combine_out_reverse_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -1014,7 +1002,7 @@ vmx_combine_out_reverse_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t d = dest[i];
@@ -1047,7 +1035,6 @@ vmx_combine_atop_u_no_mask (uint32_t *      dest,
                             const uint32_t *src,
                             int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -1067,7 +1054,7 @@ vmx_combine_atop_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORS (dest, src);
 
@@ -1080,7 +1067,7 @@ vmx_combine_atop_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t d = dest[i];
@@ -1099,7 +1086,6 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
                          const uint32_t *mask,
                          int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1125,7 +1111,7 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -1139,7 +1125,7 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -1176,7 +1162,6 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
                                     const uint32_t *src,
                                     int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -1196,7 +1181,7 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORS (dest, src);
 
@@ -1209,7 +1194,7 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t d = dest[i];
@@ -1228,7 +1213,6 @@ vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
                                  const uint32_t *mask,
                                  int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1254,7 +1238,7 @@ vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -1268,7 +1252,7 @@ vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -1305,7 +1289,6 @@ vmx_combine_xor_u_no_mask (uint32_t *      dest,
                            const uint32_t *src,
                            int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -1325,7 +1308,7 @@ vmx_combine_xor_u_no_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKS (dest, src);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORS (dest, src);
 
@@ -1338,7 +1321,7 @@ vmx_combine_xor_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t d = dest[i];
@@ -1357,7 +1340,6 @@ vmx_combine_xor_u_mask (uint32_t *      dest,
                         const uint32_t *mask,
                         int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1383,7 +1365,7 @@ vmx_combine_xor_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -1397,7 +1379,7 @@ vmx_combine_xor_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -1434,7 +1416,6 @@ vmx_combine_add_u_no_mask (uint32_t *      dest,
                            const uint32_t *src,
                            int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
@@ -1451,7 +1432,7 @@ vmx_combine_add_u_no_mask (uint32_t *      dest,
 
     COMPUTE_SHIFT_MASKS (dest, src);
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORS (dest, src);
 
@@ -1463,7 +1444,7 @@ vmx_combine_add_u_no_mask (uint32_t *      dest,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t s = src[i];
 	uint32_t d = dest[i];
@@ -1480,7 +1461,6 @@ vmx_combine_add_u_mask (uint32_t *      dest,
                         const uint32_t *mask,
                         int             width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1501,7 +1481,7 @@ vmx_combine_add_u_mask (uint32_t *      dest,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSM (dest, src, mask);
 
@@ -1514,7 +1494,7 @@ vmx_combine_add_u_mask (uint32_t *      dest,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t m = ALPHA_8 (mask[i]);
 	uint32_t s = src[i];
@@ -1549,7 +1529,6 @@ vmx_combine_src_ca (pixman_implementation_t *imp,
                     const uint32_t *         mask,
                     int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1568,7 +1547,7 @@ vmx_combine_src_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -1581,7 +1560,7 @@ vmx_combine_src_ca (pixman_implementation_t *imp,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -1600,7 +1579,6 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
                      const uint32_t *         mask,
                      int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1623,7 +1601,7 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -1636,7 +1614,7 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -1659,7 +1637,6 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
                              const uint32_t *         mask,
                              int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1681,7 +1658,7 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -1694,7 +1671,7 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
 	dest += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -1716,7 +1693,6 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
                    const uint32_t *         mask,
                    int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1737,7 +1713,7 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -1750,7 +1726,7 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -1771,7 +1747,6 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
                            const uint32_t *         mask,
                            int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1792,7 +1767,7 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 
 	LOAD_VECTORSC (dest, src, mask);
@@ -1806,7 +1781,7 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t d = dest[i];
@@ -1827,7 +1802,6 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
                     const uint32_t *         mask,
                     int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1849,7 +1823,7 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -1863,7 +1837,7 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -1885,7 +1859,6 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
                             const uint32_t *         mask,
                             int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1907,7 +1880,7 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -1921,7 +1894,7 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -1943,7 +1916,6 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
                      const uint32_t *         mask,
                      int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask, vsrca;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -1967,7 +1939,7 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -1986,7 +1958,7 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -2010,7 +1982,6 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
                              const uint32_t *         mask,
                              int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -2034,7 +2005,7 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -2050,7 +2021,7 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -2074,7 +2045,6 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
                     const uint32_t *         mask,
                     int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -2098,7 +2068,7 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -2114,7 +2084,7 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -2138,7 +2108,6 @@ vmx_combine_add_ca (pixman_implementation_t *imp,
                     const uint32_t *         mask,
                     int                      width)
 {
-    int i;
     vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
@@ -2159,7 +2128,7 @@ vmx_combine_add_ca (pixman_implementation_t *imp,
     COMPUTE_SHIFT_MASKC (dest, src, mask);
 
     /* printf ("%s\n",__PRETTY_FUNCTION__); */
-    for (i = width / 4; i > 0; i--)
+    for (int i = width / 4; i > 0; i--)
     {
 	LOAD_VECTORSC (dest, src, mask);
 
@@ -2172,7 +2141,7 @@ vmx_combine_add_ca (pixman_implementation_t *imp,
 	mask += 4;
     }
 
-    for (i = width % 4; --i >= 0;)
+    for (int i = width % 4; --i >= 0;)
     {
 	uint32_t a = mask[i];
 	uint32_t s = src[i];
@@ -2497,7 +2466,7 @@ vmx_composite_over_n_8888 (pixman_implementation_t *imp,
     PIXMAN_COMPOSITE_ARGS (info);
     uint32_t *dst_line, *dst;
     uint32_t src, ia;
-    int      i, w, dst_stride;
+    int      w, dst_stride;
 
     vector unsigned char vdst, vsrc, via;
 
@@ -2527,14 +2496,14 @@ vmx_composite_over_n_8888 (pixman_implementation_t *imp,
 	    w--;
 	}
 
-	for (i = w / 4; i > 0; i--)
+	for (int i = w / 4; i > 0; i--)
 	{
 	    vdst = pix_multiply (load_128_aligned (dst), via);
 	    save_128_aligned (dst, pix_add (vsrc, vdst));
 	    dst += 4;
 	}
 
-	for (i = w % 4; --i >= 0;)
+	for (int i = w % 4; --i >= 0;)
 	{
 	    uint32_t d = dst[i];
 	    UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, src);
commit b1aa943286c2dba9ce44b9aab60dd2f8dbc3d8a9
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:24 2024 -0500

    vmx: Optimize pix_multiply()
    
    Uses the same algorithm, but splits the elements into even-odd
    groupings rather than hi-lo. This allows us to do the initial unpack
    via `vec_mul{e,o}`, which multiply 8-bit values and produce 16-bit
    values.
    
    Replaces
            2x `vec_mergeh`
            2x `vec_mergel`
            2x `vec_mladd`
    with
            1x `vec_mule`
            1x `vec_mulo`
            2x `vec_adds`
    
    Slightly improves performance. On a G4, before and after:
    
    over_8888_8888 =  L1: 281.39  L2: 243.76  M: 50.19 ( 75.64%)  HT: 37.40  VT: 33.08  R: 31.01  RT: 15.47 ( 172Kops/s)
    over_8888_8888 =  L1: 359.85  L2: 266.58  M: 50.56 ( 76.10%)  HT: 37.87  VT: 33.60  R: 31.17  RT: 15.54 ( 172Kops/s)
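
    As a scalar sketch of the per-byte math (an illustration, not code
    from the patch), each channel computes a*b/255 with rounding:

        static unsigned char
        mul_un8 (unsigned char a, unsigned char b)
        {
            unsigned short t = (unsigned short)(a * b) + 128;
            return (unsigned char)((t + (t >> 8)) >> 8);
        }

    `vec_mule`/`vec_mulo` produce the sixteen a*b products as two vectors
    of 16-bit lanes (even and odd byte positions), the `vec_adds` pair
    applies the +128 and +(t >> 8) steps, and the final `vec_perm`
    gathers the high byte of every lane into one 16-byte result.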

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index b1d1a9b..30f4f19 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -107,36 +107,27 @@ unpack_128_2x128 (vector unsigned char  data1,
 }
 
 static force_inline vector unsigned char
-pix_multiply (vector unsigned char p, vector unsigned char a)
+pix_multiply (vector unsigned char a, vector unsigned char b)
 {
     const vector unsigned char sel = (vector unsigned char){
 #ifdef WORDS_BIGENDIAN
-	0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
-	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+	0x00, 0x10, 0x02, 0x12, 0x04, 0x14, 0x06, 0x16,
+	0x08, 0x18, 0x0a, 0x1a, 0x0c, 0x1c, 0x0e, 0x1e,
 #else
-	0x01, 0x03, 0x05, 0x07, 0x09, 0x0b, 0x0d, 0x1f,
-	0x11, 0x13, 0x15, 0x17, 0x19, 0x1b, 0x1d, 0x1f,
+	0x01, 0x11, 0x03, 0x13, 0x05, 0x15, 0x07, 0x17,
+	0x09, 0x19, 0x0b, 0x1b, 0x0d, 0x1d, 0x0f, 0x1f,
 #endif
     };
-    vector unsigned short hi, lo, mod;
+    vector unsigned short e = vec_mule (a, b);
+    vector unsigned short o = vec_mulo (a, b);
 
-    /* unpack to short */
-    hi = (vector unsigned short) unpackhi_128_16x8(p, vzero);
-    mod = (vector unsigned short) unpackhi_128_16x8(a, vzero);
+    e = vec_adds (e, create_mask_16_128 (128));
+    o = vec_adds (o, create_mask_16_128 (128));
 
-    hi = vec_mladd (hi, mod, create_mask_16_128(128));
+    e = vec_adds (e, vec_sr (e, vec_splat_u16 (8)));
+    o = vec_adds (o, vec_sr (o, vec_splat_u16 (8)));
 
-    hi = vec_adds (hi, vec_sr (hi, vec_splat_u16 (8)));
-
-    /* unpack to short */
-    lo = (vector unsigned short) unpacklo_128_16x8(p, vzero);
-    mod = (vector unsigned short) unpacklo_128_16x8(a, vzero);
-
-    lo = vec_mladd (lo, mod, create_mask_16_128(128));
-
-    lo = vec_adds (lo, vec_sr (lo, vec_splat_u16 (8)));
-
-    return (vector unsigned char)vec_perm (hi, lo, sel);
+    return (vector unsigned char)vec_perm (e, o, sel);
 }
 
 static force_inline vector unsigned char
commit 2cf5da031be6edb4724aecd6cac2f7752311d110
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:23 2024 -0500

    vmx: Avoid two shifts in pix_multiply() function
    
    By using `vec_perm` to select the high bytes of each 16-bit value in
    hi/lo, we can save two `vec_sr` instructions.
    
    Slightly improves performance. On a G4, before and after:
    
    over_8888_8888 =  L1: 257.02  L2: 228.77  M: 49.88 ( 75.29%)  HT: 37.02  VT: 32.95  R: 30.79  RT: 15.36 ( 171Kops/s)
    over_8888_8888 =  L1: 281.39  L2: 243.76  M: 50.19 ( 75.64%)  HT: 37.40  VT: 33.08  R: 31.01  RT: 15.47 ( 172Kops/s)
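
    The value wanted from each 16-bit lane is `t >> 8`, i.e. the lane's
    high byte, so one `vec_perm` that gathers those bytes replaces both
    shifts and the pack. A hypothetical scalar analogue:

        /* fragment, assuming big-endian byte order */
        unsigned short t = 0xabcd;
        unsigned char via_shift  = (unsigned char)(t >> 8);  /* 0xab */
        unsigned char via_select = ((unsigned char *)&t)[0]; /* 0xab */

    The byte-order dependence is why the selector table needs separate
    WORDS_BIGENDIAN arms.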

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 694dbf0..b1d1a9b 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -109,6 +109,15 @@ unpack_128_2x128 (vector unsigned char  data1,
 static force_inline vector unsigned char
 pix_multiply (vector unsigned char p, vector unsigned char a)
 {
+    const vector unsigned char sel = (vector unsigned char){
+#ifdef WORDS_BIGENDIAN
+	0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+	0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
+#else
+	0x01, 0x03, 0x05, 0x07, 0x09, 0x0b, 0x0d, 0x1f,
+	0x11, 0x13, 0x15, 0x17, 0x19, 0x1b, 0x1d, 0x1f,
+#endif
+    };
     vector unsigned short hi, lo, mod;
 
     /* unpack to short */
@@ -119,8 +128,6 @@ pix_multiply (vector unsigned char p, vector unsigned char a)
 
     hi = vec_adds (hi, vec_sr (hi, vec_splat_u16 (8)));
 
-    hi = vec_sr (hi, vec_splat_u16 (8));
-
     /* unpack to short */
     lo = (vector unsigned short) unpacklo_128_16x8(p, vzero);
     mod = (vector unsigned short) unpacklo_128_16x8(a, vzero);
@@ -129,9 +136,7 @@ pix_multiply (vector unsigned char p, vector unsigned char a)
 
     lo = vec_adds (lo, vec_sr (lo, vec_splat_u16 (8)));
 
-    lo = vec_sr (lo, vec_splat_u16 (8));
-
-    return vec_packsu (hi, lo);
+    return (vector unsigned char)vec_perm (hi, lo, sel);
 }
 
 static force_inline vector unsigned char
commit 10be4bf9d252b0d98dd296c0994469e85caee698
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:22 2024 -0500

    vmx: Move create_mask_32_128() function

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index e1df61f..694dbf0 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -70,6 +70,12 @@ create_mask_16_128 (uint32_t mask)
 				   mask, mask, mask, mask};
 }
 
+static force_inline vector unsigned int
+create_mask_32_128 (uint32_t mask)
+{
+    return (vector unsigned int){mask, mask, mask, mask};
+}
+
 static force_inline vector unsigned char
 unpacklo_128_16x8 (vector unsigned char data1, vector unsigned char data2)
 {
@@ -90,7 +96,6 @@ unpackhi_128_16x8 (vector unsigned char data1, vector unsigned char data2)
 #endif
 }
 
-
 static force_inline void
 unpack_128_2x128 (vector unsigned char  data1,
 		  vector unsigned char  data2,
@@ -278,12 +283,6 @@ save_128_aligned (uint32_t* data,
     STORE_VECTOR(data)
 }
 
-static force_inline vector unsigned int
-create_mask_32_128 (uint32_t mask)
-{
-    return (vector unsigned int){mask, mask, mask, mask};
-}
-
 static force_inline int
 is_opaque (vector unsigned char x)
 {
commit 54ef93f17cc8e76e298c313afde937e6d0d7bea1
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:20 2024 -0500

    vmx: Simplify unpack{hi,lo}_128_16x8() function

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 9a9d155..e1df61f 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -73,35 +73,24 @@ create_mask_16_128 (uint32_t mask)
 static force_inline vector unsigned char
 unpacklo_128_16x8 (vector unsigned char data1, vector unsigned char data2)
 {
-    vector unsigned char lo;
-
-    /* unpack to short */
-    lo =
 #ifdef WORDS_BIGENDIAN
-	vec_mergel (data2, data1);
+    return vec_mergel (data2, data1);
 #else
-	vec_mergel (data1, data2);
+    return vec_mergel (data1, data2);
 #endif
-
-    return lo;
 }
 
 static force_inline vector unsigned char
 unpackhi_128_16x8 (vector unsigned char data1, vector unsigned char data2)
 {
-    vector unsigned char hi;
-
-    /* unpack to short */
-    hi =
 #ifdef WORDS_BIGENDIAN
-	vec_mergeh (data2, data1);
+    return vec_mergeh (data2, data1);
 #else
-	vec_mergeh (data1, data2);
+    return vec_mergeh (data1, data2);
 #endif
-
-    return hi;
 }
 
+
 static force_inline void
 unpack_128_2x128 (vector unsigned char  data1,
 		  vector unsigned char  data2,
commit 554f8fe494082607bc98b48876e24c2206f23f15
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:19 2024 -0500

    vmx: Make in_over() a real function
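
    A function, unlike the macro, type-checks its arguments and
    evaluates each of them exactly once. A hypothetical illustration of
    the hazard a macro form carries:

        /* `mask` expands twice in the macro body ... */
        #define IN_OVER(src, srca, mask, dest) \
            over (pix_multiply (src, mask), pix_multiply (srca, mask), dest)
        /* ... so IN_OVER (vsrc, valpha, load_128_unaligned (pm++), vdest)
           would advance pm twice; the inline function evaluates the
           argument once. */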

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 45ff8a4..9a9d155 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -176,9 +176,14 @@ over (vector unsigned char src,
 }
 
 /* in == pix_multiply */
-#define in_over(src, srca, mask, dest)					\
-    over (pix_multiply (src, mask),					\
-          pix_multiply (srca, mask), dest)
+static force_inline vector unsigned char
+in_over (vector unsigned char src,
+	 vector unsigned char srca,
+	 vector unsigned char mask,
+	 vector unsigned char dest)
+{
+    return over (pix_multiply (src, mask), pix_multiply (srca, mask), dest);
+}
 
 #ifdef WORDS_BIGENDIAN
 
commit 2d8060a3bc54ac68adbff1965aede3784ac61e5a
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:17 2024 -0500

    vmx: Simplify over() function
    
    Now that we're using the correct vector types, this function is one
    line.
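
    As a scalar sketch of what the expression computes per byte
    (illustration only, with `sa` the splatted source alpha): negate()
    is a bitwise NOT, which for 8-bit lanes equals 255 - sa, and
    vec_adds() is a saturating add.

        static unsigned char
        over_un8 (unsigned char s, unsigned char sa, unsigned char d)
        {
            unsigned short t = (unsigned short)(d * (255 - sa)) + 128;
            unsigned int   r = s + ((t + (t >> 8)) >> 8);
            return r > 255 ? 255 : (unsigned char)r;
        }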

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 897c60f..45ff8a4 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -172,11 +172,7 @@ over (vector unsigned char src,
       vector unsigned char srca,
       vector unsigned char dest)
 {
-    vector unsigned char tmp =
-	pix_multiply (dest, negate (srca));
-
-    tmp = vec_adds (src, tmp);
-    return tmp;
+    return vec_adds (src, pix_multiply (dest, negate (srca)));
 }
 
 /* in == pix_multiply */
commit 6d50860a8667024b9f4b216fca9f355360efe70a
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:16 2024 -0500

    vmx: Add and use `vzero` constant
    
    A lot nicer to read than `(vector unsigned char) AVV (0)`.

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index f876715..897c60f 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -33,8 +33,7 @@
 #include "pixman-inlines.h"
 #include <altivec.h>
 
-#define AVV(x...) {x}
-
+static const vector unsigned char vzero = (const vector unsigned char){0};
 static vector unsigned char mask_ff000000;
 
 static force_inline vector unsigned char
@@ -119,10 +118,8 @@ pix_multiply (vector unsigned char p, vector unsigned char a)
     vector unsigned short hi, lo, mod;
 
     /* unpack to short */
-    hi = (vector unsigned short)
-	unpackhi_128_16x8(p, (vector unsigned char) AVV (0));
-    mod = (vector unsigned short)
-	unpackhi_128_16x8(a, (vector unsigned char) AVV (0));
+    hi = (vector unsigned short) unpackhi_128_16x8(p, vzero);
+    mod = (vector unsigned short) unpackhi_128_16x8(a, vzero);
 
     hi = vec_mladd (hi, mod, create_mask_16_128(128));
 
@@ -131,10 +128,8 @@ pix_multiply (vector unsigned char p, vector unsigned char a)
     hi = vec_sr (hi, vec_splat_u16 (8));
 
     /* unpack to short */
-    lo = (vector unsigned short)
-	unpacklo_128_16x8(p, (vector unsigned char) AVV (0));
-    mod = (vector unsigned short)
-	unpacklo_128_16x8(a, (vector unsigned char) AVV (0));
+    lo = (vector unsigned short) unpacklo_128_16x8(p, vzero);
+    mod = (vector unsigned short) unpacklo_128_16x8(a, vzero);
 
     lo = vec_mladd (lo, mod, create_mask_16_128(128));
 
@@ -308,13 +303,13 @@ is_opaque (vector unsigned char x)
 static force_inline int
 is_zero (vector unsigned char x)
 {
-    return vec_all_eq (x, (vector unsigned char) AVV (0));
+    return vec_all_eq (x, vzero);
 }
 
 static force_inline int
 is_transparent (vector unsigned char x)
 {
-    return vec_all_eq (vec_and (x, mask_ff000000), (vector unsigned char) AVV (0));
+    return vec_all_eq (vec_and (x, mask_ff000000), vzero);
 }
 
 static force_inline uint32_t
@@ -357,7 +352,7 @@ combine4 (const uint32_t* ps, const uint32_t* pm)
 	msk = load_128_unaligned(pm);
 
 	if (is_transparent(msk))
-	    return (vector unsigned char)AVV (0);
+	    return vzero;
     }
 
     src = load_128_unaligned(ps);
@@ -2650,7 +2645,7 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 	    /* pm is NOT necessarily 16-byte aligned */
 	    vmask = load_128_unaligned (pm);
 
-	    pack_cmp = vec_all_eq(vmask, (vector unsigned char) AVV(0));
+	    pack_cmp = vec_all_eq(vmask, vzero);
 
 	    /* if all bits in mask are zero, pack_cmp is not 0 */
 	    if (pack_cmp == 0)
@@ -2952,9 +2947,9 @@ vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
     {
 	vmx0 = load_128_unaligned((uint32_t *) src);
 
-	unpack_128_2x128((vector unsigned char) AVV(0), vmx0, &vmx1, &vmx2);
-	unpack_128_2x128((vector unsigned char) AVV(0), vmx1, &vmx3, &vmx4);
-	unpack_128_2x128((vector unsigned char) AVV(0), vmx2, &vmx5, &vmx6);
+	unpack_128_2x128(vzero, vmx0, &vmx1, &vmx2);
+	unpack_128_2x128(vzero, vmx1, &vmx3, &vmx4);
+	unpack_128_2x128(vzero, vmx2, &vmx5, &vmx6);
 
 	save_128_aligned(dst, vmx6);
 	save_128_aligned((dst +  4), vmx5);
commit 53f04100e8299edfdf194c08739188e91d6e63c2
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:26 2024 -0500

    vmx: Use selector variables
    
    In the future, if we enable clang-format, this will avoid problems
    with how it wants to format function arguments.
    
    Also removes some usage of the AVV macro.

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 0c86e53..f876715 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -40,26 +40,28 @@ static vector unsigned char mask_ff000000;
 static force_inline vector unsigned char
 splat_alpha (vector unsigned char pix)
 {
+    const vector unsigned char sel = (vector unsigned char){
 #ifdef WORDS_BIGENDIAN
-    return vec_perm (pix, pix,
-		     (vector unsigned char)AVV (
-			 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04,
-			 0x08, 0x08, 0x08, 0x08, 0x0C, 0x0C, 0x0C, 0x0C));
+	0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04,
+	0x08, 0x08, 0x08, 0x08, 0x0C, 0x0C, 0x0C, 0x0C,
 #else
-    return vec_perm (pix, pix,
-		     (vector unsigned char)AVV (
-			 0x03, 0x03, 0x03, 0x03, 0x07, 0x07, 0x07, 0x07,
-			 0x0B, 0x0B, 0x0B, 0x0B, 0x0F, 0x0F, 0x0F, 0x0F));
+	0x03, 0x03, 0x03, 0x03, 0x07, 0x07, 0x07, 0x07,
+	0x0B, 0x0B, 0x0B, 0x0B, 0x0F, 0x0F, 0x0F, 0x0F,
 #endif
+    };
+
+    return vec_perm (pix, pix, sel);
 }
 
 static force_inline vector unsigned char
 splat_pixel (vector unsigned char pix)
 {
-    return vec_perm (pix, pix,
-		     (vector unsigned char)AVV (
-			 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
-			 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03));
+    const vector unsigned char sel = (vector unsigned char){
+	0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
+	0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
+    };
+
+    return vec_perm (pix, pix, sel);
 }
 
 static force_inline vector unsigned short
commit ca9e9faa9d94584b6cdb90cff42571d01d947fe5
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:15 2024 -0500

    vmx: Add and use create_mask_16_128() function

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index bc902a8..0c86e53 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -62,6 +62,13 @@ splat_pixel (vector unsigned char pix)
 			 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03));
 }
 
+static force_inline vector unsigned short
+create_mask_16_128 (uint32_t mask)
+{
+    return (vector unsigned short){mask, mask, mask, mask,
+				   mask, mask, mask, mask};
+}
+
 static force_inline vector unsigned char
 unpacklo_128_16x8 (vector unsigned char data1, vector unsigned char data2)
 {
@@ -115,9 +122,7 @@ pix_multiply (vector unsigned char p, vector unsigned char a)
     mod = (vector unsigned short)
 	unpackhi_128_16x8(a, (vector unsigned char) AVV (0));
 
-    hi = vec_mladd (hi, mod, (vector unsigned short)
-                    AVV (0x0080, 0x0080, 0x0080, 0x0080,
-                         0x0080, 0x0080, 0x0080, 0x0080));
+    hi = vec_mladd (hi, mod, create_mask_16_128(128));
 
     hi = vec_adds (hi, vec_sr (hi, vec_splat_u16 (8)));
 
@@ -129,9 +134,7 @@ pix_multiply (vector unsigned char p, vector unsigned char a)
     mod = (vector unsigned short)
 	unpacklo_128_16x8(a, (vector unsigned char) AVV (0));
 
-    lo = vec_mladd (lo, mod, (vector unsigned short)
-                    AVV (0x0080, 0x0080, 0x0080, 0x0080,
-                         0x0080, 0x0080, 0x0080, 0x0080));
+    lo = vec_mladd (lo, mod, create_mask_16_128(128));
 
     lo = vec_adds (lo, vec_sr (lo, vec_splat_u16 (8)));
 
commit c44e0d5f87801c9a26ae4855d493dfc8a3144d71
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:13 2024 -0500

    vmx: Use appropriate types
    
    I found working in this code very confusing because of the number of
    casts. If we just use the appropriate vector type, we simplify the
    code greatly.
    
    Somewhat unexpectedly, this also reduces the instruction counts in some
    functions.
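
    An illustrative before/after of the cast churn (a sketch, not lines
    from the patch):

        /* before: pixels held as vector unsigned int, so byte-wise
           intrinsics need casts in and out */
        lo = (vector unsigned int)
            vec_mergel ((vector unsigned char) data1,
                        (vector unsigned char) data2);

        /* after: pixels held as vector unsigned char to begin with */
        lo = vec_mergel (data1, data2);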

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index eb43874..bc902a8 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -35,10 +35,10 @@
 
 #define AVV(x...) {x}
 
-static vector unsigned int mask_ff000000;
+static vector unsigned char mask_ff000000;
 
-static force_inline vector unsigned int
-splat_alpha (vector unsigned int pix)
+static force_inline vector unsigned char
+splat_alpha (vector unsigned char pix)
 {
 #ifdef WORDS_BIGENDIAN
     return vec_perm (pix, pix,
@@ -53,8 +53,8 @@ splat_alpha (vector unsigned int pix)
 #endif
 }
 
-static force_inline vector unsigned int
-splat_pixel (vector unsigned int pix)
+static force_inline vector unsigned char
+splat_pixel (vector unsigned char pix)
 {
     return vec_perm (pix, pix,
 		     (vector unsigned char)AVV (
@@ -62,62 +62,58 @@ splat_pixel (vector unsigned int pix)
 			 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03));
 }
 
-static force_inline vector unsigned int
-unpacklo_128_16x8 (vector unsigned int data1, vector unsigned int data2)
+static force_inline vector unsigned char
+unpacklo_128_16x8 (vector unsigned char data1, vector unsigned char data2)
 {
     vector unsigned char lo;
 
     /* unpack to short */
-    lo = (vector unsigned char)
+    lo =
 #ifdef WORDS_BIGENDIAN
-	vec_mergel ((vector unsigned char) data2,
-		    (vector unsigned char) data1);
+	vec_mergel (data2, data1);
 #else
-	vec_mergel ((vector unsigned char) data1,
-		    (vector unsigned char) data2);
+	vec_mergel (data1, data2);
 #endif
 
-    return (vector unsigned int) lo;
+    return lo;
 }
 
-static force_inline vector unsigned int
-unpackhi_128_16x8 (vector unsigned int data1, vector unsigned int data2)
+static force_inline vector unsigned char
+unpackhi_128_16x8 (vector unsigned char data1, vector unsigned char data2)
 {
     vector unsigned char hi;
 
     /* unpack to short */
-    hi = (vector unsigned char)
+    hi =
 #ifdef WORDS_BIGENDIAN
-	vec_mergeh ((vector unsigned char) data2,
-		    (vector unsigned char) data1);
+	vec_mergeh (data2, data1);
 #else
-	vec_mergeh ((vector unsigned char) data1,
-		    (vector unsigned char) data2);
+	vec_mergeh (data1, data2);
 #endif
 
-    return (vector unsigned int) hi;
+    return hi;
 }
 
 static force_inline void
-unpack_128_2x128 (vector unsigned int  data1,
-		  vector unsigned int  data2,
-		  vector unsigned int *data_lo,
-		  vector unsigned int *data_hi)
+unpack_128_2x128 (vector unsigned char  data1,
+		  vector unsigned char  data2,
+		  vector unsigned char *data_lo,
+		  vector unsigned char *data_hi)
 {
     *data_lo = unpacklo_128_16x8 (data1, data2);
     *data_hi = unpackhi_128_16x8 (data1, data2);
 }
 
-static force_inline vector unsigned int
-pix_multiply (vector unsigned int p, vector unsigned int a)
+static force_inline vector unsigned char
+pix_multiply (vector unsigned char p, vector unsigned char a)
 {
     vector unsigned short hi, lo, mod;
 
     /* unpack to short */
     hi = (vector unsigned short)
-	unpackhi_128_16x8(p, (vector unsigned int) AVV (0));
+	unpackhi_128_16x8(p, (vector unsigned char) AVV (0));
     mod = (vector unsigned short)
-	unpackhi_128_16x8(a, (vector unsigned int) AVV (0));
+	unpackhi_128_16x8(a, (vector unsigned char) AVV (0));
 
     hi = vec_mladd (hi, mod, (vector unsigned short)
                     AVV (0x0080, 0x0080, 0x0080, 0x0080,
@@ -129,9 +125,9 @@ pix_multiply (vector unsigned int p, vector unsigned int a)
 
     /* unpack to short */
     lo = (vector unsigned short)
-	unpacklo_128_16x8(p, (vector unsigned int) AVV (0));
+	unpacklo_128_16x8(p, (vector unsigned char) AVV (0));
     mod = (vector unsigned short)
-	unpacklo_128_16x8(a, (vector unsigned int) AVV (0));
+	unpacklo_128_16x8(a, (vector unsigned char) AVV (0));
 
     lo = vec_mladd (lo, mod, (vector unsigned short)
                     AVV (0x0080, 0x0080, 0x0080, 0x0080,
@@ -141,23 +137,22 @@ pix_multiply (vector unsigned int p, vector unsigned int a)
 
     lo = vec_sr (lo, vec_splat_u16 (8));
 
-    return (vector unsigned int)vec_packsu (hi, lo);
+    return vec_packsu (hi, lo);
 }
 
-static force_inline vector unsigned int
-pix_add (vector unsigned int a, vector unsigned int b)
+static force_inline vector unsigned char
+pix_add (vector unsigned char a, vector unsigned char b)
 {
-    return (vector unsigned int)vec_adds ((vector unsigned char)a,
-                                          (vector unsigned char)b);
+    return vec_adds (a, b);
 }
 
-static force_inline vector unsigned int
-pix_add_mul (vector unsigned int x,
-             vector unsigned int a,
-             vector unsigned int y,
-             vector unsigned int b)
+static force_inline vector unsigned char
+pix_add_mul (vector unsigned char x,
+	     vector unsigned char a,
+	     vector unsigned char y,
+	     vector unsigned char b)
 {
-    vector unsigned int t1, t2;
+    vector unsigned char t1, t2;
 
     t1 = pix_multiply (x, a);
     t2 = pix_multiply (y, b);
@@ -165,23 +160,23 @@ pix_add_mul (vector unsigned int x,
     return pix_add (t1, t2);
 }
 
-static force_inline vector unsigned int
-negate (vector unsigned int src)
+static force_inline vector unsigned char
+negate (vector unsigned char src)
 {
     return vec_nor (src, src);
 }
 
 /* dest*~srca + src */
-static force_inline vector unsigned int
-over (vector unsigned int src,
-      vector unsigned int srca,
-      vector unsigned int dest)
+static force_inline vector unsigned char
+over (vector unsigned char src,
+      vector unsigned char srca,
+      vector unsigned char dest)
 {
-    vector unsigned char tmp = (vector unsigned char)
+    vector unsigned char tmp =
 	pix_multiply (dest, negate (srca));
 
-    tmp = vec_adds ((vector unsigned char)src, tmp);
-    return (vector unsigned int)tmp;
+    tmp = vec_adds (src, tmp);
+    return tmp;
 }
 
 /* in == pix_multiply */
@@ -266,17 +261,17 @@ do							  \
     vec_st ((vector unsigned int) v ## dest, 0, dest);
 
 /* load 4 pixels from a 16-byte boundary aligned address */
-static force_inline vector unsigned int
+static force_inline vector unsigned char
 load_128_aligned (const uint32_t* src)
 {
-    return *((vector unsigned int *) src);
+    return *((vector unsigned char *) src);
 }
 
 /* load 4 pixels from a unaligned address */
-static force_inline vector unsigned int
+static force_inline vector unsigned char
 load_128_unaligned (const uint32_t* src)
 {
-    vector unsigned int vsrc;
+    vector unsigned char vsrc;
     DECLARE_SRC_MASK_VAR;
 
     COMPUTE_SHIFT_MASK (src);
@@ -288,7 +283,7 @@ load_128_unaligned (const uint32_t* src)
 /* save 4 pixels on a 16-byte boundary aligned address */
 static force_inline void
 save_128_aligned (uint32_t* data,
-		  vector unsigned int vdata)
+		  vector unsigned char vdata)
 {
     STORE_VECTOR(data)
 }
@@ -296,25 +291,25 @@ save_128_aligned (uint32_t* data,
 static force_inline vector unsigned int
 create_mask_32_128 (uint32_t mask)
 {
-    return (vector unsigned int) {mask, mask, mask, mask};
+    return (vector unsigned int){mask, mask, mask, mask};
 }
 
 static force_inline int
-is_opaque (vector unsigned int x)
+is_opaque (vector unsigned char x)
 {
     return vec_all_eq (vec_and (x, mask_ff000000), mask_ff000000);
 }
 
 static force_inline int
-is_zero (vector unsigned int x)
+is_zero (vector unsigned char x)
 {
-    return vec_all_eq (x, (vector unsigned int) AVV (0));
+    return vec_all_eq (x, (vector unsigned char) AVV (0));
 }
 
 static force_inline int
-is_transparent (vector unsigned int x)
+is_transparent (vector unsigned char x)
 {
-    return vec_all_eq (vec_and (x, mask_ff000000), (vector unsigned int) AVV (0));
+    return vec_all_eq (vec_and (x, mask_ff000000), (vector unsigned char) AVV (0));
 }
 
 static force_inline uint32_t
@@ -347,17 +342,17 @@ combine1 (const uint32_t *ps, const uint32_t *pm)
     return s;
 }
 
-static force_inline vector unsigned int
+static force_inline vector unsigned char
 combine4 (const uint32_t* ps, const uint32_t* pm)
 {
-    vector unsigned int src, msk;
+    vector unsigned char src, msk;
 
     if (pm)
     {
 	msk = load_128_unaligned(pm);
 
 	if (is_transparent(msk))
-	    return (vector unsigned int) AVV(0);
+	    return (vector unsigned char)AVV (0);
     }
 
     src = load_128_unaligned(ps);
@@ -374,7 +369,7 @@ vmx_combine_over_u_no_mask (uint32_t *      dest,
                             int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -424,7 +419,7 @@ vmx_combine_over_u_mask (uint32_t *      dest,
                          int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -496,7 +491,7 @@ vmx_combine_over_reverse_u_no_mask (uint32_t *      dest,
                                     int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -544,7 +539,7 @@ vmx_combine_over_reverse_u_mask (uint32_t *      dest,
                                  int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -613,7 +608,7 @@ vmx_combine_in_u_no_mask (uint32_t *      dest,
                           int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -658,7 +653,7 @@ vmx_combine_in_u_mask (uint32_t *      dest,
                        int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -724,7 +719,7 @@ vmx_combine_in_reverse_u_no_mask (uint32_t *      dest,
                                   int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -771,7 +766,7 @@ vmx_combine_in_reverse_u_mask (uint32_t *      dest,
                                int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -839,7 +834,7 @@ vmx_combine_out_u_no_mask (uint32_t *      dest,
                            int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -886,7 +881,7 @@ vmx_combine_out_u_mask (uint32_t *      dest,
                         int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -952,7 +947,7 @@ vmx_combine_out_reverse_u_no_mask (uint32_t *      dest,
                                    int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -1000,7 +995,7 @@ vmx_combine_out_reverse_u_mask (uint32_t *      dest,
                                 int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1068,7 +1063,7 @@ vmx_combine_atop_u_no_mask (uint32_t *      dest,
                             int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -1120,7 +1115,7 @@ vmx_combine_atop_u_mask (uint32_t *      dest,
                          int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1197,7 +1192,7 @@ vmx_combine_atop_reverse_u_no_mask (uint32_t *      dest,
                                     int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -1249,7 +1244,7 @@ vmx_combine_atop_reverse_u_mask (uint32_t *      dest,
                                  int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1326,7 +1321,7 @@ vmx_combine_xor_u_no_mask (uint32_t *      dest,
                            int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -1378,7 +1373,7 @@ vmx_combine_xor_u_mask (uint32_t *      dest,
                         int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1455,7 +1450,7 @@ vmx_combine_add_u_no_mask (uint32_t *      dest,
                            int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc;
+    vector unsigned char vdest, vsrc;
     DECLARE_SRC_MASK_VAR;
 
     while (width && ((uintptr_t)dest & 15))
@@ -1501,7 +1496,7 @@ vmx_combine_add_u_mask (uint32_t *      dest,
                         int             width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1570,7 +1565,7 @@ vmx_combine_src_ca (pixman_implementation_t *imp,
                     int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1621,7 +1616,7 @@ vmx_combine_over_ca (pixman_implementation_t *imp,
                      int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1680,7 +1675,7 @@ vmx_combine_over_reverse_ca (pixman_implementation_t *imp,
                              int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1737,7 +1732,7 @@ vmx_combine_in_ca (pixman_implementation_t *imp,
                    int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1792,7 +1787,7 @@ vmx_combine_in_reverse_ca (pixman_implementation_t *imp,
                            int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1848,7 +1843,7 @@ vmx_combine_out_ca (pixman_implementation_t *imp,
                     int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1906,7 +1901,7 @@ vmx_combine_out_reverse_ca (pixman_implementation_t *imp,
                             int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -1964,7 +1959,7 @@ vmx_combine_atop_ca (pixman_implementation_t *imp,
                      int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask, vsrca;
+    vector unsigned char vdest, vsrc, vmask, vsrca;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -2031,7 +2026,7 @@ vmx_combine_atop_reverse_ca (pixman_implementation_t *imp,
                              int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -2095,7 +2090,7 @@ vmx_combine_xor_ca (pixman_implementation_t *imp,
                     int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -2159,7 +2154,7 @@ vmx_combine_add_ca (pixman_implementation_t *imp,
                     int                      width)
 {
     int i;
-    vector unsigned int vdest, vsrc, vmask;
+    vector unsigned char vdest, vsrc, vmask;
     DECLARE_SRC_MASK_VAR;
     DECLARE_MASK_MASK_VAR;
 
@@ -2217,7 +2212,7 @@ vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
     int32_t w;
     uint32_t m, d, s, ia;
 
-    vector unsigned int vsrc, valpha, vmask, vdst;
+    vector unsigned char vsrc, valpha, vmask, vdst;
 
     src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
 
@@ -2230,7 +2225,7 @@ vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
     PIXMAN_IMAGE_GET_LINE (
 	mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
-    vsrc = create_mask_32_128 (src);
+    vsrc = (vector unsigned char)create_mask_32_128 (src);
     valpha = splat_alpha(vsrc);
 
     while (height--)
@@ -2269,7 +2264,7 @@ vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
 	    }
 	    else if (m)
 	    {
-		vmask = splat_pixel(create_mask_32_128 (m));
+		vmask = splat_pixel((vector unsigned char)create_mask_32_128 (m));
 
 		/* dst is 16-byte aligned */
 		vdst = in_over (vsrc, valpha, vmask, load_128_aligned (dst));
@@ -2485,7 +2480,7 @@ vmx_composite_src_x888_8888 (pixman_implementation_t *imp,
 
 	while (w >= 16)
 	{
-	    vector unsigned int vmx_src1, vmx_src2, vmx_src3, vmx_src4;
+	    vector unsigned char vmx_src1, vmx_src2, vmx_src3, vmx_src4;
 
 	    vmx_src1 = load_128_unaligned (src);
 	    vmx_src2 = load_128_unaligned (src + 4);
@@ -2518,7 +2513,8 @@ vmx_composite_over_n_8888 (pixman_implementation_t *imp,
     uint32_t *dst_line, *dst;
     uint32_t src, ia;
     int      i, w, dst_stride;
-    vector unsigned int vdst, vsrc, via;
+
+    vector unsigned char vdst, vsrc, via;
 
     src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
 
@@ -2528,7 +2524,7 @@ vmx_composite_over_n_8888 (pixman_implementation_t *imp,
     PIXMAN_IMAGE_GET_LINE (
 	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
-    vsrc = create_mask_32_128 (src);
+    vsrc = (vector unsigned char)create_mask_32_128 (src);
     via = negate (splat_alpha (vsrc));
     ia = ALPHA_8 (~src);
 
@@ -2599,7 +2595,7 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
     uint32_t pack_cmp;
     int dst_stride, mask_stride;
 
-    vector unsigned int vsrc, valpha, vmask, vdest;
+    vector unsigned char vsrc, valpha, vmask, vdest;
 
     src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format);
 
@@ -2611,7 +2607,7 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
     PIXMAN_IMAGE_GET_LINE (
 	mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
-    vsrc = create_mask_32_128 (src);
+    vsrc = (vector unsigned char)create_mask_32_128 (src);
     valpha = splat_alpha(vsrc);
     ia = ALPHA_8 (src);
 
@@ -2649,7 +2645,7 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
 	    /* pm is NOT necessarily 16-byte aligned */
 	    vmask = load_128_unaligned (pm);
 
-	    pack_cmp = vec_all_eq(vmask, (vector unsigned int) AVV(0));
+	    pack_cmp = vec_all_eq(vmask, (vector unsigned char) AVV(0));
 
 	    /* if all bits in mask are zero, pack_cmp is not 0 */
 	    if (pack_cmp == 0)
@@ -2774,7 +2770,7 @@ scaled_nearest_scanline_vmx_8888_8888_OVER (uint32_t*       pd,
     uint32_t s, d;
     const uint32_t* pm = NULL;
 
-    vector unsigned int vsrc, vdst;
+    vector unsigned char vsrc, vdst;
 
     if (fully_transparent_src)
 	return;
@@ -2936,7 +2932,8 @@ vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
     int w = iter->width;
     uint32_t *dst = iter->buffer;
     uint8_t *src = iter->bits;
-    vector unsigned int vmx0, vmx1, vmx2, vmx3, vmx4, vmx5, vmx6;
+
+    vector unsigned char vmx0, vmx1, vmx2, vmx3, vmx4, vmx5, vmx6;
 
     iter->bits += iter->stride;
 
@@ -2950,9 +2947,9 @@ vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
     {
 	vmx0 = load_128_unaligned((uint32_t *) src);
 
-	unpack_128_2x128((vector unsigned int) AVV(0), vmx0, &vmx1, &vmx2);
-	unpack_128_2x128((vector unsigned int) AVV(0), vmx1, &vmx3, &vmx4);
-	unpack_128_2x128((vector unsigned int) AVV(0), vmx2, &vmx5, &vmx6);
+	unpack_128_2x128((vector unsigned char) AVV(0), vmx0, &vmx1, &vmx2);
+	unpack_128_2x128((vector unsigned char) AVV(0), vmx1, &vmx3, &vmx4);
+	unpack_128_2x128((vector unsigned char) AVV(0), vmx2, &vmx5, &vmx6);
 
 	save_128_aligned(dst, vmx6);
 	save_128_aligned((dst +  4), vmx5);
@@ -2994,7 +2991,7 @@ _pixman_implementation_create_vmx (pixman_implementation_t *fallback)
     pixman_implementation_t *imp = _pixman_implementation_create (fallback, vmx_fast_paths);
 
     /* VMX constants */
-    mask_ff000000 = create_mask_32_128 (0xff000000);
+    mask_ff000000 = (vector unsigned char)create_mask_32_128 (0xff000000);
 
     /* Set up function pointers */
 
commit db19202895db410e1e57a104d4e7998f09011fd8
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:11 2024 -0500

    vmx: Use create_mask_32_128() in more places

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 2594217..eb43874 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -2230,7 +2230,7 @@ vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
     PIXMAN_IMAGE_GET_LINE (
 	mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
 
-    vsrc = (vector unsigned int) {src, src, src, src};
+    vsrc = create_mask_32_128 (src);
     valpha = splat_alpha(vsrc);
 
     while (height--)
@@ -2269,7 +2269,7 @@ vmx_composite_over_n_8_8888 (pixman_implementation_t *imp,
 	    }
 	    else if (m)
 	    {
-		vmask = splat_pixel((vector unsigned int) {m, m, m, m});
+		vmask = splat_pixel(create_mask_32_128 (m));
 
 		/* dst is 16-byte aligned */
 		vdst = in_over (vsrc, valpha, vmask, load_128_aligned (dst));
@@ -2528,7 +2528,7 @@ vmx_composite_over_n_8888 (pixman_implementation_t *imp,
     PIXMAN_IMAGE_GET_LINE (
 	dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
 
-    vsrc = (vector unsigned int){src, src, src, src};
+    vsrc = create_mask_32_128 (src);
     via = negate (splat_alpha (vsrc));
     ia = ALPHA_8 (~src);
 
@@ -2611,7 +2611,7 @@ vmx_composite_over_n_8888_8888_ca (pixman_implementation_t *imp,
     PIXMAN_IMAGE_GET_LINE (
 	mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1);
 
-    vsrc = (vector unsigned int) {src, src, src, src};
+    vsrc = create_mask_32_128 (src);
     valpha = splat_alpha(vsrc);
     ia = ALPHA_8 (src);
 
commit f3d5e47cc47a12d7a55d1fd94bfef48dcc368717
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:09 2024 -0500

    vmx: Move and use unpack{hi,lo}_128_16x8 in pix_multiply()

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 1254256..2594217 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -63,29 +63,62 @@ splat_pixel (vector unsigned int pix)
 }
 
 static force_inline vector unsigned int
-pix_multiply (vector unsigned int p, vector unsigned int a)
+unpacklo_128_16x8 (vector unsigned int data1, vector unsigned int data2)
 {
-    vector unsigned short hi, lo, mod;
+    vector unsigned char lo;
 
     /* unpack to short */
-    hi = (vector unsigned short)
+    lo = (vector unsigned char)
 #ifdef WORDS_BIGENDIAN
-	vec_mergeh ((vector unsigned char)AVV (0),
-		    (vector unsigned char)p);
+	vec_mergel ((vector unsigned char) data2,
+		    (vector unsigned char) data1);
 #else
-	vec_mergeh ((vector unsigned char) p,
-		    (vector unsigned char) AVV (0));
+	vec_mergel ((vector unsigned char) data1,
+		    (vector unsigned char) data2);
 #endif
 
-    mod = (vector unsigned short)
+    return (vector unsigned int) lo;
+}
+
+static force_inline vector unsigned int
+unpackhi_128_16x8 (vector unsigned int data1, vector unsigned int data2)
+{
+    vector unsigned char hi;
+
+    /* unpack to short */
+    hi = (vector unsigned char)
 #ifdef WORDS_BIGENDIAN
-	vec_mergeh ((vector unsigned char)AVV (0),
-		    (vector unsigned char)a);
+	vec_mergeh ((vector unsigned char) data2,
+		    (vector unsigned char) data1);
 #else
-	vec_mergeh ((vector unsigned char) a,
-		    (vector unsigned char) AVV (0));
+	vec_mergeh ((vector unsigned char) data1,
+		    (vector unsigned char) data2);
 #endif
 
+    return (vector unsigned int) hi;
+}
+
+static force_inline void
+unpack_128_2x128 (vector unsigned int  data1,
+		  vector unsigned int  data2,
+		  vector unsigned int *data_lo,
+		  vector unsigned int *data_hi)
+{
+    *data_lo = unpacklo_128_16x8 (data1, data2);
+    *data_hi = unpackhi_128_16x8 (data1, data2);
+}
+
+static force_inline vector unsigned int
+pix_multiply (vector unsigned int p, vector unsigned int a)
+{
+    vector unsigned short hi, lo, mod;
+
+    /* unpack to short */
+    hi = (vector unsigned short)
+	unpackhi_128_16x8(p, (vector unsigned int) AVV (0));
+    mod = (vector unsigned short)
+	unpackhi_128_16x8(a, (vector unsigned int) AVV (0));
+
     hi = vec_mladd (hi, mod, (vector unsigned short)
                     AVV (0x0080, 0x0080, 0x0080, 0x0080,
                          0x0080, 0x0080, 0x0080, 0x0080));
@@ -96,22 +129,9 @@ pix_multiply (vector unsigned int p, vector unsigned int a)
 
     /* unpack to short */
     lo = (vector unsigned short)
-#ifdef WORDS_BIGENDIAN
-	vec_mergel ((vector unsigned char)AVV (0),
-		    (vector unsigned char)p);
-#else
-	vec_mergel ((vector unsigned char) p,
-		    (vector unsigned char) AVV (0));
-#endif
-
+	unpacklo_128_16x8(p, (vector unsigned int) AVV (0));
     mod = (vector unsigned short)
-#ifdef WORDS_BIGENDIAN
-	vec_mergel ((vector unsigned char)AVV (0),
-		    (vector unsigned char)a);
-#else
-	vec_mergel ((vector unsigned char) a,
-		    (vector unsigned char) AVV (0));
-#endif
+	unpacklo_128_16x8(a, (vector unsigned int) AVV (0));
 
     lo = vec_mladd (lo, mod, (vector unsigned short)
                     AVV (0x0080, 0x0080, 0x0080, 0x0080,
@@ -279,50 +299,6 @@ create_mask_32_128 (uint32_t mask)
     return (vector unsigned int) {mask, mask, mask, mask};
 }
 
-static force_inline vector unsigned int
-unpacklo_128_16x8 (vector unsigned int data1, vector unsigned int data2)
-{
-    vector unsigned char lo;
-
-    /* unpack to short */
-    lo = (vector unsigned char)
-#ifdef WORDS_BIGENDIAN
-	vec_mergel ((vector unsigned char) data2,
-		    (vector unsigned char) data1);
-#else
-	vec_mergel ((vector unsigned char) data1,
-		    (vector unsigned char) data2);
-#endif
-
-    return (vector unsigned int) lo;
-}
-
-static force_inline vector unsigned int
-unpackhi_128_16x8 (vector unsigned int data1, vector unsigned int data2)
-{
-    vector unsigned char hi;
-
-    /* unpack to short */
-    hi = (vector unsigned char)
-#ifdef WORDS_BIGENDIAN
-	vec_mergeh ((vector unsigned char) data2,
-		    (vector unsigned char) data1);
-#else
-	vec_mergeh ((vector unsigned char) data1,
-		    (vector unsigned char) data2);
-#endif
-
-    return (vector unsigned int) hi;
-}
-
-static force_inline void
-unpack_128_2x128 (vector unsigned int data1, vector unsigned int data2,
-		    vector unsigned int* data_lo, vector unsigned int* data_hi)
-{
-    *data_lo = unpacklo_128_16x8(data1, data2);
-    *data_hi = unpackhi_128_16x8(data1, data2);
-}
-
 static force_inline int
 is_opaque (vector unsigned int x)
 {
commit a5bd4c7110b8ff2c31a05380a90f3605f0d3fbfd
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:05 2024 -0500

    vmx: Remove unpack{hi,lo}_128_8x16 functions

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 6515eae..1254256 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -315,42 +315,6 @@ unpackhi_128_16x8 (vector unsigned int data1, vector unsigned int data2)
     return (vector unsigned int) hi;
 }
 
-static force_inline vector unsigned int
-unpacklo_128_8x16 (vector unsigned int data1, vector unsigned int data2)
-{
-    vector unsigned short lo;
-
-    /* unpack to char */
-    lo = (vector unsigned short)
-#ifdef WORDS_BIGENDIAN
-	vec_mergel ((vector unsigned short) data2,
-		    (vector unsigned short) data1);
-#else
-	vec_mergel ((vector unsigned short) data1,
-		    (vector unsigned short) data2);
-#endif
-
-    return (vector unsigned int) lo;
-}
-
-static force_inline vector unsigned int
-unpackhi_128_8x16 (vector unsigned int data1, vector unsigned int data2)
-{
-    vector unsigned short hi;
-
-    /* unpack to char */
-    hi = (vector unsigned short)
-#ifdef WORDS_BIGENDIAN
-	vec_mergeh ((vector unsigned short) data2,
-		    (vector unsigned short) data1);
-#else
-	vec_mergeh ((vector unsigned short) data1,
-		    (vector unsigned short) data2);
-#endif
-
-    return (vector unsigned int) hi;
-}
-
 static force_inline void
 unpack_128_2x128 (vector unsigned int data1, vector unsigned int data2,
 		    vector unsigned int* data_lo, vector unsigned int* data_hi)
commit f3c77cc209614541d97c268396001f64151e2071
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:16:57 2024 -0500

    vmx: Remove unpack_128_2x128_16()
    
    This function is only used in vmx_fetch_a8, which takes packed
    unsigned char alpha values and expands them to RGBA8888 with color
    components set to 0.
    
    It currently operates by zero-extending the 8-bit alpha values to 16-bit
    and then again to 32-bit with different functions (`unpack_128_2x128`
    and `unpack_128_2x128_16` respectively). But we can just use the same
    function (`unpack_128_2x128`) twice to insert the zeros.
    
    A subsequent commit will change the VMX code to use appropriate vector
    types (instead of everything just being a `vector unsigned int` with
    casts on most function arguments to select the appropriate `vec_*`
    function overload), and removing this function reduces some complexity
    from that.
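
To see why reusing the one unpack routine works, consider what interleaving
with zeros does at byte granularity: each pass doubles the element width, so
two passes turn packed 8-bit alpha values into zero-extended 32-bit words. A
minimal scalar sketch of that idea (illustrative C only, not pixman's VMX
code; `merge_bytes` is a stand-in for the `vec_merge{h,l}` interleave, and the
operand order here puts alpha in the low byte of each word, whereas the VMX
code picks the order per endianness, as the #ifdefs above show):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Model of a vector byte merge: out[2i] = a[i], out[2i+1] = b[i]. */
    static void
    merge_bytes (const uint8_t *a, const uint8_t *b, uint8_t *out, int n)
    {
	for (int i = 0; i < n; i++)
	{
	    out[2 * i]     = a[i];
	    out[2 * i + 1] = b[i];
	}
    }

    int
    main (void)
    {
	uint8_t  alpha[4] = { 0x11, 0x22, 0x33, 0x44 }; /* packed a8 values */
	uint8_t  zero[8]  = { 0 };
	uint8_t  wide16[8], wide32[16];
	uint32_t word;

	merge_bytes (alpha,  zero, wide16, 4); /* 8-bit  -> 16-bit, zeros inserted */
	merge_bytes (wide16, zero, wide32, 8); /* 16-bit -> 32-bit, same routine again */

	for (int i = 0; i < 4; i++)
	{
	    memcpy (&word, wide32 + 4 * i, 4);
	    printf ("0x%08x\n", word); /* 0x00000011 ... 0x00000044 on little-endian */
	}
	return 0;
    }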

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index f8fd4f3..6515eae 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -359,14 +359,6 @@ unpack_128_2x128 (vector unsigned int data1, vector unsigned int data2,
     *data_hi = unpackhi_128_16x8(data1, data2);
 }
 
-static force_inline void
-unpack_128_2x128_16 (vector unsigned int data1, vector unsigned int data2,
-		    vector unsigned int* data_lo, vector unsigned int* data_hi)
-{
-    *data_lo = unpacklo_128_8x16(data1, data2);
-    *data_hi = unpackhi_128_8x16(data1, data2);
-}
-
 static force_inline int
 is_opaque (vector unsigned int x)
 {
@@ -3019,8 +3011,8 @@ vmx_fetch_a8 (pixman_iter_t *iter, const uint32_t *mask)
 	vmx0 = load_128_unaligned((uint32_t *) src);
 
 	unpack_128_2x128((vector unsigned int) AVV(0), vmx0, &vmx1, &vmx2);
-	unpack_128_2x128_16((vector unsigned int) AVV(0), vmx1, &vmx3, &vmx4);
-	unpack_128_2x128_16((vector unsigned int) AVV(0), vmx2, &vmx5, &vmx6);
+	unpack_128_2x128((vector unsigned int) AVV(0), vmx1, &vmx3, &vmx4);
+	unpack_128_2x128((vector unsigned int) AVV(0), vmx2, &vmx5, &vmx6);
 
 	save_128_aligned(dst, vmx6);
 	save_128_aligned((dst +  4), vmx5);
commit 28f39f7f17083563ee4e0090ba1eeca50feaa0b7
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:14:03 2024 -0500

    vmx: Remove unpack_565_to_8888() and associated constants

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index 2df244f..f8fd4f3 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -36,11 +36,6 @@
 #define AVV(x...) {x}
 
 static vector unsigned int mask_ff000000;
-static vector unsigned int mask_red;
-static vector unsigned int mask_green;
-static vector unsigned int mask_blue;
-static vector unsigned int mask_565_fix_rb;
-static vector unsigned int mask_565_fix_g;
 
 static force_inline vector unsigned int
 splat_alpha (vector unsigned int pix)
@@ -372,27 +367,6 @@ unpack_128_2x128_16 (vector unsigned int data1, vector unsigned int data2,
     *data_hi = unpackhi_128_8x16(data1, data2);
 }
 
-static force_inline vector unsigned int
-unpack_565_to_8888 (vector unsigned int lo)
-{
-    vector unsigned int r, g, b, rb, t;
-
-    r = vec_and (vec_sl(lo, create_mask_32_128(8)), mask_red);
-    g = vec_and (vec_sl(lo, create_mask_32_128(5)), mask_green);
-    b = vec_and (vec_sl(lo, create_mask_32_128(3)), mask_blue);
-
-    rb = vec_or (r, b);
-    t  = vec_and (rb, mask_565_fix_rb);
-    t  = vec_sr (t, create_mask_32_128(5));
-    rb = vec_or (rb, t);
-
-    t  = vec_and (g, mask_565_fix_g);
-    t  = vec_sr (t, create_mask_32_128(6));
-    g  = vec_or (g, t);
-
-    return vec_or (rb, g);
-}
-
 static force_inline int
 is_opaque (vector unsigned int x)
 {
@@ -3089,11 +3063,6 @@ _pixman_implementation_create_vmx (pixman_implementation_t *fallback)
 
     /* VMX constants */
     mask_ff000000 = create_mask_32_128 (0xff000000);
-    mask_red   = create_mask_32_128 (0x00f80000);
-    mask_green = create_mask_32_128 (0x0000fc00);
-    mask_blue  = create_mask_32_128 (0x000000f8);
-    mask_565_fix_rb = create_mask_32_128 (0x00e000e0);
-    mask_565_fix_g = create_mask_32_128  (0x0000c000);
 
     /* Set up function pointers */
 
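The unpack_565_to_8888() helper removed above implements the usual
bit-replication widening of r5g6b5 to x8r8g8b8: each channel is shifted into
position and its top bits are copied into the vacated low bits, so that 0x1f
expands to 0xff rather than 0xf8. A scalar rendering of the same masks and
shifts (illustrative only; `unpack_565` is a hypothetical stand-in, not a
pixman API):

    #include <stdint.h>
    #include <stdio.h>

    /* Widen one r5g6b5 pixel: shift each channel into place, then
     * replicate its top bits into the low bits left empty by the shift. */
    static uint32_t
    unpack_565 (uint16_t p)
    {
	uint32_t r  = ((uint32_t)p << 8) & 0x00f80000; /* 5 red bits   -> 19..23 */
	uint32_t g  = ((uint32_t)p << 5) & 0x0000fc00; /* 6 green bits -> 10..15 */
	uint32_t b  = ((uint32_t)p << 3) & 0x000000f8; /* 5 blue bits  ->  3..7  */
	uint32_t rb = r | b;

	rb |= (rb & 0x00e000e0) >> 5; /* top 3 bits of red/blue -> low bits */
	g  |= (g  & 0x0000c000) >> 6; /* top 2 bits of green    -> low bits */

	return rb | g;
    }

    int
    main (void)
    {
	printf ("0x%08x\n", unpack_565 (0xffff)); /* prints 0x00ffffff */
	return 0;
    }
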
commit 2ec98b5afda21534ee01abe3c117c6ff3eac69bc
Author: Matt Turner <mattst88 at gmail.com>
Date:   Thu Dec 12 17:15:22 2024 -0500

    vmx: Remove unnecessary variable

diff --git a/pixman/pixman-vmx.c b/pixman/pixman-vmx.c
index ceac1f3..2df244f 100644
--- a/pixman/pixman-vmx.c
+++ b/pixman/pixman-vmx.c
@@ -2995,7 +2995,6 @@ static uint32_t *
 vmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
 {
     int w = iter->width;
-    vector unsigned int ff000000 = mask_ff000000;
     uint32_t *dst = iter->buffer;
     uint32_t *src = (uint32_t *)iter->bits;
 
@@ -3009,7 +3008,7 @@ vmx_fetch_x8r8g8b8 (pixman_iter_t *iter, const uint32_t *mask)
 
     while (w >= 4)
     {
-	save_128_aligned(dst, vec_or(load_128_unaligned(src), ff000000));
+	save_128_aligned(dst, vec_or(load_128_unaligned(src), mask_ff000000));
 
 	dst += 4;
 	src += 4;
commit 2f495629fed4790d198fef24ca8104377fc635cb
Author: f.wasil <f.wasil at samsung.com>
Date:   Fri Jul 19 11:51:12 2024 +0200

    .clang-format: Add

diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..4056a36
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,27 @@
+BasedOnStyle: LLVM
+AlwaysBreakAfterReturnType: TopLevelDefinitions
+BraceWrapping:
+  IndentBraces: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+IndentAccessModifiers: true
+IndentCaseBlocks: true
+IndentCaseLabels: true
+IndentWidth: 4
+BreakBeforeBraces: Allman
+AttributeMacros: ['__attribute__']
+AlignConsecutiveMacros: true
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: true
+AllowAllArgumentsOnNextLine: false
+AllowAllParametersOfDeclarationOnNextLine: false
+BinPackArguments: true
+BinPackParameters: false
+PenaltyBreakAssignment: 1000
+ReflowComments: false
+SpaceBeforeParens: Always
+SpaceInEmptyBlock: true
+SpacesInContainerLiterals: true
+TabWidth: 8
+UseTab: ForContinuationAndIndentation
\ No newline at end of file

