pixman: Branch 'master' - 15 commits

Siarhei Siamashka siamashka at kemper.freedesktop.org
Wed Nov 11 08:28:18 PST 2009


 configure.ac                 |   30 
 pixman/Makefile.am           |    8 
 pixman/pixman-arm-neon-asm.S | 1051 +++++++++++++++++++++
 pixman/pixman-arm-neon-asm.h |  787 ++++++++++++++++
 pixman/pixman-arm-neon.c     | 2088 ++++---------------------------------------
 5 files changed, 2099 insertions(+), 1865 deletions(-)

New commits:
commit abefe68ae2a422fecf315f17430c0cda5561be66
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:14:14 2009 +0200

    ARM: enabled 'neon_composite_add_8000_8000' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 7ba62a2..94317a4 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -321,6 +321,7 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,   0 },
     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
     { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_8_8_8,        0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_composite_add_8000_8000,    0 },
     { PIXMAN_OP_NONE },
 };
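
As a usage sketch (not part of the patch), this entry is hit by a plain ADD
of one a8 image onto another with no mask; the image size and function name
below are made up for illustration:

    #include <pixman.h>

    /* ADD-composite one 8-bit alpha image onto another.  With NEON enabled,
     * this matches the PIXMAN_OP_ADD / a8 / null / a8 entry above and is
     * expected to be dispatched to neon_composite_add_8000_8000.
     */
    void
    add_a8_example (uint32_t *src_bits, uint32_t *dst_bits, int stride_bytes)
    {
        pixman_image_t *src = pixman_image_create_bits (PIXMAN_a8, 64, 64,
                                                        src_bits, stride_bytes);
        pixman_image_t *dst = pixman_image_create_bits (PIXMAN_a8, 64, 64,
                                                        dst_bits, stride_bytes);

        pixman_image_composite (PIXMAN_OP_ADD, src, NULL, dst,
                                0, 0, 0, 0, 0, 0, 64, 64);

        pixman_image_unref (src);
        pixman_image_unref (dst);
    }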
 
commit 635f389ff477a0afe82c6038a835e262d5034d99
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:13:31 2009 +0200

    ARM: enabled 'neon_composite_add_8_8_8' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 46212dd..7ba62a2 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -320,6 +320,7 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_over_8888_8888,   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,   0 },
     { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_8_8_8,        0 },
     { PIXMAN_OP_NONE },
 };
 
commit 7e1bfed6767774a43c288ab780f62a20eccff805
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:12:56 2009 +0200

    ARM: enabled 'neon_composite_add_n_8_8' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 7f32829..46212dd 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -319,6 +319,7 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888,   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_over_8888_8888,   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,   0 },
+    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
     { PIXMAN_OP_NONE },
 };
 
commit deeb67b13a0f9267b59d9755e7a0102da29a6747
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:12:14 2009 +0200

    ARM: enabled 'neon_composite_over_8888_8888' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 08d1421..7f32829 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -315,6 +315,10 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_8888_0565,   0 },
     { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_8888_0565,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_8888_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_over_8888_8888,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,   0 },
     { PIXMAN_OP_NONE },
 };
 
commit f449364849b2cc75a48cc3b35d2a373d38b71c09
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:11:32 2009 +0200

    ARM: enabled 'neon_composite_over_8888_0565' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 6c3cce3..08d1421 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -313,6 +313,8 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888,    0 },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
     { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_8888_0565,   0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_8888_0565,   0 },
     { PIXMAN_OP_NONE },
 };
 
commit 2dfbf6c4a520da4647bb480a124dfe5cbece225b
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:10:55 2009 +0200

    ARM: enabled 'neon_composite_over_8888_n_8888' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index c0da80d..6c3cce3 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -311,6 +311,8 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888,    0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888,    0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888,    0 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
     { PIXMAN_OP_NONE },
 };
 
commit 43824f98f1fc41d923dd8ddd97e74942c01aadf8
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:10:09 2009 +0200

    ARM: enabled 'neon_composite_over_n_8_8888' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 6815018..c0da80d 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -307,6 +307,10 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_SRC,  PIXMAN_r8g8b8,   PIXMAN_null,     PIXMAN_r8g8b8,   neon_composite_src_0888_0888,    0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565,    0 },
     { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_composite_over_n_8_0565,    0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888,    0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888,    0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888,    0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888,    0 },
     { PIXMAN_OP_NONE },
 };
 
commit 189d0d783cc62aa3b739218689042c9235c04fa1
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:09:31 2009 +0200

    ARM: enabled 'neon_composite_over_n_8_0565' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index b25aef5..6815018 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -305,6 +305,8 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888,    0 },
     { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888,    0 },
     { PIXMAN_OP_SRC,  PIXMAN_r8g8b8,   PIXMAN_null,     PIXMAN_r8g8b8,   neon_composite_src_0888_0888,    0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565,    0 },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_composite_over_n_8_0565,    0 },
     { PIXMAN_OP_NONE },
 };
 
commit cccfc87f4f597f99b74691af172126a2346f9239
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:08:48 2009 +0200

    ARM: enabled 'neon_composite_src_0888_0888' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 612967a..b25aef5 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -300,6 +300,11 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
     { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_8888_0565,    0 },
     { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565,    0 },
     { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_8888_8888,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_8888_8888,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_r8g8b8,   PIXMAN_null,     PIXMAN_r8g8b8,   neon_composite_src_0888_0888,    0 },
     { PIXMAN_OP_NONE },
 };
 
commit e89b4f8105beaa27b6098a5dc7dfec62879ebd1d
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:08:09 2009 +0200

    ARM: enabled 'neon_composite_src_8888_0565' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 7ff8fe1..612967a 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -296,6 +296,10 @@ static const pixman_fast_path_t arm_neon_fast_path_array[] =
 {
     { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_0565_0565,    0 },
     { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_0565_0565,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_8888_0565,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_8888_0565,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565,    0 },
     { PIXMAN_OP_NONE },
 };
 
commit 2d54ed46fb7428aa1d9f114450554fc33acff2c4
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:07:36 2009 +0200

    ARM: enabled 'neon_composite_src_0565_0565' fast path

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 5339cbd..7ff8fe1 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -294,6 +294,8 @@ pixman_fill_neon (uint32_t *bits,
 
 static const pixman_fast_path_t arm_neon_fast_path_array[] =
 {
+    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_0565_0565,    0 },
+    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_0565_0565,    0 },
     { PIXMAN_OP_NONE },
 };
 
commit 5d695cb86eaad151c9402ead5dfb7e867ff58d29
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 17:05:46 2009 +0200

    ARM: added 'bindings' for NEON assembly optimized functions
    
    These functions serve as 'adaptors', converting standard internal
    pixman fast path function arguments into arguments expected
    by assembly functions.
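
To make the 'adaptor' idea concrete, here is a sketch of what
BIND_SRC_NULL_DST(composite_src_8888_8888, uint32_t, 1, uint32_t, 1) from the
diff below roughly expands to (formatting relaxed; it relies on the
pixman-private.h definitions such as PIXMAN_IMAGE_GET_LINE, exactly as the
macro itself does):

    /* declaration of the assembly routine from pixman-arm-neon-asm.S */
    void
    pixman_composite_src_8888_8888_asm_neon (int32_t   w,
                                              int32_t   h,
                                              uint32_t *dst,
                                              int32_t   dst_stride,
                                              uint32_t *src,
                                              int32_t   src_stride);

    /* generated fast path wrapper: fetch line pointers and strides from the
     * pixman images and hand them to the assembly routine */
    static void
    neon_composite_src_8888_8888 (pixman_implementation_t *imp,
                                  pixman_op_t              op,
                                  pixman_image_t *         src_image,
                                  pixman_image_t *         mask_image,
                                  pixman_image_t *         dst_image,
                                  int32_t                  src_x,
                                  int32_t                  src_y,
                                  int32_t                  mask_x,
                                  int32_t                  mask_y,
                                  int32_t                  dest_x,
                                  int32_t                  dest_y,
                                  int32_t                  width,
                                  int32_t                  height)
    {
        uint32_t *dst_line, *src_line;
        int32_t   dst_stride, src_stride;

        PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
                               src_stride, src_line, 1);
        PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
                               dst_stride, dst_line, 1);

        pixman_composite_src_8888_8888_asm_neon (width, height,
                                                 dst_line, dst_stride,
                                                 src_line, src_stride);
    }

The resulting 'neon_composite_src_8888_8888' function is exactly what the
fast path table entries in the commits above point at.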

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 494f06c..5339cbd 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -33,6 +33,200 @@
 #include <string.h>
 #include "pixman-private.h"
 
+#define BIND_SRC_NULL_DST(name, src_type, src_cnt, dst_type, dst_cnt)   \
+void                                                                    \
+pixman_##name##_asm_neon (int32_t   w,                                  \
+                          int32_t   h,                                  \
+                          dst_type *dst,                                \
+                          int32_t   dst_stride,                         \
+                          src_type *src,                                \
+                          int32_t   src_stride);                        \
+                                                                        \
+static void                                                             \
+neon_##name (pixman_implementation_t *imp,                              \
+             pixman_op_t              op,                               \
+             pixman_image_t *         src_image,                        \
+             pixman_image_t *         mask_image,                       \
+             pixman_image_t *         dst_image,                        \
+             int32_t                  src_x,                            \
+             int32_t                  src_y,                            \
+             int32_t                  mask_x,                           \
+             int32_t                  mask_y,                           \
+             int32_t                  dest_x,                           \
+             int32_t                  dest_y,                           \
+             int32_t                  width,                            \
+             int32_t                  height)                           \
+{                                                                       \
+    dst_type *dst_line;                                                 \
+    src_type *src_line;                                                 \
+    int32_t dst_stride, src_stride;                                     \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type,           \
+                           src_stride, src_line, src_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+                                                                        \
+    pixman_##name##_asm_neon (width, height,                            \
+                              dst_line, dst_stride,                     \
+                              src_line, src_stride);                    \
+}
+
+#define BIND_N_MASK_DST(name, mask_type, mask_cnt, dst_type, dst_cnt)   \
+void                                                                    \
+pixman_##name##_asm_neon (int32_t    w,                                 \
+                          int32_t    h,                                 \
+                          dst_type  *dst,                               \
+                          int32_t    dst_stride,                        \
+                          uint32_t   src,                               \
+                          int32_t    unused,                            \
+                          mask_type *mask,                              \
+                          int32_t    mask_stride);                      \
+                                                                        \
+static void                                                             \
+neon_##name (pixman_implementation_t *imp,                              \
+             pixman_op_t              op,                               \
+             pixman_image_t *         src_image,                        \
+             pixman_image_t *         mask_image,                       \
+             pixman_image_t *         dst_image,                        \
+             int32_t                  src_x,                            \
+             int32_t                  src_y,                            \
+             int32_t                  mask_x,                           \
+             int32_t                  mask_y,                           \
+             int32_t                  dest_x,                           \
+             int32_t                  dest_y,                           \
+             int32_t                  width,                            \
+             int32_t                  height)                           \
+{                                                                       \
+    dst_type  *dst_line;                                                \
+    mask_type *mask_line;                                               \
+    int32_t    dst_stride, mask_stride;                                 \
+    uint32_t   src;                                                     \
+                                                                        \
+    src = _pixman_image_get_solid (src_image, dst_image->bits.format);  \
+                                                                        \
+    if (src == 0)                                                       \
+	return;                                                         \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type,       \
+                           mask_stride, mask_line, mask_cnt);           \
+                                                                        \
+    pixman_##name##_asm_neon (width, height,                            \
+                              dst_line, dst_stride,                     \
+                              src, 0,                                   \
+                              mask_line, mask_stride);                  \
+}
+
+#define BIND_SRC_N_DST(name, src_type, src_cnt, dst_type, dst_cnt)      \
+void                                                                    \
+pixman_##name##_asm_neon (int32_t    w,                                 \
+                          int32_t    h,                                 \
+                          dst_type  *dst,                               \
+                          int32_t    dst_stride,                        \
+                          src_type  *src,                               \
+                          int32_t    src_stride,                        \
+                          uint32_t   mask);                             \
+                                                                        \
+static void                                                             \
+neon_##name (pixman_implementation_t *imp,                              \
+             pixman_op_t              op,                               \
+             pixman_image_t *         src_image,                        \
+             pixman_image_t *         mask_image,                       \
+             pixman_image_t *         dst_image,                        \
+             int32_t                  src_x,                            \
+             int32_t                  src_y,                            \
+             int32_t                  mask_x,                           \
+             int32_t                  mask_y,                           \
+             int32_t                  dest_x,                           \
+             int32_t                  dest_y,                           \
+             int32_t                  width,                            \
+             int32_t                  height)                           \
+{                                                                       \
+    dst_type  *dst_line;                                                \
+    src_type  *src_line;                                                \
+    int32_t    dst_stride, src_stride;                                  \
+    uint32_t   mask;                                                    \
+                                                                        \
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);\
+                                                                        \
+    if (mask == 0)                                                      \
+	return;                                                         \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type,           \
+                           src_stride, src_line, src_cnt);              \
+                                                                        \
+    pixman_##name##_asm_neon (width, height,                            \
+                              dst_line, dst_stride,                     \
+                              src_line, src_stride,                     \
+                              mask);                                    \
+}
+
+#define BIND_SRC_MASK_DST(name, src_type, src_cnt, mask_type, mask_cnt, \
+                          dst_type, dst_cnt)                            \
+void                                                                    \
+pixman_##name##_asm_neon (int32_t    w,                                 \
+                          int32_t    h,                                 \
+                          dst_type  *dst,                               \
+                          int32_t    dst_stride,                        \
+                          src_type  *src,                               \
+                          int32_t    src_stride,                        \
+                          mask_type *mask,                              \
+                          int32_t    mask_stride);                      \
+                                                                        \
+static void                                                             \
+neon_##name (pixman_implementation_t *imp,                              \
+             pixman_op_t              op,                               \
+             pixman_image_t *         src_image,                        \
+             pixman_image_t *         mask_image,                       \
+             pixman_image_t *         dst_image,                        \
+             int32_t                  src_x,                            \
+             int32_t                  src_y,                            \
+             int32_t                  mask_x,                           \
+             int32_t                  mask_y,                           \
+             int32_t                  dest_x,                           \
+             int32_t                  dest_y,                           \
+             int32_t                  width,                            \
+             int32_t                  height)                           \
+{                                                                       \
+    dst_type  *dst_line;                                                \
+    src_type  *src_line;                                                \
+    mask_type *mask_line;                                               \
+    int32_t    dst_stride, src_stride, mask_stride;                     \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type,           \
+                           src_stride, src_line, src_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type,       \
+                           mask_stride, mask_line, mask_cnt);           \
+                                                                        \
+    pixman_##name##_asm_neon (width, height,                            \
+                              dst_line, dst_stride,                     \
+                              src_line, src_stride,                     \
+                              mask_line, mask_stride);                  \
+}
+
+
+BIND_SRC_NULL_DST(composite_src_8888_8888, uint32_t, 1, uint32_t, 1)
+BIND_SRC_NULL_DST(composite_src_0565_0565, uint16_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(composite_src_0888_0888, uint8_t, 3, uint8_t, 3)
+BIND_SRC_NULL_DST(composite_src_8888_0565, uint32_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(composite_add_8000_8000, uint8_t, 1, uint8_t, 1)
+
+BIND_SRC_NULL_DST(composite_over_8888_0565, uint32_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(composite_over_8888_8888, uint32_t, 1, uint32_t, 1)
+
+BIND_N_MASK_DST(composite_over_n_8_0565, uint8_t, 1, uint16_t, 1)
+BIND_N_MASK_DST(composite_over_n_8_8888, uint8_t, 1, uint32_t, 1)
+BIND_N_MASK_DST(composite_add_n_8_8, uint8_t, 1, uint8_t, 1)
+
+BIND_SRC_N_DST(composite_over_8888_n_8888, uint32_t, 1, uint32_t, 1)
+
+BIND_SRC_MASK_DST(composite_add_8_8_8, uint8_t, 1, uint8_t, 1, uint8_t, 1)
+
 void
 pixman_composite_src_n_8_asm_neon (int32_t   w,
                                    int32_t   h,
commit dcfade3df96559ce942df5d16b7915c94f7d9e57
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 15:29:27 2009 +0200

    ARM: enabled new implementation for pixman_fill_neon

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 9052061..494f06c 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -33,6 +33,27 @@
 #include <string.h>
 #include "pixman-private.h"
 
+void
+pixman_composite_src_n_8_asm_neon (int32_t   w,
+                                   int32_t   h,
+                                   uint8_t  *dst,
+                                   int32_t   dst_stride,
+                                   uint8_t   src);
+
+void
+pixman_composite_src_n_0565_asm_neon (int32_t   w,
+                                      int32_t   h,
+                                      uint16_t *dst,
+                                      int32_t   dst_stride,
+                                      uint16_t  src);
+
+void
+pixman_composite_src_n_8888_asm_neon (int32_t   w,
+                                      int32_t   h,
+                                      uint32_t *dst,
+                                      int32_t   dst_stride,
+                                      uint32_t  src);
+
 static pixman_bool_t
 pixman_fill_neon (uint32_t *bits,
                   int       stride,
@@ -43,7 +64,38 @@ pixman_fill_neon (uint32_t *bits,
                   int       height,
                   uint32_t  _xor)
 {
-    return FALSE;
+    /* stride is always a multiple of 32-bit units in pixman */
+    uint32_t byte_stride = stride * sizeof(uint32_t);
+
+    switch (bpp)
+    {
+    case 8:
+	pixman_composite_src_n_8_asm_neon (
+		width,
+		height,
+		(uint8_t *)(((char *) bits) + y * byte_stride + x),
+		byte_stride,
+		_xor & 0xff);
+	return TRUE;
+    case 16:
+	pixman_composite_src_n_0565_asm_neon (
+		width,
+		height,
+		(uint16_t *)(((char *) bits) + y * byte_stride + x * 2),
+		byte_stride / 2,
+		_xor & 0xffff);
+	return TRUE;
+    case 32:
+	pixman_composite_src_n_8888_asm_neon (
+		width,
+		height,
+		(uint32_t *)(((char *) bits) + y * byte_stride + x * 4),
+		byte_stride / 4,
+		_xor);
+	return TRUE;
+    default:
+	return FALSE;
+    }
 }
 
 static const pixman_fast_path_t arm_neon_fast_path_array[] =
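
As a small usage sketch (not part of the patch): pixman_fill() takes its
stride in uint32_t units, as the comment in the code above notes, and with
this change a 16bpp fill is expected to be routed to
pixman_composite_src_n_0565_asm_neon. The buffer size below is made up for
illustration:

    #include <pixman.h>

    /* 64x64 r5g6b5 image: 64 pixels * 2 bytes = 128 bytes per row,
     * which is a stride of 32 uint32_t units.
     */
    static uint32_t bits[64 * 64 / 2];

    void
    fill_example (void)
    {
        /* fill a 32x16 rectangle at (8, 4) with solid red (0xf800) */
        pixman_fill (bits, 32, 16, 8, 4, 32, 16, 0xf800);
    }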
commit bcb4bc79321659635d706bade25851cddf563856
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 15:18:38 2009 +0200

    ARM: introduction of the new framework for NEON fast path optimizations
    
    The GNU assembler and its macro preprocessor are now used to generate
    NEON optimized functions from a common template. This automatically
    takes care of nuisances like ensuring optimal alignment, dealing with
    leading/trailing pixels, doing prefetch, etc.
    
    Implementations for a lot of compositing functions are also added,
    but not enabled.
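
For readers unfamiliar with the GNU assembler macro facility the template is
built on, here is a tiny standalone sketch (not taken from the patch): a
parameterized .macro expanded twice with different NEON registers, which is
essentially how the template stamps out per-format functions from one body
of code:

    .text
    .fpu neon
    .altmacro

    .macro add_pixels acc, src
        vqadd.u8    \acc, \acc, \src    /* saturating 8-bit add */
    .endm

        add_pixels  q14, q0
        add_pixels  q15, q1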

diff --git a/configure.ac b/configure.ac
index 172656a..8ee91d3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -71,6 +71,7 @@ AC_CANONICAL_HOST
 test_CFLAGS=${CFLAGS+set} # We may override autoconf default CFLAGS.
 
 AC_PROG_CC
+AM_PROG_AS
 AC_PROG_LIBTOOL
 AC_CHECK_FUNCS([getisax])
 AC_C_BIGENDIAN
@@ -400,19 +401,20 @@ AC_SUBST(ARM_SIMD_CFLAGS)
 AM_CONDITIONAL(USE_ARM_SIMD, test $have_arm_simd = yes)
 
 dnl ==========================================================================
-dnl Check for ARM NEON instructions
-ARM_NEON_CFLAGS="-mfpu=neon -mcpu=cortex-a8"
-
+dnl Check if assembler is gas compatible and supports NEON instructions
 have_arm_neon=no
-AC_MSG_CHECKING(whether to use ARM NEON)
+AC_MSG_CHECKING(whether to use ARM NEON assembler)
 xserver_save_CFLAGS=$CFLAGS
-CFLAGS="$ARM_NEON_CFLAGS $CFLAGS"
-AC_COMPILE_IFELSE([
-#include <arm_neon.h>
-int main () {
-    uint8x8_t neon_test=vmov_n_u8(0);
-    return 0;
-}], have_arm_neon=yes)
+CFLAGS="-x assembler-with-cpp"
+AC_COMPILE_IFELSE([[
+.text
+.fpu neon
+.altmacro
+#ifndef __ARM_EABI__
+#error EABI is required (to be sure that calling conventions are compatible)
+#endif
+pld [r0]
+vmovn.u16 d0, q0]], have_arm_neon=yes)
 CFLAGS=$xserver_save_CFLAGS
 
 AC_ARG_ENABLE(arm-neon,
@@ -425,13 +427,9 @@ if test $enable_arm_neon = no ; then
 fi
 
 if test $have_arm_neon = yes ; then
-   AC_DEFINE(USE_ARM_NEON, 1, [use ARM NEON compiler intrinsics])
-else
-   ARM_NEON_CFLAGS=
+   AC_DEFINE(USE_ARM_NEON, 1, [use ARM NEON assembly optimizations])
 fi
 
-AC_SUBST(ARM_NEON_CFLAGS)
-
 AM_CONDITIONAL(USE_ARM_NEON, test $have_arm_neon = yes)
 
 AC_MSG_RESULT($have_arm_neon)
diff --git a/pixman/Makefile.am b/pixman/Makefile.am
index 6020623..cd01023 100644
--- a/pixman/Makefile.am
+++ b/pixman/Makefile.am
@@ -109,12 +109,14 @@ endif
 if USE_ARM_NEON
 noinst_LTLIBRARIES += libpixman-arm-neon.la
 libpixman_arm_neon_la_SOURCES = \
-        pixman-arm-neon.c
-libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS) $(ARM_NEON_CFLAGS)
+        pixman-arm-neon.c	\
+        pixman-arm-neon-asm.S	\
+        pixman-arm-neon-asm.h
+libpixman_arm_neon_la_CFLAGS = $(DEP_CFLAGS)
 libpixman_arm_neon_la_LIBADD = $(DEP_LIBS)
 libpixman_1_la_LIBADD += libpixman-arm-neon.la
 
-$(libpixman_arm_neon_la_SOURCES:.c=.s) : ASM_CFLAGS=$(ARM_NEON_CFLAGS)
+$(libpixman_arm_neon_la_SOURCES:.c=.s) : ASM_CFLAGS=
 endif
 
 .c.s : $(libpixmaninclude_HEADERS) $(BUILT_SOURCES)
diff --git a/pixman/pixman-arm-neon-asm.S b/pixman/pixman-arm-neon-asm.S
new file mode 100644
index 0000000..e8ccf77
--- /dev/null
+++ b/pixman/pixman-arm-neon-asm.S
@@ -0,0 +1,1051 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Nokia Corporation not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Nokia Corporation makes no
+ * representations about the suitability of this software for any purpose.
+ * It is provided "as is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author:  Siarhei Siamashka (siarhei.siamashka at nokia.com)
+ */
+
+/*
+ * This file contains implementations of NEON optimized pixel processing
+ * functions. There is no full and detailed tutorial, but some functions
+ * (those which expose some new or interesting features) are
+ * extensively commented and can be used as examples.
+ *
+ * You may want to have a look at the comments for the following functions:
+ *  - pixman_composite_over_8888_0565_asm_neon
+ *  - pixman_composite_over_n_8_0565_asm_neon
+ */
+
+/* Prevent the stack from becoming executable for no reason... */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+    .text
+    .fpu neon
+    .altmacro
+
+#include "pixman-arm-neon-asm.h"
+
+/* Global configuration options and preferences */
+
+/*
+ * The code can optionally make use of unaligned memory accesses to improve
+ * performance of handling leading/trailing pixels for each scanline.
+ * The configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0, for
+ * example on Linux, if unaligned memory accesses are not configured to
+ * generate exceptions.
+ */
+.set RESPECT_STRICT_ALIGNMENT, 1
+
+/*
+ * Set default prefetch type. There is a choice between the following options:
+ *
+ * PREFETCH_TYPE_NONE (may be useful for ARM cores where PLD is set to work
+ * as a NOP to work around some HW bugs or for whatever other reason)
+ *
+ * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
+ * advanced prefetch introduces heavy overhead)
+ *
+ * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
+ * which can run ARM and NEON instructions simultaneously so that extra ARM
+ * instructions do not add (many) extra cycles, but improve prefetch efficiency)
+ *
+ * Note: some types of functions can't support advanced prefetch and fall back
+ *       to the simple one (those which handle 24bpp pixels)
+ */
+.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
+
+/* Prefetch distance in pixels for simple prefetch */
+.set PREFETCH_DISTANCE_SIMPLE, 64
+
+/*
+ * Implementation of pixman_composite_over_8888_0565_asm_neon
+ *
+ * This function takes an a8r8g8b8 source buffer and an r5g6b5 destination buffer
+ * and performs the OVER compositing operation. fast_composite_over_8888_0565
+ * from pixman-fast-path.c does the same in C and can be used as a reference.
+ *
+ * First we need to have some NEON assembly code which can do the actual
+ * operation on the pixels and provide it to the template macro.
+ *
+ * The template macro quite conveniently takes care of emitting all the necessary
+ * code for memory reading and writing (including quite tricky cases of
+ * handling unaligned leading/trailing pixels), so we only need to deal with
+ * the data in NEON registers.
+ *
+ * The recommended NEON register allocation is, in general, the following:
+ * d0,  d1,  d2,  d3  - contain loaded source pixel data
+ * d4,  d5,  d6,  d7  - contain loaded destination pixels (if they are needed)
+ * d24, d25, d26, d27 - contain loaded mask pixel data (if a mask is used)
+ * d28, d29, d30, d31 - place for storing the result (destination pixels)
+ *
+ * As can be seen above, four 64-bit NEON registers are used for keeping
+ * intermediate pixel data and up to 8 pixels can be processed in one step
+ * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
+ *
+ * This particular function uses the following register allocation:
+ * d0,  d1,  d2,  d3  - contain loaded source pixel data
+ * d4,  d5            - contain loaded destination pixels (they are needed)
+ * d28, d29           - place for storing the result (destination pixels)
+ */
+
+/*
+ * Step one. We need to have some code to do arithmetic on pixel data.
+ * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
+ * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
+ * perform all the needed calculations and write the result to {d28, d29}.
+ * The rationale for having two macros and not just one will be explained
+ * later. In practice, any single monolithic function which does the work can
+ * be split into two parts in any arbitrary way without affecting correctness.
+ *
+ * There is one special trick here too. The common template macro can
+ * optionally make our life a bit easier by deinterleaving the R, G, B, A
+ * color components for 32bpp pixel formats (and this feature is used in the
+ * 'pixman_composite_over_8888_0565_asm_neon' function). This means that
+ * instead of having 8 packed pixels in the {d0, d1, d2, d3} registers, we
+ * actually use the d0 register for the blue channel (a vector of eight 8-bit
+ * values), d1 for green, d2 for red and d3 for alpha. This simple
+ * conversion can also be done with a few NEON instructions:
+ *
+ * Packed to planar conversion:
+ *  vuzp.8 d0, d1
+ *  vuzp.8 d2, d3
+ *  vuzp.8 d1, d3
+ *  vuzp.8 d0, d2
+ *
+ * Planar to packed conversion:
+ *  vzip.8 d0, d2
+ *  vzip.8 d1, d3
+ *  vzip.8 d2, d3
+ *  vzip.8 d0, d1
+ *
+ * But pixels can be loaded directly in planar format using the VLD4.8 NEON
+ * instruction. It is 1 cycle slower than VLD1.32, so this is not always
+ * desirable; that's why deinterleaving is optional.
+ *
+ * But anyway, here is the code:
+ */
+.macro pixman_composite_over_8888_0565_process_pixblock_head
+    /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
+       and put data into d6 - red, d7 - green, d30 - blue */
+    vshrn.u16   d6, q2, #8
+    vshrn.u16   d7, q2, #3
+    vsli.u16    q2, q2, #5
+    vsri.u8     d6, d6, #5
+    vmvn.8      d3, d3      /* invert source alpha */
+    vsri.u8     d7, d7, #6
+    vshrn.u16   d30, q2, #2
+    /* now do alpha blending, storing results in 8-bit planar format
+       into d16 - red, d19 - green, d18 - blue */
+    vmull.u8    q10, d3, d6
+    vmull.u8    q11, d3, d7
+    vmull.u8    q12, d3, d30
+    vrshr.u16   q13, q10, #8
+    vrshr.u16   q3, q11, #8
+    vrshr.u16   q15, q12, #8
+    vraddhn.u16 d20, q10, q13
+    vraddhn.u16 d23, q11, q3
+    vraddhn.u16 d22, q12, q15
+.endm
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail
+    /* ... continue alpha blending */
+    vqadd.u8    d16, d2, d20
+    vqadd.u8    q9, q0, q11
+    /* convert the result to r5g6b5 and store it into {d28, d29} */
+    vshll.u8    q14, d16, #8
+    vshll.u8    q8, d19, #8
+    vshll.u8    q9, d18, #8
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+/*
+ * OK, now we have almost everything that we need. Using the above two
+ * macros, the work can be done correctly. But now we want to optimize
+ * it a bit. ARM Cortex-A8 is an in-order core, and it benefits a lot
+ * from good code scheduling and software pipelining.
+ *
+ * Let's construct some code, which will run in the core main loop.
+ * Some pseudo-code of the main loop will look like this:
+ *   head
+ *   while (...) {
+ *     tail
+ *     head
+ *   }
+ *   tail
+ *
+ * It may look a bit weird, but this setup allows hiding instruction
+ * latencies better and also utilizing the dual-issue capability more
+ * efficiently (pairing load/store and ALU instructions).
+ *
+ * So what we need now is a '*_tail_head' macro, which will be used
+ * in the core main loop. A trivial straightforward implementation
+ * of this macro would look like this:
+ *
+ *   pixman_composite_over_8888_0565_process_pixblock_tail
+ *   vst1.16     {d28, d29}, [DST_W, :128]!
+ *   vld1.16     {d4, d5}, [DST_R, :128]!
+ *   vld4.32     {d0, d1, d2, d3}, [SRC]!
+ *   pixman_composite_over_8888_0565_process_pixblock_head
+ *   cache_preload 8, 8
+ *
+ * Now it also has some VLD/VST instructions. We simply can't move from
+ * processing one block of pixels to the next with just arithmetic.
+ * The previously processed data needs to be written to memory and new
+ * data needs to be fetched. Fortunately, this main loop does not deal
+ * with partial leading/trailing pixels and can load/store a full block
+ * of pixels in bulk. Additionally, the destination buffer is already
+ * 16-byte aligned here (which is good for performance).
+ *
+ * New things here are the DST_R, DST_W, SRC and MASK identifiers. These
+ * are aliases for the ARM registers which are used as pointers for
+ * accessing data. We maintain separate pointers for reading and writing
+ * the destination buffer (DST_R and DST_W).
+ *
+ * Another new thing is the 'cache_preload' macro. It is used to prefetch
+ * data into the CPU L2 cache and improves performance when dealing with
+ * images which are far larger than the cache size. It takes one argument
+ * (actually two, but they need to be the same here) - the number of pixels
+ * in a block. Looking into 'pixman-arm-neon-asm.h' can provide some
+ * details about this macro. Moreover, if good performance is needed,
+ * the code from this macro needs to be copied into the '*_tail_head' macro
+ * and mixed with the rest of the code for optimal instruction scheduling.
+ * We are actually doing that below.
+ *
+ * Now after all the explanations, here is the optimized code.
+ * Different instruction streams (originating from the '*_head', '*_tail'
+ * and 'cache_preload' macros) use different indentation levels for
+ * better readability. Actually taking the code from one of these
+ * indentation levels and ignoring a few VLD/VST instructions would
+ * result in exactly the code from '*_head', '*_tail' or 'cache_preload'
+ * macro!
+ */
+
+#if 1
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+        vqadd.u8    d16, d2, d20
+    vld1.16     {d4, d5}, [DST_R, :128]!
+        vqadd.u8    q9, q0, q11
+    vshrn.u16   d6, q2, #8
+    vld4.8      {d0, d1, d2, d3}, [SRC]!
+    vshrn.u16   d7, q2, #3
+    vsli.u16    q2, q2, #5
+        vshll.u8    q14, d16, #8
+                                    PF add PF_X, PF_X, #8
+        vshll.u8    q8, d19, #8
+                                    PF tst PF_CTL, #0xF
+    vsri.u8     d6, d6, #5
+                                    PF addne PF_X, PF_X, #8
+    vmvn.8      d3, d3
+                                    PF subne PF_CTL, PF_CTL, #1
+    vsri.u8     d7, d7, #6
+    vshrn.u16   d30, q2, #2
+    vmull.u8    q10, d3, d6
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+    vmull.u8    q11, d3, d7
+    vmull.u8    q12, d3, d30
+                                    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+        vsri.u16    q14, q8, #5
+                                    PF cmp PF_X, ORIG_W
+        vshll.u8    q9, d18, #8
+    vrshr.u16   q13, q10, #8
+                                    PF subge PF_X, PF_X, ORIG_W
+    vrshr.u16   q3, q11, #8
+    vrshr.u16   q15, q12, #8
+                                    PF subges PF_CTL, PF_CTL, #0x10
+        vsri.u16    q14, q9, #11
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+    vraddhn.u16 d20, q10, q13
+    vraddhn.u16 d23, q11, q3
+                                    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+    vraddhn.u16 d22, q12, q15
+        vst1.16     {d28, d29}, [DST_W, :128]!
+.endm
+
+#else
+
+/* If we did not care much about the performance, we would just use this... */
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+    pixman_composite_over_8888_0565_process_pixblock_tail
+    vst1.16     {d28, d29}, [DST_W, :128]!
+    vld1.16     {d4, d5}, [DST_R, :128]!
+    vld4.32     {d0, d1, d2, d3}, [SRC]!
+    pixman_composite_over_8888_0565_process_pixblock_head
+    cache_preload 8, 8
+.endm
+
+#endif
+
+/*
+ * And now the final part. We are using 'generate_composite_function' macro
+ * to put all the stuff together. We are specifying the name of the function
+ * which we want to get, number of bits per pixel for the source, mask and
+ * destination (0 if unused, like mask in this case). Next come some bit
+ * flags:
+ *   FLAG_DST_READWRITE      - tells that the destination buffer is both read
+ *                             and written; for a write-only buffer we would
+ *                             use the FLAG_DST_WRITEONLY flag instead
+ *   FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
+ *                             and separate color channels for 32bpp formats.
+ * The next things are:
+ *  - the number of pixels processed per iteration (8 in this case, because
+ *    that's the maximum that can fit into four 64-bit NEON registers).
+ *  - the prefetch distance, measured in pixel blocks. In this case it is 5
+ *    blocks of 8 pixels. That would be 40 pixels, or up to 160 bytes. The optimal
+ *    prefetch distance can be selected by running some benchmarks.
+ *
+ * After that we specify some macros; these are 'default_init' and
+ * 'default_cleanup' here, which are empty (but it is possible to have custom
+ * init/cleanup macros to save/restore some extra NEON registers
+ * like d8-d15 or do anything else), followed by
+ * 'pixman_composite_over_8888_0565_process_pixblock_head',
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
+ * which we implemented above.
+ *
+ * The last part is the NEON register allocation scheme.
+ */
+generate_composite_function \
+    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_0565_process_pixblock_head, \
+    pixman_composite_over_8888_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_0565_process_pixblock_head
+    vshll.u8    q8, d1, #8
+    vshll.u8    q14, d2, #8
+    vshll.u8    q9, d0, #8
+.endm
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
+        vsri.u16    q14, q8, #5
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+    vld4.8      {d0, d1, d2, d3}, [SRC]!
+                                    PF addne PF_X, PF_X, #8
+                                    PF subne PF_CTL, PF_CTL, #1
+        vsri.u16    q14, q9, #11
+                                    PF cmp PF_X, ORIG_W
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+    vshll.u8    q8, d1, #8
+        vst1.16     {d28, d29}, [DST_W, :128]!
+                                    PF subge PF_X, PF_X, ORIG_W
+                                    PF subges PF_CTL, PF_CTL, #0x10
+    vshll.u8    q14, d2, #8
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+    vshll.u8    q9, d0, #8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_8888_0565_process_pixblock_head, \
+    pixman_composite_src_8888_0565_process_pixblock_tail, \
+    pixman_composite_src_8888_0565_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8000_8000_process_pixblock_head
+    vqadd.u8    q14, q0, q2
+    vqadd.u8    q15, q1, q3
+.endm
+
+.macro pixman_composite_add_8000_8000_process_pixblock_tail
+.endm
+
+.macro pixman_composite_add_8000_8000_process_pixblock_tail_head
+    vld1.8      {d0, d1, d2, d3}, [SRC]!
+                                    PF add PF_X, PF_X, #32
+                                    PF tst PF_CTL, #0xF
+    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
+                                    PF addne PF_X, PF_X, #32
+                                    PF subne PF_CTL, PF_CTL, #1
+        vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
+                                    PF cmp PF_X, ORIG_W
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+                                    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+                                    PF subge PF_X, PF_X, ORIG_W
+                                    PF subges PF_CTL, PF_CTL, #0x10
+    vqadd.u8    q14, q0, q2
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+                                    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+    vqadd.u8    q15, q1, q3
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8000_8000_asm_neon, 8, 0, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8000_8000_process_pixblock_head, \
+    pixman_composite_add_8000_8000_process_pixblock_tail, \
+    pixman_composite_add_8000_8000_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_8888_process_pixblock_head
+    vmvn.8      d24, d3  /* get inverted alpha */
+    /* do alpha blending */
+    vmull.u8    q8, d24, d4
+    vmull.u8    q9, d24, d5
+    vmull.u8    q10, d24, d6
+    vmull.u8    q11, d24, d7
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail
+    vrshr.u16   q14, q8, #8
+    vrshr.u16   q15, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d28, q14, q8
+    vraddhn.u16 d29, q15, q9
+    vraddhn.u16 d30, q12, q10
+    vraddhn.u16 d31, q13, q11
+    vqadd.u8    q14, q0, q14
+    vqadd.u8    q15, q1, q15
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
+    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
+        vrshr.u16   q14, q8, #8
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+        vrshr.u16   q15, q9, #8
+        vrshr.u16   q12, q10, #8
+        vrshr.u16   q13, q11, #8
+                                    PF addne PF_X, PF_X, #8
+                                    PF subne PF_CTL, PF_CTL, #1
+        vraddhn.u16 d28, q14, q8
+        vraddhn.u16 d29, q15, q9
+                                    PF cmp PF_X, ORIG_W
+        vraddhn.u16 d30, q12, q10
+        vraddhn.u16 d31, q13, q11
+        vqadd.u8    q14, q0, q14
+        vqadd.u8    q15, q1, q15
+    vld4.8      {d0, d1, d2, d3}, [SRC]!
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+    vmvn.8      d22, d3
+                                    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+        vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
+                                    PF subge PF_X, PF_X, ORIG_W
+    vmull.u8    q8, d22, d4
+                                    PF subges PF_CTL, PF_CTL, #0x10
+    vmull.u8    q9, d22, d5
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+    vmull.u8    q10, d22, d6
+                                    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+    vmull.u8    q11, d22, d7
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_0565_process_pixblock_head
+    /* in */
+    vmull.u8    q0, d24, d8
+    vmull.u8    q1, d24, d9
+    vmull.u8    q6, d24, d10
+    vmull.u8    q7, d24, d11
+    vrshr.u16   q10, q0, #8
+    vrshr.u16   q11, q1, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q13, q7, #8
+    vraddhn.u16 d0, q0, q10
+    vraddhn.u16 d1, q1, q11
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d3, q7, q13
+
+    vshrn.u16   d6, q2, #8
+    vshrn.u16   d7, q2, #3
+    vsli.u16    q2, q2, #5
+    vsri.u8     d6, d6, #5
+    vmvn.8      d3, d3
+    vsri.u8     d7, d7, #6
+    vshrn.u16   d30, q2, #2
+    /* now do alpha blending */
+    vmull.u8    q10, d3, d6
+    vmull.u8    q11, d3, d7
+    vmull.u8    q12, d3, d30
+    vrshr.u16   q13, q10, #8
+    vrshr.u16   q3, q11, #8
+    vrshr.u16   q15, q12, #8
+    vraddhn.u16 d20, q10, q13
+    vraddhn.u16 d23, q11, q3
+    vraddhn.u16 d22, q12, q15
+.endm
+
+.macro pixman_composite_over_n_8_0565_process_pixblock_tail
+    vqadd.u8    d16, d2, d20
+    vqadd.u8    q9, q0, q11
+    /* convert to r5g6b5 */
+    vshll.u8    q14, d16, #8
+    vshll.u8    q8, d19, #8
+    vshll.u8    q9, d18, #8
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_n_8_0565_process_pixblock_tail_head
+    pixman_composite_over_n_8_0565_process_pixblock_tail
+    vst1.16     {d28, d29}, [DST_W, :128]!
+    vld1.16     {d4, d5}, [DST_R, :128]!
+    vld1.8      {d24}, [MASK]!
+    cache_preload 8, 8
+    pixman_composite_over_n_8_0565_process_pixblock_head
+.endm
+
+/*
+ * This function needs a special initialization of the solid source color.
+ * The solid source pixel data is fetched from the stack at offset
+ * ARGS_STACK_OFFSET, split into color components and replicated in the
+ * d8-d11 registers. Additionally, this function needs all the NEON
+ * registers, so it has to save the d8-d15 registers, which are callee-saved
+ * according to the ABI. These registers are restored in the 'cleanup' macro.
+ * All the other NEON registers are caller-saved, so they can be clobbered
+ * freely without introducing any problems.
+ */
+.macro pixman_composite_over_n_8_0565_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vpush       {d8-d15}
+    vld1.32     {d11[0]}, [DUMMY]
+    vdup.8      d8, d11[0]
+    vdup.8      d9, d11[1]
+    vdup.8      d10, d11[2]
+    vdup.8      d11, d11[3]
+.endm
+
+.macro pixman_composite_over_n_8_0565_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8_0565_init, \
+    pixman_composite_over_n_8_0565_cleanup, \
+    pixman_composite_over_n_8_0565_process_pixblock_head, \
+    pixman_composite_over_n_8_0565_process_pixblock_tail, \
+    pixman_composite_over_n_8_0565_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0565_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
+    vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
+    vld1.16 {d0, d1, d2, d3}, [SRC]!
+    cache_preload 16, 16
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    16, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0565_0565_process_pixblock_head, \
+    pixman_composite_src_0565_0565_process_pixblock_tail, \
+    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail_head
+    vst1.8  {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_8_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d0[0]}, [DUMMY]
+    vsli.u64    d0, d0, #8
+    vsli.u64    d0, d0, #16
+    vsli.u64    d0, d0, #32
+    vmov        d1, d0
+    vmov        q1, q0
+.endm
+
+.macro pixman_composite_src_n_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
+    FLAG_DST_WRITEONLY, \
+    32, /* number of pixels, processed in a single block */ \
+    0,  /* prefetch distance */ \
+    pixman_composite_src_n_8_init, \
+    pixman_composite_src_n_8_cleanup, \
+    pixman_composite_src_n_8_process_pixblock_head, \
+    pixman_composite_src_n_8_process_pixblock_tail, \
+    pixman_composite_src_n_8_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail_head
+    vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_0565_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d0[0]}, [DUMMY]
+    vsli.u64    d0, d0, #16
+    vsli.u64    d0, d0, #32
+    vmov        d1, d0
+    vmov        q1, q0
+.endm
+
+.macro pixman_composite_src_n_0565_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    16, /* number of pixels, processed in a single block */ \
+    0,  /* prefetch distance */ \
+    pixman_composite_src_n_0565_init, \
+    pixman_composite_src_n_0565_cleanup, \
+    pixman_composite_src_n_0565_process_pixblock_head, \
+    pixman_composite_src_n_0565_process_pixblock_tail, \
+    pixman_composite_src_n_0565_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail_head
+    vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_8888_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d0[0]}, [DUMMY]
+    vsli.u64    d0, d0, #32
+    vmov        d1, d0
+    vmov        q1, q0
+.endm
+
+.macro pixman_composite_src_n_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    0, /* prefetch distance */ \
+    pixman_composite_src_n_8888_init, \
+    pixman_composite_src_n_8888_cleanup, \
+    pixman_composite_src_n_8888_process_pixblock_head, \
+    pixman_composite_src_n_8888_process_pixblock_tail, \
+    pixman_composite_src_n_8888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
+    vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+    vld1.32 {d0, d1, d2, d3}, [SRC]!
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_8888_8888_process_pixblock_head, \
+    pixman_composite_src_8888_8888_process_pixblock_tail, \
+    pixman_composite_src_8888_8888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_head
+    /* expecting deinterleaved source data in {d8, d9, d10, d11} */
+    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
+    /* and destination data in {d4, d5, d6, d7} */
+    /* mask is in d24 (d25, d26, d27 are unused) */
+
+    /* in */
+    vmull.u8    q0, d24, d8
+    vmull.u8    q1, d24, d9
+    vmull.u8    q6, d24, d10
+    vmull.u8    q7, d24, d11
+    vrshr.u16   q10, q0, #8
+    vrshr.u16   q11, q1, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q13, q7, #8
+    vraddhn.u16 d0, q0, q10
+    vraddhn.u16 d1, q1, q11
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d3, q7, q13
+    vmvn.8      d24, d3  /* get inverted alpha */
+    /* source:      d0 - blue, d1 - green, d2 - red, d3 - alpha */
+    /* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */
+    /* now do alpha blending */
+    vmull.u8    q8, d24, d4
+    vmull.u8    q9, d24, d5
+    vmull.u8    q10, d24, d6
+    vmull.u8    q11, d24, d7
+.endm
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail
+    vrshr.u16   q14, q8, #8
+    vrshr.u16   q15, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d28, q14, q8
+    vraddhn.u16 d29, q15, q9
+    vraddhn.u16 d30, q12, q10
+    vraddhn.u16 d31, q13, q11
+    vqadd.u8    q14, q0, q14
+    vqadd.u8    q15, q1, q15
+.endm
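Per channel, the head/tail pair above implements OVER with an a8 mask: the
solid source is first multiplied by the mask (the 'in' operation), then the
destination scaled by the inverted masked alpha is added with saturation. A
scalar sketch of one channel, for illustration only (mul_div_255 is the
divide-by-255 idiom shown earlier):

    #include <stdint.h>

    static inline uint8_t mul_div_255 (uint8_t x, uint8_t a)
    {
        uint16_t t = (uint16_t)x * a;
        return (uint8_t)((t + ((t + 128) >> 8) + 128) >> 8);
    }

    /* One channel of 'over_n_8_8888': s/sa are the solid source channel
     * and its alpha, m is the a8 mask value, d is the destination channel. */
    static inline uint8_t over_n_8_channel (uint8_t s, uint8_t sa,
                                            uint8_t m, uint8_t d)
    {
        uint8_t  s_in  = mul_div_255 (m, s);               /* 'in'        */
        uint8_t  a_in  = mul_div_255 (m, sa);
        uint8_t  d_out = mul_div_255 ((uint8_t)~a_in, d);  /* vmvn + 'in' */
        uint16_t sum   = (uint16_t)s_in + d_out;

        return sum > 255 ? 255 : (uint8_t)sum;             /* vqadd.u8    */
    }
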
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
+    pixman_composite_over_n_8_8888_process_pixblock_tail
+    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vld1.8      {d24}, [MASK]!
+    cache_preload 8, 8
+    pixman_composite_over_n_8_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_n_8_8888_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vpush       {d8-d15}
+    vld1.32     {d11[0]}, [DUMMY]
+    vdup.8      d8, d11[0]
+    vdup.8      d9, d11[1]
+    vdup.8      d10, d11[2]
+    vdup.8      d11, d11[3]
+.endm
+
+.macro pixman_composite_over_n_8_8888_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8_8888_init, \
+    pixman_composite_over_n_8_8888_cleanup, \
+    pixman_composite_over_n_8_8888_process_pixblock_head, \
+    pixman_composite_over_n_8_8888_process_pixblock_tail, \
+    pixman_composite_over_n_8_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_n_8_8_process_pixblock_head
+    /* expecting source data in {d8, d9, d10, d11} */
+    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
+    /* and destination data in {d4, d5, d6, d7} */
+    /* mask is in d24, d25, d26, d27 */
+    vmull.u8    q0, d24, d11
+    vmull.u8    q1, d25, d11
+    vmull.u8    q6, d26, d11
+    vmull.u8    q7, d27, d11
+    vrshr.u16   q10, q0, #8
+    vrshr.u16   q11, q1, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q13, q7, #8
+    vraddhn.u16 d0, q0, q10
+    vraddhn.u16 d1, q1, q11
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d3, q7, q13
+    vqadd.u8    q14, q0, q2
+    vqadd.u8    q15, q1, q3
+.endm
+
+.macro pixman_composite_add_n_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
+    pixman_composite_add_n_8_8_process_pixblock_tail
+    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vld1.8      {d24, d25, d26, d27}, [MASK]!
+    cache_preload 32, 32
+    pixman_composite_add_n_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_n_8_8_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vpush       {d8-d15}
+    vld1.32     {d11[0]}, [DUMMY]
+    vdup.8      d11, d11[3]
+.endm
+
+.macro pixman_composite_add_n_8_8_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_n_8_8_init, \
+    pixman_composite_add_n_8_8_cleanup, \
+    pixman_composite_add_n_8_8_process_pixblock_head, \
+    pixman_composite_add_n_8_8_process_pixblock_tail, \
+    pixman_composite_add_n_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8_8_8_process_pixblock_head
+    /* expecting source data in {d0, d1, d2, d3} */
+    /* destination data in {d4, d5, d6, d7} */
+    /* mask in {d24, d25, d26, d27} */
+    vmull.u8    q8, d24, d0
+    vmull.u8    q9, d25, d1
+    vmull.u8    q10, d26, d2
+    vmull.u8    q11, d27, d3
+    vrshr.u16   q0, q8, #8
+    vrshr.u16   q1, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d0, q0, q8
+    vraddhn.u16 d1, q1, q9
+    vraddhn.u16 d2, q12, q10
+    vraddhn.u16 d3, q13, q11
+    vqadd.u8    q14, q0, q2
+    vqadd.u8    q15, q1, q3
+.endm
+
+.macro pixman_composite_add_8_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
+    pixman_composite_add_8_8_8_process_pixblock_tail
+    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vld1.8      {d24, d25, d26, d27}, [MASK]!
+    vld1.8      {d0, d1, d2, d3}, [SRC]!
+    cache_preload 32, 32
+    pixman_composite_add_8_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_8_8_8_init
+.endm
+
+.macro pixman_composite_add_8_8_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_8_8_8_init, \
+    pixman_composite_add_8_8_8_cleanup, \
+    pixman_composite_add_8_8_8_process_pixblock_head, \
+    pixman_composite_add_8_8_8_process_pixblock_tail, \
+    pixman_composite_add_8_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_head
+    /* expecting source data in {d0, d1, d2, d3} */
+    /* destination data in {d4, d5, d6, d7} */
+    /* solid mask is in d15 */
+
+    /* 'in' */
+    vmull.u8    q8, d15, d3
+    vmull.u8    q6, d15, d2
+    vmull.u8    q5, d15, d1
+    vmull.u8    q4, d15, d0
+    vrshr.u16   q13, q8, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q11, q5, #8
+    vrshr.u16   q10, q4, #8
+    vraddhn.u16 d3, q8, q13
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d1, q5, q11
+    vraddhn.u16 d0, q4, q10
+    vmvn.8      d24, d3  /* get inverted alpha */
+    /* now do alpha blending */
+    vmull.u8    q8, d24, d4
+    vmull.u8    q9, d24, d5
+    vmull.u8    q10, d24, d6
+    vmull.u8    q11, d24, d7
+.endm
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
+    vrshr.u16   q14, q8, #8
+    vrshr.u16   q15, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d28, q14, q8
+    vraddhn.u16 d29, q15, q9
+    vraddhn.u16 d30, q12, q10
+    vraddhn.u16 d31, q13, q11
+    vqadd.u8    q14, q0, q14
+    vqadd.u8    q15, q1, q15
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+    vld4.8     {d4, d5, d6, d7}, [DST_R, :128]!
+    pixman_composite_over_8888_n_8888_process_pixblock_tail
+    vld4.8     {d0, d1, d2, d3}, [SRC]!
+    cache_preload 8, 8
+    pixman_composite_over_8888_n_8888_process_pixblock_head
+    vst4.8     {d28, d29, d30, d31}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_over_8888_n_8888_init
+    add         DUMMY, sp, #48
+    vpush       {d8-d15}
+    vld1.32     {d15[0]}, [DUMMY]
+    vdup.8      d15, d15[3]
+.endm
+
+.macro pixman_composite_over_8888_n_8888_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_8888_n_8888_init, \
+    pixman_composite_over_8888_n_8888_cleanup, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_0888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
+    vst3.8 {d0, d1, d2}, [DST_W]!
+    vld3.8 {d0, d1, d2}, [SRC]!
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0888_0888_process_pixblock_head, \
+    pixman_composite_src_0888_0888_process_pixblock_tail, \
+    pixman_composite_src_0888_0888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
diff --git a/pixman/pixman-arm-neon-asm.h b/pixman/pixman-arm-neon-asm.h
new file mode 100644
index 0000000..e7be5cd
--- /dev/null
+++ b/pixman/pixman-arm-neon-asm.h
@@ -0,0 +1,787 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Nokia Corporation not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Nokia Corporation makes no
+ * representations about the suitability of this software for any purpose.
+ * It is provided "as is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author:  Siarhei Siamashka (siarhei.siamashka at nokia.com)
+ */
+
+/*
+ * This file contains a macro ('generate_composite_function') which can
+ * construct 2D image processing functions, based on a common template.
+ * Any combination of source, destination and mask images with 8bpp,
+ * 16bpp, 24bpp or 32bpp color formats is supported.
+ *
+ * This macro takes care of:
+ *  - handling of leading and trailing unaligned pixels
+ *  - doing most of the work related to L2 cache preload
+ *  - encouraging the use of software pipelining for better instruction
+ *    scheduling
+ *
+ * The user of this macro has to provide some configuration parameters
+ * (bit depths for the images, prefetch distance, etc.) and a set of
+ * macros which implement the basic code chunks responsible for pixel
+ * processing. See the 'pixman-arm-neon-asm.S' file for usage examples.
+ *
+ * TODO:
+ *  - try overlapped pixel method (from Ian Rickards) when processing
+ *    exactly two blocks of pixels
+ *  - maybe add an option to do reverse scanline processing
+ */
+
+/*
+ * Bit flags for the 'generate_composite_function' macro which are used
+ * to tune the generated functions' behavior.
+ */
+.set FLAG_DST_WRITEONLY,       0
+.set FLAG_DST_READWRITE,       1
+.set FLAG_DEINTERLEAVE_32BPP,  2
+
+/*
+ * Offset on the stack where the mask and source pointers/strides can be
+ * accessed from the 'init' macro. This is useful for special handling of
+ * a solid source or mask.
+ */
+.set ARGS_STACK_OFFSET,        40
+
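(For reference: 'generate_composite_function' below starts with
push {r4-r12, lr}, i.e. 10 registers * 4 bytes = 40 bytes, so under the
AAPCS the stack-passed arguments - source, source stride, mask, mask
stride - sit at sp + 40 inside the function body. The 'init' macros
compute this address before their own vpush, so the same offset applies
there.)
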
+/*
+ * Constants for selecting the preferred prefetch type.
+ */
+.set PREFETCH_TYPE_NONE,       0 /* No prefetch at all */
+.set PREFETCH_TYPE_SIMPLE,     1 /* A simple, fixed-distance-ahead prefetch */
+.set PREFETCH_TYPE_ADVANCED,   2 /* Advanced fine-grained prefetch */
+
+/*
+ * Definitions of supplementary pixld/pixst macros (for partial load/store of
+ * pixel data).
+ */
+
+.macro pixldst1 op, elem_size, reg1, mem_operand, abits
+.if abits > 0
+    op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
+.else
+    op&.&elem_size {d&reg1}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
+.if abits > 0
+    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
+.else
+    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
+.if abits > 0
+    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
+.else
+    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
+    op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
+    op&.&elem_size {d&reg1, d&reg2, d&reg3}, [&mem_operand&]!
+.endm
+
+.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
+    op&.&elem_size {d&reg1[idx], d&reg2[idx], d&reg3[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
+.if numbytes == 32
+    pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
+                              %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif numbytes == 16
+    pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
+.elseif numbytes == 8
+    pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
+.elseif numbytes == 4
+    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
+        pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
+    .elseif elem_size == 16
+        pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
+        pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
+    .else
+        pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
+    .endif
+.elseif numbytes == 2
+    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
+        pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
+    .else
+        pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
+    .endif
+.elseif numbytes == 1
+    pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
+.else
+    .error "unsupported size: numbytes"
+.endif
+.endm
+
+.macro pixld numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
+                      %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+    pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+    pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixst numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
+                      %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+    pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+    pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixld_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+    pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+    pixld numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro pixst_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+    pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+    pixst numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro vuzp8 reg1, reg2
+    vuzp.8 d&reg1, d&reg2
+.endm
+
+.macro vzip8 reg1, reg2
+    vzip.8 d&reg1, d&reg2
+.endm
+
+/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixdeinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    vuzp8 %(basereg+0), %(basereg+1)
+    vuzp8 %(basereg+2), %(basereg+3)
+    vuzp8 %(basereg+1), %(basereg+3)
+    vuzp8 %(basereg+0), %(basereg+2)
+.endif
+.endm
+
+/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    vzip8 %(basereg+0), %(basereg+2)
+    vzip8 %(basereg+1), %(basereg+3)
+    vzip8 %(basereg+2), %(basereg+3)
+    vzip8 %(basereg+0), %(basereg+1)
+.endif
+.endm
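The effect of the vuzp8 sequence in 'pixdeinterleave' above on eight
interleaved 32bpp pixels (shown for base register 0; 'pixinterleave'
performs the inverse):

    before:  d0 = { B0 G0 R0 A0 B1 G1 R1 A1 } ... d3 = { B6 G6 R6 A6 B7 G7 R7 A7 }
    after:   d0 = { B0 .. B7 }  d1 = { G0 .. G7 }  d2 = { R0 .. R7 }  d3 = { A0 .. A7 }
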
+
+/*
+ * This is a macro for implementing cache preload. The main idea is that
+ * the cache preload logic is mostly independent of the rest of the pixel
+ * processing code. It starts at the top left pixel, moves forward across
+ * the pixels and can jump across scanlines. The prefetch distance is
+ * handled in an 'incremental' way: it starts from 0 and advances to the
+ * optimal distance over time. After reaching the optimal prefetch
+ * distance, it is kept constant. There are some checks which prevent
+ * prefetching unneeded pixel lines below the image (but it still can
+ * prefetch a bit more data on the right side of the image - not a big
+ * issue, and it may actually be helpful when rendering text glyphs). An
+ * additional trick is the use of an LDR instruction instead of PLD for
+ * the prefetch when moving to the next line: there is a high chance of
+ * getting a TLB miss in this case, and PLD would be useless.
+ *
+ * This sounds like it may introduce a noticeable overhead (when working
+ * with fully cached data). But in reality, because the NEON unit in the
+ * ARM Cortex-A8 has a separate pipeline and instruction queue, normal ARM
+ * code can execute simultaneously with NEON code and be completely
+ * shadowed by it. Thus we get no performance overhead at all (*). This
+ * looks like a very nice feature of the Cortex-A8, if used wisely. We
+ * don't have a hardware prefetcher, but we can still implement some
+ * rather advanced prefetch logic in software for almost zero cost!
+ *
+ * (*) The overhead of the prefetcher is visible when running some trivial
+ * pixel processing like a simple copy. Anyway, having prefetch is a must
+ * when working with graphics data.
+ */
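A rough C model of the per-block bookkeeping done by the 'cache_preload'
macro below (illustrative only; the names mirror the PF_* registers):

    /* pf_x   - prefetch position (in pixels) within the scanline
     * pf_ctl - remaining scanline count (upper bits) combined with the
     *          remaining distance-boost steps (low 4 bits)
     * orig_w - scanline width in pixels */
    static void cache_preload_step (int std_inc, int boost_inc,
                                    int *pf_x, int *pf_ctl, int orig_w)
    {
        *pf_x += std_inc;           /* keep up with the pixel block   */
        if (*pf_ctl & 0xf)          /* still ramping the distance up? */
        {
            *pf_x += boost_inc;
            *pf_ctl -= 1;
        }

        /* ... PLD at pf_x for the source/destination/mask scanlines ... */

        if (*pf_x >= orig_w)        /* prefetch ran past the scanline */
        {
            *pf_x -= orig_w;
            *pf_ctl -= 0x10;
            if (*pf_ctl >= 0)
            {
                /* advance the PF_* pointers to the next scanline and
                 * touch it with LDR to take a possible TLB miss early */
            }
        }
    }
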
+.macro PF a, x:vararg
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
+    a x
+.endif
+.endm
+
+.macro cache_preload std_increment, boost_increment
+.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
+.if regs_shortage
+    PF ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
+.endif
+.if std_increment != 0
+    PF add PF_X, PF_X, #std_increment
+.endif
+    PF tst PF_CTL, #0xF
+    PF addne PF_X, PF_X, #boost_increment
+    PF subne PF_CTL, PF_CTL, #1
+    PF cmp PF_X, ORIG_W
+.if src_bpp_shift >= 0
+    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+.endif
+.if dst_r_bpp != 0
+    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+.endif
+.if mask_bpp_shift >= 0
+    PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
+.endif
+    PF subge PF_X, PF_X, ORIG_W
+    PF subges PF_CTL, PF_CTL, #0x10
+.if src_bpp_shift >= 0
+    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+.endif
+.if dst_r_bpp != 0
+    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+.endif
+.if mask_bpp_shift >= 0
+    PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
+.endif
+.endif
+.endm
+
+.macro cache_preload_simple
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
+.if src_bpp > 0
+    pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
+.endif
+.if dst_r_bpp > 0
+    pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
+.endif
+.if mask_bpp > 0
+    pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
+.endif
+.endif
+.endm
+
+/*
+ * A macro used to process leading pixels until the destination pointer
+ * is properly aligned (to a 16-byte boundary). When the destination
+ * buffer uses a 16bpp format, this is unnecessary, or even pointless.
+ */
+.macro ensure_destination_ptr_alignment process_pixblock_head, \
+                                        process_pixblock_tail, \
+                                        process_pixblock_tail_head
+.if dst_w_bpp != 24
+    tst         DST_R, #0xF
+    beq         2f
+
+.irp lowbit, 1, 2, 4, 8, 16
+local skip1
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+    tst         DST_R, #lowbit
+    beq         1f
+.endif
+    pixld       (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
+    pixld       (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
+.if dst_r_bpp > 0
+    pixld_a     (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
+.else
+    add         DST_R, DST_R, #lowbit
+.endif
+    PF add      PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
+    sub         W, W, #(lowbit * 8 / dst_w_bpp)
+1:
+.endif
+.endr
+    pixdeinterleave src_bpp, src_basereg
+    pixdeinterleave mask_bpp, mask_basereg
+    pixdeinterleave dst_r_bpp, dst_r_basereg
+
+    process_pixblock_head
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+    process_pixblock_tail
+
+    pixinterleave dst_w_bpp, dst_w_basereg
+.irp lowbit, 1, 2, 4, 8, 16
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+    tst         DST_W, #lowbit
+    beq         1f
+.endif
+    pixst_a     (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
+1:
+.endif
+.endr
+.endif
+2:
+.endm
+
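In effect the macro above consumes the following number of leading pixels,
using successive single-bit tests of the destination address (a C sketch,
illustrative only):

    #include <stdint.h>

    /* Pixels to process before DST reaches a 16-byte boundary
     * (dst_bpp is 8, 16 or 32 here; 24bpp destinations skip this step). */
    static inline int leading_pixels (uintptr_t dst, int dst_bpp)
    {
        uintptr_t misalign = dst & 15;

        return misalign ? (int)((16 - misalign) * 8 / dst_bpp) : 0;
    }
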
+/*
+ * Special code for processing up to (pixblock_size - 1) remaining
+ * trailing pixels. As SIMD processing performs its operations on
+ * pixblock_size pixels, anything smaller than this has to be loaded
+ * and stored in a special way. Loading and storing of pixel data is
+ * performed in such a way that we fill some 'slots' in the NEON
+ * registers (some slots naturally stay unused), then perform the
+ * compositing operation as usual. In the end, the data is taken from
+ * these 'slots' and saved to memory.
+ *
+ * cache_preload_flag - set to 0 to suppress prefetch
+ * dst_aligned_flag   - selects whether the destination buffer
+ *                      is aligned
+ */
+.macro process_trailing_pixels cache_preload_flag, \
+                               dst_aligned_flag, \
+                               process_pixblock_head, \
+                               process_pixblock_tail, \
+                               process_pixblock_tail_head
+    tst         W, #(pixblock_size - 1)
+    beq         2f
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+    tst         W, #chunk_size
+    beq         1f
+    pixld       chunk_size, src_bpp, src_basereg, SRC
+    pixld       chunk_size, mask_bpp, mask_basereg, MASK
+.if dst_aligned_flag != 0
+    pixld_a     chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.else
+    pixld       chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.endif
+.if cache_preload_flag != 0
+    PF add      PF_X, PF_X, #chunk_size
+.endif
+1:
+.endif
+.endr
+    pixdeinterleave src_bpp, src_basereg
+    pixdeinterleave mask_bpp, mask_basereg
+    pixdeinterleave dst_r_bpp, dst_r_basereg
+
+    process_pixblock_head
+.if cache_preload_flag != 0
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+.endif
+    process_pixblock_tail
+    pixinterleave dst_w_bpp, dst_w_basereg
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+    tst         W, #chunk_size
+    beq         1f
+.if dst_aligned_flag != 0
+    pixst_a     chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.else
+    pixst       chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.endif
+1:
+.endif
+.endr
+2:
+.endm
+
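The decomposition into power-of-two chunks performed by the macro above can
be modelled in C roughly as follows (illustrative only):

    /* Mirror of the '.irp chunk_size, 16, 8, 4, 2, 1' loops above. */
    static void process_trailing (int w, int pixblock_size)
    {
        int remaining = w & (pixblock_size - 1);
        int chunk;

        for (chunk = 16; chunk >= 1; chunk >>= 1)
        {
            if (chunk < pixblock_size && (remaining & chunk))
            {
                /* pixld/pixst 'chunk' pixels into/from their slot */
            }
        }
    }
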
+/*
+ * A macro which performs all the operations needed to switch to the next
+ * scanline and start the next loop iteration, unless all the scanlines
+ * have already been processed.
+ */
+.macro advance_to_next_scanline start_of_loop_label
+.if regs_shortage
+    ldrd        W, [sp] /* load W and H (width and height) from stack */
+.else
+    mov         W, ORIG_W
+.endif
+    add         DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
+.if src_bpp != 0
+    add         SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
+.endif
+.if mask_bpp != 0
+    add         MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
+.endif
+.if (dst_w_bpp != 24)
+    sub         DST_W, DST_W, W, lsl #dst_bpp_shift
+.endif
+.if (src_bpp != 24) && (src_bpp != 0)
+    sub         SRC, SRC, W, lsl #src_bpp_shift
+.endif
+.if (mask_bpp != 24) && (mask_bpp != 0)
+    sub         MASK, MASK, W, lsl #mask_bpp_shift
+.endif
+    subs        H, H, #1
+    mov         DST_R, DST_W
+.if regs_shortage
+    str         H, [sp, #4] /* save updated height to stack */
+.endif
+    bge         start_of_loop_label
+.endm
+
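In pointer terms the macro above adds one stride and then rewinds by the
width that was just processed, so each pointer lands at the start of the
next scanline. For example, for a 32bpp buffer (24bpp formats skip the
rewind because their strides were pre-adjusted by -3 * width):

    #include <stdint.h>

    static inline uint32_t *
    next_scanline (uint32_t *end_of_row, int stride, int width)
    {
        /* the pointer currently sits at row start + width */
        return end_of_row + stride - width;
    }
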
+/*
+ * Registers are allocated in the following way by default:
+ * d0, d1, d2, d3     - reserved for loading source pixel data
+ * d4, d5, d6, d7     - reserved for loading destination pixel data
+ * d24, d25, d26, d27 - reserved for loading mask pixel data
+ * d28, d29, d30, d31 - final destination pixel data for writeback to memory
+ */
+.macro generate_composite_function fname, \
+                                   src_bpp_, \
+                                   mask_bpp_, \
+                                   dst_w_bpp_, \
+                                   flags, \
+                                   pixblock_size_, \
+                                   prefetch_distance, \
+                                   init, \
+                                   cleanup, \
+                                   process_pixblock_head, \
+                                   process_pixblock_tail, \
+                                   process_pixblock_tail_head, \
+                                   dst_w_basereg_ = 28, \
+                                   dst_r_basereg_ = 4, \
+                                   src_basereg_   = 0, \
+                                   mask_basereg_  = 24
+
+    .func fname
+    .global fname
+    /* For ELF format also set function visibility to hidden */
+#ifdef __ELF__
+    .hidden fname
+    .type fname, %function
+#endif
+fname:
+    push        {r4-r12, lr}        /* save all registers */
+
+/*
+ * Select the prefetch type for this function. If the prefetch distance is
+ * set to 0, or one of the color formats is 24bpp, SIMPLE prefetch
+ * has to be used instead of ADVANCED.
+ */
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
+.if prefetch_distance == 0
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
+        ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
+.endif
+
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+    .set src_bpp, src_bpp_
+    .set mask_bpp, mask_bpp_
+    .set dst_w_bpp, dst_w_bpp_
+    .set pixblock_size, pixblock_size_
+    .set dst_w_basereg, dst_w_basereg_
+    .set dst_r_basereg, dst_r_basereg_
+    .set src_basereg, src_basereg_
+    .set mask_basereg, mask_basereg_
+
+/*
+ * Assign symbolic names to registers
+ */
+    W           .req        r0      /* width (is updated during processing) */
+    H           .req        r1      /* height (is updated during processing) */
+    DST_W       .req        r2      /* destination buffer pointer for writes */
+    DST_STRIDE  .req        r3      /* destination image stride */
+    SRC         .req        r4      /* source buffer pointer */
+    SRC_STRIDE  .req        r5      /* source image stride */
+    DST_R       .req        r6      /* destination buffer pointer for reads */
+
+    MASK        .req        r7      /* mask pointer */
+    MASK_STRIDE .req        r8      /* mask stride */
+
+    PF_CTL      .req        r9      /* combined lines counter and prefetch */
+                                    /* distance increment counter */
+    PF_X        .req        r10     /* pixel index in a scanline for current */
+                                    /* prefetch position */
+    PF_SRC      .req        r11     /* pointer to source scanline start */
+                                    /* for prefetch purposes */
+    PF_DST      .req        r12     /* pointer to destination scanline start */
+                                    /* for prefetch purposes */
+    PF_MASK     .req        r14     /* pointer to mask scanline start */
+                                    /* for prefetch purposes */
+/*
+ * Check whether we have enough registers for all the local variables.
+ * If we don't have enough registers, the original width and height are
+ * kept on top of the stack (and the 'regs_shortage' variable is set to
+ * indicate this for the rest of the code). Even if there are enough
+ * registers, the allocation scheme may be a bit different depending on
+ * whether the source or the mask is unused.
+ */
+.if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED)
+    ORIG_W      .req        r10     /* saved original width */
+    DUMMY       .req        r12     /* temporary register */
+    .set        regs_shortage, 0
+.elseif mask_bpp == 0
+    ORIG_W      .req        r7      /* saved original width */
+    DUMMY       .req        r8      /* temporary register */
+    .set        regs_shortage, 0
+.elseif src_bpp == 0
+    ORIG_W      .req        r4      /* saved original width */
+    DUMMY       .req        r5      /* temporary register */
+    .set        regs_shortage, 0
+.else
+    ORIG_W      .req        r1      /* saved original width */
+    DUMMY       .req        r1      /* temporary register */
+    .set        regs_shortage, 1
+.endif
+
+    .set mask_bpp_shift, -1
+.if src_bpp == 32
+    .set src_bpp_shift, 2
+.elseif src_bpp == 24
+    .set src_bpp_shift, 0
+.elseif src_bpp == 16
+    .set src_bpp_shift, 1
+.elseif src_bpp == 8
+    .set src_bpp_shift, 0
+.elseif src_bpp == 0
+    .set src_bpp_shift, -1
+.else
+    .error "requested src bpp (src_bpp) is not supported"
+.endif
+.if mask_bpp == 32
+    .set mask_bpp_shift, 2
+.elseif mask_bpp == 24
+    .set mask_bpp_shift, 0
+.elseif mask_bpp == 8
+    .set mask_bpp_shift, 0
+.elseif mask_bpp == 0
+    .set mask_bpp_shift, -1
+.else
+    .error "requested mask bpp (mask_bpp) is not supported"
+.endif
+.if dst_w_bpp == 32
+    .set dst_bpp_shift, 2
+.elseif dst_w_bpp == 24
+    .set dst_bpp_shift, 0
+.elseif dst_w_bpp == 16
+    .set dst_bpp_shift, 1
+.elseif dst_w_bpp == 8
+    .set dst_bpp_shift, 0
+.else
+    .error "requested dst bpp (dst_w_bpp) is not supported"
+.endif
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+    .set dst_r_bpp, dst_w_bpp
+.else
+    .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+    .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+    .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+.if prefetch_distance < 0 || prefetch_distance > 15
+    .error "invalid prefetch distance (prefetch_distance)"
+.endif
+
+.if src_bpp > 0
+    ldr         SRC, [sp, #40]
+.endif
+.if mask_bpp > 0
+    ldr         MASK, [sp, #48]
+.endif
+    PF mov      PF_X, #0
+.if src_bpp > 0
+    ldr         SRC_STRIDE, [sp, #44]
+.endif
+.if mask_bpp > 0
+    ldr         MASK_STRIDE, [sp, #52]
+.endif
+    mov         DST_R, DST_W
+
+.if src_bpp == 24
+    sub         SRC_STRIDE, SRC_STRIDE, W
+    sub         SRC_STRIDE, SRC_STRIDE, W, lsl #1
+.endif
+.if mask_bpp == 24
+    sub         MASK_STRIDE, MASK_STRIDE, W
+    sub         MASK_STRIDE, MASK_STRIDE, W, lsl #1
+.endif
+.if dst_w_bpp == 24
+    sub         DST_STRIDE, DST_STRIDE, W
+    sub         DST_STRIDE, DST_STRIDE, W, lsl #1
+.endif
+
+/*
+ * Set up the initial state of the advanced prefetcher
+ */
+    PF mov      PF_SRC, SRC
+    PF mov      PF_DST, DST_R
+    PF mov      PF_MASK, MASK
+    /* PF_CTL = prefetch_distance | ((h - 1) << 4) */
+    PF mov      PF_CTL, H, lsl #4
+    PF add      PF_CTL, #(prefetch_distance - 0x10)
+
+    init
+.if regs_shortage
+    push        {r0, r1}
+.endif
+    subs        H, H, #1
+.if regs_shortage
+    str         H, [sp, #4] /* save updated height to stack */
+.else
+    mov         ORIG_W, W
+.endif
+    blt         9f
+    cmp         W, #(pixblock_size * 2)
+    blt         8f
+/*
+ * This is the start of the pipelined loop, which is optimized for
+ * long scanlines
+ */
+0:
+    ensure_destination_ptr_alignment process_pixblock_head, \
+                                     process_pixblock_tail, \
+                                     process_pixblock_tail_head
+
+    /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+    pixld_a     pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    pixld       pixblock_size, src_bpp, \
+                (src_basereg - pixblock_size * src_bpp / 64), SRC
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    PF add      PF_X, PF_X, #pixblock_size
+    process_pixblock_head
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+    subs        W, W, #(pixblock_size * 2)
+    blt         2f
+1:
+    process_pixblock_tail_head
+    cache_preload_simple
+    subs        W, W, #pixblock_size
+    bge         1b
+2:
+    process_pixblock_tail
+    pixst_a     pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+
+    /* Process the remaining trailing pixels in the scanline */
+    process_trailing_pixels 1, 1, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+    advance_to_next_scanline 0b
+
+.if regs_shortage
+    pop         {r0, r1}
+.endif
+    cleanup
+    pop         {r4-r12, pc}  /* exit */
+/*
+ * This is the start of the loop, designed to process images with small width
+ * (less than pixblock_size * 2 pixels). In this case neither pipelining
+ * nor prefetch is used.
+ */
+8:
+    /* Process exactly pixblock_size pixels if needed */
+    tst         W, #pixblock_size
+    beq         1f
+    pixld       pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    pixld       pixblock_size, src_bpp, \
+                (src_basereg - pixblock_size * src_bpp / 64), SRC
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    process_pixblock_head
+    process_pixblock_tail
+    pixst       pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+1:
+    /* Process the remaining trailing pixels in the scanline */
+    process_trailing_pixels 0, 0, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+    advance_to_next_scanline 8b
+9:
+.if regs_shortage
+    pop         {r0, r1}
+.endif
+    cleanup
+    pop         {r4-r12, pc}  /* exit */
+
+    .unreq      SRC
+    .unreq      MASK
+    .unreq      DST_R
+    .unreq      DST_W
+    .unreq      ORIG_W
+    .unreq      W
+    .unreq      H
+    .unreq      SRC_STRIDE
+    .unreq      DST_STRIDE
+    .unreq      MASK_STRIDE
+    .unreq      PF_CTL
+    .unreq      PF_X
+    .unreq      PF_SRC
+    .unreq      PF_DST
+    .unreq      PF_MASK
+    .unreq      DUMMY
+    .endfunc
+.endm
+
+.macro default_init
+.endm
+
+.macro default_cleanup
+.endm
commit 1eff0ab487efe4720451b8bd92c8423b9772a69a
Author: Siarhei Siamashka <siarhei.siamashka at nokia.com>
Date:   Wed Nov 4 14:25:27 2009 +0200

    ARM: removed old ARM NEON optimizations

diff --git a/pixman/pixman-arm-neon.c b/pixman/pixman-arm-neon.c
index 9caef61..9052061 100644
--- a/pixman/pixman-arm-neon.c
+++ b/pixman/pixman-arm-neon.c
@@ -30,1671 +30,9 @@
 #include <config.h>
 #endif
 
-#include <arm_neon.h>
 #include <string.h>
 #include "pixman-private.h"
 
-/* Deal with an intrinsic that is defined differently in GCC */
-#if !defined(__ARMCC_VERSION) && !defined(__pld)
-#define __pld(_x) __builtin_prefetch (_x)
-#endif
-
-static force_inline uint8x8x4_t
-unpack0565 (uint16x8_t rgb)
-{
-    uint16x8_t gb, b;
-    uint8x8x4_t res;
-
-    res.val[3] = vdup_n_u8 (0);
-    gb = vshrq_n_u16 (rgb, 5);
-    b = vshrq_n_u16 (rgb, 5 + 6);
-
-    res.val[0] = vmovn_u16 (rgb);  /* get low 5 bits */
-    res.val[1] = vmovn_u16 (gb);   /* get mid 6 bits */
-    res.val[2] = vmovn_u16 (b);    /* get top 5 bits */
-
-    res.val[0] = vshl_n_u8 (res.val[0], 3); /* shift to top */
-    res.val[1] = vshl_n_u8 (res.val[1], 2); /* shift to top */
-    res.val[2] = vshl_n_u8 (res.val[2], 3); /* shift to top */
-
-    res.val[0] = vsri_n_u8 (res.val[0], res.val[0], 5);
-    res.val[1] = vsri_n_u8 (res.val[1], res.val[1], 6);
-    res.val[2] = vsri_n_u8 (res.val[2], res.val[2], 5);
-
-    return res;
-}
-
-#ifdef USE_GCC_INLINE_ASM
-/* Some versions of gcc have problems with vshll_n_u8 intrinsic (Bug 23576) */
-#define vshll_n_u8(a, n) ({ uint16x8_t r; \
-    asm ("vshll.u8 %q0, %P1, %2\n" : "=w" (r) : "w" (a), "i" (n)); r; })
-#endif
-
-static force_inline uint16x8_t
-pack0565 (uint8x8x4_t s)
-{
-    uint16x8_t rgb, val_g, val_r;
-
-    rgb = vshll_n_u8 (s.val[2], 8);
-    val_g = vshll_n_u8 (s.val[1], 8);
-    val_r = vshll_n_u8 (s.val[0], 8);
-    rgb = vsriq_n_u16 (rgb, val_g, 5);
-    rgb = vsriq_n_u16 (rgb, val_r, 5 + 6);
-
-    return rgb;
-}
-
-static force_inline uint8x8_t
-neon2mul (uint8x8_t x,
-          uint8x8_t alpha)
-{
-    uint16x8_t tmp, tmp2;
-    uint8x8_t res;
-
-    tmp = vmull_u8 (x, alpha);
-    tmp2 = vrshrq_n_u16 (tmp, 8);
-    res = vraddhn_u16 (tmp, tmp2);
-
-    return res;
-}
-
-static force_inline uint8x8x4_t
-neon8mul (uint8x8x4_t x,
-          uint8x8_t   alpha)
-{
-    uint16x8x4_t tmp;
-    uint8x8x4_t res;
-    uint16x8_t qtmp1, qtmp2;
-
-    tmp.val[0] = vmull_u8 (x.val[0], alpha);
-    tmp.val[1] = vmull_u8 (x.val[1], alpha);
-    tmp.val[2] = vmull_u8 (x.val[2], alpha);
-    tmp.val[3] = vmull_u8 (x.val[3], alpha);
-
-    qtmp1 = vrshrq_n_u16 (tmp.val[0], 8);
-    qtmp2 = vrshrq_n_u16 (tmp.val[1], 8);
-    res.val[0] = vraddhn_u16 (tmp.val[0], qtmp1);
-    qtmp1 = vrshrq_n_u16 (tmp.val[2], 8);
-    res.val[1] = vraddhn_u16 (tmp.val[1], qtmp2);
-    qtmp2 = vrshrq_n_u16 (tmp.val[3], 8);
-    res.val[2] = vraddhn_u16 (tmp.val[2], qtmp1);
-    res.val[3] = vraddhn_u16 (tmp.val[3], qtmp2);
-
-    return res;
-}
-
-static force_inline uint8x8x4_t
-neon8qadd (uint8x8x4_t x,
-           uint8x8x4_t y)
-{
-    uint8x8x4_t res;
-
-    res.val[0] = vqadd_u8 (x.val[0], y.val[0]);
-    res.val[1] = vqadd_u8 (x.val[1], y.val[1]);
-    res.val[2] = vqadd_u8 (x.val[2], y.val[2]);
-    res.val[3] = vqadd_u8 (x.val[3], y.val[3]);
-
-    return res;
-}
-
-static void
-neon_composite_add_8000_8000 (pixman_implementation_t * impl,
-                              pixman_op_t               op,
-                              pixman_image_t *          src_image,
-                              pixman_image_t *          mask_image,
-                              pixman_image_t *          dst_image,
-                              int32_t                   src_x,
-                              int32_t                   src_y,
-                              int32_t                   mask_x,
-                              int32_t                   mask_y,
-                              int32_t                   dest_x,
-                              int32_t                   dest_y,
-                              int32_t                   width,
-                              int32_t                   height)
-{
-    uint8_t     *dst_line, *dst;
-    uint8_t     *src_line, *src;
-    int dst_stride, src_stride;
-    uint16_t w;
-
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
-
-    if (width >= 8)
-    {
-	/* Use overlapping 8-pixel method */
-	while (height--)
-	{
-	    uint8_t *keep_dst = 0;
-	    uint8x8_t sval, dval, temp;
-
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    src = src_line;
-	    src_line += src_stride;
-	    w = width;
-
-#ifndef USE_GCC_INLINE_ASM
-	    sval = vld1_u8 ((void *)src);
-	    dval = vld1_u8 ((void *)dst);
-	    keep_dst = dst;
-
-	    temp = vqadd_u8 (dval, sval);
-
-	    src += (w & 7);
-	    dst += (w & 7);
-	    w -= (w & 7);
-
-	    while (w)
-	    {
-		sval = vld1_u8 ((void *)src);
-		dval = vld1_u8 ((void *)dst);
-
-		vst1_u8 ((void *)keep_dst, temp);
-		keep_dst = dst;
-
-		temp = vqadd_u8 (dval, sval);
-
-		src += 8;
-		dst += 8;
-		w -= 8;
-	    }
-
-	    vst1_u8 ((void *)keep_dst, temp);
-#else
-	    asm volatile (
-/* avoid using d8-d15 (q4-q7) aapcs callee-save registers */
-	        "vld1.8  {d0}, [%[src]]\n\t"
-	        "vld1.8  {d4}, [%[dst]]\n\t"
-	        "mov     %[keep_dst], %[dst]\n\t"
-
-	        "and ip, %[w], #7\n\t"
-	        "add %[src], %[src], ip\n\t"
-	        "add %[dst], %[dst], ip\n\t"
-	        "subs %[w], %[w], ip\n\t"
-	        "b 9f\n\t"
-/* LOOP */
-	        "2:\n\t"
-	        "vld1.8  {d0}, [%[src]]!\n\t"
-	        "vld1.8  {d4}, [%[dst]]!\n\t"
-	        "vst1.8  {d20}, [%[keep_dst]]\n\t"
-	        "sub     %[keep_dst], %[dst], #8\n\t"
-	        "subs %[w], %[w], #8\n\t"
-	        "9:\n\t"
-	        "vqadd.u8 d20, d0, d4\n\t"
-
-	        "bne 2b\n\t"
-
-	        "1:\n\t"
-	        "vst1.8  {d20}, [%[keep_dst]]\n\t"
-
-		: [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "=r" (keep_dst)
-		:
-		: "ip", "cc", "memory", "d0", "d4",
-	        "d20"
-	        );
-#endif
-	}
-    }
-    else
-    {
-	const uint8_t nil = 0;
-	const uint8x8_t vnil = vld1_dup_u8 (&nil);
-
-	while (height--)
-	{
-	    uint8x8_t sval = vnil, dval = vnil;
-	    uint8_t *dst4 = 0, *dst2 = 0;
-
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    src = src_line;
-	    src_line += src_stride;
-	    w = width;
-
-	    if (w & 4)
-	    {
-		sval = vreinterpret_u8_u32 (
-		    vld1_lane_u32 ((void *)src, vreinterpret_u32_u8 (sval), 1));
-		dval = vreinterpret_u8_u32 (
-		    vld1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (dval), 1));
-
-		dst4 = dst;
-		src += 4;
-		dst += 4;
-	    }
-
-	    if (w & 2)
-	    {
-		sval = vreinterpret_u8_u16 (
-		    vld1_lane_u16 ((void *)src, vreinterpret_u16_u8 (sval), 1));
-		dval = vreinterpret_u8_u16 (
-		    vld1_lane_u16 ((void *)dst, vreinterpret_u16_u8 (dval), 1));
-
-		dst2 = dst;
-		src += 2;
-		dst += 2;
-	    }
-
-	    if (w & 1)
-	    {
-		sval = vld1_lane_u8 (src, sval, 1);
-		dval = vld1_lane_u8 (dst, dval, 1);
-	    }
-
-	    dval = vqadd_u8 (dval, sval);
-
-	    if (w & 1)
-		vst1_lane_u8 (dst, dval, 1);
-
-	    if (w & 2)
-		vst1_lane_u16 ((void *)dst2, vreinterpret_u16_u8 (dval), 1);
-
-	    if (w & 4)
-		vst1_lane_u32 ((void *)dst4, vreinterpret_u32_u8 (dval), 1);
-	}
-    }
-}
-
-static void
-neon_composite_over_8888_8888 (pixman_implementation_t * impl,
-                               pixman_op_t               op,
-                               pixman_image_t *          src_image,
-                               pixman_image_t *          mask_image,
-                               pixman_image_t *          dst_image,
-                               int32_t                   src_x,
-                               int32_t                   src_y,
-                               int32_t                   mask_x,
-                               int32_t                   mask_y,
-                               int32_t                   dest_x,
-                               int32_t                   dest_y,
-                               int32_t                   width,
-                               int32_t                   height)
-{
-    uint32_t    *dst_line, *dst;
-    uint32_t    *src_line, *src;
-    int dst_stride, src_stride;
-    uint32_t w;
-
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
-    if (width >= 8)
-    {
-	/* Use overlapping 8-pixel method */
-	while (height--)
-	{
-	    uint32_t *keep_dst = 0;
-	    uint8x8x4_t sval, dval, temp;
-
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    src = src_line;
-	    src_line += src_stride;
-	    w = width;
-
-#ifndef USE_GCC_INLINE_ASM
-	    sval = vld4_u8 ((void *)src);
-	    dval = vld4_u8 ((void *)dst);
-	    keep_dst = dst;
-
-	    temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
-	    temp = neon8qadd (sval, temp);
-
-	    src += (w & 7);
-	    dst += (w & 7);
-	    w -= (w & 7);
-
-	    while (w)
-	    {
-		sval = vld4_u8 ((void *)src);
-		dval = vld4_u8 ((void *)dst);
-
-		vst4_u8 ((void *)keep_dst, temp);
-		keep_dst = dst;
-
-		temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
-		temp = neon8qadd (sval, temp);
-
-		src += 8;
-		dst += 8;
-		w -= 8;
-	    }
-
-	    vst4_u8 ((void *)keep_dst, temp);
-#else
-	    asm volatile (
-/* avoid using d8-d15 (q4-q7) aapcs callee-save registers */
-	        "vld4.8  {d0-d3}, [%[src]]\n\t"
-	        "vld4.8  {d4-d7}, [%[dst]]\n\t"
-	        "mov     %[keep_dst], %[dst]\n\t"
-
-	        "and ip, %[w], #7\n\t"
-	        "add %[src], %[src], ip, LSL#2\n\t"
-	        "add %[dst], %[dst], ip, LSL#2\n\t"
-	        "subs %[w], %[w], ip\n\t"
-	        "b 9f\n\t"
-/* LOOP */
-	        "2:\n\t"
-	        "vld4.8  {d0-d3}, [%[src]]!\n\t"
-	        "vld4.8  {d4-d7}, [%[dst]]!\n\t"
-	        "vst4.8  {d20-d23}, [%[keep_dst]]\n\t"
-	        "sub     %[keep_dst], %[dst], #8*4\n\t"
-	        "subs %[w], %[w], #8\n\t"
-	        "9:\n\t"
-	        "vmvn.8  d31, d3\n\t"
-	        "vmull.u8 q10, d31, d4\n\t"
-	        "vmull.u8 q11, d31, d5\n\t"
-	        "vmull.u8 q12, d31, d6\n\t"
-	        "vmull.u8 q13, d31, d7\n\t"
-	        "vrshr.u16 q8, q10, #8\n\t"
-	        "vrshr.u16 q9, q11, #8\n\t"
-	        "vraddhn.u16 d20, q10, q8\n\t"
-	        "vraddhn.u16 d21, q11, q9\n\t"
-	        "vrshr.u16 q8, q12, #8\n\t"
-	        "vrshr.u16 q9, q13, #8\n\t"
-	        "vraddhn.u16 d22, q12, q8\n\t"
-	        "vraddhn.u16 d23, q13, q9\n\t"
-/* result in d20-d23 */
-	        "vqadd.u8 d20, d0, d20\n\t"
-	        "vqadd.u8 d21, d1, d21\n\t"
-	        "vqadd.u8 d22, d2, d22\n\t"
-	        "vqadd.u8 d23, d3, d23\n\t"
-
-	        "bne 2b\n\t"
-
-	        "1:\n\t"
-	        "vst4.8  {d20-d23}, [%[keep_dst]]\n\t"
-
-		: [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "=r" (keep_dst)
-		:
-		: "ip", "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
-	        "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23"
-	        );
-#endif
-	}
-    }
-    else
-    {
-	uint8x8_t alpha_selector = vreinterpret_u8_u64 (
-	    vcreate_u64 (0x0707070703030303ULL));
-
-	/* Handle width < 8 */
-	while (height--)
-	{
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    src = src_line;
-	    src_line += src_stride;
-	    w = width;
-
-	    while (w >= 2)
-	    {
-		uint8x8_t sval, dval;
-
-		/* two 32-bit pixels packed into D-reg; ad-hoc vectorization */
-		sval = vreinterpret_u8_u32 (vld1_u32 ((void *)src));
-		dval = vreinterpret_u8_u32 (vld1_u32 ((void *)dst));
-		dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
-		vst1_u8 ((void *)dst, vqadd_u8 (sval, dval));
-
-		src += 2;
-		dst += 2;
-		w -= 2;
-	    }
-
-	    if (w)
-	    {
-		uint8x8_t sval, dval;
-
-		/* single 32-bit pixel in lane 0 */
-		sval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)src));  /* only interested in lane 0 */
-		dval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)dst));  /* only interested in lane 0 */
-		dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
-		vst1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (vqadd_u8 (sval, dval)), 0);
-	    }
-	}
-    }
-}
-
-static void
-neon_composite_over_8888_n_8888 (pixman_implementation_t * impl,
-                                 pixman_op_t               op,
-                                 pixman_image_t *          src_image,
-                                 pixman_image_t *          mask_image,
-                                 pixman_image_t *          dst_image,
-                                 int32_t                   src_x,
-                                 int32_t                   src_y,
-                                 int32_t                   mask_x,
-                                 int32_t                   mask_y,
-                                 int32_t                   dest_x,
-                                 int32_t                   dest_y,
-                                 int32_t                   width,
-                                 int32_t                   height)
-{
-    uint32_t    *dst_line, *dst;
-    uint32_t    *src_line, *src;
-    uint32_t mask;
-    int dst_stride, src_stride;
-    uint32_t w;
-    uint8x8_t mask_alpha;
-
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-
-    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);
-    mask_alpha = vdup_n_u8 ((mask) >> 24);
-
-    if (width >= 8)
-    {
-	/* Use overlapping 8-pixel method */
-	while (height--)
-	{
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    src = src_line;
-	    src_line += src_stride;
-	    w = width;
-
-	    uint32_t *keep_dst = 0;
-
-#ifndef USE_GCC_INLINE_ASM
-	    uint8x8x4_t sval, dval, temp;
-
-	    sval = vld4_u8 ((void *)src);
-	    dval = vld4_u8 ((void *)dst);
-	    keep_dst = dst;
-
-	    sval = neon8mul (sval, mask_alpha);
-	    temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
-	    temp = neon8qadd (sval, temp);
-
-	    src += (w & 7);
-	    dst += (w & 7);
-	    w -= (w & 7);
-
-	    while (w)
-	    {
-		sval = vld4_u8 ((void *)src);
-		dval = vld4_u8 ((void *)dst);
-
-		vst4_u8 ((void *)keep_dst, temp);
-		keep_dst = dst;
-
-		sval = neon8mul (sval, mask_alpha);
-		temp = neon8mul (dval, vmvn_u8 (sval.val[3]));
-		temp = neon8qadd (sval, temp);
-
-		src += 8;
-		dst += 8;
-		w -= 8;
-	    }
-	    vst4_u8 ((void *)keep_dst, temp);
-#else
-	    asm volatile (
-/* avoid using d8-d15 (q4-q7) aapcs callee-save registers */
-	        "vdup.32      d30, %[mask]\n\t"
-	        "vdup.8       d30, d30[3]\n\t"
-
-	        "vld4.8       {d0-d3}, [%[src]]\n\t"
-	        "vld4.8       {d4-d7}, [%[dst]]\n\t"
-	        "mov  %[keep_dst], %[dst]\n\t"
-
-	        "and  ip, %[w], #7\n\t"
-	        "add  %[src], %[src], ip, LSL#2\n\t"
-	        "add  %[dst], %[dst], ip, LSL#2\n\t"
-	        "subs  %[w], %[w], ip\n\t"
-	        "b 9f\n\t"
-/* LOOP */
-	        "2:\n\t"
-	        "vld4.8       {d0-d3}, [%[src]]!\n\t"
-	        "vld4.8       {d4-d7}, [%[dst]]!\n\t"
-	        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-	        "sub  %[keep_dst], %[dst], #8*4\n\t"
-	        "subs  %[w], %[w], #8\n\t"
-
-	        "9:\n\t"
-	        "vmull.u8     q10, d30, d0\n\t"
-	        "vmull.u8     q11, d30, d1\n\t"
-	        "vmull.u8     q12, d30, d2\n\t"
-	        "vmull.u8     q13, d30, d3\n\t"
-	        "vrshr.u16    q8, q10, #8\n\t"
-	        "vrshr.u16    q9, q11, #8\n\t"
-	        "vraddhn.u16  d0, q10, q8\n\t"
-	        "vraddhn.u16  d1, q11, q9\n\t"
-	        "vrshr.u16    q9, q13, #8\n\t"
-	        "vrshr.u16    q8, q12, #8\n\t"
-	        "vraddhn.u16  d3, q13, q9\n\t"
-	        "vraddhn.u16  d2, q12, q8\n\t"
-
-	        "vmvn.8       d31, d3\n\t"
-	        "vmull.u8     q10, d31, d4\n\t"
-	        "vmull.u8     q11, d31, d5\n\t"
-	        "vmull.u8     q12, d31, d6\n\t"
-	        "vmull.u8     q13, d31, d7\n\t"
-	        "vrshr.u16    q8, q10, #8\n\t"
-	        "vrshr.u16    q9, q11, #8\n\t"
-	        "vraddhn.u16  d20, q10, q8\n\t"
-	        "vrshr.u16    q8, q12, #8\n\t"
-	        "vraddhn.u16  d21, q11, q9\n\t"
-	        "vrshr.u16    q9, q13, #8\n\t"
-	        "vraddhn.u16  d22, q12, q8\n\t"
-	        "vraddhn.u16  d23, q13, q9\n\t"
-
-/* result in d20-d23 */
-	        "vqadd.u8     d20, d0, d20\n\t"
-	        "vqadd.u8     d21, d1, d21\n\t"
-	        "vqadd.u8     d22, d2, d22\n\t"
-	        "vqadd.u8     d23, d3, d23\n\t"
-
-	        "bne  2b\n\t"
-
-	        "1:\n\t"
-	        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-
-		: [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "=r" (keep_dst)
-		: [mask] "r" (mask)
-		: "ip", "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
-	        "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27",
-	        "d30", "d31"
-	        );
-#endif
-	}
-    }
-    else
-    {
-	uint8x8_t alpha_selector = vreinterpret_u8_u64 (vcreate_u64 (0x0707070703030303ULL));
-
-	/* Handle width < 8 */
-	while (height--)
-	{
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    src = src_line;
-	    src_line += src_stride;
-	    w = width;
-
-	    while (w >= 2)
-	    {
-		uint8x8_t sval, dval;
-
-		sval = vreinterpret_u8_u32 (vld1_u32 ((void *)src));
-		dval = vreinterpret_u8_u32 (vld1_u32 ((void *)dst));
-
-		/* sval * const alpha_mul */
-		sval = neon2mul (sval, mask_alpha);
-
-		/* dval * 255-(src alpha) */
-		dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
-
-		vst1_u8 ((void *)dst, vqadd_u8 (sval, dval));
-
-		src += 2;
-		dst += 2;
-		w -= 2;
-	    }
-
-	    if (w)
-	    {
-		uint8x8_t sval, dval;
-
-		sval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)src));
-		dval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)dst));
-
-		/* sval * const alpha_mul */
-		sval = neon2mul (sval, mask_alpha);
-
-		/* dval * 255-(src alpha) */
-		dval = neon2mul (dval, vtbl1_u8 (vmvn_u8 (sval), alpha_selector));
-
-		vst1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (vqadd_u8 (sval, dval)), 0);
-	    }
-	}
-    }
-}
-
-static void
-neon_composite_over_n_8_0565 (pixman_implementation_t * impl,
-			      pixman_op_t               op,
-			      pixman_image_t *          src_image,
-			      pixman_image_t *          mask_image,
-			      pixman_image_t *          dst_image,
-			      int32_t                   src_x,
-			      int32_t                   src_y,
-			      int32_t                   mask_x,
-			      int32_t                   mask_y,
-			      int32_t                   dest_x,
-			      int32_t                   dest_y,
-			      int32_t                   width,
-			      int32_t                   height)
-{
-    uint32_t     src, srca;
-    uint16_t    *dst_line, *dst;
-    uint8_t     *mask_line, *mask;
-    int          dst_stride, mask_stride;
-    uint32_t     w;
-    uint8x8_t    sval2;
-    uint8x8x4_t  sval8;
-
-    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-
-    srca = src >> 24;
-    if (src == 0)
-	return;
-
-    sval2=vreinterpret_u8_u32 (vdup_n_u32 (src));
-    sval8.val[0]=vdup_lane_u8 (sval2,0);
-    sval8.val[1]=vdup_lane_u8 (sval2,1);
-    sval8.val[2]=vdup_lane_u8 (sval2,2);
-    sval8.val[3]=vdup_lane_u8 (sval2,3);
-
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
-    if (width>=8)
-    {
-	/* Use overlapping 8-pixel method, modified to avoid rewritten dest being reused */
-	while (height--)
-	{
-	    uint16_t *keep_dst=0;
-
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    mask = mask_line;
-	    mask_line += mask_stride;
-	    w = width;
-
-#ifndef USE_GCC_INLINE_ASM
-	    uint8x8_t alpha;
-	    uint16x8_t dval, temp;
-	    uint8x8x4_t sval8temp;
-
-	    alpha = vld1_u8 ((void *)mask);
-	    dval = vld1q_u16 ((void *)dst);
-	    keep_dst = dst;
-
-	    sval8temp = neon8mul (sval8, alpha);
-	    temp = pack0565 (neon8qadd (sval8temp, neon8mul (unpack0565 (dval), vmvn_u8 (sval8temp.val[3]))));
-
-	    mask += (w & 7);
-	    dst += (w & 7);
-	    w -= (w & 7);
-
-	    while (w)
-	    {
-		dval = vld1q_u16 ((void *)dst);
-		alpha = vld1_u8 ((void *)mask);
-
-		vst1q_u16 ((void *)keep_dst, temp);
-		keep_dst = dst;
-
-		sval8temp = neon8mul (sval8, alpha);
-		temp = pack0565 (neon8qadd (sval8temp, neon8mul (unpack0565 (dval), vmvn_u8 (sval8temp.val[3]))));
-
-		mask+=8;
-		dst+=8;
-		w-=8;
-	    }
-	    vst1q_u16 ((void *)keep_dst, temp);
-#else
-	    asm volatile (
-		"vdup.32      d0, %[src]\n\t"
-		"vdup.8       d1, d0[1]\n\t"
-		"vdup.8       d2, d0[2]\n\t"
-		"vdup.8       d3, d0[3]\n\t"
-		"vdup.8       d0, d0[0]\n\t"
-
-		"vld1.8       {q12}, [%[dst]]\n\t"
-		"vld1.8       {d31}, [%[mask]]\n\t"
-		"mov  %[keep_dst], %[dst]\n\t"
-
-		"and  ip, %[w], #7\n\t"
-		"add  %[mask], %[mask], ip\n\t"
-		"add  %[dst], %[dst], ip, LSL#1\n\t"
-		"subs  %[w], %[w], ip\n\t"
-		"b  9f\n\t"
-/* LOOP */
-		"2:\n\t"
-
-		"vld1.16      {q12}, [%[dst]]!\n\t"
-		"vld1.8       {d31}, [%[mask]]!\n\t"
-		"vst1.16      {q10}, [%[keep_dst]]\n\t"
-		"sub  %[keep_dst], %[dst], #8*2\n\t"
-		"subs  %[w], %[w], #8\n\t"
-		"9:\n\t"
-/* expand 0565 q12 to 8888 {d4-d7} */
-		"vmovn.u16    d4, q12\t\n"
-		"vshr.u16     q11, q12, #5\t\n"
-		"vshr.u16     q10, q12, #6+5\t\n"
-		"vmovn.u16    d5, q11\t\n"
-		"vmovn.u16    d6, q10\t\n"
-		"vshl.u8      d4, d4, #3\t\n"
-		"vshl.u8      d5, d5, #2\t\n"
-		"vshl.u8      d6, d6, #3\t\n"
-		"vsri.u8      d4, d4, #5\t\n"
-		"vsri.u8      d5, d5, #6\t\n"
-		"vsri.u8      d6, d6, #5\t\n"
-
-		"vmull.u8     q10, d31, d0\n\t"
-		"vmull.u8     q11, d31, d1\n\t"
-		"vmull.u8     q12, d31, d2\n\t"
-		"vmull.u8     q13, d31, d3\n\t"
-		"vrshr.u16    q8, q10, #8\n\t"
-		"vrshr.u16    q9, q11, #8\n\t"
-		"vraddhn.u16  d20, q10, q8\n\t"
-		"vraddhn.u16  d21, q11, q9\n\t"
-		"vrshr.u16    q9, q13, #8\n\t"
-		"vrshr.u16    q8, q12, #8\n\t"
-		"vraddhn.u16  d23, q13, q9\n\t"
-		"vraddhn.u16  d22, q12, q8\n\t"
-
-/* duplicate in 4/2/1 & 8pix vsns */
-		"vmvn.8       d30, d23\n\t"
-		"vmull.u8     q14, d30, d6\n\t"
-		"vmull.u8     q13, d30, d5\n\t"
-		"vmull.u8     q12, d30, d4\n\t"
-		"vrshr.u16    q8, q14, #8\n\t"
-		"vrshr.u16    q9, q13, #8\n\t"
-		"vraddhn.u16  d6, q14, q8\n\t"
-		"vrshr.u16    q8, q12, #8\n\t"
-		"vraddhn.u16  d5, q13, q9\n\t"
-		"vqadd.u8     d6, d6, d22\n\t"  /* moved up */
-		"vraddhn.u16  d4, q12, q8\n\t"
-/* intentionally don't calculate alpha */
-/* result in d4-d6 */
-
-/*              "vqadd.u8     d6, d6, d22\n\t"  ** moved up */
-		"vqadd.u8     d5, d5, d21\n\t"
-		"vqadd.u8     d4, d4, d20\n\t"
-
-/* pack 8888 {d20-d23} to 0565 q10 */
-		"vshll.u8     q10, d6, #8\n\t"
-		"vshll.u8     q3, d5, #8\n\t"
-		"vshll.u8     q2, d4, #8\n\t"
-		"vsri.u16     q10, q3, #5\t\n"
-		"vsri.u16     q10, q2, #11\t\n"
-
-		"bne 2b\n\t"
-
-		"1:\n\t"
-		"vst1.16      {q10}, [%[keep_dst]]\n\t"
-
-		: [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [keep_dst] "=r" (keep_dst)
-		: [src] "r" (src)
-		: "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
-		  "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
-		  "d30","d31"
-		);
-#endif
-	}
-    }
-    else
-    {
-	while (height--)
-	{
-	    void *dst4=0, *dst2=0;
-
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    mask = mask_line;
-	    mask_line += mask_stride;
-	    w = width;
-
-
-#if 1 /* #ifndef USE_GCC_INLINE_ASM */
-	    uint8x8_t alpha;
-	    uint16x8_t dval, temp;
-	    uint8x8x4_t sval8temp;
-
-	    if (w&4)
-	    {
-		alpha = vreinterpret_u8_u32 (vld1_lane_u32 ((void *)mask, vreinterpret_u32_u8 (alpha),1));
-		dval = vreinterpretq_u16_u64 (vld1q_lane_u64 ((void *)dst, vreinterpretq_u64_u16 (dval),1));
-		dst4=dst;
-		mask+=4;
-		dst+=4;
-	    }
-	    if (w&2)
-	    {
-		alpha = vreinterpret_u8_u16 (vld1_lane_u16 ((void *)mask, vreinterpret_u16_u8 (alpha),1));
-		dval = vreinterpretq_u16_u32 (vld1q_lane_u32 ((void *)dst, vreinterpretq_u32_u16 (dval),1));
-		dst2=dst;
-		mask+=2;
-		dst+=2;
-	    }
-	    if (w&1)
-	    {
-		alpha = vld1_lane_u8 ((void *)mask, alpha,1);
-		dval = vld1q_lane_u16 ((void *)dst, dval,1);
-	    }
-
-	    sval8temp = neon8mul (sval8, alpha);
-	    temp = pack0565 (neon8qadd (sval8temp, neon8mul (unpack0565 (dval), vmvn_u8 (sval8temp.val[3]))));
-
-	    if (w&1)
-		vst1q_lane_u16 ((void *)dst, temp,1);
-	    if (w&2)
-		vst1q_lane_u32 ((void *)dst2, vreinterpretq_u32_u16 (temp),1);
-	    if (w&4)
-		vst1q_lane_u64 ((void *)dst4, vreinterpretq_u64_u16 (temp),1);
-#else
-	    /* this code has some bug (does not pass blitters-test) */
-	    asm volatile (
-		"vdup.32      d0, %[src]\n\t"
-		"vdup.8       d1, d0[1]\n\t"
-		"vdup.8       d2, d0[2]\n\t"
-		"vdup.8       d3, d0[3]\n\t"
-		"vdup.8       d0, d0[0]\n\t"
-
-		"tst  %[w], #4\t\n"
-		"beq  skip_load4\t\n"
-
-		"vld1.64      {d25}, [%[dst]]\n\t"
-		"vld1.32      {d31[1]}, [%[mask]]\n\t"
-		"mov  %[dst4], %[dst]\t\n"
-		"add  %[mask], %[mask], #4\t\n"
-		"add  %[dst], %[dst], #4*2\t\n"
-
-		"skip_load4:\t\n"
-		"tst  %[w], #2\t\n"
-		"beq  skip_load2\t\n"
-		"vld1.32      {d24[1]}, [%[dst]]\n\t"
-		"vld1.16      {d31[1]}, [%[mask]]\n\t"
-		"mov  %[dst2], %[dst]\t\n"
-		"add  %[mask], %[mask], #2\t\n"
-		"add  %[dst], %[dst], #2*2\t\n"
-
-		"skip_load2:\t\n"
-		"tst  %[w], #1\t\n"
-		"beq  skip_load1\t\n"
-		"vld1.16      {d24[1]}, [%[dst]]\n\t"
-		"vld1.8       {d31[1]}, [%[mask]]\n\t"
-
-		"skip_load1:\t\n"
-/* expand 0565 q12 to 8888 {d4-d7} */
-		"vmovn.u16    d4, q12\t\n"
-		"vshr.u16     q11, q12, #5\t\n"
-		"vshr.u16     q10, q12, #6+5\t\n"
-		"vmovn.u16    d5, q11\t\n"
-		"vmovn.u16    d6, q10\t\n"
-		"vshl.u8      d4, d4, #3\t\n"
-		"vshl.u8      d5, d5, #2\t\n"
-		"vshl.u8      d6, d6, #3\t\n"
-		"vsri.u8      d4, d4, #5\t\n"
-		"vsri.u8      d5, d5, #6\t\n"
-		"vsri.u8      d6, d6, #5\t\n"
-
-		"vmull.u8     q10, d31, d0\n\t"
-		"vmull.u8     q11, d31, d1\n\t"
-		"vmull.u8     q12, d31, d2\n\t"
-		"vmull.u8     q13, d31, d3\n\t"
-		"vrshr.u16    q8, q10, #8\n\t"
-		"vrshr.u16    q9, q11, #8\n\t"
-		"vraddhn.u16  d20, q10, q8\n\t"
-		"vraddhn.u16  d21, q11, q9\n\t"
-		"vrshr.u16    q9, q13, #8\n\t"
-		"vrshr.u16    q8, q12, #8\n\t"
-		"vraddhn.u16  d23, q13, q9\n\t"
-		"vraddhn.u16  d22, q12, q8\n\t"
-
-/* duplicate in 4/2/1 & 8pix vsns */
-		"vmvn.8       d30, d23\n\t"
-		"vmull.u8     q14, d30, d6\n\t"
-		"vmull.u8     q13, d30, d5\n\t"
-		"vmull.u8     q12, d30, d4\n\t"
-		"vrshr.u16    q8, q14, #8\n\t"
-		"vrshr.u16    q9, q13, #8\n\t"
-		"vraddhn.u16  d6, q14, q8\n\t"
-		"vrshr.u16    q8, q12, #8\n\t"
-		"vraddhn.u16  d5, q13, q9\n\t"
-		"vqadd.u8     d6, d6, d22\n\t"  /* moved up */
-		"vraddhn.u16  d4, q12, q8\n\t"
-/* intentionally don't calculate alpha */
-/* result in d4-d6 */
-
-/*              "vqadd.u8     d6, d6, d22\n\t"  ** moved up */
-		"vqadd.u8     d5, d5, d21\n\t"
-		"vqadd.u8     d4, d4, d20\n\t"
-
-/* pack 8888 {d20-d23} to 0565 q10 */
-		"vshll.u8     q10, d6, #8\n\t"
-		"vshll.u8     q3, d5, #8\n\t"
-		"vshll.u8     q2, d4, #8\n\t"
-		"vsri.u16     q10, q3, #5\t\n"
-		"vsri.u16     q10, q2, #11\t\n"
-
-		"tst  %[w], #1\n\t"
-		"beq skip_store1\t\n"
-		"vst1.16      {d20[1]}, [%[dst]]\t\n"
-		"skip_store1:\t\n"
-		"tst  %[w], #2\n\t"
-		"beq  skip_store2\t\n"
-		"vst1.32      {d20[1]}, [%[dst2]]\t\n"
-		"skip_store2:\t\n"
-		"tst  %[w], #4\n\t"
-		"beq skip_store4\t\n"
-		"vst1.16      {d21}, [%[dst4]]\t\n"
-		"skip_store4:\t\n"
-
-		: [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [dst4] "+r" (dst4), [dst2] "+r" (dst2)
-		: [src] "r" (src)
-		: "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
-		  "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
-		  "d30","d31"
-		);
-#endif
-	}
-    }
-}
-
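The "expand 0565 ... to 8888" and "pack 8888 ... to 0565" blocks above widen r5g6b5 pixels to 8 bits per channel by replicating the top bits into the low bits (vshl + vsri) and narrow them again by keeping only the top 5/6/5 bits of each channel (vshll + vsri). A minimal scalar sketch of the same conversion, with illustrative helper names only:

    #include <stdint.h>

    static inline uint32_t
    expand_0565_to_8888 (uint16_t p)
    {
        uint32_t r = (p >> 11) & 0x1f;
        uint32_t g = (p >>  5) & 0x3f;
        uint32_t b =  p        & 0x1f;

        /* replicate the high bits into the low bits, as vshl + vsri do */
        r = (r << 3) | (r >> 2);
        g = (g << 2) | (g >> 4);
        b = (b << 3) | (b >> 2);

        return (r << 16) | (g << 8) | b;    /* x8r8g8b8 layout, alpha ignored */
    }

    static inline uint16_t
    pack_8888_to_0565 (uint32_t p)
    {
        /* keep the top 5/6/5 bits of each channel, as vshll + vsri do */
        return (uint16_t) (((p >> 8) & 0xf800) |
                           ((p >> 5) & 0x07e0) |
                           ((p >> 3) & 0x001f));
    }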
-static void
-neon_composite_over_n_8_8888 (pixman_implementation_t * impl,
-                              pixman_op_t               op,
-                              pixman_image_t *          src_image,
-                              pixman_image_t *          mask_image,
-                              pixman_image_t *          dst_image,
-                              int32_t                   src_x,
-                              int32_t                   src_y,
-                              int32_t                   mask_x,
-                              int32_t                   mask_y,
-                              int32_t                   dest_x,
-                              int32_t                   dest_y,
-                              int32_t                   width,
-                              int32_t                   height)
-{
-    uint32_t src, srca;
-    uint32_t    *dst_line, *dst;
-    uint8_t     *mask_line, *mask;
-    int dst_stride, mask_stride;
-    uint32_t w;
-    uint8x8_t sval2;
-    uint8x8x4_t sval8;
-    uint8x8_t mask_selector = vreinterpret_u8_u64 (vcreate_u64 (0x0101010100000000ULL));
-    uint8x8_t alpha_selector = vreinterpret_u8_u64 (vcreate_u64 (0x0707070703030303ULL));
-
-    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-    
-    /* bail out if fully transparent */
-    srca = src >> 24;
-    if (src == 0)
-	return;
-
-    sval2 = vreinterpret_u8_u32 (vdup_n_u32 (src));
-    sval8.val[0] = vdup_lane_u8 (sval2, 0);
-    sval8.val[1] = vdup_lane_u8 (sval2, 1);
-    sval8.val[2] = vdup_lane_u8 (sval2, 2);
-    sval8.val[3] = vdup_lane_u8 (sval2, 3);
-
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-
-    if (width >= 8)
-    {
-	/* Use overlapping 8-pixel method, modified to avoid
-	 * rewritten dest being reused
-	 */
-	while (height--)
-	{
-	    uint32_t *keep_dst = 0;
-
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    mask = mask_line;
-	    mask_line += mask_stride;
-	    w = width;
-
-#ifndef USE_GCC_INLINE_ASM
-	    uint8x8_t alpha;
-	    uint8x8x4_t dval, temp;
-
-	    alpha = vld1_u8 ((void *)mask);
-	    dval = vld4_u8 ((void *)dst);
-	    keep_dst = dst;
-
-	    temp = neon8mul (sval8, alpha);
-	    dval = neon8mul (dval, vmvn_u8 (temp.val[3]));
-	    temp = neon8qadd (temp, dval);
-
-	    mask += (w & 7);
-	    dst += (w & 7);
-	    w -= (w & 7);
-
-	    while (w)
-	    {
-		alpha = vld1_u8 ((void *)mask);
-		dval = vld4_u8 ((void *)dst);
-
-		vst4_u8 ((void *)keep_dst, temp);
-		keep_dst = dst;
-
-		temp = neon8mul (sval8, alpha);
-		dval = neon8mul (dval, vmvn_u8 (temp.val[3]));
-		temp = neon8qadd (temp, dval);
-
-		mask += 8;
-		dst += 8;
-		w -= 8;
-	    }
-	    vst4_u8 ((void *)keep_dst, temp);
-#else
-	    asm volatile (
-	        "vdup.32      d0, %[src]\n\t"
-	        "vdup.8       d1, d0[1]\n\t"
-	        "vdup.8       d2, d0[2]\n\t"
-	        "vdup.8       d3, d0[3]\n\t"
-	        "vdup.8       d0, d0[0]\n\t"
-
-	        "vld4.8       {d4-d7}, [%[dst]]\n\t"
-	        "vld1.8       {d31}, [%[mask]]\n\t"
-	        "mov  %[keep_dst], %[dst]\n\t"
-
-	        "and  ip, %[w], #7\n\t"
-	        "add  %[mask], %[mask], ip\n\t"
-	        "add  %[dst], %[dst], ip, LSL#2\n\t"
-	        "subs  %[w], %[w], ip\n\t"
-	        "b 9f\n\t"
-/* LOOP */
-	        "2:\n\t"
-	        "vld4.8       {d4-d7}, [%[dst]]!\n\t"
-	        "vld1.8       {d31}, [%[mask]]!\n\t"
-	        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-	        "sub  %[keep_dst], %[dst], #8*4\n\t"
-	        "subs  %[w], %[w], #8\n\t"
-	        "9:\n\t"
-
-	        "vmull.u8     q10, d31, d0\n\t"
-	        "vmull.u8     q11, d31, d1\n\t"
-	        "vmull.u8     q12, d31, d2\n\t"
-	        "vmull.u8     q13, d31, d3\n\t"
-	        "vrshr.u16    q8, q10, #8\n\t"
-	        "vrshr.u16    q9, q11, #8\n\t"
-	        "vraddhn.u16  d20, q10, q8\n\t"
-	        "vraddhn.u16  d21, q11, q9\n\t"
-	        "vrshr.u16    q9, q13, #8\n\t"
-	        "vrshr.u16    q8, q12, #8\n\t"
-	        "vraddhn.u16  d23, q13, q9\n\t"
-	        "vraddhn.u16  d22, q12, q8\n\t"
-
-	        "vmvn.8       d30, d23\n\t"
-	        "vmull.u8     q12, d30, d4\n\t"
-	        "vmull.u8     q13, d30, d5\n\t"
-	        "vmull.u8     q14, d30, d6\n\t"
-	        "vmull.u8     q15, d30, d7\n\t"
-
-	        "vrshr.u16    q8, q12, #8\n\t"
-	        "vrshr.u16    q9, q13, #8\n\t"
-	        "vraddhn.u16  d4, q12, q8\n\t"
-	        "vrshr.u16    q8, q14, #8\n\t"
-	        "vraddhn.u16  d5, q13, q9\n\t"
-	        "vrshr.u16    q9, q15, #8\n\t"
-	        "vraddhn.u16  d6, q14, q8\n\t"
-	        "vraddhn.u16  d7, q15, q9\n\t"
-/* result in d4-d7 */
-
-	        "vqadd.u8     d20, d4, d20\n\t"
-	        "vqadd.u8     d21, d5, d21\n\t"
-	        "vqadd.u8     d22, d6, d22\n\t"
-	        "vqadd.u8     d23, d7, d23\n\t"
-
-	        "bne 2b\n\t"
-
-	        "1:\n\t"
-	        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-
-		: [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [keep_dst] "=r" (keep_dst)
-		: [src] "r" (src)
-		: "ip", "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
-	        "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29",
-	        "d30", "d31"
-	        );
-#endif
-	}
-    }
-    else
-    {
-	while (height--)
-	{
-	    uint8x8_t alpha;
-
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    mask = mask_line;
-	    mask_line += mask_stride;
-	    w = width;
-
-	    while (w >= 2)
-	    {
-		uint8x8_t dval, temp, res;
-
-		alpha = vtbl1_u8 (
-		    vreinterpret_u8_u16 (vld1_dup_u16 ((void *)mask)), mask_selector);
-		dval = vld1_u8 ((void *)dst);
-
-		temp = neon2mul (sval2, alpha);
-		res = vqadd_u8 (
-		    temp, neon2mul (dval, vtbl1_u8 (vmvn_u8 (temp), alpha_selector)));
-
-		vst1_u8 ((void *)dst, res);
-
-		mask += 2;
-		dst += 2;
-		w -= 2;
-	    }
-
-	    if (w)
-	    {
-		uint8x8_t dval, temp, res;
-
-		alpha = vtbl1_u8 (vld1_dup_u8 ((void *)mask), mask_selector);
-		dval = vreinterpret_u8_u32 (vld1_dup_u32 ((void *)dst));
-
-		temp = neon2mul (sval2, alpha);
-		res = vqadd_u8 (
-		    temp, neon2mul (dval, vtbl1_u8 (vmvn_u8 (temp), alpha_selector)));
-
-		vst1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (res), 0);
-	    }
-	}
-    }
-}
-
-static void
-neon_composite_add_n_8_8 (pixman_implementation_t * impl,
-			  pixman_op_t               op,
-			  pixman_image_t *          src_image,
-			  pixman_image_t *          mask_image,
-			  pixman_image_t *          dst_image,
-			  int32_t                   src_x,
-			  int32_t                   src_y,
-			  int32_t                   mask_x,
-			  int32_t                   mask_y,
-			  int32_t                   dest_x,
-			  int32_t                   dest_y,
-			  int32_t                   width,
-			  int32_t                   height)
-{
-    uint8_t     *dst_line, *dst;
-    uint8_t     *mask_line, *mask;
-    int dst_stride, mask_stride;
-    uint32_t w;
-    uint32_t src;
-    uint8x8_t sa;
-
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
-    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
-    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
-    sa = vdup_n_u8 ((src) >> 24);
-
-    if (width >= 8)
-    {
-	/* Use overlapping 8-pixel method, modified to avoid rewritten dest being reused */
-	while (height--)
-	{
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    mask = mask_line;
-	    mask_line += mask_stride;
-	    w = width;
-
-	    uint8x8_t mval, dval, res;
-	    uint8_t     *keep_dst;
-
-	    mval = vld1_u8 ((void *)mask);
-	    dval = vld1_u8 ((void *)dst);
-	    keep_dst = dst;
-
-	    res = vqadd_u8 (neon2mul (mval, sa), dval);
-
-	    mask += (w & 7);
-	    dst += (w & 7);
-	    w -= w & 7;
-
-	    while (w)
-	    {
-		mval = vld1_u8 ((void *)mask);
-		dval = vld1_u8 ((void *)dst);
-		vst1_u8 ((void *)keep_dst, res);
-		keep_dst = dst;
-
-		res = vqadd_u8 (neon2mul (mval, sa), dval);
-
-		mask += 8;
-		dst += 8;
-		w -= 8;
-	    }
-	    vst1_u8 ((void *)keep_dst, res);
-	}
-    }
-    else
-    {
-	/* Use 4/2/1 load/store method to handle 1-7 pixels */
-	while (height--)
-	{
-	    dst = dst_line;
-	    dst_line += dst_stride;
-	    mask = mask_line;
-	    mask_line += mask_stride;
-	    w = width;
-
-	    uint8x8_t mval = sa, dval = sa, res;
-	    uint8_t *dst4 = 0, *dst2 = 0;
-
-	    if (w & 4)
-	    {
-		mval = vreinterpret_u8_u32 (
-		    vld1_lane_u32 ((void *)mask, vreinterpret_u32_u8 (mval), 1));
-		dval = vreinterpret_u8_u32 (
-		    vld1_lane_u32 ((void *)dst, vreinterpret_u32_u8 (dval), 1));
-
-		dst4 = dst;
-		mask += 4;
-		dst += 4;
-	    }
-
-	    if (w & 2)
-	    {
-		mval = vreinterpret_u8_u16 (
-		    vld1_lane_u16 ((void *)mask, vreinterpret_u16_u8 (mval), 1));
-		dval = vreinterpret_u8_u16 (
-		    vld1_lane_u16 ((void *)dst, vreinterpret_u16_u8 (dval), 1));
-		dst2 = dst;
-		mask += 2;
-		dst += 2;
-	    }
-
-	    if (w & 1)
-	    {
-		mval = vld1_lane_u8 (mask, mval, 1);
-		dval = vld1_lane_u8 (dst, dval, 1);
-	    }
-
-	    res = vqadd_u8 (neon2mul (mval, sa), dval);
-
-	    if (w & 1)
-		vst1_lane_u8 (dst, res, 1);
-	    if (w & 2)
-		vst1_lane_u16 ((void *)dst2, vreinterpret_u16_u8 (res), 1);
-	    if (w & 4)
-		vst1_lane_u32 ((void *)dst4, vreinterpret_u32_u8 (res), 1);
-	}
-    }
-}
-
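Both the intrinsics and the assembly paths above rely on the same "overlapping 8-pixel" pattern for width >= 8: the first iteration covers pixels 0..7, the pointers then advance by only (width & 7), and each result is stored one iteration late, after the next block's loads, so the overlapping re-read of the destination never sees data that has already been rewritten. A minimal sketch of the pattern for a plain saturating 8-bit add, assuming w >= 8 (illustration only, not pixman API):

    #include <arm_neon.h>
    #include <stdint.h>

    static void
    add_8_8_overlap_sketch (uint8_t *dst, const uint8_t *src, int w)
    {
        uint8x8_t s = vld1_u8 (src);
        uint8x8_t d = vld1_u8 (dst);
        uint8_t  *keep_dst = dst;
        uint8x8_t res = vqadd_u8 (s, d);

        /* advance by the leading 1-7 pixels; later blocks overlap this one */
        src += w & 7;
        dst += w & 7;
        w   -= w & 7;

        while (w)
        {
            s = vld1_u8 (src);
            d = vld1_u8 (dst);
            vst1_u8 (keep_dst, res);    /* store the previous block after the loads */
            keep_dst = dst;
            res = vqadd_u8 (s, d);
            src += 8;
            dst += 8;
            w   -= 8;
        }
        vst1_u8 (keep_dst, res);        /* flush the last pending block */
    }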
-#ifdef USE_GCC_INLINE_ASM
-
-static void
-neon_composite_src_16_16 (pixman_implementation_t * impl,
-                          pixman_op_t               op,
-                          pixman_image_t *          src_image,
-                          pixman_image_t *          mask_image,
-                          pixman_image_t *          dst_image,
-                          int32_t                   src_x,
-                          int32_t                   src_y,
-                          int32_t                   mask_x,
-                          int32_t                   mask_y,
-                          int32_t                   dest_x,
-                          int32_t                   dest_y,
-                          int32_t                   width,
-                          int32_t                   height)
-{
-    uint16_t    *dst_line, *src_line;
-    uint32_t dst_stride, src_stride;
-
-    if (!height || !width)
-	return;
-
-    /* We simply copy 16-bit-aligned pixels from one place to another. */
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-
-    /* Preload the first input scanline */
-    {
-	uint16_t *src_ptr = src_line;
-	uint32_t count = width;
-
-	asm volatile (
-	    "0: @ loop							\n"
-	    "	subs    %[count], %[count], #32				\n"
-	    "	pld     [%[src]]					\n"
-	    "	add     %[src], %[src], #64				\n"
-	    "	bgt 0b							\n"
-
-	    /* Clobbered input registers marked as input/outputs */
-	    : [src] "+r" (src_ptr), [count] "+r" (count)
-	    :     /* no unclobbered inputs */
-	    : "cc"
-	    );
-    }
-
-    while (height--)
-    {
-	uint16_t *dst_ptr = dst_line;
-	uint16_t *src_ptr = src_line;
-	uint32_t count = width;
-	uint32_t tmp = 0;
-
-	/* Uses multi-register access and preloading to maximise bandwidth.
-	 * Each pixel is one halfword, so a quadword contains 8px.
-	 * Preload frequency assumed a 64-byte cacheline.
-	 */
-	asm volatile (
-	    "	cmp       %[count], #64				\n"
-	    "	blt 1f    @ skip oversized fragments		\n"
-	    "0: @ start with eight quadwords at a time		\n"
-	    /* preload from next scanline */
-	    "	pld       [%[src], %[src_stride], LSL #1]	\n"
-	    "	sub       %[count], %[count], #64		\n"
-	    "	vld1.16   {d16, d17, d18, d19}, [%[src]]!		\n"
-	    "	vld1.16   {d20, d21, d22, d23}, [%[src]]!		\n"
-	    /* preload from next scanline */
-	    "	pld       [%[src], %[src_stride], LSL #1]	\n"
-	    "	vld1.16   {d24, d25, d26, d27}, [%[src]]!		\n"
-	    "	vld1.16   {d28, d29, d30, d31}, [%[src]]!		\n"
-	    "	cmp       %[count], #64				\n"
-	    "	vst1.16   {d16, d17, d18, d19}, [%[dst]]!		\n"
-	    "	vst1.16   {d20, d21, d22, d23}, [%[dst]]!		\n"
-	    "	vst1.16   {d24, d25, d26, d27}, [%[dst]]!		\n"
-	    "	vst1.16   {d28, d29, d30, d31}, [%[dst]]!		\n"
-	    "	bge 0b						\n"
-	    "	cmp       %[count], #0				\n"
-	    "	beq 7f    @ aligned fastpath			\n"
-	    "1: @ four quadwords				\n"
-	    "	tst       %[count], #32				\n"
-	    "	beq 2f    @ skip oversized fragment		\n"
-	    /* preload from next scanline */
-	    "	pld       [%[src], %[src_stride], LSL #1]	\n"
-	    "	vld1.16   {d16, d17, d18, d19}, [%[src]]!		\n"
-	    "	vld1.16   {d20, d21, d22, d23}, [%[src]]!		\n"
-	    "	vst1.16   {d16, d17, d18, d19}, [%[dst]]!		\n"
-	    "	vst1.16   {d20, d21, d22, d23}, [%[dst]]!		\n"
-	    "2: @ two quadwords					\n"
-	    "	tst       %[count], #16				\n"
-	    "	beq 3f    @ skip oversized fragment		\n"
-	    /* preload from next scanline */
-	    "	pld       [%[src], %[src_stride], LSL #1]	\n"
-	    "	vld1.16   {d16, d17, d18, d19}, [%[src]]!		\n"
-	    "	vst1.16   {d16, d17, d18, d19}, [%[dst]]!		\n"
-	    "3: @ one quadword					\n"
-	    "	tst       %[count], #8				\n"
-	    "	beq 4f    @ skip oversized fragment		\n"
-	    "	vld1.16   {d16, d17}, [%[src]]!			\n"
-	    "	vst1.16   {d16, d17}, [%[dst]]!			\n"
-	    "4: @ one doubleword				\n"
-	    "	tst       %[count], #4				\n"
-	    "	beq 5f    @ skip oversized fragment		\n"
-	    "	vld1.16   {d16}, [%[src]]!			\n"
-	    "	vst1.16   {d16}, [%[dst]]!			\n"
-	    "5: @ one word					\n"
-	    "	tst       %[count], #2				\n"
-	    "	beq 6f    @ skip oversized fragment		\n"
-	    "	ldr       %[tmp], [%[src]], #4			\n"
-	    "	str       %[tmp], [%[dst]], #4			\n"
-	    "6: @ one halfword					\n"
-	    "	tst       %[count], #1				\n"
-	    "	beq 7f    @ skip oversized fragment		\n"
-	    "	ldrh      %[tmp], [%[src]]			\n"
-	    "	strh      %[tmp], [%[dst]]			\n"
-	    "7: @ end						\n"
-
-	    /* Clobbered input registers marked as input/outputs */
-	    : [dst] "+r" (dst_ptr), [src] "+r" (src_ptr),
-	      [count] "+r" (count), [tmp] "+r" (tmp)
-
-	      /* Unclobbered input */
-	    : [src_stride] "r" (src_stride)
-
-	      /* Clobbered vector registers */
-	    : "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
-	      "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", "cc", "memory"
-	    );
-
-	src_line += src_stride;
-	dst_line += dst_stride;
-    }
-}
-
-#endif /* USE_GCC_INLINE_ASM */
-
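The copy loop above moves up to eight quadwords (64 x 16bpp pixels) per iteration and issues pld at the corresponding offset on the next scanline, so that row's data is already in flight; as the comment notes, the preload frequency assumes a 64-byte cacheline. A rough, simplified intrinsics sketch of the core idea (one quadword per iteration rather than eight, __builtin_prefetch in place of the __pld() helper, 1-7 pixel tail ignored):

    #include <arm_neon.h>
    #include <stdint.h>

    static void
    copy_scanline_0565_sketch (uint16_t *dst, const uint16_t *src,
                               int w, int src_stride /* in pixels */)
    {
        while (w >= 8)
        {
            __builtin_prefetch (src + src_stride);  /* touch the next scanline */
            vst1q_u16 (dst, vld1q_u16 (src));       /* one quadword = 8 x 16bpp */
            src += 8;
            dst += 8;
            w   -= 8;
        }
    }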
-static void
-neon_composite_src_24_16 (pixman_implementation_t * impl,
-                          pixman_op_t               op,
-                          pixman_image_t *          src_image,
-                          pixman_image_t *          mask_image,
-                          pixman_image_t *          dst_image,
-                          int32_t                   src_x,
-                          int32_t                   src_y,
-                          int32_t                   mask_x,
-                          int32_t                   mask_y,
-                          int32_t                   dest_x,
-                          int32_t                   dest_y,
-                          int32_t                   width,
-                          int32_t                   height)
-{
-    uint16_t    *dst_line;
-    uint32_t    *src_line;
-    uint32_t dst_stride, src_stride;
-
-    if (!width || !height)
-	return;
-
-    /* We simply copy pixels from one place to another,
-     * assuming that the source's alpha is opaque.
-     */
-    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
-    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
-
-    /* Preload the first input scanline */
-    {
-	uint8_t *src_ptr = (uint8_t*) src_line;
-	uint32_t count = (width + 15) / 16;
-
-#ifdef USE_GCC_INLINE_ASM
-	asm volatile (
-	    "0: @ loop						\n"
-	    "	subs    %[count], %[count], #1			\n"
-	    "	pld     [%[src]]				\n"
-	    "	add     %[src], %[src], #64			\n"
-	    "	bgt 0b						\n"
-
-	    /* Clobbered input registers marked as input/outputs */
-	    : [src] "+r" (src_ptr), [count] "+r" (count)
-	    :     /* no unclobbered inputs */
-	    : "cc"
-	    );
-#else
-	do
-	{
-	    __pld (src_ptr);
-	    src_ptr += 64;
-	}
-	while (--count);
-#endif
-    }
-
-    while (height--)
-    {
-	uint16_t *dst_ptr = dst_line;
-	uint32_t *src_ptr = src_line;
-	uint32_t count = width;
-	const uint32_t rb_mask = 0x1F;
-	const uint32_t g_mask = 0x3F;
-
-	/* If you're going to complain about a goto, take a long hard look
-	 * at the massive blocks of assembler this skips over.  ;-)
-	 */
-	if (count < 8)
-	    goto small_stuff;
-
-#ifdef USE_GCC_INLINE_ASM
-
-	/* This is not as aggressive as the RGB565-source case.
-	 * Generally the source is in cached RAM when the formats are
-	 * different, so we use preload.
-	 * 
-	 * We don't need to blend, so we are not reading from the
-	 * uncached framebuffer.
-	 */
-	asm volatile (
-	    "	cmp       %[count], #16				\n"
-	    "	blt 1f    @ skip oversized fragments		\n"
-	    "0: @ start with sixteen pixels at a time		\n"
-	    "	sub       %[count], %[count], #16		\n"
-	    "	pld      [%[src], %[src_stride], lsl #2]        @ preload from next scanline			\n"
-	    "	vld4.8    {d0, d1, d2, d3}, [%[src]]!		@ d3 is alpha and ignored, d2-0 are rgb.	\n"
-	    "	vld4.8    {d4, d5, d6, d7}, [%[src]]!		@ d7 is alpha and ignored, d6-4 are rgb.	\n"
-	    "	vshll.u8  q8, d2, #8				@ expand first red for repacking		\n"
-	    "	vshll.u8  q10, d1, #8				@ expand first green for repacking		\n"
-	    "	vshll.u8  q11, d0, #8				@ expand first blue for repacking		\n"
-	    "	vshll.u8  q9, d6, #8				@ expand second red for repacking		\n"
-	    "	vsri.u16  q8, q10, #5				@ insert first green after red			\n"
-	    "	vshll.u8  q10, d5, #8				@ expand second green for repacking		\n"
-	    "	vsri.u16  q8, q11, #11				@ insert first blue after green			\n"
-	    "	vshll.u8  q11, d4, #8				@ expand second blue for repacking		\n"
-	    "	vsri.u16  q9, q10, #5				@ insert second green after red			\n"
-	    "	vsri.u16  q9, q11, #11				@ insert second blue after green		\n"
-	    "	cmp       %[count], #16				\n"
-	    "	vst1.16   {d16, d17, d18, d19}, [%[dst]]!          @ store 16 pixels				\n"
-	    "	bge 0b						\n"
-	    "1: @ end of main loop				\n"
-	    "	cmp       %[count], #8				@ can we still do an 8-pixel block?		\n"
-	    "	blt 2f						\n"
-	    "	sub       %[count], %[count], #8		\n"
-	    "	pld      [%[src], %[src_stride], lsl #2]        @ preload from next scanline			\n"
-	    "	vld4.8    {d0, d1, d2, d3}, [%[src]]!		@ d3 is alpha and ignored, d2-0 are rgb.	\n"
-	    "	vshll.u8  q8, d2, #8				@ expand first red for repacking		\n"
-	    "	vshll.u8  q10, d1, #8				@ expand first green for repacking		\n"
-	    "	vshll.u8  q11, d0, #8				@ expand first blue for repacking		\n"
-	    "	vsri.u16  q8, q10, #5				@ insert first green after red			\n"
-	    "	vsri.u16  q8, q11, #11				@ insert first blue after green			\n"
-	    "	vst1.16   {d16, d17}, [%[dst]]!          @ store 8 pixels				\n"
-	    "2: @ end						\n"
-
-	    /* Clobbered input and working registers marked as input/outputs */
-	    : [dst] "+r" (dst_ptr), [src] "+r" (src_ptr), [count] "+r" (count)
-
-	      /* Unclobbered input */
-	    : [src_stride] "r" (src_stride)
-
-	      /* Clobbered vector registers */
-
-	      /* NB: these are the quad aliases of the
-	       * double registers used in the asm
-	       */
-	    : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d16", "d17",
-	      "d18", "d19", "d20", "d21", "d22", "d23", "cc", "memory"
-	    );
-#else
-	/* A copy of the above code, in intrinsics-form. */
-	while (count >= 16)
-	{
-	    uint8x8x4_t pixel_set_a, pixel_set_b;
-	    uint16x8_t red_a, green_a, blue_a;
-	    uint16x8_t red_b, green_b, blue_b;
-	    uint16x8_t dest_pixels_a, dest_pixels_b;
-
-	    count -= 16;
-	    __pld (src_ptr + src_stride);
-	    pixel_set_a = vld4_u8 ((uint8_t*)(src_ptr));
-	    pixel_set_b = vld4_u8 ((uint8_t*)(src_ptr + 8));
-	    src_ptr += 16;
-
-	    red_a   = vshll_n_u8 (pixel_set_a.val[2], 8);
-	    green_a = vshll_n_u8 (pixel_set_a.val[1], 8);
-	    blue_a  = vshll_n_u8 (pixel_set_a.val[0], 8);
-	    
-	    red_b   = vshll_n_u8 (pixel_set_b.val[2], 8);
-	    green_b = vshll_n_u8 (pixel_set_b.val[1], 8);
-	    blue_b  = vshll_n_u8 (pixel_set_b.val[0], 8);
-	    
-	    dest_pixels_a = vsriq_n_u16 (red_a, green_a, 5);
-	    dest_pixels_b = vsriq_n_u16 (red_b, green_b, 5);
-	    
-	    dest_pixels_a = vsriq_n_u16 (dest_pixels_a, blue_a, 11);
-	    dest_pixels_b = vsriq_n_u16 (dest_pixels_b, blue_b, 11);
-
-	    /* There doesn't seem to be an intrinsic for the
-	     * double-quadword variant
-	     */
-	    vst1q_u16 (dst_ptr, dest_pixels_a);
-	    vst1q_u16 (dst_ptr + 8, dest_pixels_b);
-	    dst_ptr += 16;
-	}
-
-	/* 8-pixel loop */
-	if (count >= 8)
-	{
-	    uint8x8x4_t pixel_set_a;
-	    uint16x8_t red_a, green_a, blue_a;
-	    uint16x8_t dest_pixels_a;
-
-	    __pld (src_ptr + src_stride);
-	    count -= 8;
-	    pixel_set_a = vld4_u8 ((uint8_t*)(src_ptr));
-	    src_ptr += 8;
-
-	    red_a   = vshll_n_u8 (pixel_set_a.val[2], 8);
-	    green_a = vshll_n_u8 (pixel_set_a.val[1], 8);
-	    blue_a  = vshll_n_u8 (pixel_set_a.val[0], 8);
-
-	    dest_pixels_a = vsriq_n_u16 (red_a, green_a, 5);
-	    dest_pixels_a = vsriq_n_u16 (dest_pixels_a, blue_a, 11);
-
-	    vst1q_u16 (dst_ptr, dest_pixels_a);
-	    dst_ptr += 8;
-	}
-
-#endif  /* USE_GCC_INLINE_ASM */
-
-    small_stuff:
-	if (count)
-	    __pld (src_ptr + src_stride);
-
-	while (count >= 2)
-	{
-	    uint32_t src_pixel_a = *src_ptr++;
-	    uint32_t src_pixel_b = *src_ptr++;
-
-	    /* ARM is really good at shift-then-ALU ops. */
-	    /* This should be a total of six shift-ANDs and five shift-ORs. */
-	    uint32_t dst_pixels_a;
-	    uint32_t dst_pixels_b;
-
-	    dst_pixels_a  = ((src_pixel_a >>  3) & rb_mask);
-	    dst_pixels_a |= ((src_pixel_a >> 10) &  g_mask) << 5;
-	    dst_pixels_a |= ((src_pixel_a >> 19) & rb_mask) << 11;
-
-	    dst_pixels_b  = ((src_pixel_b >>  3) & rb_mask);
-	    dst_pixels_b |= ((src_pixel_b >> 10) &  g_mask) << 5;
-	    dst_pixels_b |= ((src_pixel_b >> 19) & rb_mask) << 11;
-
-	    /* little-endian mode only */
-	    *((uint32_t*) dst_ptr) = dst_pixels_a | (dst_pixels_b << 16);
-	    dst_ptr += 2;
-	    count -= 2;
-	}
-
-	if (count)
-	{
-	    uint32_t src_pixel = *src_ptr++;
-
-	    /* ARM is really good at shift-then-ALU ops.
-	     * This block should end up as three shift-ANDs
-	     * and two shift-ORs.
-	     */
-	    uint32_t tmp_blue  = (src_pixel >>  3) & rb_mask;
-	    uint32_t tmp_green = (src_pixel >> 10) & g_mask;
-	    uint32_t tmp_red   = (src_pixel >> 19) & rb_mask;
-	    uint16_t dst_pixel = (tmp_red << 11) | (tmp_green << 5) | tmp_blue;
-
-	    *dst_ptr++ = dst_pixel;
-	    count--;
-	}
-
-	src_line += src_stride;
-	dst_line += dst_stride;
-    }
-}
-
 static pixman_bool_t
 pixman_fill_neon (uint32_t *bits,
                   int       stride,
@@ -1705,226 +43,11 @@ pixman_fill_neon (uint32_t *bits,
                   int       height,
                   uint32_t  _xor)
 {
-    uint32_t byte_stride, color;
-    char *dst;
-
-    /* stride is always multiple of 32bit units in pixman */
-    byte_stride = stride * sizeof(uint32_t);
-
-    switch (bpp)
-    {
-    case 8:
-	dst = ((char *) bits) + y * byte_stride + x;
-	_xor &= 0xff;
-	color = _xor << 24 | _xor << 16 | _xor << 8 | _xor;
-	break;
-
-    case 16:
-	dst = ((char *) bits) + y * byte_stride + x * 2;
-	_xor &= 0xffff;
-	color = _xor << 16 | _xor;
-	width *= 2;         /* width to bytes */
-	break;
-
-    case 32:
-	dst = ((char *) bits) + y * byte_stride + x * 4;
-	color = _xor;
-	width *= 4;         /* width to bytes */
-	break;
-
-    default:
-	return FALSE;
-    }
-
-#ifdef USE_GCC_INLINE_ASM
-    if (width < 16)
-    {
-	/* We have a special case for such small widths that don't allow
-	 * us to use wide 128-bit stores anyway. We don't waste time
-	 * trying to align writes, since there are only very few of them anyway
-	 */
-	asm volatile (
-	    "cmp		%[height], #0\n"/* Check if empty fill */
-	    "beq		3f\n"
-	    "vdup.32	d0, %[color]\n"/* Fill the color to neon req */
-
-	    /* Check if we have a such width that can easily be handled by single
-	     * operation for each scanline. This significantly reduces the number
-	     * of test/branch instructions for each scanline
-	     */
-	    "cmp		%[width], #8\n"
-	    "beq		4f\n"
-	    "cmp		%[width], #4\n"
-	    "beq		5f\n"
-	    "cmp		%[width], #2\n"
-	    "beq		6f\n"
-
-	    /* Loop starts here for each scanline */
-	    "1:\n"
-	    "mov		r4, %[dst]\n" /* Starting address of the current line */
-	    "tst		%[width], #8\n"
-	    "beq		2f\n"
-	    "vst1.8		{d0}, [r4]!\n"
-	    "2:\n"
-	    "tst		%[width], #4\n"
-	    "beq		2f\n"
-	    "str		%[color], [r4], #4\n"
-	    "2:\n"
-	    "tst		%[width], #2\n"
-	    "beq		2f\n"
-	    "strh		%[color], [r4], #2\n"
-	    "2:\n"
-	    "tst		%[width], #1\n"
-	    "beq		2f\n"
-	    "strb		%[color], [r4], #1\n"
-	    "2:\n"
-
-	    "subs		%[height], %[height], #1\n"
-	    "add		%[dst], %[dst], %[byte_stride]\n"
-	    "bne		1b\n"
-	    "b		3f\n"
-
-	    /* Special fillers for those widths that we can do with single operation */
-	    "4:\n"
-	    "subs		%[height], %[height], #1\n"
-	    "vst1.8		{d0}, [%[dst]]\n"
-	    "add		%[dst], %[dst], %[byte_stride]\n"
-	    "bne		4b\n"
-	    "b		3f\n"
-
-	    "5:\n"
-	    "subs		%[height], %[height], #1\n"
-	    "str		%[color], [%[dst]]\n"
-	    "add		%[dst], %[dst], %[byte_stride]\n"
-	    "bne		5b\n"
-	    "b		3f\n"
-
-	    "6:\n"
-	    "subs		%[height], %[height], #1\n"
-	    "strh		%[color], [%[dst]]\n"
-	    "add		%[dst], %[dst], %[byte_stride]\n"
-	    "bne		6b\n"
-
-	    "3:\n"
-	    : [height] "+r" (height), [dst] "+r" (dst)
-	    : [color] "r" (color), [width] "r" (width),
-	      [byte_stride] "r" (byte_stride)
-	    : "memory", "cc", "d0", "r4");
-    }
-    else
-    {
-	asm volatile (
-	    "cmp		%[height], #0\n"/* Check if empty fill */
-	    "beq		5f\n"
-	    "vdup.32	q0, %[color]\n"/* Fill the color to neon req */
-
-	    /* Loop starts here for each scanline */
-	    "1:\n"
-	    "mov		r4, %[dst]\n"/* Starting address of the current line */
-	    "mov		r5, %[width]\n"/* We're going to write this many bytes */
-	    "ands		r6, r4, #15\n"/* Are we at the 128-bit aligned address? */
-	    "beq		2f\n"/* Jump to the best case */
-
-	    /* We're not 128-bit aligned: However, we know that we can get to the
-	       next aligned location, since the fill is at least 16 bytes wide */
-	    "rsb                r6, r6, #16\n" /* We would need to go forward this much */
-	    "sub		r5, r5, r6\n"/* Update bytes left */
-	    "tst		r6, #1\n"
-	    "beq		6f\n"
-	    "vst1.8		{d0[0]}, [r4]!\n"/* Store byte, now we are word aligned */
-	    "6:\n"
-	    "tst		r6, #2\n"
-	    "beq		6f\n"
-	    "vst1.16	{d0[0]}, [r4, :16]!\n"/* Store half word, now we are 16-bit aligned */
-	    "6:\n"
-	    "tst		r6, #4\n"
-	    "beq		6f\n"
-	    "vst1.32	{d0[0]}, [r4, :32]!\n"/* Store word, now we're 32-bit aligned */
-	    "6:\n"
-	    "tst		r6, #8\n"
-	    "beq		2f\n"
-	    "vst1.64	{d0}, [r4, :64]!\n"/* Store qword now we're 64-bit aligned */
-
-	    /* The good case: We're 128-bit aligned for this scanline */
-	    "2:\n"
-	    "and		r6, r5, #15\n"/* Number of tailing bytes */
-	    "cmp		r5, r6\n"/* Do we have at least one qword to write? */
-	    "beq		6f\n"/* No, we just write the tail */
-	    "lsr		r5, r5, #4\n"/* This many full qwords to write */
-
-	    /* The main block: Do 128-bit aligned writes */
-	    "3:\n"
-	    "subs		r5, r5, #1\n"
-	    "vst1.64	{d0, d1}, [r4, :128]!\n"
-	    "bne		3b\n"
-
-	    /* Handle the tailing bytes: Do 64, 32, 16 and 8-bit aligned writes as needed.
-	       We know that we're currently at 128-bit aligned address, so we can just
-	       pick the biggest operations that the remaining write width allows */
-	    "6:\n"
-	    "cmp		r6, #0\n"
-	    "beq		4f\n"
-	    "tst		r6, #8\n"
-	    "beq		6f\n"
-	    "vst1.64	{d0}, [r4, :64]!\n"
-	    "6:\n"
-	    "tst		r6, #4\n"
-	    "beq		6f\n"
-	    "vst1.32	{d0[0]}, [r4, :32]!\n"
-	    "6:\n"
-	    "tst		r6, #2\n"
-	    "beq		6f\n"
-	    "vst1.16	{d0[0]}, [r4, :16]!\n"
-	    "6:\n"
-	    "tst		r6, #1\n"
-	    "beq		4f\n"
-	    "vst1.8		{d0[0]}, [r4]!\n"
-	    "4:\n"
-
-	    /* Handle the next scanline */
-	    "subs		%[height], %[height], #1\n"
-	    "add		%[dst], %[dst], %[byte_stride]\n"
-	    "bne		1b\n"
-	    "5:\n"
-	    : [height] "+r" (height), [dst] "+r" (dst)
-	    : [color] "r" (color), [width] "r" (width),
-	      [byte_stride] "r" (byte_stride)
-	    : "memory", "cc", "d0", "d1", "r4", "r5", "r6");
-    }
-    return TRUE;
-
-#else
-
-    /* TODO: intrinsic version for armcc */
     return FALSE;
-
-#endif
 }
 
 static const pixman_fast_path_t arm_neon_fast_path_array[] =
 {
-    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8,        0 },
-    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_composite_add_8000_8000,    0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565,    0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_composite_over_n_8_0565,    0 },
-    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_24_16,        0 },
-    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_24_16,        0 },
-    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_24_16,        0 },
-    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_24_16,        0 },
-#ifdef USE_GCC_INLINE_ASM
-    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_16_16,        0 },
-    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_16_16,        0 },
-#endif
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_8888_8888,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_over_8888_8888,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888,   0 },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888, NEED_SOLID_MASK },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888,    0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888,    0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888,    0 },
-    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888,    0 },
     { PIXMAN_OP_NONE },
 };
 
@@ -1992,4 +115,3 @@ _pixman_implementation_create_arm_neon (void)
 
     return imp;
 }
-

