xf86-video-intel: 7 commits - src/sna/compiler.h src/sna/gen3_render.c src/sna/gen4_vertex.c src/sna/sna_cpu.c src/sna/sna_driver.c src/sna/sna.h src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Tue Feb 26 03:18:24 PST 2013


 src/sna/compiler.h    |    6 
 src/sna/gen3_render.c |  471 ++++++++++
 src/sna/gen4_vertex.c | 2155 +++++++++++++++++++++++++-------------------------
 src/sna/sna.h         |   10 
 src/sna/sna_cpu.c     |   31 
 src/sna/sna_driver.c  |    3 
 src/sna/sna_render.c  |   19 
 7 files changed, 1586 insertions(+), 1109 deletions(-)

New commits:
commit 8cdebf3b72467f63a35888f38cd83658575fcf10
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 26 11:06:45 2013 +0000

    sna/gen4: Cluster ISA
    
    Otherwise we seem to confuse the poor little compiler. This should also
    make it easier to use CPP to turn off blocks.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
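
A minimal sketch of the clustering idea follows, assuming the GCC target
attributes defined in compiler.h; the USE_SSE4_2 guard is hypothetical and
not a macro used by the driver, which instead picks an emitter at runtime
from sna->cpu_features (see gen4_choose_composite_emitter below). Keeping
every sse4_2-marked function in one contiguous block means the compiler
switches target ISA once per cluster, and a single #if can drop the whole
group.

#if defined(__GNUC__) && (__GNUC__ >= 4)
#define sse2   __attribute__((target("sse2")))
#define sse4_2 __attribute__((target("sse4.2,sse2")))
#else
#define sse2
#define sse4_2
#endif

/* Baseline block: the plain SSE2 emitters live together. */
sse2 static void emit_example(float *v, int n)
{
	int i;
	for (i = 0; i < n; i++)
		v[i] = 0.5f;
}

/* SSE4.2 cluster: all sse4_2 variants sit adjacent to each other, so the
 * whole group can be compiled out with one (hypothetical) guard. */
#if USE_SSE4_2
sse4_2 static void emit_example__sse4_2(float *v, int n)
{
	int i;
	for (i = 0; i < n; i++)
		v[i] = 0.5f;
}
#endif

The call site then selects one of the two variants from the detected CPU
features, in the same way gen4_choose_composite_emitter chooses between the
sse2, sse4_2 and avx2 emitters in the hunks below.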

diff --git a/src/sna/compiler.h b/src/sna/compiler.h
index 407d3b5..3dd8a6d 100644
--- a/src/sna/compiler.h
+++ b/src/sna/compiler.h
@@ -54,8 +54,8 @@
 
 #if defined(__GNUC__) && (__GNUC__ >= 4) /* 4.4 */
 #define sse2 __attribute__((target("sse2,fpmath=sse+387")))
-#define sse4_2 __attribute__((target("sse4.2,sse2")))
-#define avx2 __attribute__((target("avx2,sse4.2,sse2")))
+#define sse4_2 __attribute__((target("sse4.2,sse2,fpmath=sse+387")))
+#define avx2 __attribute__((target("avx2,sse4.2,sse2,fpmath=sse+387")))
 #else
 #define sse2
 #define sse4_2
diff --git a/src/sna/gen4_vertex.c b/src/sna/gen4_vertex.c
index 65624b7..885b87e 100644
--- a/src/sna/gen4_vertex.c
+++ b/src/sna/gen4_vertex.c
@@ -414,66 +414,6 @@ emit_primitive_linear(struct sna *sna,
 	v[5] = compute_linear(&op->src, r->src.x, r->src.y);
 }
 
-sse4_2 fastcall static void
-emit_primitive_linear__sse4_2(struct sna *sna,
-			      const struct sna_composite_op *op,
-			      const struct sna_composite_rectangles *r)
-{
-	float *v;
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	assert(op->floats_per_rect == 6);
-	assert((sna->render.vertex_used % 2) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 6;
-	assert(sna->render.vertex_used <= sna->render.vertex_size);
-
-	dst.p.x = r->dst.x + r->width;
-	dst.p.y = r->dst.y + r->height;
-	v[0] = dst.f;
-	dst.p.x = r->dst.x;
-	v[2] = dst.f;
-	dst.p.y = r->dst.y;
-	v[4] = dst.f;
-
-	v[1] = compute_linear(&op->src, r->src.x+r->width, r->src.y+r->height);
-	v[3] = compute_linear(&op->src, r->src.x, r->src.y+r->height);
-	v[5] = compute_linear(&op->src, r->src.x, r->src.y);
-}
-
-avx2 fastcall static void
-emit_primitive_linear__avx2(struct sna *sna,
-			    const struct sna_composite_op *op,
-			    const struct sna_composite_rectangles *r)
-{
-	float *v;
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	assert(op->floats_per_rect == 6);
-	assert((sna->render.vertex_used % 2) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 6;
-	assert(sna->render.vertex_used <= sna->render.vertex_size);
-
-	dst.p.x = r->dst.x + r->width;
-	dst.p.y = r->dst.y + r->height;
-	v[0] = dst.f;
-	dst.p.x = r->dst.x;
-	v[2] = dst.f;
-	dst.p.y = r->dst.y;
-	v[4] = dst.f;
-
-	v[1] = compute_linear(&op->src, r->src.x+r->width, r->src.y+r->height);
-	v[3] = compute_linear(&op->src, r->src.x, r->src.y+r->height);
-	v[5] = compute_linear(&op->src, r->src.x, r->src.y);
-}
-
 sse2 fastcall static void
 emit_boxes_linear(const struct sna_composite_op *op,
 		  const BoxRec *box, int nbox,
@@ -502,62 +442,6 @@ emit_boxes_linear(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-sse4_2 fastcall static void
-emit_boxes_linear__sse4_2(const struct sna_composite_op *op,
-			  const BoxRec *box, int nbox,
-			  float *v)
-{
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	do {
-		dst.p.x = box->x2;
-		dst.p.y = box->y2;
-		v[0] = dst.f;
-		dst.p.x = box->x1;
-		v[2] = dst.f;
-		dst.p.y = box->y1;
-		v[4] = dst.f;
-
-		v[1] = compute_linear(&op->src, box->x2, box->y2);
-		v[3] = compute_linear(&op->src, box->x1, box->y2);
-		v[5] = compute_linear(&op->src, box->x1, box->y1);
-
-		v += 6;
-		box++;
-	} while (--nbox);
-}
-
-avx2 fastcall static void
-emit_boxes_linear__avx2(const struct sna_composite_op *op,
-			const BoxRec *box, int nbox,
-			float *v)
-{
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	do {
-		dst.p.x = box->x2;
-		dst.p.y = box->y2;
-		v[0] = dst.f;
-		dst.p.x = box->x1;
-		v[2] = dst.f;
-		dst.p.y = box->y1;
-		v[4] = dst.f;
-
-		v[1] = compute_linear(&op->src, box->x2, box->y2);
-		v[3] = compute_linear(&op->src, box->x1, box->y2);
-		v[5] = compute_linear(&op->src, box->x1, box->y1);
-
-		v += 6;
-		box++;
-	} while (--nbox);
-}
-
 sse2 fastcall static void
 emit_primitive_identity_source(struct sna *sna,
 			       const struct sna_composite_op *op,
@@ -589,68 +473,6 @@ emit_primitive_identity_source(struct sna *sna,
 	v[5] = v[2] = v[8] + r->height * op->src.scale[1];
 }
 
-sse4_2 fastcall static void
-emit_primitive_identity_source__sse4_2(struct sna *sna,
-				       const struct sna_composite_op *op,
-				       const struct sna_composite_rectangles *r)
-{
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-	float *v;
-
-	assert(op->floats_per_rect == 9);
-	assert((sna->render.vertex_used % 3) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 9;
-
-	dst.p.x = r->dst.x + r->width;
-	dst.p.y = r->dst.y + r->height;
-	v[0] = dst.f;
-	dst.p.x = r->dst.x;
-	v[3] = dst.f;
-	dst.p.y = r->dst.y;
-	v[6] = dst.f;
-
-	v[7] = v[4] = (r->src.x + op->src.offset[0]) * op->src.scale[0];
-	v[1] = v[4] + r->width * op->src.scale[0];
-
-	v[8] = (r->src.y + op->src.offset[1]) * op->src.scale[1];
-	v[5] = v[2] = v[8] + r->height * op->src.scale[1];
-}
-
-avx2 fastcall static void
-emit_primitive_identity_source__avx2(struct sna *sna,
-				     const struct sna_composite_op *op,
-				     const struct sna_composite_rectangles *r)
-{
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-	float *v;
-
-	assert(op->floats_per_rect == 9);
-	assert((sna->render.vertex_used % 3) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 9;
-
-	dst.p.x = r->dst.x + r->width;
-	dst.p.y = r->dst.y + r->height;
-	v[0] = dst.f;
-	dst.p.x = r->dst.x;
-	v[3] = dst.f;
-	dst.p.y = r->dst.y;
-	v[6] = dst.f;
-
-	v[7] = v[4] = (r->src.x + op->src.offset[0]) * op->src.scale[0];
-	v[1] = v[4] + r->width * op->src.scale[0];
-
-	v[8] = (r->src.y + op->src.offset[1]) * op->src.scale[1];
-	v[5] = v[2] = v[8] + r->height * op->src.scale[1];
-}
-
 sse2 fastcall static void
 emit_boxes_identity_source(const struct sna_composite_op *op,
 			   const BoxRec *box, int nbox,
@@ -681,66 +503,6 @@ emit_boxes_identity_source(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-sse4_2 fastcall static void
-emit_boxes_identity_source__sse4_2(const struct sna_composite_op *op,
-				   const BoxRec *box, int nbox,
-				   float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = box->x2;
-		dst.p.y = box->y2;
-		v[0] = dst.f;
-		dst.p.x = box->x1;
-		v[3] = dst.f;
-		dst.p.y = box->y1;
-		v[6] = dst.f;
-
-		v[7] = v[4] = (box->x1 + op->src.offset[0]) * op->src.scale[0];
-		v[1] = (box->x2 + op->src.offset[0]) * op->src.scale[0];
-
-		v[8] = (box->y1 + op->src.offset[1]) * op->src.scale[1];
-		v[2] = v[5] = (box->y2 + op->src.offset[1]) * op->src.scale[1];
-
-		v += 9;
-		box++;
-	} while (--nbox);
-}
-
-avx2 fastcall static void
-emit_boxes_identity_source__avx2(const struct sna_composite_op *op,
-				 const BoxRec *box, int nbox,
-				 float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = box->x2;
-		dst.p.y = box->y2;
-		v[0] = dst.f;
-		dst.p.x = box->x1;
-		v[3] = dst.f;
-		dst.p.y = box->y1;
-		v[6] = dst.f;
-
-		v[7] = v[4] = (box->x1 + op->src.offset[0]) * op->src.scale[0];
-		v[1] = (box->x2 + op->src.offset[0]) * op->src.scale[0];
-
-		v[8] = (box->y1 + op->src.offset[1]) * op->src.scale[1];
-		v[2] = v[5] = (box->y2 + op->src.offset[1]) * op->src.scale[1];
-
-		v += 9;
-		box++;
-	} while (--nbox);
-}
-
 sse2 fastcall static void
 emit_primitive_simple_source(struct sna *sna,
 			     const struct sna_composite_op *op,
@@ -781,168 +543,10 @@ emit_primitive_simple_source(struct sna *sna,
 	v[8] = ((r->src.y + ty) * yy + y0) * sy;
 }
 
-sse4_2 fastcall static void
-emit_primitive_simple_source__sse4_2(struct sna *sna,
-				     const struct sna_composite_op *op,
-				     const struct sna_composite_rectangles *r)
-{
-	float *v;
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	float xx = op->src.transform->matrix[0][0];
-	float x0 = op->src.transform->matrix[0][2];
-	float yy = op->src.transform->matrix[1][1];
-	float y0 = op->src.transform->matrix[1][2];
-	float sx = op->src.scale[0];
-	float sy = op->src.scale[1];
-	int16_t tx = op->src.offset[0];
-	int16_t ty = op->src.offset[1];
-
-	assert(op->floats_per_rect == 9);
-	assert((sna->render.vertex_used % 3) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*3;
-
-	dst.p.x = r->dst.x + r->width;
-	dst.p.y = r->dst.y + r->height;
-	v[0] = dst.f;
-	v[1] = ((r->src.x + r->width + tx) * xx + x0) * sx;
-	v[5] = v[2] = ((r->src.y + r->height + ty) * yy + y0) * sy;
-
-	dst.p.x = r->dst.x;
-	v[3] = dst.f;
-	v[7] = v[4] = ((r->src.x + tx) * xx + x0) * sx;
-
-	dst.p.y = r->dst.y;
-	v[6] = dst.f;
-	v[8] = ((r->src.y + ty) * yy + y0) * sy;
-}
-
-avx2 fastcall static void
-emit_primitive_simple_source__avx2(struct sna *sna,
-				   const struct sna_composite_op *op,
-				   const struct sna_composite_rectangles *r)
-{
-	float *v;
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	float xx = op->src.transform->matrix[0][0];
-	float x0 = op->src.transform->matrix[0][2];
-	float yy = op->src.transform->matrix[1][1];
-	float y0 = op->src.transform->matrix[1][2];
-	float sx = op->src.scale[0];
-	float sy = op->src.scale[1];
-	int16_t tx = op->src.offset[0];
-	int16_t ty = op->src.offset[1];
-
-	assert(op->floats_per_rect == 9);
-	assert((sna->render.vertex_used % 3) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*3;
-
-	dst.p.x = r->dst.x + r->width;
-	dst.p.y = r->dst.y + r->height;
-	v[0] = dst.f;
-	v[1] = ((r->src.x + r->width + tx) * xx + x0) * sx;
-	v[5] = v[2] = ((r->src.y + r->height + ty) * yy + y0) * sy;
-
-	dst.p.x = r->dst.x;
-	v[3] = dst.f;
-	v[7] = v[4] = ((r->src.x + tx) * xx + x0) * sx;
-
-	dst.p.y = r->dst.y;
-	v[6] = dst.f;
-	v[8] = ((r->src.y + ty) * yy + y0) * sy;
-}
-
-sse2 fastcall static void
-emit_boxes_simple_source(const struct sna_composite_op *op,
-			 const BoxRec *box, int nbox,
-			 float *v)
-{
-	float xx = op->src.transform->matrix[0][0];
-	float x0 = op->src.transform->matrix[0][2];
-	float yy = op->src.transform->matrix[1][1];
-	float y0 = op->src.transform->matrix[1][2];
-	float sx = op->src.scale[0];
-	float sy = op->src.scale[1];
-	int16_t tx = op->src.offset[0];
-	int16_t ty = op->src.offset[1];
-
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = box->x2;
-		dst.p.y = box->y2;
-		v[0] = dst.f;
-		v[1] = ((box->x2 + tx) * xx + x0) * sx;
-		v[5] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
-
-		dst.p.x = box->x1;
-		v[3] = dst.f;
-		v[7] = v[4] = ((box->x1 + tx) * xx + x0) * sx;
-
-		dst.p.y = box->y1;
-		v[6] = dst.f;
-		v[8] = ((box->y1 + ty) * yy + y0) * sy;
-
-		v += 9;
-		box++;
-	} while (--nbox);
-}
-
-sse4_2 fastcall static void
-emit_boxes_simple_source__sse4_2(const struct sna_composite_op *op,
-				 const BoxRec *box, int nbox,
-				 float *v)
-{
-	float xx = op->src.transform->matrix[0][0];
-	float x0 = op->src.transform->matrix[0][2];
-	float yy = op->src.transform->matrix[1][1];
-	float y0 = op->src.transform->matrix[1][2];
-	float sx = op->src.scale[0];
-	float sy = op->src.scale[1];
-	int16_t tx = op->src.offset[0];
-	int16_t ty = op->src.offset[1];
-
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = box->x2;
-		dst.p.y = box->y2;
-		v[0] = dst.f;
-		v[1] = ((box->x2 + tx) * xx + x0) * sx;
-		v[5] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
-
-		dst.p.x = box->x1;
-		v[3] = dst.f;
-		v[7] = v[4] = ((box->x1 + tx) * xx + x0) * sx;
-
-		dst.p.y = box->y1;
-		v[6] = dst.f;
-		v[8] = ((box->y1 + ty) * yy + y0) * sy;
-
-		v += 9;
-		box++;
-	} while (--nbox);
-}
-
-avx2 fastcall static void
-emit_boxes_simple_source__avx2(const struct sna_composite_op *op,
-			       const BoxRec *box, int nbox,
-			       float *v)
+sse2 fastcall static void
+emit_boxes_simple_source(const struct sna_composite_op *op,
+			 const BoxRec *box, int nbox,
+			 float *v)
 {
 	float xx = op->src.transform->matrix[0][0];
 	float x0 = op->src.transform->matrix[0][2];
@@ -1376,108 +980,507 @@ emit_composite_texcoord_affine(struct sna *sna,
 	OUT_VERTEX_F(t[1] * channel->scale[1]);
 }
 
+/* SSE4_2 */
 
-unsigned gen4_choose_composite_emitter(struct sna *sna, struct sna_composite_op *tmp)
+sse4_2 fastcall static void
+emit_primitive_linear__sse4_2(struct sna *sna,
+			      const struct sna_composite_op *op,
+			      const struct sna_composite_rectangles *r)
 {
-	unsigned vb;
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
 
-	if (tmp->mask.bo) {
-		if (tmp->mask.transform == NULL) {
-			if (tmp->src.is_solid) {
-				DBG(("%s: solid, identity mask\n", __FUNCTION__));
-				tmp->prim_emit = emit_primitive_identity_mask;
-				tmp->emit_boxes = emit_boxes_identity_mask;
-				tmp->floats_per_vertex = 4;
-				vb = 1 | 2 << 2;
-			} else if (tmp->src.is_linear) {
-				DBG(("%s: linear, identity mask\n", __FUNCTION__));
-				tmp->prim_emit = emit_primitive_linear_identity_mask;
-				tmp->emit_boxes = emit_boxes_linear_identity_mask;
-				tmp->floats_per_vertex = 4;
-				vb = 1 | 2 << 2;
-			} else if (tmp->src.transform == NULL) {
-				DBG(("%s: identity source, identity mask\n", __FUNCTION__));
-				tmp->prim_emit = emit_primitive_identity_source_mask;
-				tmp->floats_per_vertex = 5;
-				vb = 2 << 2 | 2;
-			} else if (tmp->src.is_affine) {
-				tmp->src.scale[0] /= tmp->src.transform->matrix[2][2];
-				tmp->src.scale[1] /= tmp->src.transform->matrix[2][2];
-				if (!sna_affine_transform_is_rotation(tmp->src.transform)) {
-					DBG(("%s: simple src, identity mask\n", __FUNCTION__));
-					tmp->prim_emit = emit_primitive_simple_source_identity;
-				} else {
-					DBG(("%s: affine src, identity mask\n", __FUNCTION__));
-					tmp->prim_emit = emit_primitive_affine_source_identity;
-				}
-				tmp->floats_per_vertex = 5;
-				vb = 2 << 2 | 2;
-			} else {
-				DBG(("%s: projective source, identity mask\n", __FUNCTION__));
-				tmp->prim_emit = emit_primitive_mask;
-				tmp->floats_per_vertex = 6;
-				vb = 2 << 2 | 3;
-			}
-		} else {
-			tmp->prim_emit = emit_primitive_mask;
-			tmp->floats_per_vertex = 1;
-			vb = 0;
-			if (tmp->mask.is_solid) {
-				tmp->floats_per_vertex += 1;
-				vb |= 1 << 2;
-			} else if (tmp->mask.is_affine) {
-				tmp->floats_per_vertex += 2;
-				vb |= 2 << 2;
-			}else {
-				tmp->floats_per_vertex += 3;
-				vb |= 3 << 2;
-			}
-			if (tmp->src.is_solid) {
-				tmp->floats_per_vertex += 1;
-				vb |= 1;
-			} else if (tmp->src.is_affine) {
-				tmp->floats_per_vertex += 2;
-				vb |= 2 ;
-			}else {
-				tmp->floats_per_vertex += 3;
-				vb |= 3;
-			}
-			DBG(("%s: general mask: floats-per-vertex=%d, vb=%x\n",
-			     __FUNCTION__,tmp->floats_per_vertex, vb));
-		}
-	} else {
-		if (tmp->src.is_solid) {
-			DBG(("%s: solid, no mask\n", __FUNCTION__));
-			tmp->prim_emit = emit_primitive_solid;
-			tmp->emit_boxes = emit_boxes_solid;
-			if (tmp->src.is_opaque && tmp->op == PictOpOver)
-				tmp->op = PictOpSrc;
-			tmp->floats_per_vertex = 2;
-			vb = 1;
-		} else if (tmp->src.is_linear) {
-			DBG(("%s: linear, no mask\n", __FUNCTION__));
-			if (sna->cpu_features & AVX2) {
-				tmp->prim_emit = emit_primitive_linear__avx2;
-				tmp->emit_boxes = emit_boxes_linear__avx2;
-			} else  if (sna->cpu_features & SSE4_2) {
-				tmp->prim_emit = emit_primitive_linear__sse4_2;
-				tmp->emit_boxes = emit_boxes_linear__sse4_2;
-			} else {
-				tmp->prim_emit = emit_primitive_linear;
-				tmp->emit_boxes = emit_boxes_linear;
-			}
-			tmp->floats_per_vertex = 2;
-			vb = 1;
-		} else if (tmp->src.transform == NULL) {
-			DBG(("%s: identity src, no mask\n", __FUNCTION__));
-			if (sna->cpu_features & AVX2) {
-				tmp->prim_emit = emit_primitive_identity_source__avx2;
-				tmp->emit_boxes = emit_boxes_identity_source__avx2;
-			} else if (sna->cpu_features & SSE4_2) {
-				tmp->prim_emit = emit_primitive_identity_source__sse4_2;
-				tmp->emit_boxes = emit_boxes_identity_source__sse4_2;
-			} else {
-				tmp->prim_emit = emit_primitive_identity_source;
+	assert(op->floats_per_rect == 6);
+	assert((sna->render.vertex_used % 2) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	dst.p.x = r->dst.x;
+	v[2] = dst.f;
+	dst.p.y = r->dst.y;
+	v[4] = dst.f;
+
+	v[1] = compute_linear(&op->src, r->src.x+r->width, r->src.y+r->height);
+	v[3] = compute_linear(&op->src, r->src.x, r->src.y+r->height);
+	v[5] = compute_linear(&op->src, r->src.x, r->src.y);
+}
+
+sse4_2 fastcall static void
+emit_boxes_linear__sse4_2(const struct sna_composite_op *op,
+			  const BoxRec *box, int nbox,
+			  float *v)
+{
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	do {
+		dst.p.x = box->x2;
+		dst.p.y = box->y2;
+		v[0] = dst.f;
+		dst.p.x = box->x1;
+		v[2] = dst.f;
+		dst.p.y = box->y1;
+		v[4] = dst.f;
+
+		v[1] = compute_linear(&op->src, box->x2, box->y2);
+		v[3] = compute_linear(&op->src, box->x1, box->y2);
+		v[5] = compute_linear(&op->src, box->x1, box->y1);
+
+		v += 6;
+		box++;
+	} while (--nbox);
+}
+
+sse4_2 fastcall static void
+emit_primitive_identity_source__sse4_2(struct sna *sna,
+				       const struct sna_composite_op *op,
+				       const struct sna_composite_rectangles *r)
+{
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+	float *v;
+
+	assert(op->floats_per_rect == 9);
+	assert((sna->render.vertex_used % 3) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 9;
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	dst.p.x = r->dst.x;
+	v[3] = dst.f;
+	dst.p.y = r->dst.y;
+	v[6] = dst.f;
+
+	v[7] = v[4] = (r->src.x + op->src.offset[0]) * op->src.scale[0];
+	v[1] = v[4] + r->width * op->src.scale[0];
+
+	v[8] = (r->src.y + op->src.offset[1]) * op->src.scale[1];
+	v[5] = v[2] = v[8] + r->height * op->src.scale[1];
+}
+
+sse4_2 fastcall static void
+emit_boxes_identity_source__sse4_2(const struct sna_composite_op *op,
+				   const BoxRec *box, int nbox,
+				   float *v)
+{
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		dst.p.x = box->x2;
+		dst.p.y = box->y2;
+		v[0] = dst.f;
+		dst.p.x = box->x1;
+		v[3] = dst.f;
+		dst.p.y = box->y1;
+		v[6] = dst.f;
+
+		v[7] = v[4] = (box->x1 + op->src.offset[0]) * op->src.scale[0];
+		v[1] = (box->x2 + op->src.offset[0]) * op->src.scale[0];
+
+		v[8] = (box->y1 + op->src.offset[1]) * op->src.scale[1];
+		v[2] = v[5] = (box->y2 + op->src.offset[1]) * op->src.scale[1];
+
+		v += 9;
+		box++;
+	} while (--nbox);
+}
+
+sse4_2 fastcall static void
+emit_primitive_simple_source__sse4_2(struct sna *sna,
+				     const struct sna_composite_op *op,
+				     const struct sna_composite_rectangles *r)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	float xx = op->src.transform->matrix[0][0];
+	float x0 = op->src.transform->matrix[0][2];
+	float yy = op->src.transform->matrix[1][1];
+	float y0 = op->src.transform->matrix[1][2];
+	float sx = op->src.scale[0];
+	float sy = op->src.scale[1];
+	int16_t tx = op->src.offset[0];
+	int16_t ty = op->src.offset[1];
+
+	assert(op->floats_per_rect == 9);
+	assert((sna->render.vertex_used % 3) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 3*3;
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	v[1] = ((r->src.x + r->width + tx) * xx + x0) * sx;
+	v[5] = v[2] = ((r->src.y + r->height + ty) * yy + y0) * sy;
+
+	dst.p.x = r->dst.x;
+	v[3] = dst.f;
+	v[7] = v[4] = ((r->src.x + tx) * xx + x0) * sx;
+
+	dst.p.y = r->dst.y;
+	v[6] = dst.f;
+	v[8] = ((r->src.y + ty) * yy + y0) * sy;
+}
+
+sse4_2 fastcall static void
+emit_boxes_simple_source__sse4_2(const struct sna_composite_op *op,
+				 const BoxRec *box, int nbox,
+				 float *v)
+{
+	float xx = op->src.transform->matrix[0][0];
+	float x0 = op->src.transform->matrix[0][2];
+	float yy = op->src.transform->matrix[1][1];
+	float y0 = op->src.transform->matrix[1][2];
+	float sx = op->src.scale[0];
+	float sy = op->src.scale[1];
+	int16_t tx = op->src.offset[0];
+	int16_t ty = op->src.offset[1];
+
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		dst.p.x = box->x2;
+		dst.p.y = box->y2;
+		v[0] = dst.f;
+		v[1] = ((box->x2 + tx) * xx + x0) * sx;
+		v[5] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
+
+		dst.p.x = box->x1;
+		v[3] = dst.f;
+		v[7] = v[4] = ((box->x1 + tx) * xx + x0) * sx;
+
+		dst.p.y = box->y1;
+		v[6] = dst.f;
+		v[8] = ((box->y1 + ty) * yy + y0) * sy;
+
+		v += 9;
+		box++;
+	} while (--nbox);
+}
+
+/* AVX2 */
+
+avx2 fastcall static void
+emit_primitive_linear__avx2(struct sna *sna,
+			    const struct sna_composite_op *op,
+			    const struct sna_composite_rectangles *r)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	assert(op->floats_per_rect == 6);
+	assert((sna->render.vertex_used % 2) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	dst.p.x = r->dst.x;
+	v[2] = dst.f;
+	dst.p.y = r->dst.y;
+	v[4] = dst.f;
+
+	v[1] = compute_linear(&op->src, r->src.x+r->width, r->src.y+r->height);
+	v[3] = compute_linear(&op->src, r->src.x, r->src.y+r->height);
+	v[5] = compute_linear(&op->src, r->src.x, r->src.y);
+}
+
+avx2 fastcall static void
+emit_boxes_linear__avx2(const struct sna_composite_op *op,
+			const BoxRec *box, int nbox,
+			float *v)
+{
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	do {
+		dst.p.x = box->x2;
+		dst.p.y = box->y2;
+		v[0] = dst.f;
+		dst.p.x = box->x1;
+		v[2] = dst.f;
+		dst.p.y = box->y1;
+		v[4] = dst.f;
+
+		v[1] = compute_linear(&op->src, box->x2, box->y2);
+		v[3] = compute_linear(&op->src, box->x1, box->y2);
+		v[5] = compute_linear(&op->src, box->x1, box->y1);
+
+		v += 6;
+		box++;
+	} while (--nbox);
+}
+
+avx2 fastcall static void
+emit_primitive_identity_source__avx2(struct sna *sna,
+				     const struct sna_composite_op *op,
+				     const struct sna_composite_rectangles *r)
+{
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+	float *v;
+
+	assert(op->floats_per_rect == 9);
+	assert((sna->render.vertex_used % 3) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 9;
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	dst.p.x = r->dst.x;
+	v[3] = dst.f;
+	dst.p.y = r->dst.y;
+	v[6] = dst.f;
+
+	v[7] = v[4] = (r->src.x + op->src.offset[0]) * op->src.scale[0];
+	v[1] = v[4] + r->width * op->src.scale[0];
+
+	v[8] = (r->src.y + op->src.offset[1]) * op->src.scale[1];
+	v[5] = v[2] = v[8] + r->height * op->src.scale[1];
+}
+
+avx2 fastcall static void
+emit_boxes_identity_source__avx2(const struct sna_composite_op *op,
+				 const BoxRec *box, int nbox,
+				 float *v)
+{
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		dst.p.x = box->x2;
+		dst.p.y = box->y2;
+		v[0] = dst.f;
+		dst.p.x = box->x1;
+		v[3] = dst.f;
+		dst.p.y = box->y1;
+		v[6] = dst.f;
+
+		v[7] = v[4] = (box->x1 + op->src.offset[0]) * op->src.scale[0];
+		v[1] = (box->x2 + op->src.offset[0]) * op->src.scale[0];
+
+		v[8] = (box->y1 + op->src.offset[1]) * op->src.scale[1];
+		v[2] = v[5] = (box->y2 + op->src.offset[1]) * op->src.scale[1];
+
+		v += 9;
+		box++;
+	} while (--nbox);
+}
+
+avx2 fastcall static void
+emit_primitive_simple_source__avx2(struct sna *sna,
+				   const struct sna_composite_op *op,
+				   const struct sna_composite_rectangles *r)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	float xx = op->src.transform->matrix[0][0];
+	float x0 = op->src.transform->matrix[0][2];
+	float yy = op->src.transform->matrix[1][1];
+	float y0 = op->src.transform->matrix[1][2];
+	float sx = op->src.scale[0];
+	float sy = op->src.scale[1];
+	int16_t tx = op->src.offset[0];
+	int16_t ty = op->src.offset[1];
+
+	assert(op->floats_per_rect == 9);
+	assert((sna->render.vertex_used % 3) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 3*3;
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	v[1] = ((r->src.x + r->width + tx) * xx + x0) * sx;
+	v[5] = v[2] = ((r->src.y + r->height + ty) * yy + y0) * sy;
+
+	dst.p.x = r->dst.x;
+	v[3] = dst.f;
+	v[7] = v[4] = ((r->src.x + tx) * xx + x0) * sx;
+
+	dst.p.y = r->dst.y;
+	v[6] = dst.f;
+	v[8] = ((r->src.y + ty) * yy + y0) * sy;
+}
+
+avx2 fastcall static void
+emit_boxes_simple_source__avx2(const struct sna_composite_op *op,
+			       const BoxRec *box, int nbox,
+			       float *v)
+{
+	float xx = op->src.transform->matrix[0][0];
+	float x0 = op->src.transform->matrix[0][2];
+	float yy = op->src.transform->matrix[1][1];
+	float y0 = op->src.transform->matrix[1][2];
+	float sx = op->src.scale[0];
+	float sy = op->src.scale[1];
+	int16_t tx = op->src.offset[0];
+	int16_t ty = op->src.offset[1];
+
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		dst.p.x = box->x2;
+		dst.p.y = box->y2;
+		v[0] = dst.f;
+		v[1] = ((box->x2 + tx) * xx + x0) * sx;
+		v[5] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
+
+		dst.p.x = box->x1;
+		v[3] = dst.f;
+		v[7] = v[4] = ((box->x1 + tx) * xx + x0) * sx;
+
+		dst.p.y = box->y1;
+		v[6] = dst.f;
+		v[8] = ((box->y1 + ty) * yy + y0) * sy;
+
+		v += 9;
+		box++;
+	} while (--nbox);
+}
+
+unsigned gen4_choose_composite_emitter(struct sna *sna, struct sna_composite_op *tmp)
+{
+	unsigned vb;
+
+	if (tmp->mask.bo) {
+		if (tmp->mask.transform == NULL) {
+			if (tmp->src.is_solid) {
+				DBG(("%s: solid, identity mask\n", __FUNCTION__));
+				tmp->prim_emit = emit_primitive_identity_mask;
+				tmp->emit_boxes = emit_boxes_identity_mask;
+				tmp->floats_per_vertex = 4;
+				vb = 1 | 2 << 2;
+			} else if (tmp->src.is_linear) {
+				DBG(("%s: linear, identity mask\n", __FUNCTION__));
+				tmp->prim_emit = emit_primitive_linear_identity_mask;
+				tmp->emit_boxes = emit_boxes_linear_identity_mask;
+				tmp->floats_per_vertex = 4;
+				vb = 1 | 2 << 2;
+			} else if (tmp->src.transform == NULL) {
+				DBG(("%s: identity source, identity mask\n", __FUNCTION__));
+				tmp->prim_emit = emit_primitive_identity_source_mask;
+				tmp->floats_per_vertex = 5;
+				vb = 2 << 2 | 2;
+			} else if (tmp->src.is_affine) {
+				tmp->src.scale[0] /= tmp->src.transform->matrix[2][2];
+				tmp->src.scale[1] /= tmp->src.transform->matrix[2][2];
+				if (!sna_affine_transform_is_rotation(tmp->src.transform)) {
+					DBG(("%s: simple src, identity mask\n", __FUNCTION__));
+					tmp->prim_emit = emit_primitive_simple_source_identity;
+				} else {
+					DBG(("%s: affine src, identity mask\n", __FUNCTION__));
+					tmp->prim_emit = emit_primitive_affine_source_identity;
+				}
+				tmp->floats_per_vertex = 5;
+				vb = 2 << 2 | 2;
+			} else {
+				DBG(("%s: projective source, identity mask\n", __FUNCTION__));
+				tmp->prim_emit = emit_primitive_mask;
+				tmp->floats_per_vertex = 6;
+				vb = 2 << 2 | 3;
+			}
+		} else {
+			tmp->prim_emit = emit_primitive_mask;
+			tmp->floats_per_vertex = 1;
+			vb = 0;
+			if (tmp->mask.is_solid) {
+				tmp->floats_per_vertex += 1;
+				vb |= 1 << 2;
+			} else if (tmp->mask.is_affine) {
+				tmp->floats_per_vertex += 2;
+				vb |= 2 << 2;
+			}else {
+				tmp->floats_per_vertex += 3;
+				vb |= 3 << 2;
+			}
+			if (tmp->src.is_solid) {
+				tmp->floats_per_vertex += 1;
+				vb |= 1;
+			} else if (tmp->src.is_affine) {
+				tmp->floats_per_vertex += 2;
+				vb |= 2 ;
+			}else {
+				tmp->floats_per_vertex += 3;
+				vb |= 3;
+			}
+			DBG(("%s: general mask: floats-per-vertex=%d, vb=%x\n",
+			     __FUNCTION__,tmp->floats_per_vertex, vb));
+		}
+	} else {
+		if (tmp->src.is_solid) {
+			DBG(("%s: solid, no mask\n", __FUNCTION__));
+			tmp->prim_emit = emit_primitive_solid;
+			tmp->emit_boxes = emit_boxes_solid;
+			if (tmp->src.is_opaque && tmp->op == PictOpOver)
+				tmp->op = PictOpSrc;
+			tmp->floats_per_vertex = 2;
+			vb = 1;
+		} else if (tmp->src.is_linear) {
+			DBG(("%s: linear, no mask\n", __FUNCTION__));
+			if (sna->cpu_features & AVX2) {
+				tmp->prim_emit = emit_primitive_linear__avx2;
+				tmp->emit_boxes = emit_boxes_linear__avx2;
+			} else  if (sna->cpu_features & SSE4_2) {
+				tmp->prim_emit = emit_primitive_linear__sse4_2;
+				tmp->emit_boxes = emit_boxes_linear__sse4_2;
+			} else {
+				tmp->prim_emit = emit_primitive_linear;
+				tmp->emit_boxes = emit_boxes_linear;
+			}
+			tmp->floats_per_vertex = 2;
+			vb = 1;
+		} else if (tmp->src.transform == NULL) {
+			DBG(("%s: identity src, no mask\n", __FUNCTION__));
+			if (sna->cpu_features & AVX2) {
+				tmp->prim_emit = emit_primitive_identity_source__avx2;
+				tmp->emit_boxes = emit_boxes_identity_source__avx2;
+			} else if (sna->cpu_features & SSE4_2) {
+				tmp->prim_emit = emit_primitive_identity_source__sse4_2;
+				tmp->emit_boxes = emit_boxes_identity_source__sse4_2;
+			} else {
+				tmp->prim_emit = emit_primitive_identity_source;
 				tmp->emit_boxes = emit_boxes_identity_source;
 			}
 			tmp->floats_per_vertex = 3;
@@ -1514,69 +1517,380 @@ unsigned gen4_choose_composite_emitter(struct sna *sna, struct sna_composite_op
 	}
 	tmp->floats_per_rect = 3 * tmp->floats_per_vertex;
 
-	return vb;
+	return vb;
+}
+
+inline static void
+emit_span_vertex(struct sna *sna,
+		  const struct sna_composite_spans_op *op,
+		  int16_t x, int16_t y)
+{
+	OUT_VERTEX(x, y);
+	emit_texcoord(sna, &op->base.src, x, y);
+}
+
+sse2 fastcall static void
+emit_composite_spans_primitive(struct sna *sna,
+			       const struct sna_composite_spans_op *op,
+			       const BoxRec *box,
+			       float opacity)
+{
+	emit_span_vertex(sna, op, box->x2, box->y2);
+	OUT_VERTEX_F(opacity);
+
+	emit_span_vertex(sna, op, box->x1, box->y2);
+	OUT_VERTEX_F(opacity);
+
+	emit_span_vertex(sna, op, box->x1, box->y1);
+	OUT_VERTEX_F(opacity);
+}
+
+sse2 fastcall static void
+emit_span_solid(struct sna *sna,
+		 const struct sna_composite_spans_op *op,
+		 const BoxRec *box,
+		 float opacity)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	assert(op->base.floats_per_rect == 9);
+	assert((sna->render.vertex_used % 3) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 3*3;
+
+	dst.p.x = box->x2;
+	dst.p.y = box->y2;
+	v[0] = dst.f;
+
+	dst.p.x = box->x1;
+	v[3] = dst.f;
+
+	dst.p.y = box->y1;
+	v[6] = dst.f;
+
+	v[7] = v[4] = v[1] = .5;
+	v[8] = v[5] = v[2] = opacity;
+}
+
+sse2 fastcall static void
+emit_span_boxes_solid(const struct sna_composite_spans_op *op,
+		      const struct sna_opacity_box *b,
+		      int nbox, float *v)
+{
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		dst.p.x = b->box.x2;
+		dst.p.y = b->box.y2;
+		v[0] = dst.f;
+
+		dst.p.x = b->box.x1;
+		v[3] = dst.f;
+
+		dst.p.y = b->box.y1;
+		v[6] = dst.f;
+
+		v[7] = v[4] = v[1] = .5;
+		v[8] = v[5] = v[2] = b->alpha;
+
+		v += 9;
+		b++;
+	} while (--nbox);
 }
 
-inline static void
-emit_span_vertex(struct sna *sna,
+sse2 fastcall static void
+emit_span_identity(struct sna *sna,
+		    const struct sna_composite_spans_op *op,
+		    const BoxRec *box,
+		    float opacity)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	float sx = op->base.src.scale[0];
+	float sy = op->base.src.scale[1];
+	int16_t tx = op->base.src.offset[0];
+	int16_t ty = op->base.src.offset[1];
+
+	assert(op->base.floats_per_rect == 12);
+	assert((sna->render.vertex_used % 4) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 3*4;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
+
+	dst.p.x = box->x2;
+	dst.p.y = box->y2;
+	v[0] = dst.f;
+	v[1] = (box->x2 + tx) * sx;
+	v[6] = v[2] = (box->y2 + ty) * sy;
+
+	dst.p.x = box->x1;
+	v[4] = dst.f;
+	v[9] = v[5] = (box->x1 + tx) * sx;
+
+	dst.p.y = box->y1;
+	v[8] = dst.f;
+	v[10] = (box->y1 + ty) * sy;
+
+	v[11] = v[7] = v[3] = opacity;
+}
+
+sse2 fastcall static void
+emit_span_boxes_identity(const struct sna_composite_spans_op *op,
+			 const struct sna_opacity_box *b, int nbox,
+			 float *v)
+{
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		float sx = op->base.src.scale[0];
+		float sy = op->base.src.scale[1];
+		int16_t tx = op->base.src.offset[0];
+		int16_t ty = op->base.src.offset[1];
+
+		dst.p.x = b->box.x2;
+		dst.p.y = b->box.y2;
+		v[0] = dst.f;
+		v[1] = (b->box.x2 + tx) * sx;
+		v[6] = v[2] = (b->box.y2 + ty) * sy;
+
+		dst.p.x = b->box.x1;
+		v[4] = dst.f;
+		v[9] = v[5] = (b->box.x1 + tx) * sx;
+
+		dst.p.y = b->box.y1;
+		v[8] = dst.f;
+		v[10] = (b->box.y1 + ty) * sy;
+
+		v[11] = v[7] = v[3] = b->alpha;
+
+		v += 12;
+		b++;
+	} while (--nbox);
+}
+
+sse2 fastcall static void
+emit_span_simple(struct sna *sna,
+		 const struct sna_composite_spans_op *op,
+		 const BoxRec *box,
+		 float opacity)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	float xx = op->base.src.transform->matrix[0][0];
+	float x0 = op->base.src.transform->matrix[0][2];
+	float yy = op->base.src.transform->matrix[1][1];
+	float y0 = op->base.src.transform->matrix[1][2];
+	float sx = op->base.src.scale[0];
+	float sy = op->base.src.scale[1];
+	int16_t tx = op->base.src.offset[0];
+	int16_t ty = op->base.src.offset[1];
+
+	assert(op->base.floats_per_rect == 12);
+	assert((sna->render.vertex_used % 4) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 3*4;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
+
+	dst.p.x = box->x2;
+	dst.p.y = box->y2;
+	v[0] = dst.f;
+	v[1] = ((box->x2 + tx) * xx + x0) * sx;
+	v[6] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
+
+	dst.p.x = box->x1;
+	v[4] = dst.f;
+	v[9] = v[5] = ((box->x1 + tx) * xx + x0) * sx;
+
+	dst.p.y = box->y1;
+	v[8] = dst.f;
+	v[10] = ((box->y1 + ty) * yy + y0) * sy;
+
+	v[11] = v[7] = v[3] = opacity;
+}
+
+sse2 fastcall static void
+emit_span_boxes_simple(const struct sna_composite_spans_op *op,
+		       const struct sna_opacity_box *b, int nbox,
+		       float *v)
+{
+	float xx = op->base.src.transform->matrix[0][0];
+	float x0 = op->base.src.transform->matrix[0][2];
+	float yy = op->base.src.transform->matrix[1][1];
+	float y0 = op->base.src.transform->matrix[1][2];
+	float sx = op->base.src.scale[0];
+	float sy = op->base.src.scale[1];
+	int16_t tx = op->base.src.offset[0];
+	int16_t ty = op->base.src.offset[1];
+
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		dst.p.x = b->box.x2;
+		dst.p.y = b->box.y2;
+		v[0] = dst.f;
+		v[1] = ((b->box.x2 + tx) * xx + x0) * sx;
+		v[6] = v[2] = ((b->box.y2 + ty) * yy + y0) * sy;
+
+		dst.p.x = b->box.x1;
+		v[4] = dst.f;
+		v[9] = v[5] = ((b->box.x1 + tx) * xx + x0) * sx;
+
+		dst.p.y = b->box.y1;
+		v[8] = dst.f;
+		v[10] = ((b->box.y1 + ty) * yy + y0) * sy;
+
+		v[11] = v[7] = v[3] = b->alpha;
+
+		v += 12;
+		b++;
+	} while (--nbox);
+}
+
+sse2 fastcall static void
+emit_span_affine(struct sna *sna,
 		  const struct sna_composite_spans_op *op,
-		  int16_t x, int16_t y)
+		  const BoxRec *box,
+		  float opacity)
 {
-	OUT_VERTEX(x, y);
-	emit_texcoord(sna, &op->base.src, x, y);
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+	float *v;
+
+	assert(op->base.floats_per_rect == 12);
+	assert((sna->render.vertex_used % 4) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 12;
+
+	dst.p.x = box->x2;
+	dst.p.y = box->y2;
+	v[0] = dst.f;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x2,
+				    op->base.src.offset[1] + box->y2,
+				    op->base.src.transform,
+				    op->base.src.scale,
+				    &v[1], &v[2]);
+
+	dst.p.x = box->x1;
+	v[4] = dst.f;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
+				    op->base.src.offset[1] + box->y2,
+				    op->base.src.transform,
+				    op->base.src.scale,
+				    &v[5], &v[6]);
+
+	dst.p.y = box->y1;
+	v[8] = dst.f;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
+				    op->base.src.offset[1] + box->y1,
+				    op->base.src.transform,
+				    op->base.src.scale,
+				    &v[9], &v[10]);
+
+	v[11] = v[7] = v[3] = opacity;
 }
 
 sse2 fastcall static void
-emit_composite_spans_primitive(struct sna *sna,
-			       const struct sna_composite_spans_op *op,
-			       const BoxRec *box,
-			       float opacity)
+emit_span_boxes_affine(const struct sna_composite_spans_op *op,
+		       const struct sna_opacity_box *b, int nbox,
+		       float *v)
 {
-	emit_span_vertex(sna, op, box->x2, box->y2);
-	OUT_VERTEX_F(opacity);
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		dst.p.x = b->box.x2;
+		dst.p.y = b->box.y2;
+		v[0] = dst.f;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x2,
+					    op->base.src.offset[1] + b->box.y2,
+					    op->base.src.transform,
+					    op->base.src.scale,
+					    &v[1], &v[2]);
+
+		dst.p.x = b->box.x1;
+		v[4] = dst.f;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
+					    op->base.src.offset[1] + b->box.y2,
+					    op->base.src.transform,
+					    op->base.src.scale,
+					    &v[5], &v[6]);
+
+		dst.p.y = b->box.y1;
+		v[8] = dst.f;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
+					    op->base.src.offset[1] + b->box.y1,
+					    op->base.src.transform,
+					    op->base.src.scale,
+					    &v[9], &v[10]);
 
-	emit_span_vertex(sna, op, box->x1, box->y2);
-	OUT_VERTEX_F(opacity);
+		v[11] = v[7] = v[3] = b->alpha;
 
-	emit_span_vertex(sna, op, box->x1, box->y1);
-	OUT_VERTEX_F(opacity);
+		v += 12;
+		b++;
+	} while (--nbox);
 }
 
 sse2 fastcall static void
-emit_span_solid(struct sna *sna,
+emit_span_linear(struct sna *sna,
 		 const struct sna_composite_spans_op *op,
 		 const BoxRec *box,
 		 float opacity)
 {
-	float *v;
 	union {
 		struct sna_coordinate p;
 		float f;
 	} dst;
+	float *v;
 
 	assert(op->base.floats_per_rect == 9);
 	assert((sna->render.vertex_used % 3) == 0);
 	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*3;
+	sna->render.vertex_used += 9;
 
 	dst.p.x = box->x2;
 	dst.p.y = box->y2;
 	v[0] = dst.f;
-
 	dst.p.x = box->x1;
 	v[3] = dst.f;
-
 	dst.p.y = box->y1;
 	v[6] = dst.f;
 
-	v[7] = v[4] = v[1] = .5;
+	v[1] = compute_linear(&op->base.src, box->x2, box->y2);
+	v[4] = compute_linear(&op->base.src, box->x1, box->y2);
+	v[7] = compute_linear(&op->base.src, box->x1, box->y1);
+
 	v[8] = v[5] = v[2] = opacity;
 }
 
 sse2 fastcall static void
-emit_span_boxes_solid(const struct sna_composite_spans_op *op,
-		      const struct sna_opacity_box *b,
-		      int nbox, float *v)
+emit_span_boxes_linear(const struct sna_composite_spans_op *op,
+		       const struct sna_opacity_box *b, int nbox,
+		       float *v)
 {
 	do {
 		union {
@@ -1587,14 +1901,15 @@ emit_span_boxes_solid(const struct sna_composite_spans_op *op,
 		dst.p.x = b->box.x2;
 		dst.p.y = b->box.y2;
 		v[0] = dst.f;
-
 		dst.p.x = b->box.x1;
 		v[3] = dst.f;
-
 		dst.p.y = b->box.y1;
 		v[6] = dst.f;
 
-		v[7] = v[4] = v[1] = .5;
+		v[1] = compute_linear(&op->base.src, b->box.x2, b->box.y2);
+		v[4] = compute_linear(&op->base.src, b->box.x1, b->box.y2);
+		v[7] = compute_linear(&op->base.src, b->box.x1, b->box.y1);
+
 		v[8] = v[5] = v[2] = b->alpha;
 
 		v += 9;
@@ -1602,11 +1917,11 @@ emit_span_boxes_solid(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-sse2 fastcall static void
-emit_span_identity(struct sna *sna,
-		    const struct sna_composite_spans_op *op,
-		    const BoxRec *box,
-		    float opacity)
+avx2 fastcall static void
+emit_span_identity__avx2(struct sna *sna,
+			 const struct sna_composite_spans_op *op,
+			 const BoxRec *box,
+			 float opacity)
 {
 	float *v;
 	union {
@@ -1642,6 +1957,8 @@ emit_span_identity(struct sna *sna,
 	v[11] = v[7] = v[3] = opacity;
 }
 
+/* SSE4_2 */
+
 sse4_2 fastcall static void
 emit_span_identity__sse4_2(struct sna *sna,
 			   const struct sna_composite_spans_op *op,
@@ -1682,83 +1999,6 @@ emit_span_identity__sse4_2(struct sna *sna,
 	v[11] = v[7] = v[3] = opacity;
 }
 
-avx2 fastcall static void
-emit_span_identity__avx2(struct sna *sna,
-			 const struct sna_composite_spans_op *op,
-			 const BoxRec *box,
-			 float opacity)
-{
-	float *v;
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	float sx = op->base.src.scale[0];
-	float sy = op->base.src.scale[1];
-	int16_t tx = op->base.src.offset[0];
-	int16_t ty = op->base.src.offset[1];
-
-	assert(op->base.floats_per_rect == 12);
-	assert((sna->render.vertex_used % 4) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*4;
-	assert(sna->render.vertex_used <= sna->render.vertex_size);
-
-	dst.p.x = box->x2;
-	dst.p.y = box->y2;
-	v[0] = dst.f;
-	v[1] = (box->x2 + tx) * sx;
-	v[6] = v[2] = (box->y2 + ty) * sy;
-
-	dst.p.x = box->x1;
-	v[4] = dst.f;
-	v[9] = v[5] = (box->x1 + tx) * sx;
-
-	dst.p.y = box->y1;
-	v[8] = dst.f;
-	v[10] = (box->y1 + ty) * sy;
-
-	v[11] = v[7] = v[3] = opacity;
-}
-
-sse2 fastcall static void
-emit_span_boxes_identity(const struct sna_composite_spans_op *op,
-			 const struct sna_opacity_box *b, int nbox,
-			 float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		float sx = op->base.src.scale[0];
-		float sy = op->base.src.scale[1];
-		int16_t tx = op->base.src.offset[0];
-		int16_t ty = op->base.src.offset[1];
-
-		dst.p.x = b->box.x2;
-		dst.p.y = b->box.y2;
-		v[0] = dst.f;
-		v[1] = (b->box.x2 + tx) * sx;
-		v[6] = v[2] = (b->box.y2 + ty) * sy;
-
-		dst.p.x = b->box.x1;
-		v[4] = dst.f;
-		v[9] = v[5] = (b->box.x1 + tx) * sx;
-
-		dst.p.y = b->box.y1;
-		v[8] = dst.f;
-		v[10] = (b->box.y1 + ty) * sy;
-
-		v[11] = v[7] = v[3] = b->alpha;
-
-		v += 12;
-		b++;
-	} while (--nbox);
-}
-
 sse4_2 fastcall static void
 emit_span_boxes_identity__sse4_2(const struct sna_composite_spans_op *op,
 				 const struct sna_opacity_box *b, int nbox,
@@ -1789,143 +2029,18 @@ emit_span_boxes_identity__sse4_2(const struct sna_composite_spans_op *op,
 		v[8] = dst.f;
 		v[10] = (b->box.y1 + ty) * sy;
 
-		v[11] = v[7] = v[3] = b->alpha;
-
-		v += 12;
-		b++;
-	} while (--nbox);
-}
-
-avx2 fastcall static void
-emit_span_boxes_identity__avx2(const struct sna_composite_spans_op *op,
-			       const struct sna_opacity_box *b, int nbox,
-			       float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		float sx = op->base.src.scale[0];
-		float sy = op->base.src.scale[1];
-		int16_t tx = op->base.src.offset[0];
-		int16_t ty = op->base.src.offset[1];
-
-		dst.p.x = b->box.x2;
-		dst.p.y = b->box.y2;
-		v[0] = dst.f;
-		v[1] = (b->box.x2 + tx) * sx;
-		v[6] = v[2] = (b->box.y2 + ty) * sy;
-
-		dst.p.x = b->box.x1;
-		v[4] = dst.f;
-		v[9] = v[5] = (b->box.x1 + tx) * sx;
-
-		dst.p.y = b->box.y1;
-		v[8] = dst.f;
-		v[10] = (b->box.y1 + ty) * sy;
-
-		v[11] = v[7] = v[3] = b->alpha;
-
-		v += 12;
-		b++;
-	} while (--nbox);
-}
-
-sse2 fastcall static void
-emit_span_simple(struct sna *sna,
-		 const struct sna_composite_spans_op *op,
-		 const BoxRec *box,
-		 float opacity)
-{
-	float *v;
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	float xx = op->base.src.transform->matrix[0][0];
-	float x0 = op->base.src.transform->matrix[0][2];
-	float yy = op->base.src.transform->matrix[1][1];
-	float y0 = op->base.src.transform->matrix[1][2];
-	float sx = op->base.src.scale[0];
-	float sy = op->base.src.scale[1];
-	int16_t tx = op->base.src.offset[0];
-	int16_t ty = op->base.src.offset[1];
-
-	assert(op->base.floats_per_rect == 12);
-	assert((sna->render.vertex_used % 4) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*4;
-	assert(sna->render.vertex_used <= sna->render.vertex_size);
-
-	dst.p.x = box->x2;
-	dst.p.y = box->y2;
-	v[0] = dst.f;
-	v[1] = ((box->x2 + tx) * xx + x0) * sx;
-	v[6] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
-
-	dst.p.x = box->x1;
-	v[4] = dst.f;
-	v[9] = v[5] = ((box->x1 + tx) * xx + x0) * sx;
-
-	dst.p.y = box->y1;
-	v[8] = dst.f;
-	v[10] = ((box->y1 + ty) * yy + y0) * sy;
-
-	v[11] = v[7] = v[3] = opacity;
-}
-
-sse4_2 fastcall static void
-emit_span_simple__sse4_2(struct sna *sna,
-			 const struct sna_composite_spans_op *op,
-			 const BoxRec *box,
-			 float opacity)
-{
-	float *v;
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-
-	float xx = op->base.src.transform->matrix[0][0];
-	float x0 = op->base.src.transform->matrix[0][2];
-	float yy = op->base.src.transform->matrix[1][1];
-	float y0 = op->base.src.transform->matrix[1][2];
-	float sx = op->base.src.scale[0];
-	float sy = op->base.src.scale[1];
-	int16_t tx = op->base.src.offset[0];
-	int16_t ty = op->base.src.offset[1];
-
-	assert(op->base.floats_per_rect == 12);
-	assert((sna->render.vertex_used % 4) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*4;
-	assert(sna->render.vertex_used <= sna->render.vertex_size);
-
-	dst.p.x = box->x2;
-	dst.p.y = box->y2;
-	v[0] = dst.f;
-	v[1] = ((box->x2 + tx) * xx + x0) * sx;
-	v[6] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
-
-	dst.p.x = box->x1;
-	v[4] = dst.f;
-	v[9] = v[5] = ((box->x1 + tx) * xx + x0) * sx;
-
-	dst.p.y = box->y1;
-	v[8] = dst.f;
-	v[10] = ((box->y1 + ty) * yy + y0) * sy;
+		v[11] = v[7] = v[3] = b->alpha;
 
-	v[11] = v[7] = v[3] = opacity;
+		v += 12;
+		b++;
+	} while (--nbox);
 }
 
-avx2 fastcall static void
-emit_span_simple__avx2(struct sna *sna,
-		       const struct sna_composite_spans_op *op,
-		       const BoxRec *box,
-		       float opacity)
+sse4_2 fastcall static void
+emit_span_simple__sse4_2(struct sna *sna,
+			 const struct sna_composite_spans_op *op,
+			 const BoxRec *box,
+			 float opacity)
 {
 	float *v;
 	union {
@@ -1965,10 +2080,10 @@ emit_span_simple__avx2(struct sna *sna,
 	v[11] = v[7] = v[3] = opacity;
 }
 
-sse2 fastcall static void
-emit_span_boxes_simple(const struct sna_composite_spans_op *op,
-		       const struct sna_opacity_box *b, int nbox,
-		       float *v)
+sse4_2 fastcall static void
+emit_span_boxes_simple__sse4_2(const struct sna_composite_spans_op *op,
+			       const struct sna_opacity_box *b, int nbox,
+			       float *v)
 {
 	float xx = op->base.src.transform->matrix[0][0];
 	float x0 = op->base.src.transform->matrix[0][2];
@@ -2007,19 +2122,55 @@ emit_span_boxes_simple(const struct sna_composite_spans_op *op,
 }
 
 sse4_2 fastcall static void
-emit_span_boxes_simple__sse4_2(const struct sna_composite_spans_op *op,
+emit_span_affine__sse4_2(struct sna *sna,
+			 const struct sna_composite_spans_op *op,
+			 const BoxRec *box,
+			 float opacity)
+{
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+	float *v;
+
+	assert(op->base.floats_per_rect == 12);
+	assert((sna->render.vertex_used % 4) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 12;
+
+	dst.p.x = box->x2;
+	dst.p.y = box->y2;
+	v[0] = dst.f;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x2,
+				    op->base.src.offset[1] + box->y2,
+				    op->base.src.transform,
+				    op->base.src.scale,
+				    &v[1], &v[2]);
+
+	dst.p.x = box->x1;
+	v[4] = dst.f;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
+				    op->base.src.offset[1] + box->y2,
+				    op->base.src.transform,
+				    op->base.src.scale,
+				    &v[5], &v[6]);
+
+	dst.p.y = box->y1;
+	v[8] = dst.f;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
+				    op->base.src.offset[1] + box->y1,
+				    op->base.src.transform,
+				    op->base.src.scale,
+				    &v[9], &v[10]);
+
+	v[11] = v[7] = v[3] = opacity;
+}
+
+sse4_2 fastcall static void
+emit_span_boxes_affine__sse4_2(const struct sna_composite_spans_op *op,
 			       const struct sna_opacity_box *b, int nbox,
 			       float *v)
 {
-	float xx = op->base.src.transform->matrix[0][0];
-	float x0 = op->base.src.transform->matrix[0][2];
-	float yy = op->base.src.transform->matrix[1][1];
-	float y0 = op->base.src.transform->matrix[1][2];
-	float sx = op->base.src.scale[0];
-	float sy = op->base.src.scale[1];
-	int16_t tx = op->base.src.offset[0];
-	int16_t ty = op->base.src.offset[1];
-
 	do {
 		union {
 			struct sna_coordinate p;
@@ -2029,16 +2180,27 @@ emit_span_boxes_simple__sse4_2(const struct sna_composite_spans_op *op,
 		dst.p.x = b->box.x2;
 		dst.p.y = b->box.y2;
 		v[0] = dst.f;
-		v[1] = ((b->box.x2 + tx) * xx + x0) * sx;
-		v[6] = v[2] = ((b->box.y2 + ty) * yy + y0) * sy;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x2,
+					    op->base.src.offset[1] + b->box.y2,
+					    op->base.src.transform,
+					    op->base.src.scale,
+					    &v[1], &v[2]);
 
 		dst.p.x = b->box.x1;
 		v[4] = dst.f;
-		v[9] = v[5] = ((b->box.x1 + tx) * xx + x0) * sx;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
+					    op->base.src.offset[1] + b->box.y2,
+					    op->base.src.transform,
+					    op->base.src.scale,
+					    &v[5], &v[6]);
 
 		dst.p.y = b->box.y1;
 		v[8] = dst.f;
-		v[10] = ((b->box.y1 + ty) * yy + y0) * sy;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
+					    op->base.src.offset[1] + b->box.y1,
+					    op->base.src.transform,
+					    op->base.src.scale,
+					    &v[9], &v[10]);
 
 		v[11] = v[7] = v[3] = b->alpha;
 
@@ -2047,20 +2209,43 @@ emit_span_boxes_simple__sse4_2(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-avx2 fastcall static void
-emit_span_boxes_simple__avx2(const struct sna_composite_spans_op *op,
-			     const struct sna_opacity_box *b, int nbox,
-			     float *v)
+sse4_2 fastcall static void
+emit_span_linear__sse4_2(struct sna *sna,
+			 const struct sna_composite_spans_op *op,
+			 const BoxRec *box,
+			 float opacity)
 {
-	float xx = op->base.src.transform->matrix[0][0];
-	float x0 = op->base.src.transform->matrix[0][2];
-	float yy = op->base.src.transform->matrix[1][1];
-	float y0 = op->base.src.transform->matrix[1][2];
-	float sx = op->base.src.scale[0];
-	float sy = op->base.src.scale[1];
-	int16_t tx = op->base.src.offset[0];
-	int16_t ty = op->base.src.offset[1];
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+	float *v;
+
+	assert(op->base.floats_per_rect == 9);
+	assert((sna->render.vertex_used % 3) == 0);
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 9;
+
+	dst.p.x = box->x2;
+	dst.p.y = box->y2;
+	v[0] = dst.f;
+	dst.p.x = box->x1;
+	v[3] = dst.f;
+	dst.p.y = box->y1;
+	v[6] = dst.f;
+
+	v[1] = compute_linear(&op->base.src, box->x2, box->y2);
+	v[4] = compute_linear(&op->base.src, box->x1, box->y2);
+	v[7] = compute_linear(&op->base.src, box->x1, box->y1);
+
+	v[8] = v[5] = v[2] = opacity;
+}
 
+sse4_2 fastcall static void
+emit_span_boxes_linear__sse4_2(const struct sna_composite_spans_op *op,
+			       const struct sna_opacity_box *b, int nbox,
+			       float *v)
+{
 	do {
 		union {
 			struct sna_coordinate p;
@@ -2070,16 +2255,53 @@ emit_span_boxes_simple__avx2(const struct sna_composite_spans_op *op,
 		dst.p.x = b->box.x2;
 		dst.p.y = b->box.y2;
 		v[0] = dst.f;
-		v[1] = ((b->box.x2 + tx) * xx + x0) * sx;
-		v[6] = v[2] = ((b->box.y2 + ty) * yy + y0) * sy;
+		dst.p.x = b->box.x1;
+		v[3] = dst.f;
+		dst.p.y = b->box.y1;
+		v[6] = dst.f;
+
+		v[1] = compute_linear(&op->base.src, b->box.x2, b->box.y2);
+		v[4] = compute_linear(&op->base.src, b->box.x1, b->box.y2);
+		v[7] = compute_linear(&op->base.src, b->box.x1, b->box.y1);
+
+		v[8] = v[5] = v[2] = b->alpha;
+
+		v += 9;
+		b++;
+	} while (--nbox);
+}
+
+/* AVX2 */
+
+avx2 fastcall static void
+emit_span_boxes_identity__avx2(const struct sna_composite_spans_op *op,
+			       const struct sna_opacity_box *b, int nbox,
+			       float *v)
+{
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
+
+		float sx = op->base.src.scale[0];
+		float sy = op->base.src.scale[1];
+		int16_t tx = op->base.src.offset[0];
+		int16_t ty = op->base.src.offset[1];
+
+		dst.p.x = b->box.x2;
+		dst.p.y = b->box.y2;
+		v[0] = dst.f;
+		v[1] = (b->box.x2 + tx) * sx;
+		v[6] = v[2] = (b->box.y2 + ty) * sy;
 
 		dst.p.x = b->box.x1;
 		v[4] = dst.f;
-		v[9] = v[5] = ((b->box.x1 + tx) * xx + x0) * sx;
+		v[9] = v[5] = (b->box.x1 + tx) * sx;
 
 		dst.p.y = b->box.y1;
 		v[8] = dst.f;
-		v[10] = ((b->box.y1 + ty) * yy + y0) * sy;
+		v[10] = (b->box.y1 + ty) * sy;
 
 		v[11] = v[7] = v[3] = b->alpha;
 
@@ -2088,94 +2310,89 @@ emit_span_boxes_simple__avx2(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-sse2 fastcall static void
-emit_span_affine(struct sna *sna,
-		  const struct sna_composite_spans_op *op,
-		  const BoxRec *box,
-		  float opacity)
+avx2 fastcall static void
+emit_span_simple__avx2(struct sna *sna,
+		       const struct sna_composite_spans_op *op,
+		       const BoxRec *box,
+		       float opacity)
 {
+	float *v;
 	union {
 		struct sna_coordinate p;
 		float f;
 	} dst;
-	float *v;
+
+	float xx = op->base.src.transform->matrix[0][0];
+	float x0 = op->base.src.transform->matrix[0][2];
+	float yy = op->base.src.transform->matrix[1][1];
+	float y0 = op->base.src.transform->matrix[1][2];
+	float sx = op->base.src.scale[0];
+	float sy = op->base.src.scale[1];
+	int16_t tx = op->base.src.offset[0];
+	int16_t ty = op->base.src.offset[1];
 
 	assert(op->base.floats_per_rect == 12);
 	assert((sna->render.vertex_used % 4) == 0);
 	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 12;
+	sna->render.vertex_used += 3*4;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
 	dst.p.x = box->x2;
 	dst.p.y = box->y2;
 	v[0] = dst.f;
-	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x2,
-				    op->base.src.offset[1] + box->y2,
-				    op->base.src.transform,
-				    op->base.src.scale,
-				    &v[1], &v[2]);
+	v[1] = ((box->x2 + tx) * xx + x0) * sx;
+	v[6] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
 
 	dst.p.x = box->x1;
 	v[4] = dst.f;
-	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
-				    op->base.src.offset[1] + box->y2,
-				    op->base.src.transform,
-				    op->base.src.scale,
-				    &v[5], &v[6]);
+	v[9] = v[5] = ((box->x1 + tx) * xx + x0) * sx;
 
 	dst.p.y = box->y1;
 	v[8] = dst.f;
-	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
-				    op->base.src.offset[1] + box->y1,
-				    op->base.src.transform,
-				    op->base.src.scale,
-				    &v[9], &v[10]);
+	v[10] = ((box->y1 + ty) * yy + y0) * sy;
 
 	v[11] = v[7] = v[3] = opacity;
 }
 
-sse4_2 fastcall static void
-emit_span_affine__sse4_2(struct sna *sna,
-			 const struct sna_composite_spans_op *op,
-			 const BoxRec *box,
-			 float opacity)
+avx2 fastcall static void
+emit_span_boxes_simple__avx2(const struct sna_composite_spans_op *op,
+			     const struct sna_opacity_box *b, int nbox,
+			     float *v)
 {
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-	float *v;
+	float xx = op->base.src.transform->matrix[0][0];
+	float x0 = op->base.src.transform->matrix[0][2];
+	float yy = op->base.src.transform->matrix[1][1];
+	float y0 = op->base.src.transform->matrix[1][2];
+	float sx = op->base.src.scale[0];
+	float sy = op->base.src.scale[1];
+	int16_t tx = op->base.src.offset[0];
+	int16_t ty = op->base.src.offset[1];
+
+	do {
+		union {
+			struct sna_coordinate p;
+			float f;
+		} dst;
 
-	assert(op->base.floats_per_rect == 12);
-	assert((sna->render.vertex_used % 4) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 12;
+		dst.p.x = b->box.x2;
+		dst.p.y = b->box.y2;
+		v[0] = dst.f;
+		v[1] = ((b->box.x2 + tx) * xx + x0) * sx;
+		v[6] = v[2] = ((b->box.y2 + ty) * yy + y0) * sy;
 
-	dst.p.x = box->x2;
-	dst.p.y = box->y2;
-	v[0] = dst.f;
-	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x2,
-				    op->base.src.offset[1] + box->y2,
-				    op->base.src.transform,
-				    op->base.src.scale,
-				    &v[1], &v[2]);
+		dst.p.x = b->box.x1;
+		v[4] = dst.f;
+		v[9] = v[5] = ((b->box.x1 + tx) * xx + x0) * sx;
 
-	dst.p.x = box->x1;
-	v[4] = dst.f;
-	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
-				    op->base.src.offset[1] + box->y2,
-				    op->base.src.transform,
-				    op->base.src.scale,
-				    &v[5], &v[6]);
+		dst.p.y = b->box.y1;
+		v[8] = dst.f;
+		v[10] = ((b->box.y1 + ty) * yy + y0) * sy;
 
-	dst.p.y = box->y1;
-	v[8] = dst.f;
-	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
-				    op->base.src.offset[1] + box->y1,
-				    op->base.src.transform,
-				    op->base.src.scale,
-				    &v[9], &v[10]);
+		v[11] = v[7] = v[3] = b->alpha;
 
-	v[11] = v[7] = v[3] = opacity;
+		v += 12;
+		b++;
+	} while (--nbox);
 }
 
 avx2 fastcall static void
@@ -2223,92 +2440,6 @@ emit_span_affine__avx2(struct sna *sna,
 	v[11] = v[7] = v[3] = opacity;
 }
 
-sse2 fastcall static void
-emit_span_boxes_affine(const struct sna_composite_spans_op *op,
-		       const struct sna_opacity_box *b, int nbox,
-		       float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = b->box.x2;
-		dst.p.y = b->box.y2;
-		v[0] = dst.f;
-		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x2,
-					    op->base.src.offset[1] + b->box.y2,
-					    op->base.src.transform,
-					    op->base.src.scale,
-					    &v[1], &v[2]);
-
-		dst.p.x = b->box.x1;
-		v[4] = dst.f;
-		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
-					    op->base.src.offset[1] + b->box.y2,
-					    op->base.src.transform,
-					    op->base.src.scale,
-					    &v[5], &v[6]);
-
-		dst.p.y = b->box.y1;
-		v[8] = dst.f;
-		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
-					    op->base.src.offset[1] + b->box.y1,
-					    op->base.src.transform,
-					    op->base.src.scale,
-					    &v[9], &v[10]);
-
-		v[11] = v[7] = v[3] = b->alpha;
-
-		v += 12;
-		b++;
-	} while (--nbox);
-}
-
-sse4_2 fastcall static void
-emit_span_boxes_affine__sse4_2(const struct sna_composite_spans_op *op,
-			       const struct sna_opacity_box *b, int nbox,
-			       float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = b->box.x2;
-		dst.p.y = b->box.y2;
-		v[0] = dst.f;
-		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x2,
-					    op->base.src.offset[1] + b->box.y2,
-					    op->base.src.transform,
-					    op->base.src.scale,
-					    &v[1], &v[2]);
-
-		dst.p.x = b->box.x1;
-		v[4] = dst.f;
-		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
-					    op->base.src.offset[1] + b->box.y2,
-					    op->base.src.transform,
-					    op->base.src.scale,
-					    &v[5], &v[6]);
-
-		dst.p.y = b->box.y1;
-		v[8] = dst.f;
-		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
-					    op->base.src.offset[1] + b->box.y1,
-					    op->base.src.transform,
-					    op->base.src.scale,
-					    &v[9], &v[10]);
-
-		v[11] = v[7] = v[3] = b->alpha;
-
-		v += 12;
-		b++;
-	} while (--nbox);
-}
-
 avx2 fastcall static void
 emit_span_boxes_affine__avx2(const struct sna_composite_spans_op *op,
 			     const struct sna_opacity_box *b, int nbox,
@@ -2352,70 +2483,6 @@ emit_span_boxes_affine__avx2(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-sse2 fastcall static void
-emit_span_linear(struct sna *sna,
-		 const struct sna_composite_spans_op *op,
-		 const BoxRec *box,
-		 float opacity)
-{
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-	float *v;
-
-	assert(op->base.floats_per_rect == 9);
-	assert((sna->render.vertex_used % 3) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 9;
-
-	dst.p.x = box->x2;
-	dst.p.y = box->y2;
-	v[0] = dst.f;
-	dst.p.x = box->x1;
-	v[3] = dst.f;
-	dst.p.y = box->y1;
-	v[6] = dst.f;
-
-	v[1] = compute_linear(&op->base.src, box->x2, box->y2);
-	v[4] = compute_linear(&op->base.src, box->x1, box->y2);
-	v[7] = compute_linear(&op->base.src, box->x1, box->y1);
-
-	v[8] = v[5] = v[2] = opacity;
-}
-
-sse4_2 fastcall static void
-emit_span_linear__sse4_2(struct sna *sna,
-			 const struct sna_composite_spans_op *op,
-			 const BoxRec *box,
-			 float opacity)
-{
-	union {
-		struct sna_coordinate p;
-		float f;
-	} dst;
-	float *v;
-
-	assert(op->base.floats_per_rect == 9);
-	assert((sna->render.vertex_used % 3) == 0);
-	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 9;
-
-	dst.p.x = box->x2;
-	dst.p.y = box->y2;
-	v[0] = dst.f;
-	dst.p.x = box->x1;
-	v[3] = dst.f;
-	dst.p.y = box->y1;
-	v[6] = dst.f;
-
-	v[1] = compute_linear(&op->base.src, box->x2, box->y2);
-	v[4] = compute_linear(&op->base.src, box->x1, box->y2);
-	v[7] = compute_linear(&op->base.src, box->x1, box->y1);
-
-	v[8] = v[5] = v[2] = opacity;
-}
-
 avx2 fastcall static void
 emit_span_linear__avx2(struct sna *sna,
 		       const struct sna_composite_spans_op *op,
@@ -2448,66 +2515,6 @@ emit_span_linear__avx2(struct sna *sna,
 	v[8] = v[5] = v[2] = opacity;
 }
 
-sse2 fastcall static void
-emit_span_boxes_linear(const struct sna_composite_spans_op *op,
-		       const struct sna_opacity_box *b, int nbox,
-		       float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = b->box.x2;
-		dst.p.y = b->box.y2;
-		v[0] = dst.f;
-		dst.p.x = b->box.x1;
-		v[3] = dst.f;
-		dst.p.y = b->box.y1;
-		v[6] = dst.f;
-
-		v[1] = compute_linear(&op->base.src, b->box.x2, b->box.y2);
-		v[4] = compute_linear(&op->base.src, b->box.x1, b->box.y2);
-		v[7] = compute_linear(&op->base.src, b->box.x1, b->box.y1);
-
-		v[8] = v[5] = v[2] = b->alpha;
-
-		v += 9;
-		b++;
-	} while (--nbox);
-}
-
-sse4_2 fastcall static void
-emit_span_boxes_linear__sse4_2(const struct sna_composite_spans_op *op,
-			       const struct sna_opacity_box *b, int nbox,
-			       float *v)
-{
-	do {
-		union {
-			struct sna_coordinate p;
-			float f;
-		} dst;
-
-		dst.p.x = b->box.x2;
-		dst.p.y = b->box.y2;
-		v[0] = dst.f;
-		dst.p.x = b->box.x1;
-		v[3] = dst.f;
-		dst.p.y = b->box.y1;
-		v[6] = dst.f;
-
-		v[1] = compute_linear(&op->base.src, b->box.x2, b->box.y2);
-		v[4] = compute_linear(&op->base.src, b->box.x1, b->box.y2);
-		v[7] = compute_linear(&op->base.src, b->box.x1, b->box.y1);
-
-		v[8] = v[5] = v[2] = b->alpha;
-
-		v += 9;
-		b++;
-	} while (--nbox);
-}
-
 avx2 fastcall static void
 emit_span_boxes_linear__avx2(const struct sna_composite_spans_op *op,
 			     const struct sna_opacity_box *b, int nbox,
@@ -2538,7 +2545,7 @@ emit_span_boxes_linear__avx2(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-inline inline static uint32_t
+inline static uint32_t
 gen4_choose_spans_vertex_buffer(const struct sna_composite_op *op)
 {
 	int id = op->src.is_solid ? 1 : 2 + !op->src.is_affine;
commit 8272d849c0895d6fa66ccca972899f56dab71cc6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 26 10:53:20 2013 +0000

    sna/gen4+: All associated CPUs have sse2 at least
    
    So mark up the basic functions as SSE2 in case we are compiling for
    32-bit only.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
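
To illustrate the mechanism this commit relies on: a minimal, self-contained sketch (not driver code; the function names and the build line are invented) of how GCC's per-function target attribute, available since roughly GCC 4.4, lets selected functions be compiled with SSE2 code generation even when the default 32-bit architecture of the build does not include it.

    /* sse2_target_sketch.c -- hypothetical example, not part of the driver.
     * Build with e.g. "gcc -m32 -march=i486 -O2 -c sse2_target_sketch.c";
     * only the attributed function is allowed to use SSE2 instructions. */
    #if defined(__GNUC__)
    #define with_sse2 __attribute__((target("sse2")))
    #else
    #define with_sse2
    #endif

    with_sse2 void scale_sse2(float *v, int n, float s)
    {
            /* GCC may vectorise this loop with SSE2 because of the
             * per-function attribute, regardless of the global -march. */
            while (n--)
                    *v++ *= s;
    }

    void scale_generic(float *v, int n, float s)
    {
            /* Without the attribute this falls back to x87 on plain i386. */
            while (n--)
                    *v++ *= s;
    }

Because every CPU paired with a gen4+ GPU has SSE2 (the point of this commit), the baseline emitters can carry the attribute unconditionally.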

diff --git a/src/sna/gen4_vertex.c b/src/sna/gen4_vertex.c
index 20f85b3..65624b7 100644
--- a/src/sna/gen4_vertex.c
+++ b/src/sna/gen4_vertex.c
@@ -270,7 +270,7 @@ emit_texcoord(struct sna *sna,
 	}
 }
 
-inline static void
+sse2 inline static void
 emit_vertex(struct sna *sna,
 	    const struct sna_composite_op *op,
 	    int16_t srcX, int16_t srcY,
@@ -281,7 +281,7 @@ emit_vertex(struct sna *sna,
 	emit_texcoord(sna, &op->src, srcX, srcY);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive(struct sna *sna,
 	       const struct sna_composite_op *op,
 	       const struct sna_composite_rectangles *r)
@@ -300,7 +300,7 @@ emit_primitive(struct sna *sna,
 		    r->dst.x,  r->dst.y);
 }
 
-inline static void
+sse2 inline static void
 emit_vertex_mask(struct sna *sna,
 		 const struct sna_composite_op *op,
 		 int16_t srcX, int16_t srcY,
@@ -312,7 +312,7 @@ emit_vertex_mask(struct sna *sna,
 	emit_texcoord(sna, &op->mask, mskX, mskY);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_mask(struct sna *sna,
 		    const struct sna_composite_op *op,
 		    const struct sna_composite_rectangles *r)
@@ -331,7 +331,7 @@ emit_primitive_mask(struct sna *sna,
 			 r->dst.x,  r->dst.y);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_solid(struct sna *sna,
 		     const struct sna_composite_op *op,
 		     const struct sna_composite_rectangles *r)
@@ -359,7 +359,7 @@ emit_primitive_solid(struct sna *sna,
 	v[5] = v[3] = v[1] = .5;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_boxes_solid(const struct sna_composite_op *op,
 		 const BoxRec *box, int nbox,
 		 float *v)
@@ -384,7 +384,7 @@ emit_boxes_solid(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_linear(struct sna *sna,
 		      const struct sna_composite_op *op,
 		      const struct sna_composite_rectangles *r)
@@ -474,7 +474,7 @@ emit_primitive_linear__avx2(struct sna *sna,
 	v[5] = compute_linear(&op->src, r->src.x, r->src.y);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_boxes_linear(const struct sna_composite_op *op,
 		  const BoxRec *box, int nbox,
 		  float *v)
@@ -558,7 +558,7 @@ emit_boxes_linear__avx2(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_identity_source(struct sna *sna,
 			       const struct sna_composite_op *op,
 			       const struct sna_composite_rectangles *r)
@@ -651,7 +651,7 @@ emit_primitive_identity_source__avx2(struct sna *sna,
 	v[5] = v[2] = v[8] + r->height * op->src.scale[1];
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_boxes_identity_source(const struct sna_composite_op *op,
 			   const BoxRec *box, int nbox,
 			   float *v)
@@ -741,7 +741,7 @@ emit_boxes_identity_source__avx2(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_simple_source(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     const struct sna_composite_rectangles *r)
@@ -861,7 +861,7 @@ emit_primitive_simple_source__avx2(struct sna *sna,
 	v[8] = ((r->src.y + ty) * yy + y0) * sy;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_boxes_simple_source(const struct sna_composite_op *op,
 			 const BoxRec *box, int nbox,
 			 float *v)
@@ -978,7 +978,7 @@ emit_boxes_simple_source__avx2(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_affine_source(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     const struct sna_composite_rectangles *r)
@@ -1017,7 +1017,7 @@ emit_primitive_affine_source(struct sna *sna,
 				    &v[7], &v[8]);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_boxes_affine_source(const struct sna_composite_op *op,
 			 const BoxRec *box, int nbox,
 			 float *v)
@@ -1054,7 +1054,7 @@ emit_boxes_affine_source(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_identity_mask(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     const struct sna_composite_rectangles *r)
@@ -1097,7 +1097,7 @@ emit_primitive_identity_mask(struct sna *sna,
 	v[9] = v[5] = v[1] = .5;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_boxes_identity_mask(const struct sna_composite_op *op,
 			 const BoxRec *box, int nbox,
 			 float *v)
@@ -1131,7 +1131,7 @@ emit_boxes_identity_mask(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_linear_identity_mask(struct sna *sna,
 				    const struct sna_composite_op *op,
 				    const struct sna_composite_rectangles *r)
@@ -1176,7 +1176,7 @@ emit_primitive_linear_identity_mask(struct sna *sna,
 	v[9] = compute_linear(&op->src, r->src.x, r->src.y);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_boxes_linear_identity_mask(const struct sna_composite_op *op,
 				const BoxRec *box, int nbox,
 				float *v)
@@ -1213,7 +1213,7 @@ emit_boxes_linear_identity_mask(const struct sna_composite_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_identity_source_mask(struct sna *sna,
 				    const struct sna_composite_op *op,
 				    const struct sna_composite_rectangles *r)
@@ -1262,7 +1262,7 @@ emit_primitive_identity_source_mask(struct sna *sna,
 	v[14] = msk_y * op->mask.scale[1];
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_simple_source_identity(struct sna *sna,
 				      const struct sna_composite_op *op,
 				      const struct sna_composite_rectangles *r)
@@ -1313,7 +1313,7 @@ emit_primitive_simple_source_identity(struct sna *sna,
 	v[14] = msk_y * op->mask.scale[1];
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_primitive_affine_source_identity(struct sna *sna,
 				      const struct sna_composite_op *op,
 				      const struct sna_composite_rectangles *r)
@@ -1526,7 +1526,7 @@ emit_span_vertex(struct sna *sna,
 	emit_texcoord(sna, &op->base.src, x, y);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_composite_spans_primitive(struct sna *sna,
 			       const struct sna_composite_spans_op *op,
 			       const BoxRec *box,
@@ -1542,7 +1542,7 @@ emit_composite_spans_primitive(struct sna *sna,
 	OUT_VERTEX_F(opacity);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_solid(struct sna *sna,
 		 const struct sna_composite_spans_op *op,
 		 const BoxRec *box,
@@ -1573,7 +1573,7 @@ emit_span_solid(struct sna *sna,
 	v[8] = v[5] = v[2] = opacity;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_boxes_solid(const struct sna_composite_spans_op *op,
 		      const struct sna_opacity_box *b,
 		      int nbox, float *v)
@@ -1602,7 +1602,7 @@ emit_span_boxes_solid(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_identity(struct sna *sna,
 		    const struct sna_composite_spans_op *op,
 		    const BoxRec *box,
@@ -1722,7 +1722,7 @@ emit_span_identity__avx2(struct sna *sna,
 	v[11] = v[7] = v[3] = opacity;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_boxes_identity(const struct sna_composite_spans_op *op,
 			 const struct sna_opacity_box *b, int nbox,
 			 float *v)
@@ -1833,7 +1833,7 @@ emit_span_boxes_identity__avx2(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_simple(struct sna *sna,
 		 const struct sna_composite_spans_op *op,
 		 const BoxRec *box,
@@ -1965,7 +1965,7 @@ emit_span_simple__avx2(struct sna *sna,
 	v[11] = v[7] = v[3] = opacity;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_boxes_simple(const struct sna_composite_spans_op *op,
 		       const struct sna_opacity_box *b, int nbox,
 		       float *v)
@@ -2088,7 +2088,7 @@ emit_span_boxes_simple__avx2(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_affine(struct sna *sna,
 		  const struct sna_composite_spans_op *op,
 		  const BoxRec *box,
@@ -2223,7 +2223,7 @@ emit_span_affine__avx2(struct sna *sna,
 	v[11] = v[7] = v[3] = opacity;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_boxes_affine(const struct sna_composite_spans_op *op,
 		       const struct sna_opacity_box *b, int nbox,
 		       float *v)
@@ -2352,7 +2352,7 @@ emit_span_boxes_affine__avx2(const struct sna_composite_spans_op *op,
 	} while (--nbox);
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_linear(struct sna *sna,
 		 const struct sna_composite_spans_op *op,
 		 const BoxRec *box,
@@ -2448,7 +2448,7 @@ emit_span_linear__avx2(struct sna *sna,
 	v[8] = v[5] = v[2] = opacity;
 }
 
-fastcall static void
+sse2 fastcall static void
 emit_span_boxes_linear(const struct sna_composite_spans_op *op,
 		       const struct sna_opacity_box *b, int nbox,
 		       float *v)
commit 76d46201d7b7abc7c0b6bced9c6fadaf47ad9134
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 26 10:49:42 2013 +0000

    sna: Only use the GPU bo after migrating
    
    If we choose not to migrate the damage to the GPU bo, then it will be
    incoherent. This just flattens the logic out as priv->gpu_bo should be
    NULL here anyway.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
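
The coherency argument can be illustrated with a stripped-down stand-in (all names below are invented, not driver symbols): after the flattening, the function either migrates the data and returns the GPU buffer, or returns NULL, so a caller can never be handed a buffer whose contents were left behind on the CPU side.

    /* Hypothetical stand-in for the flattened early-return flow. */
    #include <stdbool.h>
    #include <stddef.h>

    struct buffer { bool on_gpu; };

    static bool copy_to_gpu(struct buffer *buf)
    {
            buf->on_gpu = true;
            return true;
    }

    struct buffer *use_gpu_buffer(struct buffer *buf, bool migrate)
    {
            if (!migrate)
                    return NULL;    /* not migrating: never hand back buf */

            if (!copy_to_gpu(buf))
                    return NULL;    /* migration failed */

            return buf;             /* coherent only after the copy */
    }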

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 512be95..1073abb 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -463,7 +463,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box, bool blt)
 
 	w = box->x2 - box->x1;
 	h = box->y2 - box->y1;
-	if (priv->cpu_bo && !priv->cpu_bo->flush) {
+	if (priv->cpu_bo) {
 		migrate = true;
 	} else if (w == pixmap->drawable.width && h == pixmap->drawable.height) {
 		migrate = priv->source_count++ > SOURCE_BIAS;
@@ -490,14 +490,15 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box, bool blt)
 		migrate = count*w*h > pixmap->drawable.width * pixmap->drawable.height;
 	}
 
-	if (migrate) {
-		if (blt) {
-			if (!sna_pixmap_move_area_to_gpu(pixmap, box, MOVE_READ))
-				return NULL;
-		} else {
-			if (!sna_pixmap_force_to_gpu(pixmap, MOVE_SOURCE_HINT | MOVE_READ))
-				return NULL;
-		}
+	if (!migrate)
+		return NULL;
+
+	if (blt) {
+		if (!sna_pixmap_move_area_to_gpu(pixmap, box, MOVE_READ))
+			return NULL;
+	} else {
+		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_SOURCE_HINT | MOVE_READ))
+			return NULL;
 	}
 
 	return priv->gpu_bo;
commit 1068bf7024d8a6650de020e95efdedcbfd7d3c5e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 26 10:14:03 2013 +0000

    sna: Mention if compiled as 64-bit as part of the CPU feature detection
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_cpu.c b/src/sna/sna_cpu.c
index 4dc1144..9110456 100644
--- a/src/sna/sna_cpu.c
+++ b/src/sna/sna_cpu.c
@@ -91,6 +91,10 @@ char *sna_cpu_features_to_string(unsigned features, char *line)
 {
 	char *ret = line;
 
+#ifdef __x86_64__
+	line += sprintf (line, ", x86-64");
+#endif
+
 	if (features & SSE2)
 		line += sprintf (line, ", sse2");
 	if (features & SSE3)
commit 27c71027b1d76979460ef15bba8e8671e3286a24
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 26 10:00:29 2013 +0000

    sna: Ignore SSE4a - not an Intel ISA!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
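
For context: SSE4a is an AMD extension and is not reported in the CPUID leaf this code queries at all; on AMD parts it is advertised via the extended leaf 0x80000001 (ECX bit 6). A standalone sketch of how it would be probed if it were ever wanted, assuming GCC's <cpuid.h> (which defines bit_SSE4a for that leaf):

    /* sse4a_probe.c -- illustrative only; the driver drops this check
     * precisely because SSE4a is an AMD-only extension. */
    #include <cpuid.h>
    #include <stdio.h>

    static int has_sse4a(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* SSE4a lives in the AMD extended leaf, not in leaf 1. */
            if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
                    return 0;

            return (ecx & bit_SSE4a) != 0;
    }

    int main(void)
    {
            printf("sse4a: %s\n", has_sse4a() ? "yes" : "no");
            return 0;
    }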

diff --git a/src/sna/sna.h b/src/sna/sna.h
index fe31b21..f259e2b 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -213,11 +213,10 @@ struct sna {
 #define SSE2 0x4
 #define SSE3 0x8
 #define SSSE3 0x10
-#define SSE4a 0x20
-#define SSE4_1 0x40
-#define SSE4_2 0x80
-#define AVX 0x100
-#define AVX2 0x200
+#define SSE4_1 0x20
+#define SSE4_2 0x40
+#define AVX 0x80
+#define AVX2 0x100
 
 	unsigned watch_flush;
 
diff --git a/src/sna/sna_cpu.c b/src/sna/sna_cpu.c
index d526464..4dc1144 100644
--- a/src/sna/sna_cpu.c
+++ b/src/sna/sna_cpu.c
@@ -70,9 +70,6 @@ unsigned sna_cpu_detect(void)
 	if (edx & bit_SSE2)
 		features |= SSE2;
 
-	if (edx & bit_SSE4a)
-		features |= SSE4a;
-
 	__cpuid(7, eax, ebx, ecx, edx);
 
 	if (eax & bit_AVX2)
@@ -100,8 +97,6 @@ char *sna_cpu_features_to_string(unsigned features, char *line)
 		line += sprintf (line, ", sse3");
 	if (features & SSSE3)
 		line += sprintf (line, ", ssse3");
-	if (features & SSE4a)
-		line += sprintf (line, ", sse4a");
 	if (features & SSE4_1)
 		line += sprintf (line, ", sse4.1");
 	if (features & SSE4_2)
commit 2a6f3989ba2075c322cdf6e33f829f4d5a885be0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 26 09:59:34 2013 +0000

    sna/gen3: Allow conditional use of SSE2
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
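
The dispatch pattern that the gen3_render.c hunk below repeats for each shader type can be condensed as follows; a sketch with placeholder names (pick_emitter, emit_foo and friends are not driver symbols), showing why the run-time SSE2 check is compiled only into 32-bit builds, where SSE2 is optional, and disappears entirely on x86-64.

    /* Hypothetical condensation of the runtime-dispatch pattern. */
    #define HAS_SSE2 0x4    /* stand-in for the driver's SSE2 feature bit */

    static void emit_foo(float *v, int n)
    {
            while (n--)
                    *v++ = 1.f;
    }

    #ifndef __x86_64__
    __attribute__((target("sse2")))
    static void emit_foo__sse2(float *v, int n)
    {
            while (n--)
                    *v++ = 1.f;
    }
    #endif

    typedef void (*emit_fn)(float *, int);

    emit_fn pick_emitter(unsigned cpu_features)
    {
    #ifndef __x86_64__
            /* 32-bit builds may run on pre-SSE2 CPUs, so consult the
             * mask filled in by CPU detection before handing out the
             * SSE2-compiled body. */
            if (cpu_features & HAS_SSE2)
                    return emit_foo__sse2;
    #endif
            /* On x86-64, SSE2 is part of the base ISA, so the plain
             * body is the only one needed. */
            return emit_foo;
    }

In the driver itself the selection happens while filling in tmp->prim_emit and tmp->emit_boxes, as the hunk below shows.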

diff --git a/src/sna/compiler.h b/src/sna/compiler.h
index fe2e321..407d3b5 100644
--- a/src/sna/compiler.h
+++ b/src/sna/compiler.h
@@ -53,7 +53,7 @@
 #endif
 
 #if defined(__GNUC__) && (__GNUC__ >= 4) /* 4.4 */
-#define sse2 __attribute__((target("sse2")))
+#define sse2 __attribute__((target("sse2,fpmath=sse+387")))
 #define sse4_2 __attribute__((target("sse4.2,sse2")))
 #define avx2 __attribute__((target("avx2,sse4.2,sse2")))
 #else
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 04ec889..1c6d0ec 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3625,6 +3625,399 @@ gen3_emit_composite_spans_primitive_identity_gradient__boxes(const struct sna_co
 	} while (--nbox);
 }
 
+#ifndef __x86_64__
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_constant__sse2(struct sna *sna,
+						   const struct sna_composite_spans_op *op,
+						   const BoxRec *box,
+						   float opacity)
+{
+	float *v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 9;
+
+	v[0] = op->base.dst.x + box->x2;
+	v[6] = v[3] = op->base.dst.x + box->x1;
+	v[4] = v[1] = op->base.dst.y + box->y2;
+	v[7] = op->base.dst.y + box->y1;
+	v[8] = v[5] = v[2] = opacity;
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_constant__sse2__boxes(const struct sna_composite_spans_op *op,
+							  const struct sna_opacity_box *b,
+							  int nbox,
+							  float *v)
+{
+	do {
+		v[0] = op->base.dst.x + b->box.x2;
+		v[6] = v[3] = op->base.dst.x + b->box.x1;
+		v[4] = v[1] = op->base.dst.y + b->box.y2;
+		v[7] = op->base.dst.y + b->box.y1;
+		v[8] = v[5] = v[2] = b->alpha;
+
+		v += 9;
+		b++;
+	} while (--nbox);
+}
+
+sse2 fastcall static void
+gen3_render_composite_spans_constant_box__sse2(struct sna *sna,
+					       const struct sna_composite_spans_op *op,
+					       const BoxRec *box, float opacity)
+{
+	float *v;
+	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
+	     __FUNCTION__,
+	     op->base.src.offset[0], op->base.src.offset[1],
+	     opacity,
+	     op->base.dst.x, op->base.dst.y,
+	     box->x1, box->y1,
+	     box->x2 - box->x1,
+	     box->y2 - box->y1));
+
+	gen3_get_rectangles(sna, &op->base, 1);
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 9;
+
+	v[0] = box->x2;
+	v[6] = v[3] = box->x1;
+	v[4] = v[1] = box->y2;
+	v[7] = box->y1;
+	v[8] = v[5] = v[2] = opacity;
+}
+
+sse2 fastcall static void
+gen3_render_composite_spans_constant_thread__sse2__boxes(struct sna *sna,
+							 const struct sna_composite_spans_op *op,
+							 const struct sna_opacity_box *box,
+							 int nbox)
+{
+	DBG(("%s: nbox=%d, src=+(%d, %d), dst=+(%d, %d)\n",
+	     __FUNCTION__, nbox,
+	     op->base.src.offset[0], op->base.src.offset[1],
+	     op->base.dst.x, op->base.dst.y));
+
+	sna_vertex_lock(&sna->render);
+	do {
+		int nbox_this_time;
+		float *v;
+
+		nbox_this_time = gen3_get_rectangles(sna, &op->base, nbox);
+		assert(nbox_this_time);
+		nbox -= nbox_this_time;
+
+		v = sna->render.vertices + sna->render.vertex_used;
+		sna->render.vertex_used += nbox_this_time * 9;
+
+		sna_vertex_acquire__locked(&sna->render);
+		sna_vertex_unlock(&sna->render);
+
+		do {
+			v[0] = box->box.x2;
+			v[6] = v[3] = box->box.x1;
+			v[4] = v[1] = box->box.y2;
+			v[7] = box->box.y1;
+			v[8] = v[5] = v[2] = box->alpha;
+			v += 9;
+			box++;
+		} while (--nbox_this_time);
+
+		sna_vertex_lock(&sna->render);
+		sna_vertex_release__locked(&sna->render);
+	} while (nbox);
+	sna_vertex_unlock(&sna->render);
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_constant__sse2__no_offset(struct sna *sna,
+							      const struct sna_composite_spans_op *op,
+							      const BoxRec *box,
+							      float opacity)
+{
+	float *v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 9;
+
+	v[0] = box->x2;
+	v[6] = v[3] = box->x1;
+	v[4] = v[1] = box->y2;
+	v[7] = box->y1;
+	v[8] = v[5] = v[2] = opacity;
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_constant__sse2__no_offset__boxes(const struct sna_composite_spans_op *op,
+								     const struct sna_opacity_box *b,
+								     int nbox, float *v)
+{
+	do {
+		v[0] = b->box.x2;
+		v[6] = v[3] = b->box.x1;
+		v[4] = v[1] = b->box.y2;
+		v[7] = b->box.y1;
+		v[8] = v[5] = v[2] = b->alpha;
+
+		v += 9;
+		b++;
+	} while (--nbox);
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_identity_source__sse2(struct sna *sna,
+							  const struct sna_composite_spans_op *op,
+							  const BoxRec *box,
+							  float opacity)
+{
+	float *v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 15;
+
+	v[0] = op->base.dst.x + box->x2;
+	v[1] = op->base.dst.y + box->y2;
+	v[2] = (op->base.src.offset[0] + box->x2) * op->base.src.scale[0];
+	v[3] = (op->base.src.offset[1] + box->y2) * op->base.src.scale[1];
+	v[4] = opacity;
+
+	v[5] = op->base.dst.x + box->x1;
+	v[6] = v[1];
+	v[7] = (op->base.src.offset[0] + box->x1) * op->base.src.scale[0];
+	v[8] = v[3];
+	v[9] = opacity;
+
+	v[10] = v[5];
+	v[11] = op->base.dst.y + box->y1;
+	v[12] = v[7];
+	v[13] = (op->base.src.offset[1] + box->y1) * op->base.src.scale[1];
+	v[14] = opacity;
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_identity_source__sse2__boxes(const struct sna_composite_spans_op *op,
+								 const struct sna_opacity_box *b,
+								 int nbox,
+								 float *v)
+{
+	do {
+		v[0] = op->base.dst.x + b->box.x2;
+		v[1] = op->base.dst.y + b->box.y2;
+		v[2] = (op->base.src.offset[0] + b->box.x2) * op->base.src.scale[0];
+		v[3] = (op->base.src.offset[1] + b->box.y2) * op->base.src.scale[1];
+		v[4] = b->alpha;
+
+		v[5] = op->base.dst.x + b->box.x1;
+		v[6] = v[1];
+		v[7] = (op->base.src.offset[0] + b->box.x1) * op->base.src.scale[0];
+		v[8] = v[3];
+		v[9] = b->alpha;
+
+		v[10] = v[5];
+		v[11] = op->base.dst.y + b->box.y1;
+		v[12] = v[7];
+		v[13] = (op->base.src.offset[1] + b->box.y1) * op->base.src.scale[1];
+		v[14] = b->alpha;
+
+		v += 15;
+		b++;
+	} while (--nbox);
+}
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_affine_source__sse2(struct sna *sna,
+							const struct sna_composite_spans_op *op,
+							const BoxRec *box,
+							float opacity)
+{
+	PictTransform *transform = op->base.src.transform;
+	float *v;
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 15;
+
+	v[0]  = op->base.dst.x + box->x2;
+	v[6]  = v[1] = op->base.dst.y + box->y2;
+	v[10] = v[5] = op->base.dst.x + box->x1;
+	v[11] = op->base.dst.y + box->y1;
+	v[14] = v[9] = v[4]  = opacity;
+
+	_sna_get_transformed_scaled((int)op->base.src.offset[0] + box->x2,
+				    (int)op->base.src.offset[1] + box->y2,
+				    transform, op->base.src.scale,
+				    &v[2], &v[3]);
+
+	_sna_get_transformed_scaled((int)op->base.src.offset[0] + box->x1,
+				    (int)op->base.src.offset[1] + box->y2,
+				    transform, op->base.src.scale,
+				    &v[7], &v[8]);
+
+	_sna_get_transformed_scaled((int)op->base.src.offset[0] + box->x1,
+				    (int)op->base.src.offset[1] + box->y1,
+				    transform, op->base.src.scale,
+				    &v[12], &v[13]);
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_affine_source__sse2__boxes(const struct sna_composite_spans_op *op,
+							       const struct sna_opacity_box *b,
+							       int nbox,
+							       float *v)
+{
+	PictTransform *transform = op->base.src.transform;
+
+	do {
+		v[0]  = op->base.dst.x + b->box.x2;
+		v[6]  = v[1] = op->base.dst.y + b->box.y2;
+		v[10] = v[5] = op->base.dst.x + b->box.x1;
+		v[11] = op->base.dst.y + b->box.y1;
+		v[14] = v[9] = v[4]  = b->alpha;
+
+		_sna_get_transformed_scaled((int)op->base.src.offset[0] + b->box.x2,
+					    (int)op->base.src.offset[1] + b->box.y2,
+					    transform, op->base.src.scale,
+					    &v[2], &v[3]);
+
+		_sna_get_transformed_scaled((int)op->base.src.offset[0] + b->box.x1,
+					    (int)op->base.src.offset[1] + b->box.y2,
+					    transform, op->base.src.scale,
+					    &v[7], &v[8]);
+
+		_sna_get_transformed_scaled((int)op->base.src.offset[0] + b->box.x1,
+					    (int)op->base.src.offset[1] + b->box.y1,
+					    transform, op->base.src.scale,
+					    &v[12], &v[13]);
+		v += 15;
+		b++;
+	} while (--nbox);
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_identity_gradient__sse2(struct sna *sna,
+							    const struct sna_composite_spans_op *op,
+							    const BoxRec *box,
+							    float opacity)
+{
+	float *v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 15;
+
+	v[0] = op->base.dst.x + box->x2;
+	v[1] = op->base.dst.y + box->y2;
+	v[2] = op->base.src.offset[0] + box->x2;
+	v[3] = op->base.src.offset[1] + box->y2;
+	v[4] = opacity;
+
+	v[5] = op->base.dst.x + box->x1;
+	v[6] = v[1];
+	v[7] = op->base.src.offset[0] + box->x1;
+	v[8] = v[3];
+	v[9] = opacity;
+
+	v[10] = v[5];
+	v[11] = op->base.dst.y + box->y1;
+	v[12] = v[7];
+	v[13] = op->base.src.offset[1] + box->y1;
+	v[14] = opacity;
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_identity_gradient__sse2__boxes(const struct sna_composite_spans_op *op,
+								   const struct sna_opacity_box *b,
+								   int nbox,
+								   float *v)
+{
+	do {
+		v[0] = op->base.dst.x + b->box.x2;
+		v[1] = op->base.dst.y + b->box.y2;
+		v[2] = op->base.src.offset[0] + b->box.x2;
+		v[3] = op->base.src.offset[1] + b->box.y2;
+		v[4] = b->alpha;
+
+		v[5] = op->base.dst.x + b->box.x1;
+		v[6] = v[1];
+		v[7] = op->base.src.offset[0] + b->box.x1;
+		v[8] = v[3];
+		v[9] = b->alpha;
+
+		v[10] = v[5];
+		v[11] = op->base.dst.y + b->box.y1;
+		v[12] = v[7];
+		v[13] = op->base.src.offset[1] + b->box.y1;
+		v[14] = b->alpha;
+
+		v += 15;
+		b++;
+	} while (--nbox);
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_affine_gradient__sse2(struct sna *sna,
+							  const struct sna_composite_spans_op *op,
+							  const BoxRec *box,
+							  float opacity)
+{
+	PictTransform *transform = op->base.src.transform;
+	float *v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 15;
+
+	v[0] = op->base.dst.x + box->x2;
+	v[1] = op->base.dst.y + box->y2;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x2,
+				    op->base.src.offset[1] + box->y2,
+				    transform, op->base.src.scale,
+				    &v[2], &v[3]);
+	v[4] = opacity;
+
+	v[5] = op->base.dst.x + box->x1;
+	v[6] = v[1];
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
+				    op->base.src.offset[1] + box->y2,
+				    transform, op->base.src.scale,
+				    &v[7], &v[8]);
+	v[9] = opacity;
+
+	v[10] = v[5];
+	v[11] = op->base.dst.y + box->y1;
+	_sna_get_transformed_scaled(op->base.src.offset[0] + box->x1,
+				    op->base.src.offset[1] + box->y1,
+				    transform, op->base.src.scale,
+				    &v[12], &v[13]);
+	v[14] = opacity;
+}
+
+sse2 fastcall static void
+gen3_emit_composite_spans_primitive_affine_gradient__sse2__boxes(const struct sna_composite_spans_op *op,
+								 const struct sna_opacity_box *b,
+								 int nbox,
+								 float *v)
+{
+	PictTransform *transform = op->base.src.transform;
+
+	do {
+		v[0] = op->base.dst.x + b->box.x2;
+		v[1] = op->base.dst.y + b->box.y2;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x2,
+					    op->base.src.offset[1] + b->box.y2,
+					    transform, op->base.src.scale,
+					    &v[2], &v[3]);
+		v[4] = b->alpha;
+
+		v[5] = op->base.dst.x + b->box.x1;
+		v[6] = v[1];
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
+					    op->base.src.offset[1] + b->box.y2,
+					    transform, op->base.src.scale,
+					    &v[7], &v[8]);
+		v[9] = b->alpha;
+
+		v[10] = v[5];
+		v[11] = op->base.dst.y + b->box.y1;
+		_sna_get_transformed_scaled(op->base.src.offset[0] + b->box.x1,
+					    op->base.src.offset[1] + b->box.y1,
+					    transform, op->base.src.scale,
+					    &v[12], &v[13]);
+		v[14] = b->alpha;
+		v += 15;
+		b++;
+	} while (--nbox);
+}
+#endif
+
 fastcall static void
 gen3_emit_composite_spans_primitive_affine_gradient(struct sna *sna,
 						    const struct sna_composite_spans_op *op,
@@ -3987,35 +4380,85 @@ gen3_render_composite_spans(struct sna *sna,
 	case SHADER_WHITE:
 	case SHADER_CONSTANT:
 		if (no_offset) {
-			tmp->box = gen3_render_composite_spans_constant_box;
-			tmp->thread_boxes = gen3_render_composite_spans_constant_thread_boxes;
-			tmp->prim_emit = gen3_emit_composite_spans_primitive_constant_no_offset;
-			tmp->emit_boxes = gen3_emit_composite_spans_primitive_constant_no_offset__boxes;
+#ifndef __x86_64__
+			if (sna->cpu_features & SSE2) {
+				tmp->box = gen3_render_composite_spans_constant_box__sse2;
+				tmp->thread_boxes = gen3_render_composite_spans_constant_thread__sse2__boxes;
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_constant__sse2__no_offset;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_constant__sse2__no_offset__boxes;
+			} else
+#endif
+			{
+				tmp->box = gen3_render_composite_spans_constant_box;
+				tmp->thread_boxes = gen3_render_composite_spans_constant_thread_boxes;
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_constant_no_offset;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_constant_no_offset__boxes;
+			}
 		} else {
-			tmp->prim_emit = gen3_emit_composite_spans_primitive_constant;
-			tmp->emit_boxes = gen3_emit_composite_spans_primitive_constant__boxes;
+#ifndef __x86_64__
+			if (sna->cpu_features & SSE2) {
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_constant__sse2;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_constant__sse2__boxes;
+			} else
+#endif
+			{
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_constant;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_constant__boxes;
+			}
 		}
 		break;
 	case SHADER_LINEAR:
 	case SHADER_RADIAL:
 		if (tmp->base.src.transform == NULL) {
-			tmp->prim_emit = gen3_emit_composite_spans_primitive_identity_gradient;
-			tmp->emit_boxes = gen3_emit_composite_spans_primitive_identity_gradient__boxes;
+#ifndef __x86_64__
+			if (sna->cpu_features & SSE2) {
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_identity_gradient__sse2;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_identity_gradient__sse2__boxes;
+			} else 
+#endif
+			{
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_identity_gradient;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_identity_gradient__boxes;
+			}
 		} else if (tmp->base.src.is_affine) {
 			tmp->base.src.scale[1] = tmp->base.src.scale[0] = 1. / tmp->base.src.transform->matrix[2][2];
-			tmp->prim_emit = gen3_emit_composite_spans_primitive_affine_gradient;
-			tmp->emit_boxes = gen3_emit_composite_spans_primitive_affine_gradient__boxes;
+#ifndef __x86_64__
+			if (sna->cpu_features & SSE2) {
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_affine_gradient__sse2;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_affine_gradient__sse2__boxes;
+			} else
+#endif
+			{
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_affine_gradient;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_affine_gradient__boxes;
+			}
 		}
 		break;
 	case SHADER_TEXTURE:
 		if (tmp->base.src.transform == NULL) {
-			tmp->prim_emit = gen3_emit_composite_spans_primitive_identity_source;
-			tmp->emit_boxes = gen3_emit_composite_spans_primitive_identity_source__boxes;
+#ifndef __x86_64__
+			if (sna->cpu_features & SSE2) {
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_identity_source__sse2;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_identity_source__sse2__boxes;
+			} else
+#endif
+			{
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_identity_source;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_identity_source__boxes;
+			}
 		} else if (tmp->base.src.is_affine) {
 			tmp->base.src.scale[0] /= tmp->base.src.transform->matrix[2][2];
 			tmp->base.src.scale[1] /= tmp->base.src.transform->matrix[2][2];
-			tmp->prim_emit = gen3_emit_composite_spans_primitive_affine_source;
-			tmp->emit_boxes = gen3_emit_composite_spans_primitive_affine_source__boxes;
+#ifndef __x86_64__
+			if (sna->cpu_features & SSE2) {
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_affine_source__sse2;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_affine_source__sse2__boxes;
+			} else
+#endif
+			{
+				tmp->prim_emit = gen3_emit_composite_spans_primitive_affine_source;
+				tmp->emit_boxes = gen3_emit_composite_spans_primitive_affine_source__boxes;
+			}
 		}
 		break;
 	}
commit 13461a18b1605feb17304d52136d100df50ca296
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 26 09:41:14 2013 +0000

    sna: Print detected CPU features
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
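
The helper added below has each detected feature append ", name" to the buffer and then skips the first two characters on return, so the caller gets a cleanly comma-separated list. A standalone sketch of the same idiom with invented feature bits:

    /* Illustrative only: the ", " prefix / skip-two idiom used by
     * sna_cpu_features_to_string(), with made-up feature flags. */
    #include <stdio.h>

    #define F_SSE2 0x1
    #define F_AVX  0x2

    static char *features_to_string(unsigned features, char *line)
    {
            char *ret = line;

            if (features & F_SSE2)
                    line += sprintf(line, ", sse2");
            if (features & F_AVX)
                    line += sprintf(line, ", avx");

            /* Every entry wrote a leading ", ", so skip the first one.
             * (Assumes at least one feature bit is set.) */
            return ret + 2;
    }

    int main(void)
    {
            char buf[64];

            printf("CPU: %s\n", features_to_string(F_SSE2 | F_AVX, buf));
            return 0;
    }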

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 77a52bd..fe31b21 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -868,6 +868,7 @@ inline static bool is_clipped(const RegionRec *r,
 }
 
 unsigned sna_cpu_detect(void);
+char *sna_cpu_features_to_string(unsigned features, char *line);
 
 void sna_threads_init(void);
 int sna_use_threads (int width, int height, int threshold);
diff --git a/src/sna/sna_cpu.c b/src/sna/sna_cpu.c
index c669eb8..d526464 100644
--- a/src/sna/sna_cpu.c
+++ b/src/sna/sna_cpu.c
@@ -44,7 +44,7 @@ unsigned sna_cpu_detect(void)
 	unsigned int eax, ebx, ecx, edx;
 	unsigned features = 0;
 
-	__cpuid(0, eax, ebx, ecx, edx);
+	__cpuid(1, eax, ebx, ecx, edx);
 
 	if (eax & bit_SSE3)
 		features |= SSE3;
@@ -89,3 +89,27 @@ unsigned sna_cpu_detect(void)
 }
 
 #endif
+
+char *sna_cpu_features_to_string(unsigned features, char *line)
+{
+	char *ret = line;
+
+	if (features & SSE2)
+		line += sprintf (line, ", sse2");
+	if (features & SSE3)
+		line += sprintf (line, ", sse3");
+	if (features & SSSE3)
+		line += sprintf (line, ", ssse3");
+	if (features & SSE4a)
+		line += sprintf (line, ", sse4a");
+	if (features & SSE4_1)
+		line += sprintf (line, ", sse4.1");
+	if (features & SSE4_2)
+		line += sprintf (line, ", sse4.2");
+	if (features & AVX)
+		line += sprintf (line, ", avx");
+	if (features & AVX2)
+		line += sprintf (line, ", avx2");
+
+	return ret + 2;
+}
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 1930660..5d7a1a2 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -451,6 +451,7 @@ static Bool sna_option_cast_to_bool(struct sna *sna, int id, Bool val)
 static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 {
 	struct sna *sna;
+	char buf[1024];
 	rgb defaultWeight = { 0, 0, 0 };
 	EntityInfoPtr pEnt;
 	int preferred_depth;
@@ -582,6 +583,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (xf86ReturnOptValBool(sna->Options, OPTION_CRTC_PIXMAPS, FALSE))
 		sna->flags |= SNA_FORCE_SHADOW;
 
+	xf86DrvMsg(scrn->scrnIndex, X_PROBED, "CPU: %s\n",
+		   sna_cpu_features_to_string(sna->cpu_features, buf));
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Framebuffer %s\n",
 		   sna->tiling & SNA_TILING_FB ? "tiled" : "linear");
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Pixmaps %s\n",

