xf86-video-intel: 4 commits - src/sna/gen4_render.c src/sna/gen4_render.h src/sna/gen5_render.c src/sna/sna_dri.c

Chris Wilson ickle at kemper.freedesktop.org
Fri Dec 21 02:23:10 PST 2012


 src/sna/gen4_render.c |  465 ++++++++++++++++++++++++++++++--------------------
 src/sna/gen4_render.h |   56 +++++-
 src/sna/gen5_render.c |  258 +++++++++++++--------------
 src/sna/sna_dri.c     |    5 
 4 files changed, 464 insertions(+), 320 deletions(-)

New commits:
commit 48e4dc4bd4b2980f0f804f572d0e3fc1bb4bc21e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Dec 20 21:54:25 2012 +0000

    sna/gen4: Backport tight vertex packing of renderblits
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d899ad3..21c860e 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -48,7 +48,6 @@
  * after every rectangle... So until that is resolved, prefer
  * the BLT engine.
  */
-#define PREFER_BLT 1
 #define FORCE_SPANS 0
 
 #define NO_COMPOSITE 0
@@ -172,6 +171,8 @@ static const struct blendinfo {
 #define SAMPLER_OFFSET(sf, se, mf, me, k) \
 	((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * KERNEL_COUNT + (k)) * 64)
 
+#define VERTEX_2s2s 0
+
 static void
 gen4_emit_pipelined_pointers(struct sna *sna,
 			     const struct sna_composite_op *op,
@@ -882,6 +883,44 @@ gen4_emit_vertex_elements(struct sna *sna,
 		return;
 	render->ve_id = id;
 
+	if (id == VERTEX_2s2s) {
+		DBG(("%s: setup COPY\n", __FUNCTION__));
+		assert(op->floats_per_rect == 6);
+
+		OUT_BATCH(GEN4_3DSTATE_VERTEX_ELEMENTS | ((2 * (1 + 2)) + 1 - 2));
+
+		/* x,y */
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN4_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
+			  0 << VE0_OFFSET_SHIFT);
+		OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
+			  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT |
+			  4 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT);
+
+		/* s,t */
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN4_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
+			  4 << VE0_OFFSET_SHIFT);
+		OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
+			  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT |
+			  8 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT);
+
+		/* magic */
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN4_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
+			  0 << VE0_OFFSET_SHIFT);
+		OUT_BATCH(VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT |
+			  12 << VE1_DESTINATION_ELEMENT_OFFSET_SHIFT);
+		return;
+	}
+
 	/* The VUE layout
 	 *    dword 0-3: position (x, y, 1.0, 1.0),
 	 *    dword 4-7: texture coordinate 0 (u0, v0, w0, 1.0)
@@ -2272,39 +2311,6 @@ gen4_copy_bind_surfaces(struct sna *sna, const struct sna_composite_op *op)
 	gen4_emit_state(sna, op, offset | dirty);
 }
 
-static void
-gen4_render_copy_one(struct sna *sna,
-		     const struct sna_composite_op *op,
-		     int sx, int sy,
-		     int w, int h,
-		     int dx, int dy)
-{
-	gen4_get_rectangles(sna, op, 1, gen4_copy_bind_surfaces);
-
-	OUT_VERTEX(dx+w, dy+h);
-	OUT_VERTEX_F((sx+w)*op->src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->src.scale[1]);
-
-	OUT_VERTEX(dx, dy+h);
-	OUT_VERTEX_F(sx*op->src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->src.scale[1]);
-
-	OUT_VERTEX(dx, dy);
-	OUT_VERTEX_F(sx*op->src.scale[0]);
-	OUT_VERTEX_F(sy*op->src.scale[1]);
-}
-
-static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
-{
-#if PREFER_BLT
-	return true;
-	(void)sna;
-#else
-	return sna->kgem.mode != KGEM_RENDER;
-#endif
-	(void)flags;
-}
-
 static bool
 gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
@@ -2315,8 +2321,7 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 	DBG(("%s x %d\n", __FUNCTION__, n));
 
-	if (prefer_blt_copy(sna, flags) &&
-	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
+	if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
 			       dst_bo, dst_dx, dst_dy,
@@ -2408,20 +2413,20 @@ fallback_blt:
 					       extents.x2 - extents.x1,
 					       extents.y2 - extents.y1))
 			goto fallback_tiled_dst;
+
+		src_dx += tmp.src.offset[0];
+		src_dy += tmp.src.offset[1];
 	} else {
 		tmp.src.bo = kgem_bo_reference(src_bo);
 		tmp.src.width  = src->drawable.width;
 		tmp.src.height = src->drawable.height;
-		tmp.src.offset[0] = tmp.src.offset[1] = 0;
-		tmp.src.scale[0] = 1.f/src->drawable.width;
-		tmp.src.scale[1] = 1.f/src->drawable.height;
 	}
 
 	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
-	tmp.u.gen4.ve_id = 2;
+	tmp.u.gen4.ve_id = VERTEX_2s2s;
 	tmp.u.gen4.sf = 0;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
@@ -2434,19 +2439,33 @@ fallback_blt:
 	dst_dy += tmp.dst.y;
 	tmp.dst.x = tmp.dst.y = 0;
 
-	src_dx += tmp.src.offset[0];
-	src_dy += tmp.src.offset[1];
-
 	gen4_copy_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
 
 	do {
-		gen4_render_copy_one(sna, &tmp,
-				     box->x1 + src_dx, box->y1 + src_dy,
-				     box->x2 - box->x1, box->y2 - box->y1,
-				     box->x1 + dst_dx, box->y1 + dst_dy);
-		box++;
-	} while (--n);
+		int n_this_time;
+
+		n_this_time = gen4_get_rectangles(sna, &tmp, n,
+						  gen4_copy_bind_surfaces);
+		n -= n_this_time;
+
+		do {
+			DBG(("	(%d, %d) -> (%d, %d) + (%d, %d)\n",
+			     box->x1 + src_dx, box->y1 + src_dy,
+			     box->x1 + dst_dx, box->y1 + dst_dy,
+			     box->x2 - box->x1, box->y2 - box->y1));
+			OUT_VERTEX(box->x2 + dst_dx, box->y2 + dst_dy);
+			OUT_VERTEX(box->x2 + src_dx, box->y2 + src_dy);
+
+			OUT_VERTEX(box->x1 + dst_dx, box->y2 + dst_dy);
+			OUT_VERTEX(box->x1 + src_dx, box->y2 + src_dy);
+
+			OUT_VERTEX(box->x1 + dst_dx, box->y1 + dst_dy);
+			OUT_VERTEX(box->x1 + src_dx, box->y1 + src_dy);
+
+			box++;
+		} while (--n_this_time);
+	} while (n);
 
 	gen4_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
@@ -2472,7 +2491,19 @@ gen4_render_copy_blt(struct sna *sna,
 		     int16_t w,  int16_t h,
 		     int16_t dx, int16_t dy)
 {
-	gen4_render_copy_one(sna, &op->base, sx, sy, w, h, dx, dy);
+	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=(%d, %d)\n", __FUNCTION__,
+	     sx, sy, dx, dy, w, h));
+
+	gen4_get_rectangles(sna, &op->base, 1, gen4_copy_bind_surfaces);
+
+	OUT_VERTEX(dx+w, dy+h);
+	OUT_VERTEX(sx+w, sy+h);
+
+	OUT_VERTEX(dx, dy+h);
+	OUT_VERTEX(sx, sy+h);
+
+	OUT_VERTEX(dx, dy);
+	OUT_VERTEX(sx, sy);
 }
 
 static void
@@ -2480,16 +2511,8 @@ gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
 	if (sna->render.vertex_offset)
 		gen4_vertex_flush(sna);
-}
 
-static inline bool prefer_blt_fill(struct sna *sna)
-{
-#if PREFER_BLT
-	return true;
-	(void)sna;
-#else
-	return sna->kgem.mode != KGEM_RENDER;
-#endif
+	DBG(("%s()\n", __FUNCTION__));
 }
 
 static bool
@@ -2504,8 +2527,7 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 	     dst->drawable.serialNumber,
 	     alu));
 
-	if (prefer_blt_fill(sna) &&
-	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
+	if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
@@ -2546,16 +2568,14 @@ fallback:
 		gen4_get_card_format(op->base.src.pict_format);
 	op->base.src.width  = src->drawable.width;
 	op->base.src.height = src->drawable.height;
-	op->base.src.scale[0] = 1.f/src->drawable.width;
-	op->base.src.scale[1] = 1.f/src->drawable.height;
 	op->base.src.filter = SAMPLER_FILTER_NEAREST;
 	op->base.src.repeat = SAMPLER_EXTEND_NONE;
 
 	op->base.is_affine = true;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 	op->base.u.gen4.wm_kernel = WM_KERNEL;
-	op->base.u.gen4.ve_id = 2;
+	op->base.u.gen4.ve_id = VERTEX_2s2s;
 	op->base.u.gen4.sf = 0;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
@@ -2581,26 +2601,6 @@ fallback:
 	return true;
 }
 
-static void
-gen4_render_fill_rectangle(struct sna *sna,
-			   const struct sna_composite_op *op,
-			   int x, int y, int w, int h)
-{
-	gen4_get_rectangles(sna, op, 1, gen4_bind_surfaces);
-
-	OUT_VERTEX(x+w, y+h);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
-
-	OUT_VERTEX(x, y+h);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
-
-	OUT_VERTEX(x, y);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
-}
-
 static bool
 gen4_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
@@ -2618,10 +2618,7 @@ gen4_render_fill_boxes(struct sna *sna,
 		return false;
 	}
 
-	if (op <= PictOpSrc &&
-	    (prefer_blt_fill(sna) ||
-	     too_large(dst->drawable.width, dst->drawable.height) ||
-	     !gen4_check_dst_format(format))) {
+	if (op <= PictOpSrc) {
 		uint8_t alu = GXinvalid;
 
 		pixel = 0;
@@ -2675,10 +2672,10 @@ gen4_render_fill_boxes(struct sna *sna,
 	gen4_composite_solid_init(sna, &tmp.src, pixel);
 
 	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
-	tmp.u.gen4.ve_id = 2;
+	tmp.u.gen4.ve_id = VERTEX_2s2s;
 	tmp.u.gen4.sf = 0;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
@@ -2690,12 +2687,27 @@ gen4_render_fill_boxes(struct sna *sna,
 	gen4_align_vertex(sna, &tmp);
 
 	do {
-		gen4_render_fill_rectangle(sna, &tmp,
-					   box->x1, box->y1,
-					   box->x2 - box->x1,
-					   box->y2 - box->y1);
-		box++;
-	} while (--n);
+		int n_this_time;
+
+		n_this_time = gen4_get_rectangles(sna, &tmp, n,
+						  gen4_bind_surfaces);
+		n -= n_this_time;
+
+		do {
+			DBG(("	(%d, %d), (%d, %d)\n",
+			     box->x1, box->y1, box->x2, box->y2));
+			OUT_VERTEX(box->x2, box->y2);
+			OUT_VERTEX(1, 1);
+
+			OUT_VERTEX(box->x1, box->y2);
+			OUT_VERTEX(0, 1);
+
+			OUT_VERTEX(box->x1, box->y1);
+			OUT_VERTEX(0, 0);
+
+			box++;
+		} while (--n_this_time);
+	} while (n);
 
 	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -2703,10 +2715,22 @@ gen4_render_fill_boxes(struct sna *sna,
 }
 
 static void
-gen4_render_fill_op_blt(struct sna *sna, const struct sna_fill_op *op,
+gen4_render_fill_op_blt(struct sna *sna,
+			const struct sna_fill_op *op,
 			int16_t x, int16_t y, int16_t w, int16_t h)
 {
-	gen4_render_fill_rectangle(sna, &op->base, x, y, w, h);
+	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x,y,w,h));
+
+	gen4_get_rectangles(sna, &op->base, 1, gen4_bind_surfaces);
+
+	OUT_VERTEX(x+w, y+h);
+	OUT_VERTEX(1, 1);
+
+	OUT_VERTEX(x, y+h);
+	OUT_VERTEX(0, 1);
+
+	OUT_VERTEX(x, y);
+	OUT_VERTEX(0, 0);
 }
 
 fastcall static void
@@ -2714,9 +2738,19 @@ gen4_render_fill_op_box(struct sna *sna,
 			const struct sna_fill_op *op,
 			const BoxRec *box)
 {
-	gen4_render_fill_rectangle(sna, &op->base,
-				   box->x1, box->y1,
-				   box->x2-box->x1, box->y2-box->y1);
+	DBG(("%s: (%d, %d),(%d, %d)\n", __FUNCTION__,
+	     box->x1, box->y1, box->x2, box->y2));
+
+	gen4_get_rectangles(sna, &op->base, 1, gen4_bind_surfaces);
+
+	OUT_VERTEX(box->x2, box->y2);
+	OUT_VERTEX(1, 1);
+
+	OUT_VERTEX(box->x1, box->y2);
+	OUT_VERTEX(0, 1);
+
+	OUT_VERTEX(box->x1, box->y1);
+	OUT_VERTEX(0, 0);
 }
 
 fastcall static void
@@ -2725,12 +2759,28 @@ gen4_render_fill_op_boxes(struct sna *sna,
 			  const BoxRec *box,
 			  int nbox)
 {
+	DBG(("%s: (%d, %d),(%d, %d)... x %d\n", __FUNCTION__,
+	     box->x1, box->y1, box->x2, box->y2, nbox));
+
 	do {
-		gen4_render_fill_rectangle(sna, &op->base,
-					   box->x1, box->y1,
-					   box->x2-box->x1, box->y2-box->y1);
-		box++;
-	} while (--nbox);
+		int nbox_this_time;
+
+		nbox_this_time = gen4_get_rectangles(sna, &op->base, nbox,
+						     gen4_bind_surfaces);
+		nbox -= nbox_this_time;
+
+		do {
+			OUT_VERTEX(box->x2, box->y2);
+			OUT_VERTEX(1, 1);
+
+			OUT_VERTEX(box->x1, box->y2);
+			OUT_VERTEX(0, 1);
+
+			OUT_VERTEX(box->x1, box->y1);
+			OUT_VERTEX(0, 0);
+			box++;
+		} while (--nbox_this_time);
+	} while (nbox);
 }
 
 static void
@@ -2739,6 +2789,8 @@ gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 	if (sna->render.vertex_offset)
 		gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+
+	DBG(("%s()\n", __FUNCTION__));
 }
 
 static bool
@@ -2747,8 +2799,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 		 uint32_t color,
 		 struct sna_fill_op *op)
 {
-	if (prefer_blt_fill(sna) &&
-	    sna_blt_fill(sna, alu,
+	if (sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
@@ -2782,10 +2833,10 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.bo = NULL;
 
 	op->base.is_affine = true;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 	op->base.u.gen4.wm_kernel = WM_KERNEL;
-	op->base.u.gen4.ve_id = 2;
+	op->base.u.gen4.ve_id = VERTEX_2s2s;
 	op->base.u.gen4.sf = 0;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
@@ -2859,13 +2910,13 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.bo = NULL;
 
 	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.has_component_alpha = false;
 	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
-	tmp.u.gen4.ve_id = 2;
+	tmp.u.gen4.ve_id = VERTEX_2s2s;
 	tmp.u.gen4.sf = 0;
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
@@ -2876,7 +2927,17 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen4_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
 
-	gen4_render_fill_rectangle(sna, &tmp, x1, y1, x2 - x1, y2 - y1);
+	gen4_get_rectangles(sna, &tmp, 1, gen4_bind_surfaces);
+
+	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
+	OUT_VERTEX(x2, y2);
+	OUT_VERTEX(1, 1);
+
+	OUT_VERTEX(x1, y2);
+	OUT_VERTEX(0, 1);
+
+	OUT_VERTEX(x1, y1);
+	OUT_VERTEX(0, 0);
 
 	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
commit 08d2b073692836aa22f65f8ba30db5d14550c03e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Dec 20 21:30:32 2012 +0000

    sna/gen4: Backport more recent state tracking tweaks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 8f4f1d4..d899ad3 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -215,6 +215,8 @@ static void gen4_magic_ca_pass(struct sna *sna,
 	if (!op->need_magic_ca_pass)
 		return;
 
+	assert(sna->render.vertex_index > sna->render.vertex_start);
+
 	DBG(("%s: CA fixup\n", __FUNCTION__));
 	assert(op->mask.bo != NULL);
 	assert(op->has_component_alpha);
@@ -468,6 +470,17 @@ static bool gen4_check_repeat(PicturePtr picture)
 	}
 }
 
+static uint32_t
+gen4_tiling_bits(uint32_t tiling)
+{
+	switch (tiling) {
+	default: assert(0);
+	case I915_TILING_NONE: return 0;
+	case I915_TILING_X: return GEN4_SURFACE_TILED;
+	case I915_TILING_Y: return GEN4_SURFACE_TILED | GEN4_SURFACE_TILED_Y;
+	}
+}
+
 /**
  * Sets up the common fields for a surface state buffer for the given
  * picture in the given surface state buffer.
@@ -480,9 +493,9 @@ gen4_bind_bo(struct sna *sna,
 	     uint32_t format,
 	     bool is_dst)
 {
-	struct gen4_surface_state *ss;
 	uint32_t domains;
 	uint16_t offset;
+	uint32_t *ss;
 
 	assert(sna->kgem.gen != 040 || !kgem_bo_is_snoop(bo));
 
@@ -496,32 +509,30 @@ gen4_bind_bo(struct sna *sna,
 
 	offset = sna->kgem.surface -=
 		sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
-	ss = memset(sna->kgem.batch + offset, 0, sizeof(*ss));
+	ss = sna->kgem.batch + offset;
 
-	ss->ss0.surface_type = GEN4_SURFACE_2D;
-	ss->ss0.surface_format = format;
+	ss[0] = (GEN4_SURFACE_2D << GEN4_SURFACE_TYPE_SHIFT |
+		 GEN4_SURFACE_BLEND_ENABLED |
+		 format << GEN4_SURFACE_FORMAT_SHIFT);
 
 	if (is_dst)
 		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
 	else
 		domains = I915_GEM_DOMAIN_SAMPLER << 16;
+	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 
-	ss->ss0.data_return_format = GEN4_SURFACERETURNFORMAT_FLOAT32;
-	ss->ss0.color_blend = 1;
-	ss->ss1.base_addr =
-		kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
-
-	ss->ss2.height = height - 1;
-	ss->ss2.width  = width - 1;
-	ss->ss3.pitch  = bo->pitch - 1;
-	ss->ss3.tiled_surface = bo->tiling != I915_TILING_NONE;
-	ss->ss3.tile_walk     = bo->tiling == I915_TILING_Y;
+	ss[2] = ((width - 1)  << GEN4_SURFACE_WIDTH_SHIFT |
+		 (height - 1) << GEN4_SURFACE_HEIGHT_SHIFT);
+	ss[3] = (gen4_tiling_bits(bo->tiling) |
+		 (bo->pitch - 1) << GEN4_SURFACE_PITCH_SHIFT);
+	ss[4] = 0;
+	ss[5] = 0;
 
 	kgem_bo_set_binding(bo, format, offset);
 
 	DBG(("[%x] bind bo(handle=%d, addr=%d), format=%d, width=%d, height=%d, pitch=%d, tiling=%d -> %s\n",
-	     offset, bo->handle, ss->ss1.base_addr,
-	     ss->ss0.surface_format, width, height, bo->pitch, bo->tiling,
+	     offset, bo->handle, ss[1],
+	     format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
 	return offset * sizeof(uint32_t);
@@ -532,9 +543,12 @@ static void gen4_emit_vertex_buffer(struct sna *sna,
 {
 	int id = op->u.gen4.ve_id;
 
+	assert((sna->render.vb_id & (1 << id)) == 0);
+
 	OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | 3);
 	OUT_BATCH((id << VB0_BUFFER_INDEX_SHIFT) | VB0_VERTEXDATA |
 		  (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
+	assert(sna->render.nvertex_reloc < ARRAY_SIZE(sna->render.vertex_reloc));
 	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -897,7 +911,7 @@ gen4_emit_vertex_elements(struct sna *sna,
 		src_format = GEN4_SURFACEFORMAT_R32_FLOAT;
 		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
 		dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
-		dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
 		break;
 	default:
 		assert(0);
@@ -905,7 +919,7 @@ gen4_emit_vertex_elements(struct sna *sna,
 		src_format = GEN4_SURFACEFORMAT_R32G32_FLOAT;
 		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
 		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
-		dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
 		break;
 	case 3:
 		src_format = GEN4_SURFACEFORMAT_R32G32B32_FLOAT;
@@ -931,7 +945,7 @@ gen4_emit_vertex_elements(struct sna *sna,
 			src_format = GEN4_SURFACEFORMAT_R32_FLOAT;
 			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
 			dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
-			dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
 			break;
 		default:
 			assert(0);
@@ -939,7 +953,7 @@ gen4_emit_vertex_elements(struct sna *sna,
 			src_format = GEN4_SURFACEFORMAT_R32G32_FLOAT;
 			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
 			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
-			dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
 			break;
 		case 3:
 			src_format = GEN4_SURFACEFORMAT_R32G32B32_FLOAT;
@@ -1014,7 +1028,7 @@ gen4_bind_surfaces(struct sna *sna,
 			     op->src.card_format,
 			     false);
 	if (op->mask.bo) {
-		assert(op->u.gen4.ve_id & 2);
+		assert(op->u.gen4.ve_id >> 2);
 		binding_table[2] =
 			gen4_bind_bo(sna,
 				     op->mask.bo,
@@ -1193,7 +1207,6 @@ static void gen4_video_bind_surfaces(struct sna *sna,
 	gen4_get_batch(sna, op);
 
 	binding_table = gen4_composite_get_binding_table(sna, &offset);
-
 	binding_table[0] =
 		gen4_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
@@ -1580,34 +1593,48 @@ gen4_render_composite_done(struct sna *sna,
 }
 
 static bool
-gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
+gen4_composite_set_target(struct sna *sna,
+			  struct sna_composite_op *op,
+			  PicturePtr dst,
+			  int x, int y, int w, int h)
 {
-	struct sna_pixmap *priv;
-
-	if (!gen4_check_dst_format(dst->format)) {
-		DBG(("%s: incompatible render target format %08x\n",
-		     __FUNCTION__, dst->format));
-		return false;
-	}
+	BoxRec box;
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
 	op->dst.width  = op->dst.pixmap->drawable.width;
 	op->dst.height = op->dst.pixmap->drawable.height;
 	op->dst.format = dst->format;
-	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
-	if (priv == NULL)
-		return false;
+	if (w && h) {
+		box.x1 = x;
+		box.y1 = y;
+		box.x2 = x + w;
+		box.y2 = y + h;
+	} else
+		sna_render_picture_extents(dst, &box);
 
-	op->dst.bo = priv->gpu_bo;
-	op->damage = &priv->gpu_damage;
-	if (sna_damage_is_all(&priv->gpu_damage, op->dst.width, op->dst.height))
-		op->damage = NULL;
-	DBG(("%s: all-damaged=%d, damage=%p\n", __FUNCTION__,
-	     sna_damage_is_all(&priv->gpu_damage, op->dst.width, op->dst.height),
-	    op->damage));
+	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
+					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
+					  &box, &op->damage);
+	if (op->dst.bo == NULL)
+		return false;
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
+
+	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
+	     __FUNCTION__,
+	     op->dst.pixmap, (int)op->dst.format,
+	     op->dst.width, op->dst.height,
+	     op->dst.bo->pitch,
+	     op->dst.x, op->dst.y,
+	     op->damage ? *op->damage : (void *)-1));
+
+	assert(op->dst.bo->proxy == NULL);
+
+	if (too_large(op->dst.width, op->dst.height) &&
+	    !sna_render_composite_redirect(sna, op, x, y, w, h))
+		return false;
+
 	return true;
 }
 
@@ -1690,6 +1717,9 @@ source_is_busy(PixmapPtr pixmap)
 	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
 		return true;
 
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return true;
+
 	return priv->gpu_damage && !priv->cpu_damage;
 }
 
@@ -1895,15 +1925,11 @@ gen4_render_composite(struct sna *sna,
 					    width, height,
 					    tmp);
 
-	if (!gen4_composite_set_target(dst, tmp))
-		return false;
-	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
-
-	sna_render_composite_redirect_init(tmp);
-	if (too_large(tmp->dst.width, tmp->dst.height) &&
-	    !sna_render_composite_redirect(sna, tmp,
-					   dst_x, dst_y, width, height))
+	if (!gen4_composite_set_target(sna, tmp, dst,
+				       dst_x, dst_y, width, height)) {
+		DBG(("%s: failed to set composite target\n", __FUNCTION__));
 		return false;
+	}
 
 	tmp->op = op;
 	switch (gen4_composite_picture(sna, src, &tmp->src,
@@ -2023,7 +2049,6 @@ cleanup_dst:
 	return false;
 }
 
-/* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
 fastcall static void
 gen4_render_composite_spans_box(struct sna *sna,
@@ -2056,21 +2081,33 @@ gen4_render_composite_spans_boxes(struct sna *sna,
 	     op->base.dst.x, op->base.dst.y));
 
 	do {
-		gen4_render_composite_spans_box(sna, op, box++, opacity);
-	} while (--nbox);
+		int nbox_this_time;
+
+		nbox_this_time = gen4_get_rectangles(sna, &op->base, nbox,
+						     gen4_bind_surfaces);
+		nbox -= nbox_this_time;
+
+		do {
+			DBG(("  %s: (%d, %d) x (%d, %d)\n", __FUNCTION__,
+			     box->x1, box->y1,
+			     box->x2 - box->x1,
+			     box->y2 - box->y1));
+
+			op->prim_emit(sna, op, box++, opacity);
+		} while (--nbox_this_time);
+	} while (nbox);
 }
 
 fastcall static void
 gen4_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
-	gen4_vertex_flush(sna);
+	if (sna->render.vertex_offset)
+		gen4_vertex_flush(sna);
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	if (op->base.src.bo)
-		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
-
+	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
@@ -2144,16 +2181,9 @@ gen4_render_composite_spans(struct sna *sna,
 	}
 
 	tmp->base.op = op;
-	if (!gen4_composite_set_target(dst, &tmp->base))
+	if (!gen4_composite_set_target(sna, &tmp->base, dst,
+				       dst_x, dst_y, width, height))
 		return false;
-	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
-
-	sna_render_composite_redirect_init(&tmp->base);
-	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
-		if (!sna_render_composite_redirect(sna, &tmp->base,
-						   dst_x, dst_y, width, height))
-			return false;
-	}
 
 	switch (gen4_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
@@ -2448,7 +2478,8 @@ gen4_render_copy_blt(struct sna *sna,
 static void
 gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen4_vertex_flush(sna);
+	if (sna->render.vertex_offset)
+		gen4_vertex_flush(sna);
 }
 
 static inline bool prefer_blt_fill(struct sna *sna)
@@ -2705,7 +2736,8 @@ gen4_render_fill_op_boxes(struct sna *sna,
 static void
 gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen4_vertex_flush(sna);
+	if (sna->render.vertex_offset)
+		gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
diff --git a/src/sna/gen4_render.h b/src/sna/gen4_render.h
index 2eae1ec..53c7fc2 100644
--- a/src/sna/gen4_render.h
+++ b/src/sna/gen4_render.h
@@ -25,8 +25,8 @@
  *
  **************************************************************************/
 
-#ifndef GEN5_RENDER_H
-#define GEN5_RENDER_H
+#ifndef GEN4_RENDER_H
+#define GEN4_RENDER_H
 
 #define GEN4_3D(Pipeline,Opcode,Subopcode) ((3 << 29) | \
 					   ((Pipeline) << 27) | \
@@ -724,8 +724,8 @@
 #define GEN4_INSTRUCTION_NORMAL    0
 #define GEN4_INSTRUCTION_SATURATE  1
 
-#define GEN4_MASK_ENABLE   0
-#define GEN4_MASK_DISABLE  1
+#define _MASK_ENABLE   0
+#define _MASK_DISABLE  1
 
 #define GEN4_OPCODE_MOV        1
 #define GEN4_OPCODE_SEL        2
@@ -2042,6 +2042,54 @@ struct gen4_surface_state
    } ss5;
 };
 
+/* Surface state DW0 */
+#define GEN4_SURFACE_RC_READ_WRITE       (1 << 8)
+#define GEN4_SURFACE_MIPLAYOUT_SHIFT     10
+#define GEN4_SURFACE_MIPMAPLAYOUT_BELOW   0
+#define GEN4_SURFACE_MIPMAPLAYOUT_RIGHT   1
+#define GEN4_SURFACE_CUBEFACE_ENABLES    0x3f
+#define GEN4_SURFACE_BLEND_ENABLED       (1 << 13)
+#define GEN4_SURFACE_WRITEDISABLE_B_SHIFT        14
+#define GEN4_SURFACE_WRITEDISABLE_G_SHIFT        15
+#define GEN4_SURFACE_WRITEDISABLE_R_SHIFT        16
+#define GEN4_SURFACE_WRITEDISABLE_A_SHIFT        17
+#define GEN4_SURFACE_FORMAT_SHIFT        18
+#define GEN4_SURFACE_FORMAT_MASK         _MASK(26, 18)
+
+#define GEN4_SURFACE_TYPE_SHIFT          29
+#define GEN4_SURFACE_TYPE_MASK           _MASK(31, 29)
+#define GEN4_SURFACE_1D      0
+#define GEN4_SURFACE_2D      1
+#define GEN4_SURFACE_3D      2
+#define GEN4_SURFACE_CUBE    3
+#define GEN4_SURFACE_BUFFER  4
+#define GEN4_SURFACE_NULL    7
+
+/* Surface state DW2 */
+#define GEN4_SURFACE_HEIGHT_SHIFT        19
+#define GEN4_SURFACE_HEIGHT_MASK         _MASK(31, 19)
+#define GEN4_SURFACE_WIDTH_SHIFT         6
+#define GEN4_SURFACE_WIDTH_MASK          _MASK(18, 6)
+#define GEN4_SURFACE_LOD_SHIFT           2
+#define GEN4_SURFACE_LOD_MASK            _MASK(5, 2)
+
+/* Surface state DW3 */
+#define GEN4_SURFACE_DEPTH_SHIFT         21
+#define GEN4_SURFACE_DEPTH_MASK          _MASK(31, 21)
+#define GEN4_SURFACE_PITCH_SHIFT         3
+#define GEN4_SURFACE_PITCH_MASK          _MASK(19, 3)
+#define GEN4_SURFACE_TILED               (1 << 1)
+#define GEN4_SURFACE_TILED_Y             (1 << 0)
+
+/* Surface state DW4 */
+#define GEN4_SURFACE_MIN_LOD_SHIFT       28
+#define GEN4_SURFACE_MIN_LOD_MASK        _MASK(31, 28)
+
+/* Surface state DW5 */
+#define GEN4_SURFACE_X_OFFSET_SHIFT      25
+#define GEN4_SURFACE_X_OFFSET_MASK       _MASK(31, 25)
+#define GEN4_SURFACE_Y_OFFSET_SHIFT      20
+#define GEN4_SURFACE_Y_OFFSET_MASK       _MASK(23, 20)
 
 
 struct gen4_vertex_buffer_state
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 6b3d806..56178a6 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -211,6 +211,8 @@ static void gen5_magic_ca_pass(struct sna *sna,
 	assert(sna->render.vertex_index > sna->render.vertex_start);
 
 	DBG(("%s: CA fixup\n", __FUNCTION__));
+	assert(op->mask.bo != NULL);
+	assert(op->has_component_alpha);
 
 	gen5_emit_pipelined_pointers
 		(sna, op, PictOpAdd,
@@ -595,9 +597,7 @@ static int gen5_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 20 : 6))
 		return 0;
-	if (!kgem_check_exec(&sna->kgem, 1))
-		return 0;
-	if (!kgem_check_reloc(&sna->kgem, 2))
+	if (!kgem_check_reloc_and_exec(&sna->kgem, 1))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
@@ -628,7 +628,7 @@ start:
 		     !gen5_rectangle_begin(sna, op)))
 		goto flush;
 
-	if (want * op->floats_per_rect > rem)
+	if (want > 1 && want * op->floats_per_rect > rem)
 		want = rem / op->floats_per_rect;
 
 	sna->render.vertex_index += 3*want;
@@ -648,18 +648,15 @@ static uint32_t *
 gen5_composite_get_binding_table(struct sna *sna,
 				 uint16_t *offset)
 {
-	uint32_t *table;
-
 	sna->kgem.surface -=
 		sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
-	/* Clear all surplus entries to zero in case of prefetch */
-	table = memset(sna->kgem.batch + sna->kgem.surface,
-		       0, sizeof(struct gen5_surface_state_padded));
-	*offset = sna->kgem.surface;
 
 	DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface));
 
-	return table;
+	/* Clear all surplus entries to zero in case of prefetch */
+	*offset = sna->kgem.surface;
+	return memset(sna->kgem.batch + sna->kgem.surface,
+		      0, sizeof(struct gen5_surface_state_padded));
 }
 
 static void
@@ -765,6 +762,7 @@ gen5_get_batch(struct sna *sna, const struct sna_composite_op *op)
 static void
 gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
+	assert(op->floats_per_rect == 3*op->floats_per_vertex);
 	if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
 			gen4_vertex_finish(sna);
@@ -804,33 +802,36 @@ gen5_emit_pipelined_pointers(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     int blend, int kernel)
 {
-	uint16_t offset = sna->kgem.nbatch, last;
+	uint16_t sp, bp;
+	uint32_t key;
+
+	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
+	     __FUNCTION__, op->u.gen4.ve_id & 2,
+	     op->src.filter, op->src.repeat,
+	     op->mask.filter, op->mask.repeat,
+	     kernel, blend, op->has_component_alpha, (int)op->dst.format));
+
+	sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
+			    op->mask.filter, op->mask.repeat,
+			    kernel);
+	bp = gen5_get_blend(blend, op->has_component_alpha, op->dst.format);
+
+	DBG(("%s: sp=%d, bp=%d\n", __FUNCTION__, sp, bp));
+	key = sp | (uint32_t)bp << 16 | (op->mask.bo != NULL) << 31;
+	if (key == sna->render_state.gen5.last_pipelined_pointers)
+		return false;
+
 
 	OUT_BATCH(GEN5_3DSTATE_PIPELINED_POINTERS | 5);
 	OUT_BATCH(sna->render_state.gen5.vs);
 	OUT_BATCH(GEN5_GS_DISABLE); /* passthrough */
 	OUT_BATCH(GEN5_CLIP_DISABLE); /* passthrough */
 	OUT_BATCH(sna->render_state.gen5.sf[op->mask.bo != NULL]);
-	OUT_BATCH(sna->render_state.gen5.wm +
-		  SAMPLER_OFFSET(op->src.filter, op->src.repeat,
-				 op->mask.filter, op->mask.repeat,
-				 kernel));
-	OUT_BATCH(sna->render_state.gen5.cc +
-		  gen5_get_blend(blend, op->has_component_alpha, op->dst.format));
-
-	last = sna->render_state.gen5.last_pipelined_pointers;
-	if (!DBG_NO_STATE_CACHE && last &&
-	    sna->kgem.batch[offset + 1] == sna->kgem.batch[last + 1] &&
-	    sna->kgem.batch[offset + 3] == sna->kgem.batch[last + 3] &&
-	    sna->kgem.batch[offset + 4] == sna->kgem.batch[last + 4] &&
-	    sna->kgem.batch[offset + 5] == sna->kgem.batch[last + 5] &&
-	    sna->kgem.batch[offset + 6] == sna->kgem.batch[last + 6]) {
-		sna->kgem.nbatch = offset;
-		return false;
-	} else {
-		sna->render_state.gen5.last_pipelined_pointers = offset;
-		return true;
-	}
+	OUT_BATCH(sna->render_state.gen5.wm + sp);
+	OUT_BATCH(sna->render_state.gen5.cc + bp);
+
+	sna->render_state.gen5.last_pipelined_pointers = key;
+	return true;
 }
 
 static void
@@ -912,7 +913,6 @@ gen5_emit_vertex_elements(struct sna *sna,
 		return;
 	}
 
-
 	/* The VUE layout
 	 *    dword 0-3: pad (0.0, 0.0, 0.0. 0.0)
 	 *    dword 4-7: position (x, y, 1.0, 1.0),
@@ -1048,7 +1048,8 @@ static void gen5_bind_surfaces(struct sna *sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
 			     false);
-	if (op->mask.bo)
+	if (op->mask.bo) {
+		assert(op->u.gen4.ve_id >> 2);
 		binding_table[2] =
 			gen5_bind_bo(sna,
 				     op->mask.bo,
@@ -1056,6 +1057,7 @@ static void gen5_bind_surfaces(struct sna *sna,
 				     op->mask.height,
 				     op->mask.card_format,
 				     false);
+	}
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table &&
@@ -1188,8 +1190,8 @@ static void gen5_video_bind_surfaces(struct sna *sna,
 	int src_height[6];
 	int src_pitch[6];
 	uint32_t *binding_table;
-	int n_src, n;
 	uint16_t offset;
+	int n_src, n;
 
 	src_surf_base[0] = 0;
 	src_surf_base[1] = 0;
@@ -1223,8 +1225,8 @@ static void gen5_video_bind_surfaces(struct sna *sna,
 	}
 
 	gen5_get_batch(sna, op);
-	binding_table = gen5_composite_get_binding_table(sna, &offset);
 
+	binding_table = gen5_composite_get_binding_table(sna, &offset);
 	binding_table[0] =
 		gen5_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
@@ -1349,7 +1351,7 @@ gen5_render_video(struct sna *sna,
 	return true;
 }
 
-static int
+static bool
 gen5_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
@@ -1611,10 +1613,9 @@ gen5_composite_set_target(struct sna *sna,
 	BoxRec box;
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
-	op->dst.format = dst->format;
-	op->dst.width = op->dst.pixmap->drawable.width;
+	op->dst.width  = op->dst.pixmap->drawable.width;
 	op->dst.height = op->dst.pixmap->drawable.height;
-
+	op->dst.format = dst->format;
 	if (w && h) {
 		box.x1 = x;
 		box.y1 = y;
@@ -2345,7 +2346,6 @@ fallback_blt:
 			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
-
 		if (!sna_render_composite_redirect(sna, &tmp,
 						   extents.x1 + dst_dx,
 						   extents.y1 + dst_dy,
@@ -2539,7 +2539,7 @@ fallback:
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
 	op->base.u.gen5.ve_id = VERTEX_2s2s;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))  {
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
 			goto fallback;
@@ -2656,16 +2656,19 @@ gen5_render_fill_boxes(struct sna *sna,
 						     dst, dst_bo, box, n);
 	}
 
-	if (op == PictOpClear)
+	if (op == PictOpClear) {
 		pixel = 0;
-	else if (!sna_get_pixel_from_rgba(&pixel,
-					  color->red,
-					  color->green,
-					  color->blue,
-					  color->alpha,
-					  PICT_a8r8g8b8))
+		op = PictOpSrc;
+	} else if (!sna_get_pixel_from_rgba(&pixel,
+					    color->red,
+					    color->green,
+					    color->blue,
+					    color->alpha,
+					    PICT_a8r8g8b8))
 		return false;
 
+	DBG(("%s(%08x x %d)\n", __FUNCTION__, pixel, n));
+
 	memset(&tmp, 0, sizeof(tmp));
 
 	tmp.op = op;
@@ -3178,23 +3181,11 @@ static void gen5_init_wm_state(struct gen5_wm_unit_state *state,
 	state->thread1.binding_table_entry_count = 0;
 }
 
-static uint32_t gen5_create_cc_viewport(struct sna_static_stream *stream)
-{
-	struct gen5_cc_viewport vp;
-
-	vp.min_depth = -1.e35;
-	vp.max_depth = 1.e35;
-
-	return sna_static_stream_add(stream, &vp, sizeof(vp), 32);
-}
-
 static uint32_t gen5_create_cc_unit_state(struct sna_static_stream *stream)
 {
 	uint8_t *ptr, *base;
-	uint32_t vp;
 	int i, j;
 
-	vp = gen5_create_cc_viewport(stream);
 	base = ptr =
 		sna_static_stream_map(stream,
 				      GEN5_BLENDFACTOR_COUNT*GEN5_BLENDFACTOR_COUNT*64,
@@ -3207,7 +3198,6 @@ static uint32_t gen5_create_cc_unit_state(struct sna_static_stream *stream)
 
 			state->cc3.blend_enable =
 				!(j == GEN5_BLENDFACTOR_ZERO && i == GEN5_BLENDFACTOR_ONE);
-			state->cc4.cc_viewport_state_offset = vp >> 5;
 
 			state->cc5.logicop_func = 0xc;	/* COPY */
 			state->cc5.ia_blend_function = GEN5_BLENDFUNCTION_ADD;
@@ -3291,8 +3281,7 @@ static bool gen5_render_setup(struct sna *sna)
 					for (m = 0; m < KERNEL_COUNT; m++) {
 						gen5_init_wm_state(&wm_state->state,
 								   wm_kernels[m].has_mask,
-								   wm[m],
-								   sampler_state);
+								   wm[m], sampler_state);
 						wm_state++;
 					}
 				}
commit 8ff76fad1fadc5e309f9a12c30f883460a432049
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Dec 20 20:57:40 2012 +0000

    sna/gen5: Backport tight vertex packing for simple renderblits
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index f013e09..6b3d806 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -165,6 +165,8 @@ static const struct blendinfo {
 #define SAMPLER_OFFSET(sf, se, mf, me, k) \
 	((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * KERNEL_COUNT + (k)) * 64)
 
+#define VERTEX_2s2s 0
+
 static bool
 gen5_emit_pipelined_pointers(struct sna *sna,
 			     const struct sna_composite_op *op,
@@ -876,6 +878,41 @@ gen5_emit_vertex_elements(struct sna *sna,
 	DBG(("%s: changing %d -> %d\n", __FUNCTION__, render->ve_id, id));
 	render->ve_id = id;
 
+	if (id == VERTEX_2s2s) {
+		DBG(("%s: setup COPY\n", __FUNCTION__));
+		assert(op->floats_per_rect == 6);
+
+		OUT_BATCH(GEN5_3DSTATE_VERTEX_ELEMENTS | ((2 * (1 + 2)) + 1 - 2));
+
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN5_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
+			  0 << VE0_OFFSET_SHIFT);
+		OUT_BATCH(VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);
+
+		/* x,y */
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
+			  0 << VE0_OFFSET_SHIFT);
+		OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
+			  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
+
+		/* s,t */
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
+			  4 << VE0_OFFSET_SHIFT);
+		OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
+			  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
+			  VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
+		return;
+	}
+
+
 	/* The VUE layout
 	 *    dword 0-3: pad (0.0, 0.0, 0.0. 0.0)
 	 *    dword 4-7: position (x, y, 1.0, 1.0),
@@ -2342,20 +2379,20 @@ fallback_blt:
 					       extents.x2 - extents.x1,
 					       extents.y2 - extents.y1))
 			goto fallback_tiled_dst;
+
+		src_dx += tmp.src.offset[0];
+		src_dy += tmp.src.offset[1];
 	} else {
 		tmp.src.bo = kgem_bo_reference(src_bo);
 		tmp.src.width  = src->drawable.width;
 		tmp.src.height = src->drawable.height;
-		tmp.src.offset[0] = tmp.src.offset[1] = 0;
-		tmp.src.scale[0] = 1.f/src->drawable.width;
-		tmp.src.scale[1] = 1.f/src->drawable.height;
 	}
 
 	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
-	tmp.u.gen5.ve_id = 2;
+	tmp.u.gen5.ve_id = VERTEX_2s2s;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -2367,9 +2404,6 @@ fallback_blt:
 	dst_dy += tmp.dst.y;
 	tmp.dst.x = tmp.dst.y = 0;
 
-	src_dx += tmp.src.offset[0];
-	src_dy += tmp.src.offset[1];
-
 	gen5_copy_bind_surfaces(sna, &tmp);
 	gen5_align_vertex(sna, &tmp);
 
@@ -2386,16 +2420,13 @@ fallback_blt:
 			     box->x1 + dst_dx, box->y1 + dst_dy,
 			     box->x2 - box->x1, box->y2 - box->y1));
 			OUT_VERTEX(box->x2 + dst_dx, box->y2 + dst_dy);
-			OUT_VERTEX_F((box->x2 + src_dx) * tmp.src.scale[0]);
-			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);
+			OUT_VERTEX(box->x2 + src_dx, box->y2 + src_dy);
 
 			OUT_VERTEX(box->x1 + dst_dx, box->y2 + dst_dy);
-			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
-			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);
+			OUT_VERTEX(box->x1 + src_dx, box->y2 + src_dy);
 
 			OUT_VERTEX(box->x1 + dst_dx, box->y1 + dst_dy);
-			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
-			OUT_VERTEX_F((box->y1 + src_dy) * tmp.src.scale[1]);
+			OUT_VERTEX(box->x1 + src_dx, box->y1 + src_dy);
 
 			box++;
 		} while (--n_this_time);
@@ -2431,16 +2462,13 @@ gen5_render_copy_blt(struct sna *sna,
 	gen5_get_rectangles(sna, &op->base, 1, gen5_copy_bind_surfaces);
 
 	OUT_VERTEX(dx+w, dy+h);
-	OUT_VERTEX_F((sx+w)*op->base.src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);
+	OUT_VERTEX(sx+w, sy+h);
 
 	OUT_VERTEX(dx, dy+h);
-	OUT_VERTEX_F(sx*op->base.src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);
+	OUT_VERTEX(sx, sy+h);
 
 	OUT_VERTEX(dx, dy);
-	OUT_VERTEX_F(sx*op->base.src.scale[0]);
-	OUT_VERTEX_F(sy*op->base.src.scale[1]);
+	OUT_VERTEX(sx, sy);
 }
 
 static void
@@ -2502,16 +2530,14 @@ fallback:
 		gen5_get_card_format(op->base.src.pict_format);
 	op->base.src.width  = src->drawable.width;
 	op->base.src.height = src->drawable.height;
-	op->base.src.scale[0] = 1.f/src->drawable.width;
-	op->base.src.scale[1] = 1.f/src->drawable.height;
 	op->base.src.filter = SAMPLER_FILTER_NEAREST;
 	op->base.src.repeat = SAMPLER_EXTEND_NONE;
 
 	op->base.is_affine = true;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
-	op->base.u.gen5.ve_id = 2;
+	op->base.u.gen5.ve_id = VERTEX_2s2s;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))  {
 		kgem_submit(&sna->kgem);
@@ -2655,10 +2681,10 @@ gen5_render_fill_boxes(struct sna *sna,
 	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
 
 	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
-	tmp.u.gen5.ve_id = 2;
+	tmp.u.gen5.ve_id = VERTEX_2s2s;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -2679,16 +2705,13 @@ gen5_render_fill_boxes(struct sna *sna,
 			DBG(("	(%d, %d), (%d, %d)\n",
 			     box->x1, box->y1, box->x2, box->y2));
 			OUT_VERTEX(box->x2, box->y2);
-			OUT_VERTEX_F(1);
-			OUT_VERTEX_F(1);
+			OUT_VERTEX(1, 1);
 
 			OUT_VERTEX(box->x1, box->y2);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(1);
+			OUT_VERTEX(0, 1);
 
 			OUT_VERTEX(box->x1, box->y1);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(0);
+			OUT_VERTEX(0, 0);
 
 			box++;
 		} while (--n_this_time);
@@ -2709,16 +2732,13 @@ gen5_render_fill_op_blt(struct sna *sna,
 	gen5_get_rectangles(sna, &op->base, 1, gen5_fill_bind_surfaces);
 
 	OUT_VERTEX(x+w, y+h);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	OUT_VERTEX(1, 1);
 
 	OUT_VERTEX(x, y+h);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	OUT_VERTEX(0, 1);
 
 	OUT_VERTEX(x, y);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	OUT_VERTEX(0, 0);
 }
 
 fastcall static void
@@ -2732,16 +2752,13 @@ gen5_render_fill_op_box(struct sna *sna,
 	gen5_get_rectangles(sna, &op->base, 1, gen5_fill_bind_surfaces);
 
 	OUT_VERTEX(box->x2, box->y2);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	OUT_VERTEX(1, 1);
 
 	OUT_VERTEX(box->x1, box->y2);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	OUT_VERTEX(0, 1);
 
 	OUT_VERTEX(box->x1, box->y1);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	OUT_VERTEX(0, 0);
 }
 
 fastcall static void
@@ -2762,16 +2779,13 @@ gen5_render_fill_op_boxes(struct sna *sna,
 
 		do {
 			OUT_VERTEX(box->x2, box->y2);
-			OUT_VERTEX_F(1);
-			OUT_VERTEX_F(1);
+			OUT_VERTEX(1, 1);
 
 			OUT_VERTEX(box->x1, box->y2);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(1);
+			OUT_VERTEX(0, 1);
 
 			OUT_VERTEX(box->x1, box->y1);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(0);
+			OUT_VERTEX(0, 0);
 			box++;
 		} while (--nbox_this_time);
 	} while (nbox);
@@ -2837,10 +2851,10 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	op->base.is_affine = true;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
-	op->base.u.gen5.ve_id = 2;
+	op->base.u.gen5.ve_id = VERTEX_2s2s;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -2925,13 +2939,13 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.has_component_alpha = 0;
 	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
-	tmp.u.gen5.ve_id = 2;
+	tmp.u.gen5.ve_id = VERTEX_2s2s;
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
@@ -2945,16 +2959,13 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
 	OUT_VERTEX(x2, y2);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	OUT_VERTEX(1, 1);
 
 	OUT_VERTEX(x1, y2);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	OUT_VERTEX(0, 1);
 
 	OUT_VERTEX(x1, y1);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	OUT_VERTEX(0, 0);
 
 	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -3041,7 +3052,7 @@ static void gen5_render_reset(struct sna *sna)
 	sna->render_state.gen5.needs_invariant = true;
 	sna->render_state.gen5.ve_id = -1;
 	sna->render_state.gen5.last_primitive = -1;
-	sna->render_state.gen5.last_pipelined_pointers = 0;
+	sna->render_state.gen5.last_pipelined_pointers = -1;
 
 	sna->render_state.gen5.drawrect_offset = -1;
 	sna->render_state.gen5.drawrect_limit = -1;
commit 9144c951915a1e0c1899a72161f9f0f1ab9b9ac4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Dec 21 09:44:52 2012 +0000

    sna/dri: Avoid querying the current-msc with swapbuffers wait disabled
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index fc84e94..72244f8 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -2082,7 +2082,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	info->type = swap_type;
 
-	if (*target_msc) {
+	if (*target_msc && (sna->flags & SNA_NO_WAIT) == 0) {
 		/* Get current count */
 		vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 		vbl.request.sequence = 0;
@@ -2092,6 +2092,9 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	} else
 		current_msc = 0;
 
+	DBG(("%s: target_msc=%u, current_msc=%u, divisor=%u\n",
+	     __FUNCTION__, *target_msc, current_msc, divisor));
+
 	if (divisor == 0 && current_msc >= *target_msc) {
 		if (can_exchange(sna, draw, front, back)) {
 			sna_dri_immediate_xchg(sna, draw, info);


More information about the xorg-commit mailing list