xf86-video-intel: 13 commits - src/sna/gen2_render.c src/sna/gen3_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_io.c src/sna/sna_reg.h src/sna/sna_render.c src/sna/sna_render.h

Chris Wilson ickle at kemper.freedesktop.org
Sun Oct 2 15:28:59 PDT 2011


 src/sna/gen2_render.c |   83 +++++++++++++++++
 src/sna/gen3_render.c |   85 ++++++++++++++++++
 src/sna/kgem.c        |   85 ++++++++++++++----
 src/sna/kgem.h        |   11 ++
 src/sna/sna_accel.c   |  233 ++++++++++++++++++++++++++++++++++++++++----------
 src/sna/sna_blt.c     |   80 ++++++++++-------
 src/sna/sna_io.c      |    2 
 src/sna/sna_reg.h     |    3 
 src/sna/sna_render.c  |   21 ++++
 src/sna/sna_render.h  |    4 
 10 files changed, 512 insertions(+), 95 deletions(-)

New commits:
commit d8c96a6a1d50cd3dd40fd4b78958c8f78337a2fb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 2 12:57:53 2011 +0100

    sna/blt: Use SETUP_MONO to reduce the number of fill relocations
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
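
For reference, a sketch of the packet layout this commit switches to
(illustrative helper names; the exact code is in the diff below): the blitter
is programmed once per batch with a setup packet that carries the only
relocation, after which each rectangle costs just a three-dword
XY_SCANLINE_BLT with no relocation at all.

	/* Sketch: emitted once, 9 dwords, the sole relocation. */
	static void fill_setup(uint32_t *b, uint32_t cmd, uint32_t br13,
			       uint32_t reloc, uint32_t pixel)
	{
		b[0] = cmd;		/* XY_SETUP_MONO_PATTERN_SL_BLT */
		b[1] = br13;		/* 1<<31 | rop<<16 | pitch | bpp */
		b[2] = b[3] = 0;	/* clip rectangle, unused */
		b[4] = reloc;		/* fenced relocation to the target bo */
		b[5] = b[6] = pixel;	/* solid mono "pattern" == fill colour */
		b[7] = b[8] = 0;
	}

	/* Sketch: emitted per rectangle, 3 dwords, no relocation. */
	static void fill_rect(uint32_t *b, int x, int y, int w, int h)
	{
		b[0] = XY_SCANLINE_BLT;	/* | 1<<11 on gen4+ for tiled bo */
		b[1] = (uint32_t)y << 16 | (uint16_t)x;
		b[2] = (uint32_t)(y + h) << 16 | (uint16_t)(x + w);
	}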

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 43a7cad..43aaef0 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2003,6 +2003,87 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 	return TRUE;
 }
 
+static Bool
+gen2_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
+			     uint32_t color,
+			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
+			     uint8_t alu)
+{
+	BoxRec box;
+
+	box.x1 = x1;
+	box.y1 = y1;
+	box.x2 = x2;
+	box.y2 = y2;
+
+	return sna_blt_fill_boxes(sna, alu,
+				  bo, dst->drawable.bitsPerPixel,
+				  color, &box, 1);
+}
+
+static Bool
+gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
+		     uint32_t color,
+		     int16_t x1, int16_t y1,
+		     int16_t x2, int16_t y2,
+		     uint8_t alu)
+{
+	struct sna_composite_op tmp;
+
+#if NO_FILL_BOXES
+	return gen2_render_fill_one_try_blt(sna, dst, bo, color,
+					    x1, y1, x2, y2, alu);
+#endif
+
+	/* Prefer to use the BLT if already engaged */
+	if (sna->kgem.mode != KGEM_RENDER &&
+	    gen2_render_fill_one_try_blt(sna, dst, bo, color,
+					 x1, y1, x2, y2, alu))
+		return TRUE;
+
+	/* Must use the BLT if we can't RENDER... */
+	if (!(alu == GXcopy || alu == GXclear) ||
+	    dst->drawable.width > 2048 || dst->drawable.height > 2048 ||
+	    bo->pitch > 8192)
+		return gen2_render_fill_one_try_blt(sna, dst, bo, color,
+						    x1, y1, x2, y2, alu);
+
+	if (alu == GXclear)
+		color = 0;
+
+	memset(&tmp, 0, sizeof(tmp));
+	tmp.op = color == 0 ? PictOpClear : PictOpSrc;
+	tmp.dst.pixmap = dst;
+	tmp.dst.width = dst->drawable.width;
+	tmp.dst.height = dst->drawable.height;
+	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
+	tmp.dst.bo = bo;
+	tmp.floats_per_vertex = 2;
+
+	tmp.src.u.gen2.pixel =
+		sna_rgba_for_color(color, dst->drawable.depth);
+
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+		kgem_submit(&sna->kgem);
+		if (gen2_render_fill_one_try_blt(sna, dst, bo, color,
+						 x1, y1, x2, y2, alu))
+			return TRUE;
+	}
+
+	gen2_emit_fill_state(sna, &tmp);
+	gen2_get_rectangles(sna, &tmp, 1);
+	DBG(("	(%d, %d), (%d, %d): %x\n", x1, y1, x2, y2, pixel));
+	OUT_VERTEX(x2);
+	OUT_VERTEX(y2);
+	OUT_VERTEX(x1);
+	OUT_VERTEX(y2);
+	OUT_VERTEX(x1);
+	OUT_VERTEX(y1);
+	gen2_vertex_flush(sna);
+
+	return TRUE;
+}
+
 static void
 gen2_render_copy_setup_source(struct sna_composite_channel *channel,
 			      PixmapPtr pixmap,
@@ -2297,8 +2378,8 @@ Bool gen2_render_init(struct sna *sna)
 	render->composite = gen2_render_composite;
 	render->composite_spans = gen2_render_composite_spans;
 	render->fill_boxes = gen2_render_fill_boxes;
-
 	render->fill = gen2_render_fill;
+	render->fill_one = gen2_render_fill_one;
 	render->copy = gen2_render_copy;
 	render->copy_boxes = gen2_render_copy_boxes;
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index bac0353..65176ed 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3659,6 +3659,90 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 	return TRUE;
 }
 
+
+static Bool
+gen3_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
+			     uint32_t color,
+			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
+			     uint8_t alu)
+{
+	BoxRec box;
+
+	box.x1 = x1;
+	box.y1 = y1;
+	box.x2 = x2;
+	box.y2 = y2;
+
+	return sna_blt_fill_boxes(sna, alu,
+				  bo, dst->drawable.bitsPerPixel,
+				  color, &box, 1);
+}
+
+static Bool
+gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
+		     uint32_t color,
+		     int16_t x1, int16_t y1,
+		     int16_t x2, int16_t y2,
+		     uint8_t alu)
+{
+	struct sna_composite_op tmp;
+
+#if NO_FILL_BOXES
+	return gen3_render_fill_one_try_blt(sna, dst, bo, color,
+					    x1, y1, x2, y2, alu);
+#endif
+
+	/* Prefer to use the BLT if already engaged */
+	if (sna->kgem.mode != KGEM_RENDER &&
+	    gen3_render_fill_one_try_blt(sna, dst, bo, color,
+					 x1, y1, x2, y2, alu))
+		return TRUE;
+
+	/* Must use the BLT if we can't RENDER... */
+	if (!(alu == GXcopy || alu == GXclear) ||
+	    dst->drawable.width > 2048 || dst->drawable.height > 2048 ||
+	    bo->pitch > 8192)
+		return gen3_render_fill_one_try_blt(sna, dst, bo, color,
+						    x1, y1, x2, y2, alu);
+
+	if (alu == GXclear)
+		color = 0;
+
+	memset(&tmp, 0, sizeof(tmp));
+	tmp.op = color == 0 ? PictOpClear : PictOpSrc;
+	tmp.dst.pixmap = dst;
+	tmp.dst.width = dst->drawable.width;
+	tmp.dst.height = dst->drawable.height;
+	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
+	tmp.dst.bo = bo;
+	tmp.floats_per_vertex = 2;
+
+	tmp.src.u.gen3.type = SHADER_CONSTANT;
+	tmp.src.u.gen3.mode =
+		sna_rgba_for_color(color, dst->drawable.depth);
+
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+		kgem_submit(&sna->kgem);
+		if (gen3_render_fill_one_try_blt(sna, dst, bo, color,
+						 x1, y1, x2, y2, alu))
+			return TRUE;
+	}
+
+	gen3_emit_composite_state(sna, &tmp);
+	gen3_align_vertex(sna, &tmp);
+	gen3_get_rectangles(sna, &tmp, 1);
+	DBG(("	(%d, %d), (%d, %d): %x\n", x1, y1, x2, y2, pixel));
+	OUT_VERTEX(x2);
+	OUT_VERTEX(y2);
+	OUT_VERTEX(x1);
+	OUT_VERTEX(y2);
+	OUT_VERTEX(x1);
+	OUT_VERTEX(y1);
+	gen3_vertex_flush(sna);
+
+	return TRUE;
+}
+
 static void gen3_render_flush(struct sna *sna)
 {
 	gen3_vertex_finish(sna, TRUE);
@@ -3683,6 +3767,7 @@ Bool gen3_render_init(struct sna *sna)
 
 	render->fill_boxes = gen3_render_fill_boxes;
 	render->fill = gen3_render_fill;
+	render->fill_one = gen3_render_fill_one;
 
 	render->reset = gen3_render_reset;
 	render->flush = gen3_render_flush;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1b43a8b..a6e639b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1650,6 +1650,29 @@ sna_spans_extents(DrawablePtr drawable, GCPtr gc,
 	return box_empty(&box);
 }
 
+static struct sna_damage **
+reduce_damage(DrawablePtr drawable,
+	      struct sna_damage **damage,
+	      const BoxRec *box)
+{
+	PixmapPtr pixmap = get_drawable_pixmap(drawable);
+	int16_t dx, dy;
+	BoxRec r;
+
+	if (*damage == NULL)
+		return damage;
+
+	get_drawable_deltas(drawable, pixmap, &dx, &dy);
+
+	r = *box;
+	r.x1 += dx; r.x2 += dx;
+	r.y1 += dy; r.y2 += dy;
+	if (sna_damage_contains_box(*damage, &r) == PIXMAN_REGION_IN)
+		return NULL;
+	else
+		return damage;
+}
+
 static void
 sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect);
 
@@ -1702,13 +1725,14 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 		if (sna_drawable_use_gpu_bo(drawable, &extents) &&
 		    sna_fill_spans_blt(drawable,
 				       priv->gpu_bo,
-				       priv->gpu_only ? NULL : &priv->gpu_damage,
+				       priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &extents),
 				       gc, n, pt, width, sorted))
 			return;
 
 		if (sna_drawable_use_cpu_bo(drawable, &extents) &&
 		    sna_fill_spans_blt(drawable,
-				       priv->cpu_bo, &priv->cpu_damage,
+				       priv->cpu_bo,
+				       reduce_damage(drawable, &priv->cpu_damage, &extents),
 				       gc, n, pt, width, sorted))
 			return;
 	} else if (gc->fillStyle == FillTiled) {
@@ -2556,6 +2580,33 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 	DBG(("%s x %d [(%d, %d)+(%d, %d)...]\n",
 	     __FUNCTION__, n, rect->x, rect->y, rect->width, rect->height));
 
+	if (n == 1 && REGION_NUM_RECTS(clip) == 1) {
+		BoxPtr box = REGION_RECTS(clip);
+		BoxRec r;
+		bool success = true;
+
+		r.x1 = rect->x + drawable->x;
+		r.y1 = rect->y + drawable->y;
+		r.x2 = bound(r.x1, rect->width);
+		r.y2 = bound(r.y1, rect->height);
+		if (box_intersect(&r, box)) {
+			get_drawable_deltas(drawable, pixmap, &dx, &dy);
+			r.x1 += dx; r.y1 += dy;
+			r.x2 += dx; r.y2 += dy;
+			if (sna->render.fill_one(sna, pixmap, bo, pixel,
+						 r.x1, r.y1, r.x2, r.y2,
+						 gc->alu)) {
+				if (damage) {
+					assert_pixmap_contains_box(pixmap, &r);
+					sna_damage_add_box(damage, &r);
+				}
+			} else
+				success = false;
+		}
+
+		return success;
+	}
+
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel)) {
 		DBG(("%s: unsupported blt\n", __FUNCTION__));
 		return FALSE;
@@ -2935,14 +2986,14 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		if (sna_drawable_use_gpu_bo(draw, &extents) &&
 		    sna_poly_fill_rect_blt(draw,
 					   priv->gpu_bo,
-					   priv->gpu_only ? NULL : &priv->gpu_damage,
+					   priv->gpu_only ? NULL : reduce_damage(draw, &priv->gpu_damage, &extents),
 					   gc, n, rect))
 			return;
 
 		if (sna_drawable_use_cpu_bo(draw, &extents) &&
 		    sna_poly_fill_rect_blt(draw,
 					   priv->cpu_bo,
-					   &priv->cpu_damage,
+					   reduce_damage(draw, &priv->cpu_damage, &extents),
 					   gc, n, rect))
 			return;
 	} else if (gc->fillStyle == FillTiled) {
@@ -2953,14 +3004,14 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		if (sna_drawable_use_gpu_bo(draw, &extents) &&
 		    sna_poly_fill_rect_tiled(draw,
 					     priv->gpu_bo,
-					     priv->gpu_only ? NULL : &priv->gpu_damage,
+					     priv->gpu_only ? NULL : reduce_damage(draw, &priv->gpu_damage, &extents),
 					     gc, n, rect))
 			return;
 
 		if (sna_drawable_use_cpu_bo(draw, &extents) &&
 		    sna_poly_fill_rect_tiled(draw,
 					     priv->cpu_bo,
-					     &priv->cpu_damage,
+					     reduce_damage(draw, &priv->cpu_damage, &extents),
 					     gc, n, rect))
 			return;
 	}
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 5abfb02..86388a8 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -117,10 +117,9 @@ static bool sna_blt_fill_init(struct sna *sna,
 	struct kgem *kgem = &sna->kgem;
 	int pitch;
 
-
 	blt->bo[0] = bo;
 
-	blt->cmd = XY_COLOR_BLT_CMD;
+	blt->cmd = XY_SETUP_MONO_PATTERN_SL_BLT;
 	if (bpp == 32)
 		blt->cmd |= BLT_WRITE_ALPHA | BLT_WRITE_RGB;
 
@@ -133,7 +132,7 @@ static bool sna_blt_fill_init(struct sna *sna,
 		return FALSE;
 
 	blt->overwrites = alu == GXcopy || alu == GXclear;
-	blt->br13 = (fill_ROP[alu] << 16) | pitch;
+	blt->br13 = 1<<31 | (fill_ROP[alu] << 16) | pitch;
 	switch (bpp) {
 	default: assert(0);
 	case 32: blt->br13 |= 1 << 25; /* RGB8888 */
@@ -144,8 +143,29 @@ static bool sna_blt_fill_init(struct sna *sna,
 	blt->pixel = pixel;
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (!kgem_check_bo_fenced(kgem, bo, NULL))
+	if (!kgem_check_bo_fenced(kgem, bo, NULL) ||
+	    !kgem_check_batch(kgem, 3)) {
 		_kgem_submit(kgem);
+		_kgem_set_mode(kgem, KGEM_BLT);
+	}
+
+	{
+		uint32_t *b = kgem->batch + kgem->nbatch;
+		b[0] = blt->cmd;
+		b[1] = blt->br13;
+		b[2] = 0;
+		b[3] = 0;
+		b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, bo,
+				      I915_GEM_DOMAIN_RENDER << 16 |
+				      I915_GEM_DOMAIN_RENDER |
+				      KGEM_RELOC_FENCED,
+				      0);
+		b[5] = pixel;
+		b[6] = pixel;
+		b[7] = 0;
+		b[8] = 0;
+		kgem->nbatch += 9;
+	}
 
 	return TRUE;
 }
@@ -165,36 +185,34 @@ static void sna_blt_fill_one(struct sna *sna,
 	assert(y >= 0);
 	assert((y+height) * blt->bo[0]->pitch <= blt->bo[0]->size);
 
-	/* All too frequently one blt completely overwrites the previous */
-	if (kgem->nbatch >= 6 &&
-	    blt->overwrites &&
-	    kgem->batch[kgem->nbatch-6] == blt->cmd &&
-	    kgem->batch[kgem->nbatch-4] == ((uint32_t)y << 16 | (uint16_t)x) &&
-	    kgem->batch[kgem->nbatch-3] == ((uint32_t)(y+height) << 16 | (uint16_t)(x+width)) &&
-	    kgem->reloc[kgem->nreloc-1].target_handle == blt->bo[0]->handle) {
-		DBG(("%s: replacing last fill\n", __FUNCTION__));
-		kgem->batch[kgem->nbatch-5] = blt->br13;
-		kgem->batch[kgem->nbatch-1] = blt->pixel;
-		return;
-	}
-
-	if (!kgem_check_batch(kgem,  6) ||
-	    kgem->nreloc + 1 > KGEM_RELOC_SIZE(kgem))
+	if (!kgem_check_batch(kgem, 3)) {
 		_kgem_submit(kgem);
+		_kgem_set_mode(kgem, KGEM_BLT);
+
+		b = kgem->batch + kgem->nbatch;
+		b[0] = blt->cmd;
+		b[1] = blt->br13;
+		b[2] = 0;
+		b[3] = 0;
+		b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, blt->bo[0],
+				      I915_GEM_DOMAIN_RENDER << 16 |
+				      I915_GEM_DOMAIN_RENDER |
+				      KGEM_RELOC_FENCED,
+				      0);
+		b[5] = blt->pixel;
+		b[6] = blt->pixel;
+		b[7] = 0;
+		b[8] = 0;
+		kgem->nbatch += 9;
+	}
 
 	b = kgem->batch + kgem->nbatch;
-	b[0] = blt->cmd;
-	b[1] = blt->br13;
-	b[2] = (y << 16) | x;
-	b[3] = ((y + height) << 16) | (x + width);
-	b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4,
-			      blt->bo[0],
-			      I915_GEM_DOMAIN_RENDER << 16 |
-			      I915_GEM_DOMAIN_RENDER |
-			      KGEM_RELOC_FENCED,
-			      0);
-	b[5] = blt->pixel;
-	kgem->nbatch += 6;
+	b[0] = XY_SCANLINE_BLT;
+	if (kgem->gen >= 40 && blt->bo[0]->tiling)
+		b[0] |= 1 << 11;
+	b[1] = (y << 16) | x;
+	b[2] = ((y + height) << 16) | (x + width);
+	kgem->nbatch += 3;
 }
 
 static Bool sna_blt_copy_init(struct sna *sna,
diff --git a/src/sna/sna_reg.h b/src/sna/sna_reg.h
index f6e5397..f16c64d 100644
--- a/src/sna/sna_reg.h
+++ b/src/sna/sna_reg.h
@@ -78,7 +78,10 @@
 
 #define COLOR_BLT_CMD		((2<<29)|(0x40<<22)|(0x3))
 #define XY_COLOR_BLT_CMD		((2<<29)|(0x50<<22)|(0x4))
+#define XY_SETUP_BLT_CMD		((2<<29)|(1<<22)|6)
+#define XY_SETUP_MONO_PATTERN_SL_BLT	((2<<29)|(0x11<<22)|7)
 #define XY_SETUP_CLIP_BLT_CMD		((2<<29)|(3<<22)|1)
+#define XY_SCANLINE_BLT			((2<<29)|(0x25<<22)|1)
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|0x4)
 #define XY_PAT_BLT_IMMEDIATE		((2<<29)|(0x72<<22))
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 292853a..7ab0de1 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -179,6 +179,26 @@ no_render_fill(struct sna *sna, uint8_t alu,
 			    tmp);
 }
 
+static Bool
+no_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
+		   uint32_t color,
+		   int16_t x1, int16_t y1, int16_t x2, int16_t y2,
+		   uint8_t alu)
+{
+	BoxRec box;
+
+	box.x1 = x1;
+	box.y1 = y1;
+	box.x2 = x2;
+	box.y2 = y2;
+
+	DBG(("%s (alu=%d, color=%08x) (%d,%d), (%d, %d)\n",
+	     __FUNCTION__, alu, color, x1, y1, x2, y2));
+	return sna_blt_fill_boxes(sna, alu,
+				  bo, dst->drawable.bitsPerPixel,
+				  color, &box, 1);
+}
+
 static void no_render_reset(struct sna *sna)
 {
 	(void)sna;
@@ -216,6 +236,7 @@ void no_render_init(struct sna *sna)
 
 	render->fill_boxes = no_render_fill_boxes;
 	render->fill = no_render_fill;
+	render->fill_one = no_render_fill_one;
 
 	render->reset = no_render_reset;
 	render->flush = no_render_flush;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 80a9d00..e46f027 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -201,6 +201,10 @@ struct sna_render {
 		     PixmapPtr dst, struct kgem_bo *dst_bo,
 		     uint32_t color,
 		     struct sna_fill_op *tmp);
+	Bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
+			 uint32_t color,
+			 int16_t x1, int16_t y1, int16_t x2, int16_t y2,
+			 uint8_t alu);
 
 	Bool (*copy_boxes)(struct sna *sna, uint8_t alu,
 			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
commit 4d227d43f0dfca1fa1822207d68539635aac37b3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 2 12:03:21 2011 +0100

    sna/accel: Correct syntax for constifying BoxPtr
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
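
The pitfall being fixed: BoxPtr is a typedef for BoxRec *, so qualifying it
as "const BoxPtr" makes the pointer itself const while leaving the pointee
mutable. For a pointer to read-only data, the const must bind to the struct
type, which is what the commit switches to:

	typedef BoxRec *BoxPtr;

	void f(const BoxPtr box);	/* BoxRec *const: *box still writable */
	void g(const BoxRec *box);	/* what was intended: *box read-only */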

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 524f76d..1b43a8b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -559,7 +559,7 @@ done:
 }
 
 static inline Bool
-_sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxPtr box)
+_sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
@@ -588,7 +588,7 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxPtr box)
 }
 
 static inline Bool
-sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxPtr box)
+sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
 {
 	Bool ret = _sna_drawable_use_gpu_bo(drawable, box);
 	DBG(("%s((%d, %d), (%d, %d)) = %d\n", __FUNCTION__,
@@ -597,7 +597,7 @@ sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxPtr box)
 }
 
 static inline Bool
-_sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxPtr box)
+_sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxRec *box)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
@@ -625,7 +625,7 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxPtr box)
 }
 
 static inline Bool
-sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxPtr box)
+sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxRec *box)
 {
 	Bool ret = _sna_drawable_use_cpu_bo(drawable, box);
 	DBG(("%s((%d, %d), (%d, %d)) = %d\n", __FUNCTION__,
@@ -1462,7 +1462,7 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 }
 
 static Bool
-box_intersect(BoxPtr a, const BoxPtr b)
+box_intersect(BoxPtr a, const BoxRec *b)
 {
 	if (a->x1 < b->x1)
 		a->x1 = b->x1;
@@ -2747,7 +2747,7 @@ sna_poly_fill_rect_tiled(DrawablePtr drawable,
 		}
 
 		if (REGION_NUM_RECTS(clip) == 1) {
-			const BoxPtr box = REGION_RECTS(clip);
+			const BoxRec *box = REGION_RECTS(clip);
 			while (n--) {
 				BoxRec r;
 
commit 3dd80524160d6b6cf0fb668afcc766f2bc6f8383
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 2 09:59:03 2011 +0100

    sna/accel: Only throttle after flushing
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 69e6dec..524f76d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3589,13 +3589,13 @@ static void sna_accel_throttle(struct sna *sna)
 
 void sna_accel_block_handler(struct sna *sna)
 {
-	if (sna_accel_do_flush(sna))
+	if (sna_accel_do_flush(sna)) {
 		sna_accel_flush(sna);
+		sna_accel_throttle(sna);
+	}
 
 	if (sna_accel_do_expire(sna))
 		sna_accel_expire(sna);
-
-	sna_accel_throttle(sna);
 }
 
 void sna_accel_wakeup_handler(struct sna *sna)
commit 04b8f0a5a16ff71c5e55e0281dbcaa32554703ec
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 2 09:36:38 2011 +0100

    sna/accel: Add a compile option to force flushing on every block handler
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 008e552..69e6dec 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -56,6 +56,7 @@
 
 #define FORCE_GPU_ONLY 0
 #define FORCE_FALLBACK 0
+#define FORCE_FLUSH 0
 
 #define USE_SPANS 0
 
@@ -3310,14 +3311,6 @@ static void sna_deferred_free(struct sna *sna)
 	}
 }
 
-static uint64_t read_timer(int fd)
-{
-	uint64_t count = 0;
-	int ret = read(fd, &count, sizeof(count));
-	return count;
-	(void)ret;
-}
-
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
 {
 	PixmapPtr front = sna->shadow ? sna->shadow : sna->front;
@@ -3325,10 +3318,18 @@ static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
 	return priv && priv->gpu_bo ? priv : NULL;
 }
 
-#if HAVE_SYS_TIMERFD_H
+#if HAVE_SYS_TIMERFD_H && !FORCE_FLUSH
 #include <sys/timerfd.h>
 #include <errno.h>
 
+static uint64_t read_timer(int fd)
+{
+	uint64_t count = 0;
+	int ret = read(fd, &count, sizeof(count));
+	return count;
+	(void)ret;
+}
+
 static void _sna_accel_disarm_timer(struct sna *sna, int id)
 {
 	struct itimerspec to;
commit 32cef71efe231692f7b24f66913a8ae57bc00aa8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 2 08:55:55 2011 +0100

    sna/accel: Add a compile option to disable the use of spans
    
    Using spans has a tremendous effect (~100x) on x11perf, some of it
    good but mostly bad. However, real workloads mix operations, so
    preventing migration on alternating operations is a win. The x11perf
    slowdowns appear to be CPU bound, so there should be plenty of scope
    for recovering the lost performance.
    
    However, for the time being, just go back to the old fallbacks.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 683bd1b..008e552 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -57,6 +57,8 @@
 #define FORCE_GPU_ONLY 0
 #define FORCE_FALLBACK 0
 
+#define USE_SPANS 0
+
 DevPrivateKeyRec sna_pixmap_index;
 
 #define PM_IS_SOLID(_draw, _pm) \
@@ -2155,7 +2157,7 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 			return;
 	}
 
-	if (can_fill_spans(drawable, gc) &&
+	if (USE_SPANS && can_fill_spans(drawable, gc) &&
 	    sna_drawable_use_gpu_bo(drawable, &extents)) {
 		DBG(("%s: converting line into spans\n", __FUNCTION__));
 		switch (gc->lineStyle) {
@@ -2390,7 +2392,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 	}
 
 	/* XXX Do we really want to base this decision on the amalgam ? */
-	if (can_fill_spans(drawable, gc) &&
+	if (USE_SPANS && can_fill_spans(drawable, gc) &&
 	    sna_drawable_use_gpu_bo(drawable, &extents)) {
 		void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
 		int i;
@@ -2515,7 +2517,7 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 	}
 
 	/* For "simple" cases use the miPolyArc to spans path */
-	if (arc_to_spans(gc, n) && can_fill_spans(drawable, gc) &&
+	if (USE_SPANS && arc_to_spans(gc, n) && can_fill_spans(drawable, gc) &&
 	    sna_drawable_use_gpu_bo(drawable, &extents)) {
 		DBG(("%s: converting arcs into spans\n", __FUNCTION__));
 		miPolyArc(drawable, gc, n, arc);
commit dc1ec0dd1a5f66114ee4249309d3a283ad84d584
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 20:11:47 2011 +0100

    sna/accel: Only disable the flush mitigation if nothing changed
    
    Previously we ignored in-place updates to the scanout, and so we were
    not amortizing the shadow cost of common core rendering operations.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e837050..683bd1b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3428,14 +3428,19 @@ static void _sna_accel_disarm_timer(struct sna *sna, int id) { }
 static void sna_accel_flush(struct sna *sna)
 {
 	struct sna_pixmap *priv = sna_accel_scanout(sna);
+	bool nothing_to_do =
+		priv->cpu_damage == NULL && priv->gpu_bo->rq == NULL;
 
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)GetTimeInMillis()));
+	DBG(("%s (time=%ld), nothing_to_do=%d\n",
+	     __FUNCTION__, (long)GetTimeInMillis(), nothing_to_do));
+
+	if (nothing_to_do) {
+		_sna_accel_disarm_timer(sna, FLUSH_TIMER);
+		return;
+	}
 
 	sna_pixmap_move_to_gpu(priv->pixmap);
 	kgem_bo_flush(&sna->kgem, priv->gpu_bo);
-
-	if (priv->gpu_bo->rq == NULL)
-		_sna_accel_disarm_timer(sna, FLUSH_TIMER);
 }
 
 static void sna_accel_expire(struct sna *sna)
commit c6acf1325833b8679ef09ab74f0cb0fd82a8cd92
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 11:26:54 2011 +0100

    sna/accel: Micro-optimise sna_fill_spans_blt
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 60ac9dd..e837050 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1504,10 +1504,11 @@ sna_fill_spans_blt(DrawablePtr drawable,
 {
 	struct sna *sna = to_sna_from_drawable(drawable);
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
-	struct sna_fill_op fill;
-	BoxPtr extents, clip;
-	int nclip;
+	BoxPtr extents;
+	int nclip = REGION_NUM_RECTS(gc->pCompositeClip);
+	int need_translation = !gc->miTranslate;
 	int16_t dx, dy;
+	struct sna_fill_op fill;
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
 		return false;
@@ -1525,7 +1526,7 @@ sna_fill_spans_blt(DrawablePtr drawable,
 		int y = pt->y;
 		int X2 = X1 + (int)*width;
 
-		if (!gc->miTranslate) {
+		if (need_translation) {
 			X1 += drawable->x;
 			X2 += drawable->x;
 			y += drawable->y;
@@ -1546,22 +1547,19 @@ sna_fill_spans_blt(DrawablePtr drawable,
 		if (X1 >= X2)
 			continue;
 
-		nclip = REGION_NUM_RECTS(gc->pCompositeClip);
+		y += dy;
 		if (nclip == 1) {
 			X1 += dx;
-			if (X1 < 0)
-				X1 = 0;
 			X2 += dx;
-			if (X2 > pixmap->drawable.width)
-				X2 = pixmap->drawable.width;
+			assert(X1 >= 0 && X2 <= pixmap->drawable.width);
 			if (X2 > X1) {
-				fill.blt(sna, &fill, X1, y+dy, X2-X1, 1);
+				fill.blt(sna, &fill, X1, y, X2-X1, 1);
 				if (damage) {
 					BoxRec box;
 
 					box.x1 = X1;
 					box.x2 = X2;
-					box.y1 = y + dy;
+					box.y1 = y;
 					box.y2 = box.y1 + 1;
 
 					assert_pixmap_contains_box(pixmap, &box);
@@ -1569,8 +1567,9 @@ sna_fill_spans_blt(DrawablePtr drawable,
 				}
 			}
 		} else {
-			clip = REGION_RECTS(gc->pCompositeClip);
-			while (nclip--) {
+			int nc = nclip;
+			BoxPtr clip = REGION_RECTS(gc->pCompositeClip);
+			while (nc--) {
 				if (clip->y1 <= y && y < clip->y2) {
 					int x1 = clip->x1;
 					int x2 = clip->x2;
@@ -1588,13 +1587,12 @@ sna_fill_spans_blt(DrawablePtr drawable,
 
 					if (x2 > x1) {
 						fill.blt(sna, &fill,
-							 x1, y + dy,
-							 x2-x1, 1);
+							 x1, y, x2-x1, 1);
 						if (damage) {
 							BoxRec box;
 
 							box.x1 = x1;
-							box.y1 = y + dy;
+							box.y1 = y;
 							box.x2 = x2;
 							box.y2 = box.y1 + 1;
 
commit 8029765515399b130bee18db0a2830eb83f47a07
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 11:19:49 2011 +0100

    sna/accel: Don't attempt converting to spans if we will only fall back
    
    As the span code does not yet handle plane masks or stippling, it is
    disadvantageous to convert to spans only to fall back.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4f50c81..60ac9dd 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1652,6 +1652,15 @@ sna_spans_extents(DrawablePtr drawable, GCPtr gc,
 static void
 sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect);
 
+static bool
+can_fill_spans(DrawablePtr drawable, GCPtr gc)
+{
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		return false;
+
+	return gc->fillStyle == FillSolid || gc->fillStyle == FillTiled;
+}
+
 static void
 sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 	       DDXPointPtr pt, int *width, int sorted)
@@ -2148,7 +2157,8 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 			return;
 	}
 
-	if (sna_drawable_use_gpu_bo(drawable, &extents)) {
+	if (can_fill_spans(drawable, gc) &&
+	    sna_drawable_use_gpu_bo(drawable, &extents)) {
 		DBG(("%s: converting line into spans\n", __FUNCTION__));
 		switch (gc->lineStyle) {
 		case LineSolid:
@@ -2382,7 +2392,8 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 	}
 
 	/* XXX Do we really want to base this decision on the amalgam ? */
-	if (sna_drawable_use_gpu_bo(drawable, &extents)) {
+	if (can_fill_spans(drawable, gc) &&
+	    sna_drawable_use_gpu_bo(drawable, &extents)) {
 		void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
 		int i;
 
@@ -2487,6 +2498,7 @@ arc_to_spans(GCPtr gc, int n)
 static void
 sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 {
+	struct sna *sna = to_sna_from_drawable(drawable);
 	BoxRec extents;
 	RegionRec region;
 
@@ -2496,14 +2508,23 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 	DBG(("%s: extents=(%d, %d), (%d, %d)\n", __FUNCTION__,
 	     extents.x1, extents.y1, extents.x2, extents.y2));
 
+	if (FORCE_FALLBACK)
+		goto fallback;
+
+	if (sna->kgem.wedged) {
+		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
+		goto fallback;
+	}
+
 	/* For "simple" cases use the miPolyArc to spans path */
-	if (arc_to_spans(gc, n) &&
+	if (arc_to_spans(gc, n) && can_fill_spans(drawable, gc) &&
 	    sna_drawable_use_gpu_bo(drawable, &extents)) {
 		DBG(("%s: converting arcs into spans\n", __FUNCTION__));
 		miPolyArc(drawable, gc, n, arc);
 		return;
 	}
 
+fallback:
 	RegionInit(&region, &extents, 1);
 	if (gc->pCompositeClip)
 		RegionIntersect(&region, &region, gc->pCompositeClip);
commit cd11bd69f4c1b961593886945946c7d7c9269900
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 10:45:18 2011 +0100

    sna/accel: Use miPolyArc to convert arcs into spans for gpu bo
    
    This is actually trickier than it looks, since miPolyArc() sometimes
    uses an intermediate bitmap that performs worse than the fbPolyArc()
    fallback.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2614e47..4f50c81 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2469,6 +2469,21 @@ sna_poly_arc_extents(DrawablePtr drawable, GCPtr gc,
 	return box_empty(&box);
 }
 
+static bool
+arc_to_spans(GCPtr gc, int n)
+{
+	if (gc->lineStyle != LineSolid)
+		return false;
+
+	if (gc->lineWidth == 0)
+		return true;
+
+	if (n == 1)
+		return true;
+
+	return false;
+}
+
 static void
 sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 {
@@ -2481,6 +2496,14 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 	DBG(("%s: extents=(%d, %d), (%d, %d)\n", __FUNCTION__,
 	     extents.x1, extents.y1, extents.x2, extents.y2));
 
+	/* For "simple" cases use the miPolyArc to spans path */
+	if (arc_to_spans(gc, n) &&
+	    sna_drawable_use_gpu_bo(drawable, &extents)) {
+		DBG(("%s: converting arcs into spans\n", __FUNCTION__));
+		miPolyArc(drawable, gc, n, arc);
+		return;
+	}
+
 	RegionInit(&region, &extents, 1);
 	if (gc->pCompositeClip)
 		RegionIntersect(&region, &region, gc->pCompositeClip);
commit d07256cc3360cf94d131d9a1b24d9f1c6a70ee04
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 10:42:45 2011 +0100

    sna/accel: Convert segments into spans similarly to PolyLine
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
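
The cast in this change leans on struct layout: an xSegment is four
consecutive 16-bit fields (x1, y1, x2, y2), i.e. exactly two DDXPointRecs,
so each segment can be handed to the mi line routines as a two-point
polyline. A sketch of the relevant shapes (simplified from the X headers):

	typedef struct { INT16 x1, y1, x2, y2; } xSegment;
	typedef struct { short x, y; } DDXPointRec, *DDXPointPtr;

	/* one segment == one two-point line */
	line(drawable, gc, CoordModeOrigin, 2, (DDXPointPtr)&seg[i]);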

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8f3aabc..2614e47 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2381,6 +2381,33 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 			return;
 	}
 
+	/* XXX Do we really want to base this decision on the amalgam ? */
+	if (sna_drawable_use_gpu_bo(drawable, &extents)) {
+		void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
+		int i;
+
+		DBG(("%s: converting segments into spans\n", __FUNCTION__));
+
+		switch (gc->lineStyle) {
+		default:
+		case LineSolid:
+			if (gc->lineWidth == 0)
+				line = miZeroLine;
+			else
+				line = miWideLine;
+			break;
+		case LineOnOffDash:
+		case LineDoubleDash:
+			line = miWideDash;
+			break;
+		}
+
+		for (i = 0; i < n; i++)
+			line(drawable, gc, CoordModeOrigin, 2,
+			     (DDXPointPtr)&seg[i]);
+		return;
+	}
+
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
 	RegionInit(&region, &extents, 1);
commit d09a229e32468c810f5056bfd0043f2fb68038d3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 10:01:21 2011 +0100

    sna/accel: Use the mi*Line routines to convert the line into spans for gpu bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 55dd2cd..8f3aabc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2148,6 +2148,23 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 			return;
 	}
 
+	if (sna_drawable_use_gpu_bo(drawable, &extents)) {
+		DBG(("%s: converting line into spans\n", __FUNCTION__));
+		switch (gc->lineStyle) {
+		case LineSolid:
+			if (gc->lineWidth == 0)
+				miZeroLine(drawable, gc, mode, n, pt);
+			else
+				miWideLine(drawable, gc, mode, n, pt);
+			break;
+		case LineOnOffDash:
+		case LineDoubleDash:
+			miWideDash(drawable, gc, mode, n, pt);
+			break;
+		}
+		return;
+	}
+
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
 	RegionInit(&region, &extents, 1);
commit e7a662b92e3012735f7a3d72d4f7860ca61a3810
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 09:26:52 2011 +0100

    sna: Sort partials by remaining space
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
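
The idea: keep the list of partial upload buffers sorted in decreasing order
of remaining space. The scan in kgem_create_buffer() can then stop at the
first compatible buffer that is too small, since no later buffer has more
room; after an allocation, the buffer is re-inserted further down the list
to restore the invariant, which validate_partials() asserts in debug builds.
Condensed from the diff:

	list_for_each_entry(bo, &kgem->partial, base.list) {
		if (bo->write != write)
			continue;	/* wrong transfer direction */
		if (bo->used + size <= bo->alloc) {
			offset = bo->used;
			bo->used += size;
			goto done;
		}
		break;	/* sorted: no later buffer has more room */
	}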

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4dd876b..c31e71f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -732,7 +732,6 @@ static void kgem_finish_partials(struct kgem *kgem)
 		}
 
 		list_del(&bo->base.list);
-
 		if (bo->write && bo->need_io) {
 			DBG(("%s: handle=%d, uploading %d/%d\n",
 			     __FUNCTION__, bo->base.handle, bo->used, bo->alloc));
@@ -740,22 +739,20 @@ static void kgem_finish_partials(struct kgem *kgem)
 			gem_write(kgem->fd, bo->base.handle,
 				  0, bo->used, bo+1);
 			bo->need_io = 0;
+		}
 
-			/* transfer the handle to a minimum bo */
-			if (bo->base.refcnt == 1) {
-				struct kgem_bo *base = malloc(sizeof(*base));
-				if (base) {
-					memcpy(base, &bo->base, sizeof (*base));
-					base->reusable = true;
-					list_init(&base->list);
-					list_replace(&bo->base.request,
-						     &base->request);
-					free(bo);
-					bo = (struct kgem_partial_bo *)base;
-				}
+		/* transfer the handle to a minimum bo */
+		if (bo->base.refcnt == 1) {
+			struct kgem_bo *base = malloc(sizeof(*base));
+			if (base) {
+				memcpy(base, &bo->base, sizeof (*base));
+				base->reusable = true;
+				list_init(&base->list);
+				list_replace(&bo->base.request, &base->request);
+				free(bo);
+				bo = (struct kgem_partial_bo *)base;
 			}
 		}
-
 		kgem_bo_unref(kgem, &bo->base);
 	}
 }
@@ -1891,6 +1888,30 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	return bo;
 }
 
+#ifndef NDEBUG
+static bool validate_partials(struct kgem *kgem)
+{
+	struct kgem_partial_bo *bo, *next;
+
+	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
+		if (bo->base.list.next == &kgem->partial)
+			return true;
+		if (bo->alloc - bo->used < next->alloc - next->used) {
+			ErrorF("this rem: %d, next rem: %d\n",
+			       bo->alloc - bo->used,
+			       next->alloc - next->used);
+			goto err;
+		}
+	}
+	return true;
+
+err:
+	list_for_each_entry(bo, &kgem->partial, base.list)
+		ErrorF("bo: used=%d / %d\n", bo->used, bo->alloc);
+	return false;
+}
+#endif
+
 struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				   uint32_t size, uint32_t flags,
 				   void **ret)
@@ -1900,11 +1921,15 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	int offset, alloc;
 	uint32_t handle;
 
-	DBG(("%s: size=%d, flags=%x\n", __FUNCTION__, size, flags));
+	DBG(("%s: size=%d, flags=%x [write=%d, last=%d]\n",
+	     __FUNCTION__, size, flags, write, flags & KGEM_BUFFER_LAST));
 
 	list_for_each_entry(bo, &kgem->partial, base.list) {
-		if (bo->write != write)
+		if (bo->write != write) {
+			DBG(("%s: skip write %d buffer, need %d\n",
+			     __FUNCTION__, bo->write, write));
 			continue;
+		}
 
 		if (bo->base.refcnt == 1 && bo->base.exec == NULL)
 			/* no users, so reset */
@@ -1915,10 +1940,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			     __FUNCTION__, bo->used, size, bo->alloc));
 			offset = bo->used;
 			bo->used += size;
-			if (kgem->partial.next != &bo->base.list)
-				list_move(&bo->base.list, &kgem->partial);
 			goto done;
 		}
+
+		DBG(("%s: too small (%d < %d)\n",
+		     __FUNCTION__, bo->alloc - bo->used, size));
+		break;
 	}
 
 	alloc = (flags & KGEM_BUFFER_LAST) ? 4096 : 32 * 1024;
@@ -1974,6 +2001,28 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	     __FUNCTION__, alloc, bo->base.handle));
 
 done:
+	/* adjust the position within the list to maintain decreasing order */
+	alloc = bo->alloc - bo->used;
+	{
+		struct kgem_partial_bo *p, *first;
+
+		first = p = list_first_entry(&bo->base.list,
+					     struct kgem_partial_bo,
+					     base.list);
+		while (&p->base.list != &kgem->partial &&
+		       alloc < p->alloc - p->used) {
+			DBG(("%s: this=%d, right=%d\n",
+			     __FUNCTION__, alloc, p->alloc -p->used));
+			p = list_first_entry(&p->base.list,
+					     struct kgem_partial_bo,
+					     base.list);
+		}
+		if (p != first) {
+			__list_del(bo->base.list.prev, bo->base.list.next);
+			list_add_tail(&bo->base.list, &kgem->partial);
+		}
+		assert(validate_partials(kgem));
+	}
 	*ret = (char *)(bo+1) + offset;
 	return kgem_create_proxy(&bo->base, offset, size);
 }
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8f0c838..af6203c 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -34,6 +34,12 @@
 #ifndef KGEM_H
 #define KGEM_H
 
+#if DEBUG_KGEM
+#define DBG_HDR(x) ErrorF x
+#else
+#define DBG_HDR(x)
+#endif
+
 struct kgem_bo {
 	struct kgem_bo *proxy;
 
@@ -299,6 +305,9 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 {
+	DBG_HDR(("%s: exec? %d, gpu? %d, rq? %d\n",
+		 __FUNCTION__, bo->exec != NULL, bo->gpu, bo->rq != NULL));
+
 	if (bo->exec)
 		return true;
 	if (!bo->gpu)
@@ -346,4 +355,6 @@ static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
 }
 #endif
 
+#undef DBG_HDR
+
 #endif /* KGEM_H */
commit 13b9b5d8d6b6f6db59fe2418270ac93d9b74436c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Oct 1 09:23:41 2011 +0100

    sna/io: Only mark the buffer as LAST if we know we will flush the IO
    
    Otherwise we can continue to batch up the data upload into larger
    buffers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
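
KGEM_BUFFER_LAST hints to the allocator that nothing more will be appended
to the upload buffer, so it allocates a minimal page rather than the 32 KiB
batching size (from kgem_create_buffer() in the commit above):

	alloc = (flags & KGEM_BUFFER_LAST) ? 4096 : 32 * 1024;

Hence sna_write_boxes() now passes the flag only for the final chunk
(nbox == 0), keeping earlier chunks eligible for coalescing into one
larger buffer.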

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 3207dc1..649a359 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -339,7 +339,7 @@ void sna_write_boxes(struct sna *sna,
 		}
 
 		src_bo = kgem_create_buffer(kgem, offset,
-					    KGEM_BUFFER_WRITE | KGEM_BUFFER_LAST,
+					    KGEM_BUFFER_WRITE | (nbox ? KGEM_BUFFER_LAST : 0),
 					    &ptr);
 		if (!src_bo)
 			break;

