xf86-video-intel: 7 commits - src/sna/gen2_render.c src/sna/gen3_render.c src/sna/gen4_render.c src/sna/gen5_render.c src/sna/gen6_render.c src/sna/gen7_render.c src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_damage.c src/sna/sna_dri.c src/sna/sna.h src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Thu Jan 12 15:15:35 PST 2012


 src/sna/gen2_render.c |   10 -
 src/sna/gen3_render.c |   11 +-
 src/sna/gen4_render.c |    8 -
 src/sna/gen5_render.c |   11 +-
 src/sna/gen6_render.c |   11 +-
 src/sna/gen7_render.c |   11 +-
 src/sna/kgem.c        |   12 +-
 src/sna/sna.h         |    1 
 src/sna/sna_accel.c   |  232 ++++++++++++++++++++++++++++++++++------------
 src/sna/sna_damage.c  |  252 +++++++++++++++++++++++---------------------------
 src/sna/sna_dri.c     |   22 +++-
 src/sna/sna_render.c  |    7 -
 12 files changed, 353 insertions(+), 235 deletions(-)

New commits:
commit 5c2c6474efd4dbc5a0fc9c68ef4e5b5e5cfee415
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 12 23:11:05 2012 +0000

    sna/dri: Hook up a compile option to switch colour buffers to Y-tiling
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 3beaee1..7334dfb 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -56,6 +56,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define DBG(x) ErrorF x
 #endif
 
+#define COLOR_PREFER_TILING_Y 0
+
 enum frame_event_type {
 	DRI2_SWAP,
 	DRI2_SWAP_THROTTLE,
@@ -122,6 +124,18 @@ static inline struct kgem_bo *ref(struct kgem_bo *bo)
 	return bo;
 }
 
+/* Prefer to enable TILING_Y if this buffer will never be a
+ * candidate for pageflipping
+ */
+static bool color_use_tiling_y(struct sna *sna, DrawablePtr drawable)
+{
+	if (!COLOR_PREFER_TILING_Y)
+		return false;
+
+	return (drawable->width != sna->front->drawable.width ||
+		drawable->height != sna->front->drawable.height);
+}
+
 static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 					  PixmapPtr pixmap)
 {
@@ -134,8 +148,9 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	if (priv->flush)
 		return ref(priv->gpu_bo);
 
-	if (priv->cpu_damage)
-		list_add(&priv->list, &sna->dirty_pixmaps);
+	if (priv->gpu_bo->tiling != I915_TILING_Y &&
+	    color_use_tiling_y(sna, &pixmap->drawable))
+		sna_pixmap_change_tiling(pixmap, I915_TILING_Y);
 
 	/* We need to submit any modifications to and reads from this
 	 * buffer before we send any reply to the Client.
@@ -194,7 +209,8 @@ sna_dri_create_buffer(DrawablePtr drawable,
 				    drawable->width,
 				    drawable->height,
 				    drawable->bitsPerPixel,
-				    I915_TILING_X, CREATE_EXACT);
+				    color_use_tiling_y(sna, drawable) ?  I915_TILING_Y : I915_TILING_X,
+				    CREATE_EXACT);
 		break;
 
 	case DRI2BufferStencil:
commit 59b79e5952fdc40f8c50f74a72a19363cb426140
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 12 21:05:39 2012 +0000

    sna: Reorder composite-done to destroy mask bo before source bo
    
    Just in the unlikely event that we hit the delete-partial-upload path
    which prefers destroying the last bo first.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 30dd694..1660763 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1077,12 +1077,11 @@ static void gen2_render_composite_done(struct sna *sna,
 	sna->render.op = NULL;
 	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
-	sna_render_composite_redirect_done(sna, op);
-
-	if (op->src.bo)
-		kgem_bo_destroy(&sna->kgem, op->src.bo);
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
+	if (op->src.bo)
+		kgem_bo_destroy(&sna->kgem, op->src.bo);
+	sna_render_composite_redirect_done(sna, op);
 }
 
 static Bool
@@ -2111,9 +2110,10 @@ gen2_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, &op->base);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+
+	sna_render_composite_redirect_done(sna, &op->base);
 }
 
 static Bool
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index f5946c1..51469dd 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1749,12 +1749,12 @@ gen3_render_composite_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, op);
-
-	if (op->src.bo)
-		kgem_bo_destroy(&sna->kgem, op->src.bo);
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
+	if (op->src.bo)
+		kgem_bo_destroy(&sna->kgem, op->src.bo);
+
+	sna_render_composite_redirect_done(sna, op);
 }
 
 static void
@@ -3021,9 +3021,10 @@ gen3_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, &op->base);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+
+	sna_render_composite_redirect_done(sna, &op->base);
 }
 
 static Bool
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index e9fa94b..3146836 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1816,12 +1816,12 @@ gen4_render_composite_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, op);
-
-	if (op->src.bo)
-		kgem_bo_destroy(&sna->kgem, op->src.bo);
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
+	if (op->src.bo)
+		kgem_bo_destroy(&sna->kgem, op->src.bo);
+
+	sna_render_composite_redirect_done(sna, op);
 }
 
 static Bool
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 6d4cf6e..017b7ce 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1852,12 +1852,12 @@ gen5_render_composite_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, op);
-
-	if (op->src.bo)
-		kgem_bo_destroy(&sna->kgem, op->src.bo);
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
+	if (op->src.bo)
+		kgem_bo_destroy(&sna->kgem, op->src.bo);
+
+	sna_render_composite_redirect_done(sna, op);
 }
 
 static Bool
@@ -2479,9 +2479,10 @@ gen5_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, &op->base);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+
+	sna_render_composite_redirect_done(sna, &op->base);
 }
 
 static Bool
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index b1a0905..047c055 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2050,12 +2050,12 @@ static void gen6_render_composite_done(struct sna *sna,
 	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	sna->render.op = NULL;
 
-	sna_render_composite_redirect_done(sna, op);
-
-	if (op->src.bo)
-		kgem_bo_destroy(&sna->kgem, op->src.bo);
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
+	if (op->src.bo)
+		kgem_bo_destroy(&sna->kgem, op->src.bo);
+
+	sna_render_composite_redirect_done(sna, op);
 }
 
 static Bool
@@ -2773,9 +2773,10 @@ gen6_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, &op->base);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+
+	sna_render_composite_redirect_done(sna, &op->base);
 }
 
 static Bool
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 2d85a1b..c00548e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2144,12 +2144,12 @@ static void gen7_render_composite_done(struct sna *sna,
 	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	sna->render.op = NULL;
 
-	sna_render_composite_redirect_done(sna, op);
-
-	if (op->src.bo)
-		kgem_bo_destroy(&sna->kgem, op->src.bo);
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
+	if (op->src.bo)
+		kgem_bo_destroy(&sna->kgem, op->src.bo);
+
+	sna_render_composite_redirect_done(sna, op);
 }
 
 static Bool
@@ -2780,9 +2780,10 @@ gen7_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	sna_render_composite_redirect_done(sna, &op->base);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+
+	sna_render_composite_redirect_done(sna, &op->base);
 }
 
 static Bool
commit 983b755313df8a0d256c59c32ec4106e35f237aa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 12 21:05:16 2012 +0000

    sna/damage: Fix union of extents with dirty damage but no region
    
    By failing to account for certain paths which would create a damage elt
    without fully initialisating the damage region (only the damage extents),
    we would later overwrite the damage extents with only the extents for
    this operation (rather than the union of this operation with the current
    damage). This fixes a regression from 098592ca5d,
    (sna: Remove the independent tracking of elts from boxes).
    
    Include the associated damage migration debugging code of the callers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8aa30fc..30da328 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -64,6 +64,10 @@
 #define USE_ZERO_SPANS 1
 #define USE_BO_FOR_SCRATCH_PIXMAP 1
 
+#define MIGRATE_ALL 0
+
+#define ACCEL_PUT_IMAGE 1
+
 static int sna_font_key;
 
 static const uint8_t copy_ROP[] = {
@@ -1142,8 +1146,33 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		goto done;
 	}
 
+	if (MIGRATE_ALL && priv->gpu_damage) {
+		BoxPtr box;
+		int n = sna_damage_get_boxes(priv->gpu_damage, &box);
+		if (n) {
+			Bool ok;
+
+			DBG(("%s: forced migration\n", __FUNCTION__));
+
+			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
+
+			ok = FALSE;
+			if (priv->cpu_bo && sna->kgem.gen >= 60)
+				ok = sna->render.copy_boxes(sna, GXcopy,
+							    pixmap, priv->gpu_bo, 0, 0,
+							    pixmap, priv->cpu_bo, 0, 0,
+							    box, n);
+			if (!ok)
+				sna_read_boxes(sna,
+					       priv->gpu_bo, 0, 0,
+					       pixmap, 0, 0,
+					       box, n);
+		}
+		sna_damage_destroy(&priv->gpu_damage);
+	}
+
 	if (sna_damage_contains_box(priv->gpu_damage,
-				    REGION_EXTENTS(NULL, region)) != PIXMAN_REGION_OUT) {
+				    &region->extents) != PIXMAN_REGION_OUT) {
 		DBG(("%s: region (%dx%d) intersects gpu damage\n",
 		     __FUNCTION__,
 		     region->extents.x2 - region->extents.x1,
@@ -1194,11 +1223,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 
 			if (region_subsumes_damage(r, priv->gpu_damage)) {
-				BoxPtr box = REGION_RECTS(&DAMAGE_PTR(priv->gpu_damage)->region);
-				int n = REGION_NUM_RECTS(&DAMAGE_PTR(priv->gpu_damage)->region);
+				BoxPtr box;
+				int n;
 				Bool ok;
 
 				ok = FALSE;
+				n = sna_damage_get_boxes(priv->gpu_damage,
+							 &box);
 				if (priv->cpu_bo && sna->kgem.gen >= 30)
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
@@ -1238,7 +1269,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (sna_damage_intersect(priv->gpu_damage, r, &need)) {
 					BoxPtr box = REGION_RECTS(&need);
 					int n = REGION_NUM_RECTS(&need);
-					Bool ok = FALSE;
+					Bool ok;
 
 					ok = FALSE;
 					if (priv->cpu_bo && sna->kgem.gen >= 30)
@@ -1369,11 +1400,11 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
 		goto done;
 
 	region_set(&r, box);
-	if (region_subsumes_damage(&r, priv->cpu_damage)) {
-		int n = REGION_NUM_RECTS(&DAMAGE_PTR(priv->cpu_damage)->region);
+	if (MIGRATE_ALL || region_subsumes_damage(&r, priv->cpu_damage)) {
 		Bool ok;
+		int n;
 
-		box = REGION_RECTS(&DAMAGE_PTR(priv->cpu_damage)->region);
+		n = sna_damage_get_boxes(priv->cpu_damage, &box);
 		ok = FALSE;
 		if (priv->cpu_bo)
 			ok = sna->render.copy_boxes(sna, GXcopy,
@@ -1386,8 +1417,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
 			    box->x2 >= pixmap->drawable.width &&
 			    box->y2 >= pixmap->drawable.height) {
 				priv->gpu_bo =
-					sna_replace(sna,
-						    pixmap,
+					sna_replace(sna, pixmap,
 						    priv->gpu_bo,
 						    pixmap->devPrivate.ptr,
 						    pixmap->devKind);
@@ -1403,7 +1433,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
 
 		sna_damage_destroy(&priv->cpu_damage);
 		list_del(&priv->list);
-	} else if (DAMAGE_IS_ALL(priv->gpu_damage) ||
+	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
 		Bool ok = FALSE;
 		if (priv->cpu_bo)
@@ -1817,8 +1847,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 			    (box->x2 - box->x1) >= pixmap->drawable.width &&
 			    (box->y2 - box->y1) >= pixmap->drawable.height) {
 				priv->gpu_bo =
-					sna_replace(sna,
-						    pixmap,
+					sna_replace(sna, pixmap,
 						    priv->gpu_bo,
 						    pixmap->devPrivate.ptr,
 						    pixmap->devKind);
@@ -2609,6 +2638,9 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	if (wedged(sna))
 		goto fallback;
 
+	if (!ACCEL_PUT_IMAGE)
+		goto fallback;
+
 	switch (format) {
 	case ZPixmap:
 		if (!PM_IS_SOLID(drawable, gc->planemask))
@@ -2714,7 +2746,10 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (dst != src)
 		get_drawable_deltas(dst, pixmap, &tx, &ty);
 
-	if (priv && priv->gpu_bo) {
+	if (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))
+		goto fallback;
+
+	if (priv->gpu_bo) {
 		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ)) {
 			DBG(("%s: fallback - not a pure copy and failed to move dst to GPU\n",
 			     __FUNCTION__));
@@ -3040,8 +3075,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				assert(src_dx + box->x1 + dst_pixmap->drawable.width <= src_pixmap->drawable.width);
 
 				dst_priv->gpu_bo =
-					sna_replace(sna,
-						    dst_pixmap,
+					sna_replace(sna, dst_pixmap,
 						    dst_priv->gpu_bo,
 						    bits, stride);
 
@@ -3052,7 +3086,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 						       dst_pixmap->drawable.height);
 				}
 			} else {
-				DBG(("%s: dst is on the GPU, src is on the CPU, uploading\n",
+				DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
 				     __FUNCTION__));
 				sna_write_boxes(sna, dst_pixmap,
 						dst_priv->gpu_bo, dst_dx, dst_dy,
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index ea98157..6e456cf 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -248,7 +248,7 @@ _sna_damage_create_elt(struct sna_damage *damage,
 	 return damage;
 }
 
-static void
+static struct sna_damage *
 _sna_damage_create_elt_from_boxes(struct sna_damage *damage,
 				  const BoxRec *boxes, int count,
 				  int16_t dx, int16_t dy)
@@ -273,13 +273,13 @@ _sna_damage_create_elt_from_boxes(struct sna_damage *damage,
 		count -=n;
 		boxes += n;
 		if (count == 0)
-			return;
+			return damage;
 	}
 
 	DBG(("    %s(): new elt\n", __FUNCTION__));
 
 	if (!_sna_damage_create_boxes(damage, count))
-		return;
+		return damage;
 
 	for (i = 0; i < count; i++) {
 		damage->box[i].x1 = boxes[i].x1 + dx;
@@ -289,9 +289,11 @@ _sna_damage_create_elt_from_boxes(struct sna_damage *damage,
 	}
 	damage->box += i;
 	damage->remain -= i;
+
+	return damage;
 }
 
-static void
+static struct sna_damage *
 _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 				       const xRectangle *r, int count,
 				       int16_t dx, int16_t dy)
@@ -317,13 +319,13 @@ _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 		count -=n;
 		r += n;
 		if (count == 0)
-			return;
+			return damage;
 	}
 
 	DBG(("    %s(): new elt\n", __FUNCTION__));
 
 	if (!_sna_damage_create_boxes(damage, count))
-		return;
+		return damage;
 
 	for (i = 0; i < count; i++) {
 		damage->box[i].x1 = r[i].x + dx;
@@ -333,9 +335,11 @@ _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 	}
 	damage->box += n;
 	damage->remain -= n;
+
+	return damage;
 }
 
-static void
+static struct sna_damage *
 _sna_damage_create_elt_from_points(struct sna_damage *damage,
 				   const DDXPointRec *p, int count,
 				   int16_t dx, int16_t dy)
@@ -361,13 +365,13 @@ _sna_damage_create_elt_from_points(struct sna_damage *damage,
 		count -=n;
 		p += n;
 		if (count == 0)
-			return;
+			return damage;
 	}
 
 	DBG(("    %s(): new elt\n", __FUNCTION__));
 
 	if (! _sna_damage_create_boxes(damage, count))
-		return;
+		return damage;
 
 	for (i = 0; i < count; i++) {
 		damage->box[i].x1 = p[i].x + dx;
@@ -377,6 +381,8 @@ _sna_damage_create_elt_from_points(struct sna_damage *damage,
 	}
 	damage->box += count;
 	damage->remain -= count;
+
+	return damage;
 }
 
 static void free_list(struct list *head)
@@ -407,6 +413,21 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 	nboxes -= damage->remain;
 	if (nboxes == 0)
 		goto done;
+	if (nboxes == 1) {
+		pixman_region16_t tmp;
+
+		tmp.extents = damage->embedded_box.box[0];
+		tmp.data = NULL;
+
+		if (damage->mode == DAMAGE_ADD)
+			pixman_region_union(region, region, &tmp);
+		else
+			pixman_region_subtract(region, region, &tmp);
+		damage->extents = region->extents;
+
+		goto done;
+	}
+
 	if (damage->mode == DAMAGE_ADD)
 		nboxes += REGION_NUM_RECTS(region);
 
@@ -496,6 +517,23 @@ done:
 	DBG(("    reduce: after region.n=%d\n", REGION_NUM_RECTS(region)));
 }
 
+static void damage_union(struct sna_damage *damage, const BoxRec *box)
+{
+	if (damage->extents.x2 < damage->extents.x1) {
+		damage->extents = *box;
+	} else {
+		if (damage->extents.x1 > box->x1)
+			damage->extents.x1 = box->x1;
+		if (damage->extents.x2 < box->x2)
+			damage->extents.x2 = box->x2;
+
+		if (damage->extents.y1 > box->y1)
+			damage->extents.y1 = box->y1;
+		if (damage->extents.y2 < box->y2)
+			damage->extents.y2 = box->y2;
+	}
+}
+
 inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 						  RegionPtr region)
 {
@@ -516,7 +554,7 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 
 	if (REGION_NUM_RECTS(&damage->region) <= 1) {
 		pixman_region_union(&damage->region, &damage->region, region);
-		damage->extents = damage->region.extents;
+		damage_union(damage, &region->extents);
 		return damage;
 	}
 
@@ -524,17 +562,7 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 					     &region->extents) == PIXMAN_REGION_IN)
 		return damage;
 
-
-	if (damage->extents.x1 > region->extents.x1)
-		damage->extents.x1 = region->extents.x1;
-	if (damage->extents.x2 < region->extents.x2)
-		damage->extents.x2 = region->extents.x2;
-
-	if (damage->extents.y1 > region->extents.y1)
-		damage->extents.y1 = region->extents.y1;
-	if (damage->extents.y2 < region->extents.y2)
-		damage->extents.y2 = region->extents.y2;
-
+	damage_union(damage, &region->extents);
 	return _sna_damage_create_elt(damage,
 				      REGION_RECTS(region),
 				      REGION_NUM_RECTS(region));
@@ -566,6 +594,50 @@ fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage,
 }
 #endif
 
+static void _pixman_region_union_box(RegionRec *region, const BoxRec *box)
+{
+	RegionRec u = { *box, NULL };
+	pixman_region_union(region, region, &u);
+}
+
+static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
+					       const BoxRec *box)
+{
+	if (box->y2 <= box->y1 || box->x2 <= box->x1)
+		return damage;
+
+	if (!damage) {
+		damage = _sna_damage_create();
+		if (damage == NULL)
+			return NULL;
+	} else switch (damage->mode) {
+	case DAMAGE_ALL:
+		return damage;
+	case DAMAGE_SUBTRACT:
+		__sna_damage_reduce(damage);
+	case DAMAGE_ADD:
+		break;
+	}
+
+	switch (REGION_NUM_RECTS(&damage->region)) {
+	case 0:
+		pixman_region_init_rects(&damage->region, box, 1);
+		damage_union(damage, box);
+		return damage;
+	case 1:
+		_pixman_region_union_box(&damage->region, box);
+		damage_union(damage, box);
+		return damage;
+	}
+
+	if (pixman_region_contains_rectangle(&damage->region,
+					     (BoxPtr)box) == PIXMAN_REGION_IN)
+		return damage;
+
+	damage_union(damage, box);
+	return _sna_damage_create_elt(damage, box, 1);
+}
+
 inline static struct sna_damage *
 __sna_damage_add_boxes(struct sna_damage *damage,
 		       const BoxRec *box, int n,
@@ -608,29 +680,15 @@ __sna_damage_add_boxes(struct sna_damage *damage,
 	extents.y1 += dy;
 	extents.y2 += dy;
 
+	if (n == 1)
+		return __sna_damage_add_box(damage, &extents);
+
 	if (pixman_region_contains_rectangle(&damage->region,
 					     &extents) == PIXMAN_REGION_IN)
 		return damage;
 
-	_sna_damage_create_elt_from_boxes(damage, box, n, dx, dy);
-
-	if (REGION_NUM_RECTS(&damage->region) == 0) {
-		damage->region.extents = damage->embedded_box.box[0];
-		damage->region.data = NULL;
-		damage->extents = extents;
-	} else {
-		if (damage->extents.x1 > extents.x1)
-			damage->extents.x1 = extents.x1;
-		if (damage->extents.x2 < extents.x2)
-			damage->extents.x2 = extents.x2;
-
-		if (damage->extents.y1 > extents.y1)
-			damage->extents.y1 = extents.y1;
-		if (damage->extents.y2 < extents.y2)
-			damage->extents.y2 = extents.y2;
-	}
-
-	return damage;
+	damage_union(damage, &extents);
+	return _sna_damage_create_elt_from_boxes(damage, box, n, dx, dy);
 }
 
 #if DEBUG_DAMAGE
@@ -660,12 +718,6 @@ struct sna_damage *_sna_damage_add_boxes(struct sna_damage *damage,
 }
 #endif
 
-static void _pixman_region_union_box(RegionRec *region, const BoxRec *box)
-{
-	RegionRec u = { *box, NULL };
-	pixman_region_union(region, region, &u);
-}
-
 inline static struct sna_damage *
 __sna_damage_add_rectangles(struct sna_damage *damage,
 			    const xRectangle *r, int n,
@@ -698,6 +750,9 @@ __sna_damage_add_rectangles(struct sna_damage *damage,
 	extents.y1 += dy;
 	extents.y2 += dy;
 
+	if (n == 1)
+		return __sna_damage_add_box(damage, &extents);
+
 	if (!damage) {
 		damage = _sna_damage_create();
 		if (damage == NULL)
@@ -715,25 +770,8 @@ __sna_damage_add_rectangles(struct sna_damage *damage,
 					     &extents) == PIXMAN_REGION_IN)
 		return damage;
 
-	_sna_damage_create_elt_from_rectangles(damage, r, n, dx, dy);
-
-	if (REGION_NUM_RECTS(&damage->region) == 0) {
-		damage->region.extents = damage->embedded_box.box[0];
-		damage->region.data = NULL;
-		damage->extents = extents;
-	} else {
-		if (damage->extents.x1 > extents.x1)
-			damage->extents.x1 = extents.x1;
-		if (damage->extents.x2 < extents.x2)
-			damage->extents.x2 = extents.x2;
-
-		if (damage->extents.y1 > extents.y1)
-			damage->extents.y1 = extents.y1;
-		if (damage->extents.y2 < extents.y2)
-			damage->extents.y2 = extents.y2;
-	}
-
-	return damage;
+	damage_union(damage, &extents);
+	return _sna_damage_create_elt_from_rectangles(damage, r, n, dx, dy);
 }
 
 #if DEBUG_DAMAGE
@@ -792,6 +830,9 @@ __sna_damage_add_points(struct sna_damage *damage,
 	extents.y1 += dy;
 	extents.y2 += dy + 1;
 
+	if (n == 1)
+		return __sna_damage_add_box(damage, &extents);
+
 	if (!damage) {
 		damage = _sna_damage_create();
 		if (damage == NULL)
@@ -809,24 +850,9 @@ __sna_damage_add_points(struct sna_damage *damage,
 					     &extents) == PIXMAN_REGION_IN)
 		return damage;
 
+	damage_union(damage, &extents);
 	_sna_damage_create_elt_from_points(damage, p, n, dx, dy);
 
-	if (REGION_NUM_RECTS(&damage->region) == 0) {
-		damage->region.extents = damage->embedded_box.box[0];
-		damage->region.data = NULL;
-		damage->extents = extents;
-	} else {
-		if (damage->extents.x1 > extents.x1)
-			damage->extents.x1 = extents.x1;
-		if (damage->extents.x2 < extents.x2)
-			damage->extents.x2 = extents.x2;
-
-		if (damage->extents.y1 > extents.y1)
-			damage->extents.y1 = extents.y1;
-		if (damage->extents.y2 < extents.y2)
-			damage->extents.y2 = extents.y2;
-	}
-
 	return damage;
 }
 
@@ -857,53 +883,6 @@ struct sna_damage *_sna_damage_add_points(struct sna_damage *damage,
 }
 #endif
 
-inline static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
-						      const BoxRec *box)
-{
-	if (box->y2 <= box->y1 || box->x2 <= box->x1)
-		return damage;
-
-	if (!damage) {
-		damage = _sna_damage_create();
-		if (damage == NULL)
-			return NULL;
-	} else switch (damage->mode) {
-	case DAMAGE_ALL:
-		return damage;
-	case DAMAGE_SUBTRACT:
-		__sna_damage_reduce(damage);
-	case DAMAGE_ADD:
-		break;
-	}
-
-	switch (REGION_NUM_RECTS(&damage->region)) {
-	case 0:
-		pixman_region_init_rects(&damage->region, box, 1);
-		damage->extents = *box;
-		return damage;
-	case 1:
-		_pixman_region_union_box(&damage->region, box);
-		damage->extents = damage->region.extents;
-		return damage;
-	}
-
-	if (pixman_region_contains_rectangle(&damage->region,
-					     (BoxPtr)box) == PIXMAN_REGION_IN)
-		return damage;
-
-	if (damage->extents.x1 > box->x1)
-		damage->extents.x1 = box->x1;
-	if (damage->extents.x2 < box->x2)
-		damage->extents.x2 = box->x2;
-
-	if (damage->extents.y1 > box->y1)
-		damage->extents.y1 = box->y1;
-	if (damage->extents.y2 < box->y2)
-		damage->extents.y2 = box->y2;
-
-	return _sna_damage_create_elt(damage, box, 1);
-}
-
 #if DEBUG_DAMAGE
 fastcall struct sna_damage *_sna_damage_add_box(struct sna_damage *damage,
 						const BoxRec *box)
@@ -971,12 +950,16 @@ struct sna_damage *_sna_damage_is_all(struct sna_damage *damage,
 	if (damage->region.data)
 		return damage;
 
+	DBG(("%s: (%d, %d), (%d, %d)\n", __FUNCTION__,
+	     damage->extents.x1, damage->extents.y1,
+	     damage->extents.x2, damage->extents.y2));
+
 	assert(damage->extents.x1 == 0 &&
 	       damage->extents.y1 == 0 &&
 	       damage->extents.x2 == width &&
 	       damage->extents.y2 == height);
 
-	return _sna_damage_all(damage, width, height);
+	return __sna_damage_all(damage, width, height);
 }
 
 static bool box_contains(const BoxRec *a, const BoxRec *b)
@@ -1213,16 +1196,15 @@ int _sna_damage_contains_box(struct sna_damage *damage,
 bool _sna_damage_contains_box__no_reduce(const struct sna_damage *damage,
 					 const BoxRec *box)
 {
-	int ret;
-
 	assert(damage && damage->mode != DAMAGE_ALL);
+	if (damage->mode == DAMAGE_SUBTRACT)
+		return false;
+
 	if (!sna_damage_maybe_contains_box(damage, box))
 		return false;
 
-	ret = pixman_region_contains_rectangle((RegionPtr)&damage->region,
-					       (BoxPtr)box);
-	return (!damage->dirty || damage->mode == DAMAGE_ADD) &&
-		ret == PIXMAN_REGION_IN;
+	return pixman_region_contains_rectangle((RegionPtr)&damage->region,
+						(BoxPtr)box) == PIXMAN_REGION_IN;
 }
 
 static Bool __sna_damage_intersect(struct sna_damage *damage,
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 142c1aa..304ff0f 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -69,7 +69,7 @@ no_render_composite(struct sna *sna,
 		    int16_t width, int16_t height,
 		    struct sna_composite_op *tmp)
 {
-	DBG(("%s ()\n", __FUNCTION__));
+	DBG(("%s (op=%d, mask? %d)\n", __FUNCTION__, op, mask != NULL));
 
 	if (mask == NULL &&
 	    sna_blt_composite(sna,
commit 8d2f1eefe142b65db7d8821ba0f80fdb0902b2d5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 12 21:08:41 2012 +0000

    sna: Pass a hint that we may like to perform the fallback in place
    
    If we do not read back from the destination, we may prefer to utilize a
    GTT mapping and perform the fallback in place. For the rare event that we
    wish to fall back and do not already have a shadow...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 07ae683..09926ad 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -442,6 +442,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 
 #define MOVE_WRITE 0x1
 #define MOVE_READ 0x2
+#define MOVE_INPLACE_HINT 0x4
 bool must_check _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c8a1e15..8aa30fc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -60,6 +60,7 @@
 #define FORCE_FLUSH 0
 
 #define USE_SPANS 0
+#define USE_INPLACE 1
 #define USE_ZERO_SPANS 1
 #define USE_BO_FOR_SCRATCH_PIXMAP 1
 
@@ -726,6 +727,9 @@ static inline bool pixmap_inplace(struct sna *sna,
 		sna->kgem.half_cpu_cache_pages;
 }
 
+static bool
+sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box);
+
 bool
 _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 {
@@ -741,6 +745,11 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		return true;
 	}
 
+	if (DAMAGE_IS_ALL(priv->cpu_damage)) {
+		DBG(("%s: all-damaged\n", __FUNCTION__));
+		goto done;
+	}
+
 	DBG(("%s: gpu_bo=%d, gpu_damage=%p\n",
 	     __FUNCTION__,
 	     priv->gpu_bo ? priv->gpu_bo->handle : 0,
@@ -890,7 +899,7 @@ region_subsumes_damage(const RegionRec *region, struct sna_damage *damage)
 
 	if (re->x2 < de->x2 || re->x1 > de->x1 ||
 	    re->y2 < de->y2 || re->y1 > de->y1) {
-		DBG(("%s: no overlap\n", __FUNCTION__));
+		DBG(("%s: not contained\n", __FUNCTION__));
 		return false;
 	}
 
@@ -1093,6 +1102,28 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		}
 	}
 
+	if (flags & MOVE_INPLACE_HINT &&
+	    priv->stride && priv->gpu_bo &&
+	    (DAMAGE_IS_ALL(priv->gpu_damage) ||
+	     region_inplace(sna, pixmap, region, priv)) &&
+	    sna_pixmap_move_area_to_gpu(pixmap, &region->extents)) {
+		kgem_bo_submit(&sna->kgem, priv->gpu_bo);
+
+		DBG(("%s: operate inplace\n", __FUNCTION__));
+
+		pixmap->devPrivate.ptr =
+			kgem_bo_map(&sna->kgem, priv->gpu_bo);
+		if (pixmap->devPrivate.ptr != NULL) {
+			priv->mapped = 1;
+			pixmap->devKind = priv->gpu_bo->pitch;
+			if (!DAMAGE_IS_ALL(priv->gpu_damage))
+				sna_damage_add(&priv->gpu_damage, region);
+			return true;
+		}
+
+		priv->mapped = 0;
+	}
+
 	if (priv->mapped) {
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = 0;
@@ -1254,10 +1285,50 @@ done:
 	if (dx | dy)
 		RegionTranslate(region, -dx, -dy);
 
-	priv->source_count = SOURCE_BIAS;
 	return true;
 }
 
+static bool alu_overwrites(uint8_t alu)
+{
+	switch (alu) {
+	case GXclear:
+	case GXcopy:
+	case GXcopyInverted:
+	case GXset:
+		return true;
+	default:
+		return false;
+	}
+}
+
+inline static unsigned drawable_gc_flags(DrawablePtr draw, GCPtr gc)
+{
+	unsigned flags = MOVE_READ | MOVE_WRITE;
+
+	if (!USE_INPLACE)
+		return flags;
+
+	if (!alu_overwrites(gc->alu)) {
+		DBG(("%s: read due to alu %d\n", __FUNCTION__, gc->alu));
+		return flags;
+	}
+
+	if (!PM_IS_SOLID(draw, gc->planemask)) {
+		DBG(("%s: read due to planemask %lx\n",
+		     __FUNCTION__, gc->planemask));
+		return flags;
+	}
+
+	if (gc->fillStyle == FillStippled) {
+		DBG(("%s: read due to fill %d\n",
+		     __FUNCTION__, gc->fillStyle));
+		return flags;
+	}
+
+	DBG(("%s: try operating on drawable inplace\n", __FUNCTION__));
+	return flags | MOVE_INPLACE_HINT;
+}
+
 static bool
 sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
 {
@@ -1685,8 +1756,10 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	if (priv == NULL)
 		return NULL;
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage))
+	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
+		DBG(("%s: already all-damaged\n", __FUNCTION__));
 		goto done;
+	}
 
 	if ((flags & MOVE_READ) == 0)
 		sna_damage_destroy(&priv->cpu_damage);
@@ -1768,10 +1841,6 @@ done:
 	sna_damage_reduce_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height);
-	if (priv->mapped) {
-		pixmap->devPrivate.ptr = NULL;
-		priv->mapped = 0;
-	}
 	if (!priv->pinned)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	return priv;
@@ -2579,7 +2648,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",
@@ -3659,7 +3728,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	fbFillSpans(drawable, gc, n, pt, width, sorted);
@@ -3688,7 +3757,7 @@ sna_set_spans(DrawablePtr drawable, GCPtr gc, char *src,
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	fbSetSpans(drawable, gc, src, pt, width, n, sorted);
@@ -4308,7 +4377,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	DBG(("%s: fbPolyPoint\n", __FUNCTION__));
@@ -5187,7 +5256,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	DBG(("%s: fbPolyLine\n", __FUNCTION__));
@@ -6040,7 +6109,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	DBG(("%s: fbPolySegment\n", __FUNCTION__));
@@ -6590,7 +6659,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	DBG(("%s: fbPolyRectangle\n", __FUNCTION__));
@@ -6716,7 +6785,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	/* XXX may still fallthrough to miZeroPolyArc */
@@ -8112,19 +8181,6 @@ sna_poly_fill_rect_extents(DrawablePtr drawable, GCPtr gc,
 	return 1 | clipped << 1;
 }
 
-static bool alu_overwrites(uint8_t alu)
-{
-	switch (alu) {
-	case GXclear:
-	case GXcopy:
-	case GXcopyInverted:
-	case GXset:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static void
 sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 {
@@ -8842,7 +8898,7 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 		if (!sna_gc_move_to_cpu(gc, drawable))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     MOVE_READ | MOVE_WRITE))
+						     drawable_gc_flags(drawable, gc)))
 			goto out;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -8909,7 +8965,7 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 		if (!sna_gc_move_to_cpu(gc, drawable))
 			goto out;
 		if(!sna_drawable_move_region_to_cpu(drawable, &region,
-						    MOVE_READ | MOVE_WRITE))
+						    drawable_gc_flags(drawable, gc)))
 			goto out;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -9155,7 +9211,8 @@ fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
-	if (!sna_drawable_move_region_to_cpu(drawable, &region, true))
+	if (!sna_drawable_move_region_to_cpu(drawable, &region,
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -9395,7 +9452,7 @@ sna_push_pixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
 	if (!sna_pixmap_move_to_cpu(bitmap, MOVE_READ))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable, gc)))
 		goto out;
 
 	DBG(("%s: fallback, fbPushPixels(%d, %d, %d %d)\n",
commit 48ab72754d0069a3247c5fee8c353a6b593eaed9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 12 21:07:15 2012 +0000

    sna: Use the GPU bo if it is all damaged
    
    By marking the scratch upload pixmap as damaged in both domains, we
    confused the texture upload path and made it upload the pixmap a second
    time. If either bo is all-damaged, use it!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 9052d68..142c1aa 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -477,13 +477,14 @@ sna_render_pixmap_bo(struct sna *sna,
 
 	priv = sna_pixmap(pixmap);
 	if (priv) {
-		if (priv->gpu_bo && priv->cpu_damage == NULL) {
+		if (priv->gpu_bo &&
+		    (DAMAGE_IS_ALL(priv->gpu_damage) || !priv->cpu_damage)) {
 			channel->bo = kgem_bo_reference(priv->gpu_bo);
 			return 1;
 		}
 
 		if (priv->cpu_bo &&
-		    DAMAGE_IS_ALL(priv->cpu_damage) &&
+		    (DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
 		    priv->cpu_bo->pitch < 4096) {
 			channel->bo = kgem_bo_reference(priv->cpu_bo);
 			return 1;
commit 20a4d7181983c7fed289844be49ccaf3f56965c6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 12 21:05:51 2012 +0000

    sna: Dump batch contents for debugging before modification
    
    We need to dump the batch contents before the modifications (the
    self-relocation fixups) are applied during submission of the batch.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 50ed4bf..be01f67 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1432,6 +1432,10 @@ void _kgem_submit(struct kgem *kgem)
 
 	kgem_finish_partials(kgem);
 
+#if DEBUG_BATCH
+	__kgem_batch_debug(kgem, batch_end);
+#endif
+
 	rq = kgem->next_request;
 	if (kgem->surface != kgem->max_batch_size)
 		size = compact_batch_surface(kgem);
@@ -1460,10 +1464,6 @@ void _kgem_submit(struct kgem *kgem)
 
 		kgem_fixup_self_relocs(kgem, rq->bo);
 
-#if DEBUG_BATCH
-		__kgem_batch_debug(kgem, batch_end);
-#endif
-
 		if (kgem_batch_write(kgem, handle, size) == 0) {
 			struct drm_i915_gem_execbuffer2 execbuf;
 			int ret, retry = 3;
@@ -2343,8 +2343,6 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 		io->used = bo->delta;
 		bubble_sort_partial(kgem, io);
 	}
-
-	assert(validate_partials(kgem));
 }
 
 void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
@@ -2860,6 +2858,8 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	struct kgem_bo *bo;
 
 	assert(target->proxy == NULL);
+	DBG(("%s: target handle=%d, offset=%d, length=%d, io=%d\n",
+	     __FUNCTION__, target->handle, offset, length, target->io));
 
 	bo = __kgem_bo_alloc(target->handle, length);
 	if (bo == NULL)
commit 7932a2a259069bb7d19af8566f7b7704f6c2eade
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 12 14:06:22 2012 +0000

    sna: Update for removal of backwards compatible miWideDash
    
    miWideDash() no longer calls miZeroDashLine() when called with
    gc->lineWidth==0, so we need to do so ourselves.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3576176..c8a1e15 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4976,8 +4976,9 @@ static void
 sna_poly_line(DrawablePtr drawable, GCPtr gc,
 	      int mode, int n, DDXPointPtr pt)
 {
-	PixmapPtr pixmap = get_drawable_pixmap(drawable);
-	struct sna *sna = to_sna_from_pixmap(pixmap);
+	PixmapPtr pixmap;
+	struct sna *sna;
+	struct sna_pixmap *priv;
 	struct sna_damage **damage;
 	RegionRec region;
 	unsigned flags;
@@ -4997,6 +4998,8 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	pixmap = get_drawable_pixmap(drawable);
+	sna = to_sna_from_pixmap(pixmap);
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -5013,15 +5016,26 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
-	if (gc->lineStyle != LineSolid)
+	priv = sna_pixmap(pixmap);
+	if (!priv) {
+		DBG(("%s: not attached to pixmap %ld\n",
+		     __FUNCTION__, pixmap->drawable.serialNumber));
+		goto fallback;
+	}
+
+	if (gc->lineStyle != LineSolid) {
+		DBG(("%s: lineStyle, %d, is not solid\n",
+		     __FUNCTION__, gc->lineStyle));
 		goto spans_fallback;
+	}
 	if (!(gc->lineWidth == 0 ||
-	      (gc->lineWidth == 1 && (n == 1 || gc->alu == GXcopy))))
+	      (gc->lineWidth == 1 && (n == 1 || gc->alu == GXcopy)))) {
+		DBG(("%s: non-zero lineWidth %d\n",
+		     __FUNCTION__, gc->lineWidth));
 		goto spans_fallback;
+	}
 
 	if (gc->fillStyle == FillSolid) {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
-
 		DBG(("%s: trying solid fill [%08lx]\n",
 		     __FUNCTION__, gc->fgPixel));
 
@@ -5056,8 +5070,6 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 
 		}
 	} else if (flags & 4) {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
-
 		/* Try converting these to a set of rectangles instead */
 		if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage)) {
 			DDXPointRec p1, p2;
@@ -5132,15 +5144,26 @@ spans_fallback:
 	    sna_drawable_use_gpu_bo(drawable, &region.extents, &damage)) {
 		DBG(("%s: converting line into spans\n", __FUNCTION__));
 		switch (gc->lineStyle) {
+		default:
+			assert(0);
 		case LineSolid:
-			if (gc->lineWidth == 0)
+			if (gc->lineWidth == 0) {
+				DBG(("%s: miZeroLine\n", __FUNCTION__));
 				miZeroLine(drawable, gc, mode, n, pt);
-			else
+			} else {
+				DBG(("%s: miWideLine\n", __FUNCTION__));
 				miWideLine(drawable, gc, mode, n, pt);
+			}
 			break;
 		case LineOnOffDash:
 		case LineDoubleDash:
-			miWideDash(drawable, gc, mode, n, pt);
+			if (gc->lineWidth == 0) {
+				DBG(("%s: miZeroDashLine\n", __FUNCTION__));
+				miZeroDashLine(drawable, gc, mode, n, pt);
+			} else {
+				DBG(("%s: miWideDash\n", __FUNCTION__));
+				miWideDash(drawable, gc, mode, n, pt);
+			}
 			break;
 		}
 		return;


More information about the xorg-commit mailing list