xf86-video-intel: 8 commits - src/sna/gen3_render.c src/sna/sna_accel.c src/sna/sna_damage.c

Chris Wilson ickle at kemper.freedesktop.org
Wed Jan 18 17:00:36 PST 2012


 src/sna/gen3_render.c |   27 ---
 src/sna/sna_accel.c   |  348 ++++++++++++++++++++++++++++++++++----------------
 src/sna/sna_damage.c  |   98 +++++++-------
 3 files changed, 299 insertions(+), 174 deletions(-)

New commits:
commit 35f81005f91d294e61bb4ced7cbddd1a76ccb324
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 19 00:41:08 2012 +0000

    sna/damage: Always mark the damage as dirty when recording new boxes
    
    A few of the create_elts() routines missed marking the damage as dirty
    so that if only part of the embedded box was used (i.e. the damage
    contained fewer than 8 rectangles that needed to be included in the
    damage region) then those were being ignored during migration and testing.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=44682
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index eda52c0..490e7a9 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -141,12 +141,12 @@ static const char *_debug_describe_damage(char *buf, int max,
 				damage->mode == DAMAGE_SUBTRACT ? '-' : '+');
 		} else
 			damage_str[0] = '\0';
-		snprintf(buf, max, "[[(%d, %d), (%d, %d)]: %s %s]",
+		snprintf(buf, max, "[[(%d, %d), (%d, %d)]: %s %s]%c",
 			 damage->extents.x1, damage->extents.y1,
 			 damage->extents.x2, damage->extents.y2,
 			 _debug_describe_region(region_str, str_max,
 						&damage->region),
-			 damage_str);
+			 damage_str, damage->dirty ? '*' : ' ');
 	}
 
 	return buf;
@@ -257,6 +257,7 @@ _sna_damage_create_elt_from_boxes(struct sna_damage *damage,
 
 	DBG(("    %s: prev=(remain %d)\n", __FUNCTION__, damage->remain));
 
+	damage->dirty = true;
 	n = count;
 	if (n > damage->remain)
 		n = damage->remain;
@@ -303,6 +304,7 @@ _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 	DBG(("    %s: prev=(remain %d), count=%d\n",
 	     __FUNCTION__, damage->remain, count));
 
+	damage->dirty = true;
 	n = count;
 	if (n > damage->remain)
 		n = damage->remain;
@@ -349,6 +351,7 @@ _sna_damage_create_elt_from_points(struct sna_damage *damage,
 	DBG(("    %s: prev=(remain %d), count=%d\n",
 	     __FUNCTION__, damage->remain, count));
 
+	damage->dirty = true;
 	n = count;
 	if (n > damage->remain)
 		n = damage->remain;
commit 36e691ea900d7979950c19714e15c5887d3ea039
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 19 00:39:28 2012 +0000

    sna: Demote MOVE_READ if the GC operation covers the clip
    
    If the write operation fills the entire clip, then we can demote and
    possibly avoid having to read back the clip from the GPU, provided that
    we do not need the destination data due to an arithmetic operation or mask.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f8688c9..0f21ffa 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -796,7 +796,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 	     priv->gpu_damage));
 
 	if ((flags & MOVE_READ) == 0) {
-		assert(flags == MOVE_WRITE);
+		assert(flags & MOVE_WRITE);
 		sna_damage_destroy(&priv->gpu_damage);
 
 		if (priv->gpu && pixmap_inplace(sna, pixmap, priv)) {
@@ -1076,7 +1076,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	}
 
 	if ((flags & MOVE_READ) == 0) {
-		assert(flags == MOVE_WRITE);
+		assert(flags & MOVE_WRITE);
 
 		if (priv->stride && priv->gpu_bo &&
 		    region_inplace(sna, pixmap, region, priv)) {
@@ -1181,7 +1181,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		goto done;
 
 	if ((flags & MOVE_READ) == 0) {
-		assert(flags == MOVE_WRITE);
+		assert(flags & MOVE_WRITE);
 		sna_damage_subtract(&priv->gpu_damage, region);
 		goto done;
 	}
@@ -1380,32 +1380,38 @@ static bool alu_overwrites(uint8_t alu)
 	}
 }
 
-inline static unsigned drawable_gc_flags(DrawablePtr draw, GCPtr gc)
+inline static unsigned drawable_gc_flags(DrawablePtr draw,
+					 GCPtr gc,
+					 bool read)
 {
-	unsigned flags = MOVE_READ | MOVE_WRITE;
-
-	if (!USE_INPLACE)
-		return flags;
+	unsigned flags;
 
 	if (!alu_overwrites(gc->alu)) {
 		DBG(("%s: read due to alu %d\n", __FUNCTION__, gc->alu));
-		return flags;
+		return MOVE_READ | MOVE_WRITE;
 	}
 
 	if (!PM_IS_SOLID(draw, gc->planemask)) {
 		DBG(("%s: read due to planemask %lx\n",
 		     __FUNCTION__, gc->planemask));
-		return flags;
+		return MOVE_READ | MOVE_WRITE;
 	}
 
 	if (gc->fillStyle == FillStippled) {
 		DBG(("%s: read due to fill %d\n",
 		     __FUNCTION__, gc->fillStyle));
-		return flags;
+		return MOVE_READ | MOVE_WRITE;
 	}
 
 	DBG(("%s: try operating on drawable inplace\n", __FUNCTION__));
-	return flags | MOVE_INPLACE_HINT;
+	flags = MOVE_WRITE;
+	if (USE_INPLACE)
+		flags |= MOVE_INPLACE_HINT;
+	if (read) {
+		DBG(("%s: partial write\n", __FUNCTION__));
+		flags |= MOVE_READ;
+	}
+	return flags;
 }
 
 static bool
@@ -2806,7 +2812,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable, gc,
+							       true)))
 		goto out;
 
 	DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",
@@ -3910,7 +3917,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable,
+							       gc, true)))
 		goto out;
 
 	fbFillSpans(drawable, gc, n, pt, width, sorted);
@@ -3946,7 +3954,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable,
+							       gc, true)))
 		goto out;
 
 	fbSetSpans(drawable, gc, src, pt, width, n, sorted);
@@ -4575,7 +4584,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable, gc,
+							       n > 1)))
 		goto out;
 
 	DBG(("%s: fbPolyPoint\n", __FUNCTION__));
@@ -5457,7 +5467,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable, gc,
+							       n > 2)))
 		goto out;
 
 	DBG(("%s: fbPolyLine\n", __FUNCTION__));
@@ -6313,7 +6324,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable, gc,
+							       n > 1)))
 		goto out;
 
 	DBG(("%s: fbPolySegment\n", __FUNCTION__));
@@ -6866,7 +6878,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable,
+							       gc, true)))
 		goto out;
 
 	DBG(("%s: fbPolyRectangle\n", __FUNCTION__));
@@ -6995,7 +7008,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable,
+							       gc, true)))
 		goto out;
 
 	/* XXX may still fallthrough to miZeroPolyArc */
@@ -8515,11 +8529,9 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, draw))
 		goto out;
 
-	flags = MOVE_WRITE;
-	if (gc->fillStyle == FillStippled  ||
-	    !(gc->alu == GXcopy || gc->alu == GXclear || gc->alu == GXset))
-		flags |= MOVE_READ;
-	if (!sna_drawable_move_region_to_cpu(draw, &region, flags))
+	if (!sna_drawable_move_region_to_cpu(draw, &region,
+					     drawable_gc_flags(draw, gc,
+							       n > 1)))
 		goto out;
 
 	DBG(("%s: fallback - fbPolyFillRect\n", __FUNCTION__));
@@ -8971,7 +8983,8 @@ force_fallback:
 			goto out;
 
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     MOVE_READ | MOVE_WRITE))
+						     drawable_gc_flags(drawable,
+								       gc, true)))
 			goto out;
 
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
@@ -9052,7 +9065,7 @@ force_fallback:
 		if (!sna_gc_move_to_cpu(gc, drawable))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     MOVE_READ | MOVE_WRITE))
+						     drawable_gc_flags(drawable, gc, true)))
 			goto out;
 
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
@@ -9133,7 +9146,8 @@ force_fallback:
 		if (!sna_gc_move_to_cpu(gc, drawable))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     drawable_gc_flags(drawable, gc)))
+						     drawable_gc_flags(drawable,
+								       gc, n > 1)))
 			goto out;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -9207,7 +9221,8 @@ force_fallback:
 		if (!sna_gc_move_to_cpu(gc, drawable))
 			goto out;
 		if(!sna_drawable_move_region_to_cpu(drawable, &region,
-						    drawable_gc_flags(drawable, gc)))
+						    drawable_gc_flags(drawable,
+								      gc, n > 1)))
 			goto out;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -9457,7 +9472,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable,
+							       gc, n > 1)))
 		goto out;
 
 	DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -9524,7 +9540,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(drawable,
+							       gc, true)))
 		goto out;
 	DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
 	fbPolyGlyphBlt(drawable, gc, x, y, n, info, base);
@@ -9700,7 +9717,8 @@ sna_push_pixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
 	if (!sna_pixmap_move_to_cpu(bitmap, MOVE_READ))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc)))
+					     drawable_gc_flags(drawable,
+							       gc, false)))
 		goto out;
 
 	DBG(("%s: fallback, fbPushPixels(%d, %d, %d %d)\n",
commit 17efdbc48c964d9d7b0af630c13a6ceb62fb1787
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 19 00:36:52 2012 +0000

    sna: Clip damage area with source extents for fallback
    
    The damage tracking code asserts that it only handles clip regions.
    However, sna_copy_area() was failing to ensure that its damage region
    was being clipped by the source drawable, leading to out of bounds reads
    during forced fallback.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dc9ac64..f8688c9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3361,6 +3361,18 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		region.extents.y2 = region.extents.y1 + height;
 		region.data = NULL;
 		RegionIntersect(&region, &region, gc->pCompositeClip);
+
+		{
+			RegionRec clip;
+
+			clip.extents.x1 = -(src_x - dst_x - dst->x + src->x);
+			clip.extents.y1 = -(src_y - dst_y - dst->y + src->y);
+			clip.extents.x2 = clip.extents.x1 + src->width;
+			clip.extents.y2 = clip.extents.y1 + src->height;
+			clip.data = NULL;
+
+			RegionIntersect(&region, &region, &clip);
+		}
 		if (!RegionNotEmpty(&region))
 			return NULL;
 
commit fb07243c9a08b24e2dc03b86dc16d3cd72cbfcba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 18 21:30:34 2012 +0000

    sna: Fine grained fallback debugging for core drawing routines
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index da14d26..dc9ac64 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -65,7 +65,26 @@
 
 #define MIGRATE_ALL 0
 
+#define ACCEL_FILL_SPANS 1
+#define ACCEL_SET_SPANS 1
 #define ACCEL_PUT_IMAGE 1
+#define ACCEL_COPY_AREA 1
+#define ACCEL_COPY_PLANE 1
+#define ACCEL_POLY_POINT 1
+#define ACCEL_POLY_LINE 1
+#define ACCEL_POLY_SEGMENT 1
+#define ACCEL_POLY_RECTANGLE 1
+#define ACCEL_POLY_ARC 1
+//#define ACCEL_FILL_POLYGON 1
+#define ACCEL_POLY_FILL_RECT 1
+//#define ACCEL_POLY_FILL_ARC 1
+#define ACCEL_POLY_TEXT8 1
+#define ACCEL_POLY_TEXT16 1
+#define ACCEL_IMAGE_TEXT8 1
+#define ACCEL_IMAGE_TEXT16 1
+#define ACCEL_IMAGE_GLYPH 1
+#define ACCEL_POLY_GLYPH 1
+#define ACCEL_PUSH_PIXELS 1
 
 static int sna_font_key;
 
@@ -759,8 +778,11 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv;
 
-	DBG(("%s(pixmap=%ld, flags=%x)\n", __FUNCTION__,
-	     pixmap->drawable.serialNumber, flags));
+	DBG(("%s(pixmap=%ld, %dx%d, flags=%x)\n", __FUNCTION__,
+	     pixmap->drawable.serialNumber,
+	     pixmap->drawable.width,
+	     pixmap->drawable.height,
+	     flags));
 
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
@@ -3324,7 +3346,8 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	DBG(("%s: src=(%d, %d)x(%d, %d) -> dst=(%d, %d)\n",
 	     __FUNCTION__, src_x, src_y, width, height, dst_x, dst_y));
 
-	if (wedged(sna) || !PM_IS_SOLID(dst, gc->planemask)) {
+	if (FORCE_FALLBACK || !ACCEL_COPY_AREA || wedged(sna) ||
+	    !PM_IS_SOLID(dst, gc->planemask)) {
 		RegionRec region, *ret;
 
 		DBG(("%s: -- fallback, wedged=%d, solid=%d [%x]\n",
@@ -3791,6 +3814,9 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_FILL_SPANS)
+		goto fallback;
+
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -3893,6 +3919,13 @@ sna_set_spans(DrawablePtr drawable, GCPtr gc, char *src,
 	     region.extents.x1, region.extents.y1,
 	     region.extents.x2, region.extents.y2));
 
+	if (FORCE_FALLBACK)
+		goto fallback;
+
+	if (!ACCEL_SET_SPANS)
+		goto fallback;
+
+fallback:
 	region.data = NULL;
 	region_maybe_clip(&region, gc->pCompositeClip);
 	if (!RegionNotEmpty(&region))
@@ -4301,6 +4334,12 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (!RegionNotEmpty(&region))
 		return NULL;
 
+	if (FORCE_FALLBACK)
+		goto fallback;
+
+	if (!ACCEL_COPY_PLANE)
+		goto fallback;
+
 	if (wedged(sna))
 		goto fallback;
 
@@ -4485,6 +4524,9 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_POLY_POINT)
+		goto fallback;
+
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -5211,6 +5253,9 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_POLY_LINE)
+		goto fallback;
+
 	pixmap = get_drawable_pixmap(drawable);
 	sna = to_sna_from_pixmap(pixmap);
 	if (wedged(sna)) {
@@ -6093,6 +6138,9 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_POLY_SEGMENT)
+		goto fallback;
+
 	pixmap = get_drawable_pixmap(drawable);
 	sna = to_sna_from_pixmap(pixmap);
 
@@ -6755,6 +6803,9 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_POLY_RECTANGLE)
+		goto fallback;
+
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -6893,6 +6944,9 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_POLY_ARC)
+		goto fallback;
+
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -8357,6 +8411,9 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_POLY_FILL_RECT)
+		goto fallback;
+
 	if (priv == NULL) {
 		DBG(("%s: fallback -- unattached\n", __FUNCTION__));
 		goto fallback;
@@ -8863,6 +8920,12 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (FORCE_FALLBACK)
+		goto force_fallback;
+
+	if (!ACCEL_POLY_TEXT8)
+		goto force_fallback;
+
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -8887,6 +8950,7 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 		return x + extents.overallRight;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, true)) {
+force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     Linear8Bit, &n, info);
@@ -8937,6 +9001,12 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (FORCE_FALLBACK)
+		goto force_fallback;
+
+	if (!ACCEL_POLY_TEXT16)
+		goto force_fallback;
+
 	for (i = n =  0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -8961,6 +9031,7 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 		return x + extents.overallRight;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, true)) {
+force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
@@ -9012,6 +9083,12 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (FORCE_FALLBACK)
+		goto force_fallback;
+
+	if (!ACCEL_IMAGE_TEXT8)
+		goto force_fallback;
+
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -9036,6 +9113,7 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 		return;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, false)) {
+force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     Linear8Bit, &n, info);
@@ -9078,6 +9156,12 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (FORCE_FALLBACK)
+		goto force_fallback;
+
+	if (!ACCEL_IMAGE_TEXT16)
+		goto force_fallback;
+
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -9102,6 +9186,7 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 		return;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, false)) {
+force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
@@ -9340,6 +9425,9 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_IMAGE_GLYPH)
+		goto fallback;
+
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -9404,6 +9492,9 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 	if (FORCE_FALLBACK)
 		goto fallback;
 
+	if (!ACCEL_POLY_GLYPH)
+		goto fallback;
+
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
commit 05f9764a8860cbdd7ea2ce7f04c221ec8481e0bc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 18 21:29:45 2012 +0000

    sna/damage: Fast path singular regions
    
    Mainly for consistency, so that we treat it like the other damage
    addition functions.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index a68db55..eda52c0 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -534,6 +534,50 @@ static void damage_union(struct sna_damage *damage, const BoxRec *box)
 	}
 }
 
+static void _pixman_region_union_box(RegionRec *region, const BoxRec *box)
+{
+	RegionRec u = { *box, NULL };
+	pixman_region_union(region, region, &u);
+}
+
+static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
+					       const BoxRec *box)
+{
+	if (box->y2 <= box->y1 || box->x2 <= box->x1)
+		return damage;
+
+	if (!damage) {
+		damage = _sna_damage_create();
+		if (damage == NULL)
+			return NULL;
+	} else switch (damage->mode) {
+	case DAMAGE_ALL:
+		return damage;
+	case DAMAGE_SUBTRACT:
+		__sna_damage_reduce(damage);
+	case DAMAGE_ADD:
+		break;
+	}
+
+	switch (REGION_NUM_RECTS(&damage->region)) {
+	case 0:
+		pixman_region_init_rects(&damage->region, box, 1);
+		damage_union(damage, box);
+		return damage;
+	case 1:
+		_pixman_region_union_box(&damage->region, box);
+		damage_union(damage, box);
+		return damage;
+	}
+
+	if (pixman_region_contains_rectangle(&damage->region,
+					     (BoxPtr)box) == PIXMAN_REGION_IN)
+		return damage;
+
+	damage_union(damage, box);
+	return _sna_damage_create_elt(damage, box, 1);
+}
+
 inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 						  RegionPtr region)
 {
@@ -552,6 +596,9 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 		break;
 	}
 
+	if (region->data == NULL)
+		return __sna_damage_add_box(damage, &region->extents);
+
 	if (REGION_NUM_RECTS(&damage->region) <= 1) {
 		pixman_region_union(&damage->region, &damage->region, region);
 		damage_union(damage, &region->extents);
@@ -594,50 +641,6 @@ fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage,
 }
 #endif
 
-static void _pixman_region_union_box(RegionRec *region, const BoxRec *box)
-{
-	RegionRec u = { *box, NULL };
-	pixman_region_union(region, region, &u);
-}
-
-static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
-					       const BoxRec *box)
-{
-	if (box->y2 <= box->y1 || box->x2 <= box->x1)
-		return damage;
-
-	if (!damage) {
-		damage = _sna_damage_create();
-		if (damage == NULL)
-			return NULL;
-	} else switch (damage->mode) {
-	case DAMAGE_ALL:
-		return damage;
-	case DAMAGE_SUBTRACT:
-		__sna_damage_reduce(damage);
-	case DAMAGE_ADD:
-		break;
-	}
-
-	switch (REGION_NUM_RECTS(&damage->region)) {
-	case 0:
-		pixman_region_init_rects(&damage->region, box, 1);
-		damage_union(damage, box);
-		return damage;
-	case 1:
-		_pixman_region_union_box(&damage->region, box);
-		damage_union(damage, box);
-		return damage;
-	}
-
-	if (pixman_region_contains_rectangle(&damage->region,
-					     (BoxPtr)box) == PIXMAN_REGION_IN)
-		return damage;
-
-	damage_union(damage, box);
-	return _sna_damage_create_elt(damage, box, 1);
-}
-
 inline static struct sna_damage *
 __sna_damage_add_boxes(struct sna_damage *damage,
 		       const BoxRec *box, int n,
commit 96529e345d0718d253e4abfa0a02cd1f9f7c0406
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 18 21:05:40 2012 +0000

    sna: Make sure we create a mappable GPU bo when streaming writes
    
    If we decide to do the CPU fallback inplace on the GPU bo through a WC
    mapping (because it is a large write-only operation), make sure that
    the new GPU bo we create is not active and so will not^W^W is less likely
    to cause a stall when mapped.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 98fef3e..da14d26 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -732,6 +732,27 @@ static inline bool pixmap_inplace(struct sna *sna,
 static bool
 sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box);
 
+static bool
+sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
+{
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv = sna_pixmap(pixmap);;
+
+	if (wedged(sna))
+		return false;
+
+	assert(priv->gpu_bo == NULL);
+	priv->gpu_bo =
+		kgem_create_2d(&sna->kgem,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height,
+			       pixmap->drawable.bitsPerPixel,
+			       sna_pixmap_choose_tiling(pixmap),
+			       CREATE_GTT_MAP | CREATE_INACTIVE);
+
+	return priv->gpu_bo != NULL;
+}
+
 bool
 _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 {
@@ -756,24 +777,33 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		assert(flags == MOVE_WRITE);
 		sna_damage_destroy(&priv->gpu_damage);
 
-		if (priv->stride && priv->gpu_bo &&
-		    pixmap_inplace(sna, pixmap, priv)) {
-			if (kgem_bo_is_busy(priv->gpu_bo) &&
-			    priv->gpu_bo->exec == NULL)
-				kgem_retire(&sna->kgem);
+		if (priv->gpu && pixmap_inplace(sna, pixmap, priv)) {
+			DBG(("%s: write inplace\n", __FUNCTION__));
+			if (priv->gpu_bo) {
+				if (kgem_bo_is_busy(priv->gpu_bo) &&
+				    priv->gpu_bo->exec == NULL)
+					kgem_retire(&sna->kgem);
 
-			if (kgem_bo_is_busy(priv->gpu_bo)) {
-				sna_pixmap_free_gpu(sna, priv);
-				if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE))
-					goto skip_inplace_map;
-			}
+				if (kgem_bo_is_busy(priv->gpu_bo)) {
+					if (priv->pinned)
+						goto skip_inplace_map;
 
-			pixmap->devPrivate.ptr =
-				kgem_bo_map(&sna->kgem, priv->gpu_bo);
-			if (pixmap->devPrivate.ptr == NULL)
+					DBG(("%s: discard busy GPU bo\n", __FUNCTION__));
+					sna_pixmap_free_gpu(sna, priv);
+				}
+			}
+			if (priv->gpu_bo == NULL &&
+			    !sna_pixmap_create_mappable_gpu(pixmap))
 				goto skip_inplace_map;
 
-			priv->mapped = true;
+			if (!priv->mapped) {
+				pixmap->devPrivate.ptr =
+					kgem_bo_map(&sna->kgem, priv->gpu_bo);
+				if (pixmap->devPrivate.ptr == NULL)
+					goto skip_inplace_map;
+
+				priv->mapped = true;
+			}
 			pixmap->devKind = priv->gpu_bo->pitch;
 
 			sna_damage_all(&priv->gpu_damage,
@@ -982,27 +1012,6 @@ static inline bool region_inplace(struct sna *sna,
 		>= sna->kgem.half_cpu_cache_pages;
 }
 
-static bool
-sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
-{
-	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);;
-
-	if (wedged(sna))
-		return false;
-
-	assert(priv->gpu_bo == NULL);
-	priv->gpu_bo =
-		kgem_create_2d(&sna->kgem,
-			       pixmap->drawable.width,
-			       pixmap->drawable.height,
-			       pixmap->drawable.bitsPerPixel,
-			       sna_pixmap_choose_tiling(pixmap),
-			       CREATE_GTT_MAP | CREATE_INACTIVE);
-
-	return priv->gpu_bo != NULL;
-}
-
 bool
 sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				RegionPtr region,
commit efce896e1dbab6177a773a7cf1c8eaf2dab0cdee
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 18 20:53:55 2012 +0000

    sna: Check number of boxes to migrate during move-to-cpu
    
    When reducing the damage we may find that it is actually empty and so
    sna_damage_get_boxes() returns 0, be prepared.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0904f0f..98fef3e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1235,21 +1235,24 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			if (region_subsumes_damage(r, priv->gpu_damage)) {
 				BoxPtr box;
 				int n;
-				Bool ok;
 
-				ok = FALSE;
 				n = sna_damage_get_boxes(priv->gpu_damage,
 							 &box);
-				if (priv->cpu_bo && sna->kgem.gen >= 30)
-					ok = sna->render.copy_boxes(sna, GXcopy,
-								    pixmap, priv->gpu_bo, 0, 0,
-								    pixmap, priv->cpu_bo, 0, 0,
-								    box, n);
-				if (!ok)
-					sna_read_boxes(sna,
-						       priv->gpu_bo, 0, 0,
-						       pixmap, 0, 0,
-						       box, n);
+				if (n) {
+					Bool ok = FALSE;
+
+					if (priv->cpu_bo && sna->kgem.gen >= 30)
+						ok = sna->render.copy_boxes(sna, GXcopy,
+									    pixmap, priv->gpu_bo, 0, 0,
+									    pixmap, priv->cpu_bo, 0, 0,
+									    box, n);
+
+					if (!ok)
+						sna_read_boxes(sna,
+							       priv->gpu_bo, 0, 0,
+							       pixmap, 0, 0,
+							       box, n);
+				}
 
 				sna_damage_destroy(&priv->gpu_damage);
 				priv->undamaged = true;
@@ -1430,33 +1433,36 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
 
 	region_set(&r, box);
 	if (MIGRATE_ALL || region_subsumes_damage(&r, priv->cpu_damage)) {
-		Bool ok;
 		int n;
 
 		n = sna_damage_get_boxes(priv->cpu_damage, &box);
-		ok = FALSE;
-		if (priv->cpu_bo)
-			ok = sna->render.copy_boxes(sna, GXcopy,
-						    pixmap, priv->cpu_bo, 0, 0,
-						    pixmap, priv->gpu_bo, 0, 0,
-						    box, n);
-		if (!ok) {
-			if (n == 1 && !priv->pinned &&
-			    box->x1 <= 0 && box->y1 <= 0 &&
-			    box->x2 >= pixmap->drawable.width &&
-			    box->y2 >= pixmap->drawable.height) {
-				priv->gpu_bo =
-					sna_replace(sna, pixmap,
-						    priv->gpu_bo,
-						    pixmap->devPrivate.ptr,
-						    pixmap->devKind);
-			} else {
-				sna_write_boxes(sna, pixmap,
-						priv->gpu_bo, 0, 0,
-						pixmap->devPrivate.ptr,
-						pixmap->devKind,
-						0, 0,
-						box, n);
+		if (n) {
+			Bool ok;
+
+			ok = FALSE;
+			if (priv->cpu_bo)
+				ok = sna->render.copy_boxes(sna, GXcopy,
+							    pixmap, priv->cpu_bo, 0, 0,
+							    pixmap, priv->gpu_bo, 0, 0,
+							    box, n);
+			if (!ok) {
+				if (n == 1 && !priv->pinned &&
+				    box->x1 <= 0 && box->y1 <= 0 &&
+				    box->x2 >= pixmap->drawable.width &&
+				    box->y2 >= pixmap->drawable.height) {
+					priv->gpu_bo =
+						sna_replace(sna, pixmap,
+							    priv->gpu_bo,
+							    pixmap->devPrivate.ptr,
+							    pixmap->devKind);
+				} else {
+					sna_write_boxes(sna, pixmap,
+							priv->gpu_bo, 0, 0,
+							pixmap->devPrivate.ptr,
+							pixmap->devKind,
+							0, 0,
+							box, n);
+				}
 			}
 		}
 
commit 334f3f70a8b4372cb089773ecded8339d2c7d984
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 18 20:09:26 2012 +0000

    sna/gen3: Set the batch mode for emitting video state
    
    The lack of kgem_set_mode() here is causing some recently added
    assertions to fail.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index ffcfff6..a833526 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3591,29 +3591,12 @@ gen3_emit_video_state(struct sna *sna,
 static void
 gen3_video_get_batch(struct sna *sna)
 {
-	if (!kgem_check_batch(&sna->kgem, 120)) {
-		DBG(("%s: flushing batch: nbatch %d < %d\n",
-		     __FUNCTION__,
-		     batch_space(sna), 120));
-		kgem_submit(&sna->kgem);
-		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	}
-
-	if (sna->kgem.nreloc + 4 > KGEM_RELOC_SIZE(&sna->kgem)) {
-		DBG(("%s: flushing batch: reloc %d >= %d\n",
-		     __FUNCTION__,
-		     sna->kgem.nreloc + 4,
-		     (int)KGEM_RELOC_SIZE(&sna->kgem)));
-		kgem_submit(&sna->kgem);
-		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	}
+	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
-	if (sna->kgem.nexec + 2 > KGEM_EXEC_SIZE(&sna->kgem)) {
-		DBG(("%s: flushing batch: exec %d >= %d\n",
-		     __FUNCTION__,
-		     sna->kgem.nexec + 2,
-		     (int)KGEM_EXEC_SIZE(&sna->kgem)));
-		kgem_submit(&sna->kgem);
+	if (!kgem_check_batch(&sna->kgem, 120) ||
+	    !kgem_check_reloc(&sna->kgem, 4) ||
+	    !kgem_check_exec(&sna->kgem, 2)) {
+		_kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 


More information about the xorg-commit mailing list