xf86-video-intel: 4 commits - src/sna/gen4_render.c src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_composite.c src/sna/sna_glyphs.c src/sna/sna_io.c

Chris Wilson ickle at kemper.freedesktop.org
Fri Jun 15 07:44:40 PDT 2012


 src/sna/gen4_render.c   |    3 +++
 src/sna/kgem.c          |    2 --
 src/sna/sna_accel.c     |   39 ++++++++++++++++++++++++++++++++++++---
 src/sna/sna_composite.c |    3 +++
 src/sna/sna_glyphs.c    |   22 ++++++++++++----------
 src/sna/sna_io.c        |   10 +++++-----
 6 files changed, 59 insertions(+), 20 deletions(-)

New commits:
commit 515c8b19d638d4a811b159ef0dc7cf4059e30217
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 15:41:14 2012 +0100

    sna: Prefer to operate inplace if already mapped or the GPU is wholly dirty
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b0021f9..2eee01d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1283,16 +1283,26 @@ static inline bool region_inplace(struct sna *sna,
 		return false;
 
 	if (priv->flush) {
-		DBG(("%s: exported via dri, will flush\n", __FUNCTION__));
+		DBG(("%s: yes, exported via dri, will flush\n", __FUNCTION__));
 		return true;
 	}
 
 	if (priv->cpu_damage &&
 	    region_overlaps_damage(region, priv->cpu_damage)) {
-		DBG(("%s: uncovered CPU damage pending\n", __FUNCTION__));
+		DBG(("%s: no, uncovered CPU damage pending\n", __FUNCTION__));
 		return false;
 	}
 
+	if (priv->mapped) {
+		DBG(("%s: yes, already mapped, continuiung\n", __FUNCTION__));
+		return true;
+	}
+
+	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
+		DBG(("%s: yes, already wholly damaged on the GPU\n", __FUNCTION__));
+		return true;
+	}
+
 	DBG(("%s: (%dx%d), inplace? %d\n",
 	     __FUNCTION__,
 	     region->extents.x2 - region->extents.x1,
commit d1713941e9db3e7a6d83466be1b253978fb4bf01
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 15:29:59 2012 +0100

    sna: Tweaks for DBG missing glyphs through fallbacks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index ad6f6c5..6379a18 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1476,6 +1476,9 @@ gen4_emit_state(struct sna *sna,
 	gen4_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
+		DBG(("%s: flushing dirty (%d, %d)\n", __FUNCTION__,
+		     kgem_bo_is_dirty(op->src.bo),
+		     kgem_bo_is_dirty(op->mask.bo)));
 		OUT_BATCH(MI_FLUSH);
 		kgem_clear_dirty(&sna->kgem);
 		kgem_bo_mark_dirty(op->dst.bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bc85643..b0021f9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1563,6 +1563,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			    region->extents.x2 - region->extents.x1 == 1 &&
 			    region->extents.y2 - region->extents.y1 == 1) {
 				/*  Often associated with synchronisation, KISS */
+				DBG(("%s: single pixel read\n", __FUNCTION__));
 				sna_read_boxes(sna,
 					       priv->gpu_bo, 0, 0,
 					       pixmap, 0, 0,
@@ -1571,8 +1572,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 		} else {
 			if (sna_damage_contains_box__no_reduce(priv->cpu_damage,
-							       &region->extents))
+							       &region->extents)) {
+				DBG(("%s: region already in CPU damage\n",
+				     __FUNCTION__));
 				goto done;
+			}
 		}
 
 		if (sna_damage_contains_box(priv->gpu_damage,
@@ -1631,6 +1635,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				BoxPtr box;
 				int n;
 
+				DBG(("%s: region wholly contains damage\n",
+				     __FUNCTION__));
+
 				n = sna_damage_get_boxes(priv->gpu_damage,
 							 &box);
 				if (n) {
@@ -1658,6 +1665,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				int n = REGION_NUM_RECTS(r);
 				Bool ok = FALSE;
 
+				DBG(("%s: region wholly inside damage\n",
+				     __FUNCTION__));
+
 				if (use_cpu_bo_for_write(sna, priv))
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
@@ -1680,6 +1690,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					int n = REGION_NUM_RECTS(&need);
 					Bool ok = FALSE;
 
+					DBG(("%s: region intersects damage\n",
+					     __FUNCTION__));
+
 					if (use_cpu_bo_for_write(sna, priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
@@ -1717,6 +1730,16 @@ done:
 		}
 		if (priv->flush)
 			list_move(&priv->list, &sna->dirty_pixmaps);
+#ifdef HAVE_FULL_DEBUG
+		{
+			RegionRec need;
+
+			RegionNull(&need);
+			assert(priv->gpu_damage == NULL ||
+			       !sna_damage_intersect(priv->gpu_damage, r, &need));
+			RegionUninit(&need);
+		}
+#endif
 	}
 
 	if (dx | dy)
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 97fe70e..ab1cd39 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -544,12 +544,14 @@ fallback:
 		flags = MOVE_WRITE | MOVE_INPLACE_HINT;
 	else
 		flags = MOVE_WRITE | MOVE_READ;
+	DBG(("%s: fallback -- move dst to cpu\n", __FUNCTION__));
 	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, flags))
 		goto out;
 	if (dst->alphaMap &&
 	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, flags))
 		goto out;
 	if (src->pDrawable) {
+		DBG(("%s: fallback -- move src to cpu\n", __FUNCTION__));
 		if (!sna_drawable_move_to_cpu(src->pDrawable,
 					      MOVE_READ))
 			goto out;
@@ -560,6 +562,7 @@ fallback:
 			goto out;
 	}
 	if (mask && mask->pDrawable) {
+		DBG(("%s: fallback -- move mask to cpu\n", __FUNCTION__));
 		if (!sna_drawable_move_to_cpu(mask->pDrawable,
 					      MOVE_READ))
 			goto out;
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index b06bcda..17c42d5 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -226,20 +226,22 @@ bail:
 }
 
 static void
-glyph_cache_upload(ScreenPtr screen,
-		   struct sna_glyph_cache *cache,
-		   GlyphPtr glyph,
+glyph_cache_upload(struct sna_glyph_cache *cache,
+		   GlyphPtr glyph, PicturePtr glyph_picture,
 		   int16_t x, int16_t y)
 {
 	DBG(("%s: upload glyph %p to cache (%d, %d)x(%d, %d)\n",
-	     __FUNCTION__, glyph, x, y, glyph->info.width, glyph->info.height));
+	     __FUNCTION__,
+	     glyph, x, y,
+	     glyph_picture->pDrawable->width,
+	     glyph_picture->pDrawable->height));
 	sna_composite(PictOpSrc,
-		      GetGlyphPicture(glyph, screen), 0, cache->picture,
+		      glyph_picture, 0, cache->picture,
 		      0, 0,
 		      0, 0,
 		      x, y,
-		      glyph->info.width,
-		      glyph->info.height);
+		      glyph_picture->pDrawable->width,
+		      glyph_picture->pDrawable->height);
 }
 
 static void
@@ -392,7 +394,7 @@ glyph_cache(ScreenPtr screen,
 		pos >>= 2;
 	}
 
-	glyph_cache_upload(screen, cache, glyph,
+	glyph_cache_upload(cache, glyph, glyph_picture,
 			   priv->coordinate.x, priv->coordinate.y);
 
 	return TRUE;
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index dfa0623..3841e52 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -453,9 +453,9 @@ fallback:
 		int width  = box->x2 - box->x1;
 		int pitch = PITCH(width, cpp);
 
-		DBG(("    copy offset %lx [%08x...%08x]: (%d, %d) x (%d, %d), src pitch=%d, dst pitch=%d, bpp=%d\n",
+		DBG(("    copy offset %lx [%08x...%08x...%08x]: (%d, %d) x (%d, %d), src pitch=%d, dst pitch=%d, bpp=%d\n",
 		     (long)((char *)src - (char *)ptr),
-		     *(uint32_t*)src, *(uint32_t*)(src+pitch*height - 4),
+		     *(uint32_t*)src, *(uint32_t*)(src+pitch*height/2 + pitch/2 - 4), *(uint32_t*)(src+pitch*height - 4),
 		     box->x1 + dst_dx,
 		     box->y1 + dst_dy,
 		     width, height,
@@ -558,8 +558,8 @@ static bool upload_inplace(struct kgem *kgem,
 }
 
 bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
-		     struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		     const void *src, int stride, int16_t src_dx, int16_t src_dy,
+		     struct kgem_bo * const dst_bo, int16_t const dst_dx, int16_t const dst_dy,
+		     const void * const src, int const stride, int16_t const src_dx, int16_t const src_dy,
 		     const BoxRec *box, int nbox)
 {
 	struct kgem *kgem = &sna->kgem;
@@ -570,7 +570,7 @@ bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
 	int n, cmd, br13;
 	bool can_blt;
 
-	DBG(("%s x %d\n", __FUNCTION__, nbox));
+	DBG(("%s x %d, src stride=%d,  src dx=(%d, %d)\n", __FUNCTION__, nbox, stride, src_dx, src_dy));
 
 	if (upload_inplace(kgem, dst_bo, box, nbox, dst->drawable.bitsPerPixel)) {
 fallback:
commit 2b23605efba009fb340ec10b37d54caae159b9b1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 15:28:31 2012 +0100

    sna: Don't trim prepare for glyphs_via_mask
    
    If we pass the expected width/height without passing the per-glyph
    offset into the preparation function, we make the mistake of
    analysing the glyph cache only for the mask extents and so will miss
    glyphs that we need to upload for the operation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 63a6287..b06bcda 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -978,13 +978,13 @@ next_image:
 						ok = sna->render.composite(sna, PictOpAdd,
 									   this_atlas, NULL, mask,
 									   0, 0, 0, 0, 0, 0,
-									   width, height,
+									   0, 0,
 									   &tmp);
 					} else {
 						ok = sna->render.composite(sna, PictOpAdd,
 									   sna->render.white_picture, this_atlas, mask,
 									   0, 0, 0, 0, 0, 0,
-									   width, height,
+									   0, 0,
 									   &tmp);
 					}
 					if (!ok) {
commit 9f66b27114fcc457fa5cb2d5889e875384f89e75
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 13:47:33 2012 +0100

    sna: Remove mark-as-cpu after gem_pread
    
    The kernel no longer moves the read bo into the CPU domain, so remove
    the last vestiges of that tracking.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 89921d4..84475fe 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4147,8 +4147,6 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 			     bo->base.handle, (char *)bo->mem+offset,
 			     offset, length))
 			return;
-
-		kgem_bo_map__cpu(kgem, &bo->base);
 	}
 	kgem_bo_retire(kgem, &bo->base);
 }


More information about the xorg-commit mailing list