xf86-video-intel: 6 commits - src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_display.c src/sna/sna_render.c test/test.h

Chris Wilson ickle at kemper.freedesktop.org
Fri May 30 15:54:20 PDT 2014


 src/sna/kgem.c        |  142 ++++++++++++++++++++-----------------
 src/sna/kgem.h        |    3 
 src/sna/sna_accel.c   |  189 ++++++++++++++++++++++++++++++--------------------
 src/sna/sna_display.c |    3 
 src/sna/sna_render.c  |    1 
 test/test.h           |   14 +--
 6 files changed, 202 insertions(+), 150 deletions(-)

New commits:
commit ffbe0aa1851c35cc2403633ca493e9fc6a471fd4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 30 18:41:39 2014 +0100

    sna: Reuse the first scanout bo
    
    The path of last resort is meant to reuse the first scanout bo if they
    are all busy (since it will be the oldest). It chased a dangling pointer
    instead.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
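
The bug pattern is worth spelling out: the search loop remembers its first
(oldest) scanout candidate, and the fallback has to act on that remembered
pointer rather than on the loop cursor, which no longer points at a valid
entry once the list has been walked. A minimal standalone sketch of the
corrected pattern, using illustrative stand-in types rather than the
driver's own:

#include <stddef.h>

struct buf { int busy; struct buf *next; };

/* Pick an idle buffer; if every buffer is busy, fall back to the first
 * (oldest) one seen.  The fallback returns 'first', never the loop
 * cursor, which is no longer valid once the list has been exhausted. */
static struct buf *pick_buffer(struct buf *head)
{
        struct buf *first = NULL;
        struct buf *b;

        for (b = head; b != NULL; b = b->next) {
                if (!b->busy)
                        return b;       /* best case: an idle buffer */
                if (first == NULL)
                        first = b;      /* remember the path of last resort */
        }

        return first;                   /* NULL only if the list was empty */
}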

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 44c7622..167169b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4343,34 +4343,34 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				DBG(("%s: recreate fb %dx%d@%d/%d\n",
 				     __FUNCTION__, width, height, scrn->depth, scrn->bitsPerPixel));
 
-				if (bo->tiling != tiling ||
-				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-					if (gem_set_tiling(kgem->fd, bo->handle,
+				if (first->tiling != tiling ||
+				    (tiling != I915_TILING_NONE && first->pitch != pitch)) {
+					if (gem_set_tiling(kgem->fd, first->handle,
 							   tiling, pitch)) {
-						bo->tiling = tiling;
-						bo->pitch = pitch;
+						first->tiling = tiling;
+						first->pitch = pitch;
 					}
 				}
 
-				if (bo->tiling == tiling && bo->pitch == pitch) {
+				if (first->tiling == tiling && first->pitch == pitch) {
 					struct drm_mode_fb_cmd arg;
 
 					VG_CLEAR(arg);
 					arg.width = width;
 					arg.height = height;
-					arg.pitch = bo->pitch;
+					arg.pitch = first->pitch;
 					arg.bpp = scrn->bitsPerPixel;
 					arg.depth = scrn->depth;
-					arg.handle = bo->handle;
+					arg.handle = first->handle;
 
-					kgem_bo_rmfb(kgem, bo);
+					kgem_bo_rmfb(kgem, first);
 					if (do_ioctl(kgem->fd, DRM_IOCTL_MODE_ADDFB, &arg)) {
-						kgem_bo_free(kgem, bo);
+						kgem_bo_free(kgem, first);
 					} else {
 						DBG(("%s: attached fb=%d to handle=%d\n",
 						     __FUNCTION__, arg.fb_id, arg.handle));
-						bo->delta = arg.fb_id;
-						return bo;
+						first->delta = arg.fb_id;
+						return first;
 					}
 				}
 			}
commit 40fe1f2c09a98ac75b05db3663d29ee1a64ed280
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 30 18:30:47 2014 +0100

    sna: Do not allow imported buffers to be cached
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
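
The rule consolidated here is that buffers imported via PRIME (or flink) are
never recycled through the driver's caches, because their storage is owned
elsewhere; everything else still falls through to the usual snoop, scanout
or inactive buckets. A standalone sketch of that decision order, with
illustrative stand-in types and helpers:

#include <stdbool.h>
#include <stdlib.h>

struct bo { bool prime, snoop, scanout; };

/* Illustrative stand-ins for the real cache lists. */
static void move_to_snoop(struct bo *bo)    { (void)bo; }
static void move_to_scanout(struct bo *bo)  { (void)bo; }
static void move_to_inactive(struct bo *bo) { (void)bo; }

/* Retire an idle, unreferenced buffer: imported buffers are freed
 * immediately and never cached; only the inactive case counts as
 * "retired" for the caller's bookkeeping. */
static bool retire_to_cache(struct bo *bo)
{
        if (bo->prime) {
                free(bo);               /* storage is not ours to recycle */
                return false;
        }
        if (bo->snoop) {
                move_to_snoop(bo);
                return false;
        }
        if (bo->scanout) {
                move_to_scanout(bo);
                return false;
        }
        move_to_inactive(bo);           /* eligible for later reuse */
        return true;
}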

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4b7bc64..44c7622 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1758,6 +1758,17 @@ static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
+static void kgem_bo_rmfb(struct kgem *kgem, struct kgem_bo *bo)
+{
+	if (bo->scanout && bo->delta) {
+		DBG(("%s: releasing fb=%d for handle=%d\n",
+		     __FUNCTION__, bo->delta, bo->handle));
+		/* XXX will leak if we are not DRM_MASTER. *shrug* */
+		do_ioctl(kgem->fd, DRM_IOCTL_MODE_RMFB, &bo->delta);
+		bo->delta = 0;
+	}
+}
+
 static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d, size=%d\n", __FUNCTION__, bo->handle, bytes(bo)));
@@ -1772,6 +1783,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 #endif
 
 	kgem_bo_binding_free(kgem, bo);
+	kgem_bo_rmfb(kgem, bo);
 
 	if (IS_USER_MAP(bo->map__cpu)) {
 		assert(bo->rq == NULL);
@@ -1960,7 +1972,6 @@ static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(bo->refcnt == 0);
 	assert(bo->scanout);
-	assert(bo->delta);
 	assert(!bo->flush);
 	assert(!bo->snoop);
 	assert(!bo->io);
@@ -1988,6 +1999,7 @@ static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
 static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(bo->reusable);
+	assert(!bo->scanout);
 	assert(!bo->flush);
 	assert(!bo->needs_flush);
 	assert(bo->refcnt == 0);
@@ -2008,6 +2020,30 @@ static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
 	kgem->need_expire = true;
 }
 
+static bool kgem_bo_move_to_cache(struct kgem *kgem, struct kgem_bo *bo)
+{
+	bool retired = false;
+
+	DBG(("%s: release handle=%d\n", __FUNCTION__, bo->handle));
+
+	if (bo->prime) {
+		DBG(("%s: discarding imported prime handle=%d\n",
+		     __FUNCTION__, bo->handle));
+		kgem_bo_free(kgem, bo);
+	} else if (bo->snoop) {
+		kgem_bo_move_to_snoop(kgem, bo);
+	} else if (bo->scanout) {
+		kgem_bo_move_to_scanout(kgem, bo);
+	} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
+		   kgem_bo_set_purgeable(kgem, bo)) {
+		kgem_bo_move_to_inactive(kgem, bo);
+		retired = true;
+	} else
+		kgem_bo_free(kgem, bo);
+
+	return retired;
+}
+
 static struct kgem_bo *
 search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 {
@@ -2103,6 +2139,9 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (DBG_NO_CACHE)
 		goto destroy;
 
+	if (bo->prime)
+		goto destroy;
+
 	if (bo->snoop && !bo->flush) {
 		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
 		assert(bo->reusable);
@@ -2116,7 +2155,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (!IS_USER_MAP(bo->map__cpu))
 		bo->flush = false;
 
-	if (bo->scanout && bo->delta) {
+	if (bo->scanout) {
 		kgem_bo_move_to_scanout(kgem, bo);
 		return;
 	}
@@ -2227,16 +2266,7 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 		if (bo->refcnt)
 			continue;
 
-		if (bo->snoop) {
-			kgem_bo_move_to_snoop(kgem, bo);
-		} else if (bo->scanout && bo->delta) {
-			kgem_bo_move_to_scanout(kgem, bo);
-		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
-			   kgem_bo_set_purgeable(kgem, bo)) {
-			kgem_bo_move_to_inactive(kgem, bo);
-			retired = true;
-		} else
-			kgem_bo_free(kgem, bo);
+		retired |= kgem_bo_move_to_cache(kgem, bo);
 	}
 #if HAS_DEBUG_FULL
 	{
@@ -2289,19 +2319,7 @@ static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
 		if (bo->refcnt)
 			continue;
 
-		if (bo->snoop) {
-			kgem_bo_move_to_snoop(kgem, bo);
-		} else if (bo->scanout && bo->delta) {
-			kgem_bo_move_to_scanout(kgem, bo);
-		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
-			   kgem_bo_set_purgeable(kgem, bo)) {
-			kgem_bo_move_to_inactive(kgem, bo);
-			retired = true;
-		} else {
-			DBG(("%s: closing %d\n",
-			     __FUNCTION__, bo->handle));
-			kgem_bo_free(kgem, bo);
-		}
+		retired |= kgem_bo_move_to_cache(kgem, bo);
 	}
 
 	assert(rq->bo->rq == NULL);
@@ -2827,18 +2845,7 @@ void kgem_reset(struct kgem *kgem)
 			if (bo->refcnt || bo->rq)
 				continue;
 
-			if (bo->snoop) {
-				kgem_bo_move_to_snoop(kgem, bo);
-			} else if (bo->scanout && bo->delta) {
-				kgem_bo_move_to_scanout(kgem, bo);
-			} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
-				   kgem_bo_set_purgeable(kgem, bo)) {
-				kgem_bo_move_to_inactive(kgem, bo);
-			} else {
-				DBG(("%s: closing %d\n",
-				     __FUNCTION__, bo->handle));
-				kgem_bo_free(kgem, bo);
-			}
+			kgem_bo_move_to_cache(kgem, bo);
 		}
 
 		if (rq != &kgem->static_request) {
@@ -3273,8 +3280,8 @@ void kgem_clean_scanout_cache(struct kgem *kgem)
 		bo = list_first_entry(&kgem->scanout, struct kgem_bo, list);
 
 		assert(bo->scanout);
-		assert(bo->delta);
 		assert(!bo->refcnt);
+		assert(!bo->prime);
 		assert(bo->proxy == NULL);
 
 		if (bo->exec || __kgem_busy(kgem, bo->handle))
@@ -3284,9 +3291,7 @@ void kgem_clean_scanout_cache(struct kgem *kgem)
 		     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
 		list_del(&bo->list);
 
-		/* XXX will leak if we are not DRM_MASTER. *shrug* */
-		do_ioctl(kgem->fd, DRM_IOCTL_MODE_RMFB, &bo->delta);
-		bo->delta = 0;
+		kgem_bo_rmfb(kgem, bo);
 		bo->scanout = false;
 
 		if (!bo->purged) {
@@ -3800,7 +3805,7 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->tiling = tiling.tiling_mode;
 	bo->reusable = false;
-	bo->flush = true;
+	bo->prime = true;
 	bo->purged = true; /* no coherency guarantees */
 
 	debug_alloc__bo(kgem, bo);
@@ -3850,6 +3855,7 @@ struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size
 	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->tiling = tiling.tiling_mode;
 	bo->reusable = false;
+	bo->prime = true;
 	bo->domain = DOMAIN_NONE;
 
 	/* is this a special bo (e.g. scanout or CPU coherent)? */
@@ -4225,10 +4231,8 @@ static void __kgem_bo_make_scanout(struct kgem *kgem,
 	arg.depth = scrn->depth;
 	arg.handle = bo->handle;
 
-	if (gem_set_caching(kgem->fd, bo->handle, DISPLAY) &&
-	    do_ioctl(kgem->fd, DRM_IOCTL_MODE_ADDFB, &arg) == 0) {
+	if (gem_set_caching(kgem->fd, bo->handle, DISPLAY)) {
 		bo->scanout = true;
-		bo->delta = arg.fb_id;
 
 		/* Pre-emptively move the object into the mappable
 		 * portion to avoid rebinding later when busy.
@@ -4240,6 +4244,12 @@ static void __kgem_bo_make_scanout(struct kgem *kgem,
 			bo->domain = DOMAIN_GTT;
 		}
 	}
+
+	if (do_ioctl(kgem->fd, DRM_IOCTL_MODE_ADDFB, &arg) == 0) {
+		DBG(("%s: attached fb=%d to handle=%d\n",
+		     __FUNCTION__, arg.fb_id, arg.handle));
+		bo->delta = arg.fb_id;
+	}
 }
 
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
@@ -4281,7 +4291,6 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 
 		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
 			assert(bo->scanout);
-			assert(bo->delta);
 			assert(!bo->flush);
 			assert_tiling(kgem, bo);
 
@@ -4292,7 +4301,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				/* No tiling/pitch without recreating fb */
 				continue;
 
-			if (!check_scanout_size(kgem, bo, width, height)) {
+			if (bo->delta && !check_scanout_size(kgem, bo, width, height)) {
 				if (first == NULL)
 					first = bo;
 				continue;
@@ -4354,11 +4363,12 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 					arg.depth = scrn->depth;
 					arg.handle = bo->handle;
 
-					do_ioctl(kgem->fd, DRM_IOCTL_MODE_RMFB, &bo->delta);
+					kgem_bo_rmfb(kgem, bo);
 					if (do_ioctl(kgem->fd, DRM_IOCTL_MODE_ADDFB, &arg)) {
-						bo->scanout = false;
 						kgem_bo_free(kgem, bo);
 					} else {
+						DBG(("%s: attached fb=%d to handle=%d\n",
+						     __FUNCTION__, arg.fb_id, arg.handle));
 						bo->delta = arg.fb_id;
 						return bo;
 					}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index dbc5ddb..12a99db 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -95,6 +95,7 @@ struct kgem_bo {
 	uint32_t io : 1;
 	uint32_t flush : 1;
 	uint32_t scanout : 1;
+	uint32_t prime : 1;
 	uint32_t purged : 1;
 };
 #define DOMAIN_NONE 0
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 3980650..1761be6 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -339,6 +339,9 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 	}
 	assert(arg.fb_id != 0);
 
+	DBG(("%s: attached fb=%d to handle=%d\n",
+	     __FUNCTION__, arg.fb_id, arg.handle));
+
 	bo->scanout = true;
 	return bo->delta = arg.fb_id;
 }
commit fc1f9b91ae2c761e4b126daecab13e13ae2534d3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 30 18:11:16 2014 +0100

    sna: Mark all caches for expiration
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
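
The point of the change is that every helper which parks a buffer on one of
the cache lists, not only the inactive path, now flags the periodic
expiration pass. A tiny illustrative sketch of the idea, with made-up names:

#include <stdbool.h>

struct cache { bool need_expire; };

/* Each move-to-cache helper marks the cache as needing a future
 * expiration pass, so the expire timer knows it has work to do. */
static void move_to_inactive(struct cache *c) { /* ... */ c->need_expire = true; }
static void move_to_scanout(struct cache *c)  { /* ... */ c->need_expire = true; }
static void move_to_snoop(struct cache *c)    { /* ... */ c->need_expire = true; }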

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index aaac896..4b7bc64 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1830,8 +1830,6 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	assert_tiling(kgem, bo);
 	ASSERT_IDLE(kgem, bo->handle);
 
-	kgem->need_expire = true;
-
 	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
 		if (bo->map__gtt) {
 			munmap(MAP(bo->map__gtt), bytes(bo));
@@ -1857,6 +1855,8 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 			kgem->vma[1].count++;
 		}
 	}
+
+	kgem->need_expire = true;
 }
 
 static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
@@ -1980,6 +1980,9 @@ static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
 		list_move_tail(&bo->list, &kgem->scanout);
 	else
 		list_move(&bo->list, &kgem->scanout);
+
+	kgem->need_expire = true;
+
 }
 
 static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
@@ -2002,6 +2005,7 @@ static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
 
 	DBG(("%s: moving %d to snoop cachee\n", __FUNCTION__, bo->handle));
 	list_add(&bo->list, &kgem->snoop);
+	kgem->need_expire = true;
 }
 
 static struct kgem_bo *
commit 3dac734bb0fb0ae1febfef9a9289cf830a87be1c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 30 10:31:37 2014 +0100

    test: Only compute the masked pixel value if depth!=32
    
    Minor saving for when we use a8r8g8b8.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
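
For reference, a simplified standalone sketch of just the masking and
exact-match part of the comparison (the real helper presumably goes on to
the pixel_difference() check declared alongside it): at depth 24 the
undefined top byte is masked away before comparing, while at depth 32 the
whole word is compared and the mask computation can be skipped entirely.

#include <assert.h>
#include <stdint.h>

static int pixels_match(int depth, uint32_t a, uint32_t b)
{
        if (depth != 32) {
                uint32_t mask = (1u << depth) - 1;  /* e.g. 0x00ffffff at depth 24 */
                a &= mask;
                b &= mask;
        }
        return a == b;
}

int main(void)
{
        assert(pixels_match(24, 0xff123456, 0x00123456));   /* X channel ignored */
        assert(!pixels_match(32, 0xff123456, 0x00123456));  /* alpha must match too */
        return 0;
}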

diff --git a/test/test.h b/test/test.h
index 568e000..3ee9411 100644
--- a/test/test.h
+++ b/test/test.h
@@ -52,15 +52,11 @@ int pixel_difference(uint32_t a, uint32_t b);
 
 static inline int pixel_equal(int depth, uint32_t a, uint32_t b)
 {
-	uint32_t mask;
-
-	if (depth == 32)
-		mask = 0xffffffff;
-	else
-		mask = (1 << depth) - 1;
-
-	a &= mask;
-	b &= mask;
+	if (depth != 32) {
+		uint32_t mask = (1 << depth) - 1;
+		a &= mask;
+		b &= mask;
+	}
 
 	if (a == b)
 		return 1;
commit 961139f5878572ebea268a0bbf47caf05af9093f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 30 09:45:15 2014 +0100

    sna: Use manual detiling for downloads
    
    If we can CPU mmap the GPU bo, prefer to do so for migration to the CPU
    as this saves an extra serialisation step.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
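
The resulting cascade tries the cheapest path first and only stalls or falls
back when forced: read in place through a CPU map of the GPU bo while it is
idle, otherwise blit into the CPU bo on the GPU, otherwise wait and read
through the CPU map anyway, and finally fall back to sna_read_boxes(). A
standalone sketch of that ordering, with the individual paths reduced to
illustrative stubs:

#include <stdbool.h>

/* Illustrative stubs standing in for the driver's real download paths. */
static bool download_via_cpu_map(bool only_if_idle) { (void)only_if_idle; return false; }
static bool download_via_gpu_blit(void)             { return false; }
static void download_via_gtt_read(void)             { }

static void download_boxes(void)
{
        if (download_via_cpu_map(true))     /* 1. detile in place, no stall */
                return;
        if (download_via_gpu_blit())        /* 2. copy into the CPU bo */
                return;
        if (download_via_cpu_map(false))    /* 3. detile in place, may stall */
                return;
        download_via_gtt_read();            /* 4. last resort */
}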

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9ac3f3c..869427d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1641,9 +1641,81 @@ out:
 		!kgem_bo_is_busy(priv->gpu_bo));
 }
 
-static inline bool use_cpu_bo_for_download(struct sna *sna,
-					   struct sna_pixmap *priv,
-					   int nbox, const BoxRec *box)
+static inline bool gpu_bo_download(struct sna *sna,
+				   struct sna_pixmap *priv,
+				   int n, const BoxRec *box,
+				   bool idle)
+{
+	char *src;
+
+	if (!USE_INPLACE)
+		return false;
+
+	switch (priv->gpu_bo->tiling) {
+	case I915_TILING_Y:
+		return false;
+	case I915_TILING_X:
+		if (!sna->kgem.memcpy_from_tiled_x)
+			return false;
+	default:
+		break;
+	}
+
+	if (!kgem_bo_can_map__cpu(&sna->kgem, priv->gpu_bo, FORCE_FULL_SYNC))
+		return false;
+
+	if (idle && __kgem_bo_is_busy(&sna->kgem, priv->gpu_bo))
+		return false;
+
+	src = kgem_bo_map__cpu(&sna->kgem, priv->gpu_bo);
+	if (src == NULL)
+		return false;
+
+	kgem_bo_sync__cpu_full(&sna->kgem, priv->gpu_bo, FORCE_FULL_SYNC);
+	assert(has_coherent_ptr(sna, priv, MOVE_WRITE));
+
+	if (sigtrap_get())
+		return false;
+
+	if (priv->gpu_bo->tiling) {
+		DBG(("%s: download through a tiled CPU map\n", __FUNCTION__));
+		do {
+			DBG(("%s: box (%d, %d), (%d, %d)\n",
+			     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
+			memcpy_from_tiled_x(&sna->kgem, src,
+					    priv->pixmap->devPrivate.ptr,
+					    priv->pixmap->drawable.bitsPerPixel,
+					    priv->gpu_bo->pitch,
+					    priv->pixmap->devKind,
+					    box->x1, box->y1,
+					    box->x1, box->y1,
+					    box->x2 - box->x1, box->y2 - box->y1);
+			box++;
+		} while (--n);
+	} else {
+		DBG(("%s: download through a linear CPU map\n", __FUNCTION__));
+		do {
+			DBG(("%s: box (%d, %d), (%d, %d)\n",
+			     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
+			memcpy_blt(src,
+				   priv->pixmap->devPrivate.ptr,
+				   priv->pixmap->drawable.bitsPerPixel,
+				   priv->gpu_bo->pitch,
+				   priv->pixmap->devKind,
+				   box->x1, box->y1,
+				   box->x1, box->y1,
+				   box->x2 - box->x1, box->y2 - box->y1);
+			box++;
+		} while (--n);
+	}
+
+	sigtrap_put();
+	return true;
+}
+
+static inline bool cpu_bo_download(struct sna *sna,
+				   struct sna_pixmap *priv,
+				   int n, const BoxRec *box)
 {
 	if (DBG_NO_CPU_DOWNLOAD)
 		return false;
@@ -1661,16 +1733,38 @@ static inline bool use_cpu_bo_for_download(struct sna *sna,
 	}
 
 	/* Is it worth detiling? */
-	assert(box[0].y1 < box[nbox-1].y2);
+	assert(box[0].y1 < box[n-1].y2);
 	if (kgem_bo_can_map(&sna->kgem, priv->gpu_bo) &&
-	    (box[nbox-1].y2 - box[0].y1 - 1) * priv->gpu_bo->pitch < 4096) {
+	    (box[n-1].y2 - box[0].y1 - 1) * priv->gpu_bo->pitch < 4096) {
 		DBG(("%s: no, tiny transfer (height=%d, pitch=%d) expect to read inplace\n",
-		     __FUNCTION__, box[nbox-1].y2-box[0].y1, priv->gpu_bo->pitch));
+		     __FUNCTION__, box[n-1].y2-box[0].y1, priv->gpu_bo->pitch));
 		return false;
 	}
 
-	DBG(("%s: yes, default action\n", __FUNCTION__));
-	return true;
+	DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
+	return sna->render.copy_boxes(sna, GXcopy,
+				      priv->pixmap, priv->gpu_bo, 0, 0,
+				      priv->pixmap, priv->cpu_bo, 0, 0,
+				      box, n, COPY_LAST);
+}
+
+static void download_boxes(struct sna *sna,
+			   struct sna_pixmap *priv,
+			   int n, const BoxRec *box)
+{
+	bool ok;
+
+	DBG(("%s: nbox=%d\n", __FUNCTION__, n));
+
+	ok = gpu_bo_download(sna, priv, n, box, true);
+	if (!ok)
+		ok = cpu_bo_download(sna, priv, n, box);
+	if (!ok)
+		ok = gpu_bo_download(sna, priv, n, box, false);
+	if (!ok) {
+		assert(has_coherent_ptr(sna, priv, MOVE_WRITE));
+		sna_read_boxes(sna, priv->pixmap, priv->gpu_bo, box, n);
+	}
 }
 
 static inline bool use_cpu_bo_for_upload(struct sna *sna,
@@ -2212,26 +2306,12 @@ skip_inplace_map:
 
 			n = sna_damage_get_boxes(priv->gpu_damage, &box);
 			if (n) {
-				bool ok = false;
-
 				if (priv->move_to_gpu && !priv->move_to_gpu(sna, priv, MOVE_READ)) {
 					DBG(("%s: move-to-gpu override failed\n", __FUNCTION__));
 					return false;
 				}
 
-
-				if (use_cpu_bo_for_download(sna, priv, n, box)) {
-					DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
-					ok = sna->render.copy_boxes(sna, GXcopy,
-								    pixmap, priv->gpu_bo, 0, 0,
-								    pixmap, priv->cpu_bo, 0, 0,
-								    box, n, COPY_LAST);
-				}
-				if (!ok) {
-					assert(has_coherent_ptr(sna, sna_pixmap(pixmap), MOVE_READ));
-					sna_read_boxes(sna, pixmap, priv->gpu_bo,
-						       box, n);
-				}
+				download_boxes(sna, priv, n, box);
 			}
 
 			__sna_damage_destroy(DAMAGE_PTR(priv->gpu_damage));
@@ -2837,33 +2917,14 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				DBG(("%s: region wholly contains damage\n",
 				     __FUNCTION__));
 
-				n = sna_damage_get_boxes(priv->gpu_damage,
-							 &box);
-				if (n) {
-					bool ok = false;
-
-					if (use_cpu_bo_for_download(sna, priv, n, box)) {
-						DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
-						ok = sna->render.copy_boxes(sna, GXcopy,
-									    pixmap, priv->gpu_bo, 0, 0,
-									    pixmap, priv->cpu_bo, 0, 0,
-									    box, n, COPY_LAST);
-					}
-
-					if (!ok) {
-						assert(has_coherent_ptr(sna, sna_pixmap(pixmap), MOVE_READ));
-						sna_read_boxes(sna, pixmap, priv->gpu_bo,
-							       box, n);
-					}
-				}
+				n = sna_damage_get_boxes(priv->gpu_damage, &box);
+				if (n)
+					download_boxes(sna, priv, n, box);
 
 				sna_damage_destroy(&priv->gpu_damage);
 			} else if (DAMAGE_IS_ALL(priv->gpu_damage) ||
 				   sna_damage_contains_box__no_reduce(priv->gpu_damage,
 								      &r->extents)) {
-				BoxPtr box = RegionRects(r);
-				int n = RegionNumRects(r);
-				bool ok = false;
 
 				DBG(("%s: region wholly inside damage\n",
 				     __FUNCTION__));
@@ -2871,45 +2932,21 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				assert(sna_damage_contains_box(priv->gpu_damage, &r->extents) == PIXMAN_REGION_IN);
 				assert(sna_damage_contains_box(priv->cpu_damage, &r->extents) == PIXMAN_REGION_OUT);
 
-				if (use_cpu_bo_for_download(sna, priv, n, box)) {
-					DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
-					ok = sna->render.copy_boxes(sna, GXcopy,
-								    pixmap, priv->gpu_bo, 0, 0,
-								    pixmap, priv->cpu_bo, 0, 0,
-								    box, n, COPY_LAST);
-				}
-				if (!ok) {
-					assert(has_coherent_ptr(sna, sna_pixmap(pixmap), MOVE_READ));
-					sna_read_boxes(sna, pixmap, priv->gpu_bo,
-						       box, n);
-				}
-
+				download_boxes(sna, priv,
+					       RegionNumRects(r),
+					       RegionRects(r));
 				sna_damage_subtract(&priv->gpu_damage, r);
 			} else {
 				RegionRec need;
 
 				pixman_region_init(&need);
 				if (sna_damage_intersect(priv->gpu_damage, r, &need)) {
-					BoxPtr box = RegionRects(&need);
-					int n = RegionNumRects(&need);
-					bool ok = false;
-
 					DBG(("%s: region intersects damage\n",
 					     __FUNCTION__));
 
-					if (use_cpu_bo_for_download(sna, priv, n, box)) {
-						DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
-						ok = sna->render.copy_boxes(sna, GXcopy,
-									    pixmap, priv->gpu_bo, 0, 0,
-									    pixmap, priv->cpu_bo, 0, 0,
-									    box, n, COPY_LAST);
-					}
-					if (!ok) {
-						assert(has_coherent_ptr(sna, sna_pixmap(pixmap), MOVE_READ));
-						sna_read_boxes(sna, pixmap, priv->gpu_bo,
-							       box, n);
-					}
-
+					download_boxes(sna, priv,
+						       RegionNumRects(&need),
+						       RegionRects(&need));
 					sna_damage_subtract(&priv->gpu_damage, r);
 					RegionUninit(&need);
 				}
@@ -16268,6 +16305,8 @@ sna_get_image__fast(PixmapPtr pixmap,
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 
+	DBG(("%s: attached?=%d, has gpu damage?=%d\n",
+	     __FUNCTION__, priv != NULL,  priv && priv->gpu_damage));
 	if (priv == NULL || priv->gpu_damage == NULL)
 		return false;
 
commit 93d3df493d359f76ea710441d891a0333e755fb8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 30 22:49:05 2014 +0100

    sna: Unexport kgem_get_unique_id()
    
    The unique id should always be set during bo creation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
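
The change is plain C encapsulation: the id generator becomes file-local to
kgem.c and every bo creation path stamps the id itself, so the symbol no
longer needs to be exported. A minimal illustrative sketch of the pattern,
with made-up names:

#include <stdint.h>
#include <stdlib.h>

struct obj { uint32_t unique_id; };

/* File-local id generator; not declared in any header. */
static uint32_t next_unique_id(void)
{
        static uint32_t counter;
        if (++counter == 0)     /* reserve 0 to mean "unset" */
                ++counter;
        return counter;
}

/* Every creation path assigns the id at the point of creation. */
static struct obj *obj_create(void)
{
        struct obj *o = calloc(1, sizeof(*o));
        if (o != NULL)
                o->unique_id = next_unique_id();
        return o;
}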

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5fa33ce..aaac896 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1503,7 +1503,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
 }
 
 /* XXX hopefully a good approximation */
-uint32_t kgem_get_unique_id(struct kgem *kgem)
+static uint32_t kgem_get_unique_id(struct kgem *kgem)
 {
 	uint32_t id;
 	id = ++kgem->unique_id;
@@ -3793,6 +3793,7 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 		return NULL;
 	}
 
+	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->tiling = tiling.tiling_mode;
 	bo->reusable = false;
 	bo->flush = true;
@@ -5764,6 +5765,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 		return NULL;
 	}
 
+	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->snoop = !kgem->has_llc;
 	debug_alloc__bo(kgem, bo);
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 3524e7b..dbc5ddb 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -267,8 +267,6 @@ unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth)
 #define KGEM_CAN_CREATE_LARGE	0x4
 #define KGEM_CAN_CREATE_GTT	0x8
 
-uint32_t kgem_get_unique_id(struct kgem *kgem);
-
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
 		struct kgem_bo *src,
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index cbec168..bf17069 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -2034,7 +2034,6 @@ sna_render_composite_redirect(struct sna *sna,
 			}
 
 			assert(op->dst.bo != t->real_bo);
-			op->dst.bo->unique_id = kgem_get_unique_id(&sna->kgem);
 			op->dst.bo->pitch = t->real_bo->pitch;
 
 			op->dst.x -= box.x1;

