xf86-video-intel: 3 commits - src/sna/sna_accel.c src/sna/sna_dri.c

Chris Wilson ickle at kemper.freedesktop.org
Thu Jul 5 00:30:40 PDT 2012


 src/sna/sna_accel.c |   71 +++++++++++++++++++++++++++-------------------------
 src/sna/sna_dri.c   |    6 ++--
 2 files changed, 40 insertions(+), 37 deletions(-)

New commits:
commit 7e8060f837475c85cc061ba4a5388140cd227613
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 03:31:12 2012 +0100

    sna: Do not force GPU allocation if CPU bo is already busy
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 13bd978..794086c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2260,8 +2260,8 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
-		if (priv->cpu_damage && !box_inplace(pixmap, box)) {
-			DBG(("%s: damaged with a small operation, will not force allocation\n",
+		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
+			DBG(("%s: already using CPU bo, will not force allocation\n",
 			     __FUNCTION__));
 			goto use_cpu_bo;
 		}
@@ -2271,6 +2271,12 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
+		if (priv->cpu_damage && !box_inplace(pixmap, box)) {
+			DBG(("%s: damaged with a small operation, will not force allocation\n",
+			     __FUNCTION__));
+			goto use_cpu_bo;
+		}
+
 		flags = MOVE_WRITE | MOVE_READ;
 		if (prefer_gpu & FORCE_GPU)
 			flags |= __MOVE_FORCE;
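
The hunks above reorder the heuristics in sna_drawable_use_bo(): a busy CPU bo now short-circuits to the CPU path before the small-operation test is reached. Below is a minimal sketch of that resulting ordering; struct bo and struct pixmap_state are simplified stand-ins, not the driver's real structures.

/* Minimal sketch of the check ordering after this commit; the types and
 * fields here are simplified stand-ins, not the driver's own. */
#include <stdbool.h>
#include <stdio.h>

struct bo { bool busy; };

struct pixmap_state {
	struct bo *cpu_bo;   /* NULL if no CPU bo exists */
	bool cpu_damage;     /* pending damage on the CPU copy */
	bool small_op;       /* stand-in for !box_inplace(pixmap, box) */
};

/* true => keep using the CPU bo instead of forcing a GPU allocation */
static bool prefer_cpu_bo(const struct pixmap_state *s)
{
	/* New check: a CPU bo that is already busy stays in use. */
	if (s->cpu_bo && s->cpu_bo->busy)
		return true;

	/* Existing check, now evaluated afterwards: small damaged
	 * operations are not worth a forced GPU allocation. */
	if (s->cpu_damage && s->small_op)
		return true;

	return false; /* fall through to the GPU allocation path */
}

int main(void)
{
	struct bo busy_bo = { .busy = true };
	struct pixmap_state s = { .cpu_bo = &busy_bo };
	printf("use cpu bo: %s\n", prefer_cpu_bo(&s) ? "yes" : "no");
	return 0;
}
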
commit c32bb286dc9a489232030f6abe9076411fbcecfd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 03:18:12 2012 +0100

    sna: Make sure damage is flushed to the CPU bo before use
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 46a9180..13bd978 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1267,7 +1267,7 @@ done:
 		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
-	priv->cpu = true;
+	priv->cpu = (flags & MOVE_ASYNC_HINT) == 0;
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
@@ -1898,7 +1898,7 @@ out:
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
-	priv->cpu = true;
+	priv->cpu = (flags & MOVE_ASYNC_HINT) == 0;
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
@@ -2211,7 +2211,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	BoxRec extents;
+	RegionRec region;
 	int16_t dx, dy;
 	int ret;
 
@@ -2267,8 +2267,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		}
 
 		if (priv->cpu_damage && prefer_gpu == 0) {
-			DBG(("%s: prefer cpu",
-			     __FUNCTION__));
+			DBG(("%s: prefer cpu", __FUNCTION__));
 			goto use_cpu_bo;
 		}
 
@@ -2284,19 +2283,20 @@ sna_drawable_use_bo(DrawablePtr drawable,
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
-	extents = *box;
-	extents.x1 += dx;
-	extents.x2 += dx;
-	extents.y1 += dy;
-	extents.y2 += dy;
+	region.extents = *box;
+	region.extents.x1 += dx;
+	region.extents.x2 += dx;
+	region.extents.y1 += dy;
+	region.extents.y2 += dy;
 
 	DBG(("%s extents (%d, %d), (%d, %d)\n", __FUNCTION__,
-	     extents.x1, extents.y1, extents.x2, extents.y2));
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
 
 	if (priv->gpu_damage) {
 		if (!priv->cpu_damage) {
 			if (sna_damage_contains_box__no_reduce(priv->gpu_damage,
-							       &extents)) {
+							       &region.extents)) {
 				DBG(("%s: region wholly contained within GPU damage\n",
 				     __FUNCTION__));
 				goto use_gpu_bo;
@@ -2307,7 +2307,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			}
 		}
 
-		ret = sna_damage_contains_box(priv->gpu_damage, &extents);
+		ret = sna_damage_contains_box(priv->gpu_damage, &region.extents);
 		if (ret == PIXMAN_REGION_IN) {
 			DBG(("%s: region wholly contained within GPU damage\n",
 			     __FUNCTION__));
@@ -2322,7 +2322,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 	if (priv->cpu_damage) {
-		ret = sna_damage_contains_box(priv->cpu_damage, &extents);
+		ret = sna_damage_contains_box(priv->cpu_damage, &region.extents);
 		if (ret == PIXMAN_REGION_IN) {
 			DBG(("%s: region wholly contained within CPU damage\n",
 			     __FUNCTION__));
@@ -2342,7 +2342,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 move_to_gpu:
-	if (!sna_pixmap_move_area_to_gpu(pixmap, &extents,
+	if (!sna_pixmap_move_area_to_gpu(pixmap, &region.extents,
 					 MOVE_READ | MOVE_WRITE)) {
 		DBG(("%s: failed to move-to-gpu, fallback\n", __FUNCTION__));
 		assert(priv->gpu_bo == NULL);
@@ -2379,29 +2379,26 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
-	if (priv->cpu_bo->sync && !kgem_bo_is_busy(priv->cpu_bo))
+	if (prefer_gpu == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 		return NULL;
 
-	/* Continue to use the shadow pixmap once mapped */
-	if (pixmap->devPrivate.ptr) {
-		/* But only if we do not need to sync the CPU bo */
-		if (prefer_gpu == 0 && !kgem_bo_is_busy(priv->cpu_bo))
-			return NULL;
+	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
-		/* Both CPU and GPU are busy, prefer to use the GPU */
-		if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo)) {
-			get_drawable_deltas(drawable, pixmap, &dx, &dy);
+	region.extents = *box;
+	region.extents.x1 += dx;
+	region.extents.x2 += dx;
+	region.extents.y1 += dy;
+	region.extents.y2 += dy;
+	region.data = NULL;
 
-			extents = *box;
-			extents.x1 += dx;
-			extents.x2 += dx;
-			extents.y1 += dy;
-			extents.y2 += dy;
-			goto move_to_gpu;
-		}
+	/* Both CPU and GPU are busy, prefer to use the GPU */
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		goto move_to_gpu;
 
-		priv->mapped = false;
-		pixmap->devPrivate.ptr = NULL;
+	if (!sna_drawable_move_region_to_cpu(&pixmap->drawable, &region,
+					     MOVE_READ | MOVE_ASYNC_HINT)) {
+		DBG(("%s: failed to move-to-cpu, fallback\n", __FUNCTION__));
+		return NULL;
 	}
 
 	if (sna_damage_is_all(&priv->cpu_damage,
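
With the changes above, the use_cpu_bo path builds a RegionRec and flushes outstanding damage into the CPU bo via sna_drawable_move_region_to_cpu() with MOVE_READ | MOVE_ASYNC_HINT; the async hint keeps that flush from marking the pixmap as CPU-preferred. A rough sketch of the flag-gated behaviour follows; move_region_to_cpu() and the fields below are invented stand-ins for the driver's function and struct sna_pixmap.

/* Rough sketch only: invented stand-ins illustrating how MOVE_ASYNC_HINT
 * flushes damage without flipping the CPU-preference flag. */
#include <stdbool.h>

#define MOVE_READ        (1 << 0)
#define MOVE_WRITE       (1 << 1)
#define MOVE_ASYNC_HINT  (1 << 2)

struct pixmap_priv {
	bool cpu;           /* hint: future operations should prefer the CPU */
	bool cpu_bo_synced; /* damage has been flushed into the CPU bo */
};

static bool move_region_to_cpu(struct pixmap_priv *priv, unsigned flags)
{
	/* ... copy outstanding damage into the CPU bo here ... */
	priv->cpu_bo_synced = true;

	/* Only a synchronous move marks the pixmap as CPU-preferred;
	 * an async-hinted flush leaves the existing preference alone. */
	priv->cpu = (flags & MOVE_ASYNC_HINT) == 0;
	return true;
}

/* Before handing out the CPU bo, flush damage without changing
 * the CPU/GPU preference. */
static bool prepare_cpu_bo(struct pixmap_priv *priv)
{
	return move_region_to_cpu(priv, MOVE_READ | MOVE_ASYNC_HINT);
}

int main(void)
{
	struct pixmap_priv priv = { .cpu = false };
	return prepare_cpu_bo(&priv) && !priv.cpu ? 0 : 1;
}
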
commit d46cc00b3cd903bfaf37ad7d4a60676c4b346983
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 02:19:26 2012 +0100

    sna/dri: Assert that our pixmaps sizes are invariant
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index a10efc7..36b40a7 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -223,11 +223,11 @@ sna_dri_create_buffer(DrawablePtr drawable,
 					dri_drawable_type, NULL, DixWriteAccess);
 		if (buffer) {
 			private = get_private(buffer);
-			if (private->pixmap == pixmap &&
-			    private->width  == pixmap->drawable.width &&
-			    private->height == pixmap->drawable.height)  {
+			if (private->pixmap == pixmap) {
 				DBG(("%s: reusing front buffer attachment\n",
 				     __FUNCTION__));
+				assert(private->width  == pixmap->drawable.width);
+				assert(private->height == pixmap->drawable.height);
 				private->refcnt++;
 				return buffer;
 			}
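
The DRI2 change turns the width/height comparison from part of the reuse condition into an assertion: once the cached buffer refers to the same pixmap, its recorded size must match by construction. A simplified sketch of that pattern is below; the types and reuse_front_buffer() are stand-ins, not the driver's DRI2 code.

/* Simplified sketch with stand-in types mirroring the invariant-assert
 * pattern above; not the driver's actual DRI2 buffer handling. */
#include <assert.h>

struct pixmap { int width, height; };

struct buffer_private {
	struct pixmap *pixmap;
	int width, height;  /* size recorded when the attachment was created */
	int refcnt;
};

/* Returns nonzero when the cached front buffer can be reused. */
static int reuse_front_buffer(struct buffer_private *private,
			      struct pixmap *pixmap)
{
	if (private->pixmap != pixmap)
		return 0;

	/* Sizes are invariant for the lifetime of the attachment, so a
	 * mismatch is a bug rather than a condition to recover from. */
	assert(private->width  == pixmap->width);
	assert(private->height == pixmap->height);

	private->refcnt++;
	return 1;
}

int main(void)
{
	struct pixmap pix = { 640, 480 };
	struct buffer_private front = { &pix, 640, 480, 1 };
	return reuse_front_buffer(&front, &pix) ? 0 : 1;
}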

