xf86-video-intel: 3 commits - src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_damage.c

Chris Wilson ickle at kemper.freedesktop.org
Wed Jan 4 11:42:19 PST 2012


 src/sna/kgem.c       |    1 
 src/sna/sna_accel.c  |    3 +
 src/sna/sna_damage.c |   88 +++++++++++++++++++++++++++++++--------------------
 3 files changed, 58 insertions(+), 34 deletions(-)

New commits:
commit 4119e68fb157fc612bce5e9c5669112ce35b4ca1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 4 19:35:30 2012 +0000

    sna/damage: Fix reduction to copy the boxes correctly
    
    We need to be careful to copy the boxes in strict LIFO order so as
    to avoid overwriting the last boxes when reusing the array allocations.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
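
The ordering constraint is easiest to see in isolation. The sketch below
uses stand-in types (Box and struct chunk are illustrative, not the
driver's real BoxRec and struct sna_damage_box): the destination array
may be the payload of the newest, largest chunk itself, so the chain has
to be drained newest-first. Walking oldest-first would overwrite the
newest chunk's boxes before they had been read, which is the bug fixed
below.

    #include <string.h>

    typedef struct { short x1, y1, x2, y2; } Box;  /* stand-in for BoxRec */

    struct chunk {                /* stand-in for struct sna_damage_box */
            struct chunk *prev;   /* next-older chunk, or NULL */
            int count;            /* number of boxes in this chunk */
            /* Box payload follows the header, as in the real code */
    };

    /* Merge every chunk's boxes into dst in strict LIFO order.  dst may
     * alias the newest chunk's own payload; handling that chunk first
     * (and skipping the degenerate self-copy) settles its boxes before
     * any older chunk is appended behind them. */
    static int merge_chunks_lifo(Box *dst, struct chunk *newest)
    {
            int n = 0;
            struct chunk *c;

            for (c = newest; c != NULL; c = c->prev) {
                    Box *src = (Box *)(c + 1);
                    if (src != dst + n)
                            memcpy(dst + n, src, c->count * sizeof(Box));
                    n += c->count;
            }
            return n;
    }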

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 4cccbdc..18ca10d 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -219,7 +219,8 @@ _sna_damage_create_elt(struct sna_damage *damage,
 {
 	int n;
 
-	DBG(("    %s: prev=(remain %d)\n", __FUNCTION__, damage->remain));
+	DBG(("    %s: prev=(remain %d), count=%d\n",
+	     __FUNCTION__, damage->remain, count));
 
 	damage->dirty = true;
 	n = count;
@@ -297,7 +298,8 @@ _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 {
 	int i, n;
 
-	DBG(("    %s: prev=(remain %d)\n", __FUNCTION__, damage->remain));
+	DBG(("    %s: prev=(remain %d), count=%d\n",
+	     __FUNCTION__, damage->remain, count));
 
 	n = count;
 	if (n > damage->remain)
@@ -340,7 +342,8 @@ _sna_damage_create_elt_from_points(struct sna_damage *damage,
 {
 	int i, n;
 
-	DBG(("    %s: prev=(remain %d)\n", __FUNCTION__, damage->remain));
+	DBG(("    %s: prev=(remain %d), count=%d\n",
+	     __FUNCTION__, damage->remain, count));
 
 	n = count;
 	if (n > damage->remain)
@@ -412,6 +415,7 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 			  list);
 	n = iter->size - damage->remain;
 	boxes = (BoxRec *)(iter+1);
+	DBG(("   last box count=%d/%d, need=%d\n", n, iter->size, nboxes));
 	if (nboxes > iter->size) {
 		boxes = malloc(sizeof(BoxRec)*nboxes);
 		if (boxes == NULL)
@@ -422,29 +426,36 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 
 	if (boxes != damage->embedded_box.box) {
 		if (list_is_empty(&damage->embedded_box.list)) {
+			DBG(("   copying embedded boxes\n"));
 			memcpy(boxes,
 			       damage->embedded_box.box,
 			       n*sizeof(BoxRec));
 		} else {
-			if (damage->mode == DAMAGE_ADD)
-				nboxes -= REGION_NUM_RECTS(region);
+			if (boxes != (BoxPtr)(iter+1)) {
+				DBG(("   copying %d boxes from last\n", n));
+				memcpy(boxes, iter+1, n*sizeof(BoxRec));
+			}
 
-			memcpy(boxes,
-			       damage->embedded_box.box,
-			       sizeof(damage->embedded_box.box));
-			n = damage->embedded_box.size;
-
-			list_for_each_entry(iter, &damage->embedded_box.list, list) {
-				int len = iter->size;
-				if (n + len > nboxes)
-					len = nboxes - n;
-				DBG(("   copy %d/%d boxes from %d\n", len, iter->size, n));
-				memcpy(boxes + n, iter+1, len * sizeof(BoxRec));
-				n += len;
+			iter = list_entry(iter->list.prev,
+					  struct sna_damage_box,
+					  list);
+			while (&iter->list != &damage->embedded_box.list) {
+				DBG(("   copy %d boxes from %d\n",
+				     iter->size, n));
+				memcpy(boxes + n, iter+1,
+				       iter->size * sizeof(BoxRec));
+				n += iter->size;
+
+				iter = list_entry(iter->list.prev,
+						  struct sna_damage_box,
+						  list);
 			}
 
-			if (damage->mode == DAMAGE_ADD)
-				nboxes += REGION_NUM_RECTS(region);
+			DBG(("   copying embedded boxes to %d\n", n));
+			memcpy(boxes + n,
+			       damage->embedded_box.box,
+			       sizeof(damage->embedded_box.box));
+			n += damage->embedded_box.size;
 		}
 	}
 
@@ -455,19 +466,28 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 		assert(n + REGION_NUM_RECTS(region) == nboxes);
 		pixman_region_fini(region);
 		pixman_region_init_rects(region, boxes, nboxes);
+
+		assert(damage->extents.x1 == region->extents.x1 &&
+		       damage->extents.y1 == region->extents.y1 &&
+		       damage->extents.x2 == region->extents.x2 &&
+		       damage->extents.y2 == region->extents.y2);
 	} else {
 		pixman_region16_t tmp;
 
 		pixman_region_init_rects(&tmp, boxes, nboxes);
 		pixman_region_subtract(region, region, &tmp);
 		pixman_region_fini(&tmp);
+
+		assert(damage->extents.x1 <= region->extents.x1 &&
+		       damage->extents.y1 <= region->extents.y1 &&
+		       damage->extents.x2 >= region->extents.x2 &&
+		       damage->extents.y2 >= region->extents.y2);
+		damage->extents = region->extents;
 	}
 
 	if (free_boxes)
 		free(boxes);
 
-	damage->extents = region->extents;
-
 done:
 	damage->mode = DAMAGE_ADD;
 	free_list(&damage->embedded_box.list);
@@ -595,7 +615,7 @@ __sna_damage_add_boxes(struct sna_damage *damage,
 	_sna_damage_create_elt_from_boxes(damage, box, n, dx, dy);
 
 	if (REGION_NUM_RECTS(&damage->region) == 0) {
-		damage->region.extents = box[0];
+		damage->region.extents = damage->embedded_box.box[0];
 		damage->region.data = NULL;
 		damage->extents = extents;
 	} else {
@@ -698,10 +718,7 @@ __sna_damage_add_rectangles(struct sna_damage *damage,
 	_sna_damage_create_elt_from_rectangles(damage, r, n, dx, dy);
 
 	if (REGION_NUM_RECTS(&damage->region) == 0) {
-		damage->region.extents.x1 = r[0].x + dx;
-		damage->region.extents.x2 = r[0].x + r[0].width + dx;
-		damage->region.extents.y1 = r[0].y + dy;
-		damage->region.extents.y2 = r[0].y + r[0].height + dy;
+		damage->region.extents = damage->embedded_box.box[0];
 		damage->region.data = NULL;
 		damage->extents = extents;
 	} else {
@@ -795,10 +812,7 @@ __sna_damage_add_points(struct sna_damage *damage,
 	_sna_damage_create_elt_from_points(damage, p, n, dx, dy);
 
 	if (REGION_NUM_RECTS(&damage->region) == 0) {
-		damage->region.extents.x1 = p[0].x + dx;
-		damage->region.extents.x2 = p[0].x + dx + 1;
-		damage->region.extents.y1 = p[0].y + dy;
-		damage->region.extents.y2 = p[0].y + dy + 1;
+		damage->region.extents = damage->embedded_box.box[0];
 		damage->region.data = NULL;
 		damage->extents = extents;
 	} else {
@@ -940,12 +954,18 @@ struct sna_damage *_sna_damage_all(struct sna_damage *damage,
 struct sna_damage *_sna_damage_is_all(struct sna_damage *damage,
 				      int width, int height)
 {
+	DBG(("%s(%d, %d)%s\n", __FUNCTION__, width, height,
+	     damage->dirty ? "*" : ""));
+	assert(damage->mode == DAMAGE_ADD);
+
+	assert(damage->extents.x1 == 0 &&
+	       damage->extents.y1 == 0 &&
+	       damage->extents.x2 == width &&
+	       damage->extents.y2 == height);
+
 	if (damage->dirty) {
 		__sna_damage_reduce(damage);
-		if (!RegionNotEmpty(&damage->region)) {
-			__sna_damage_destroy(damage);
-			return NULL;
-		}
+		assert(RegionNotEmpty(&damage->region));
 	}
 
 	if (damage->region.data)
commit 71b0924b586d9a60397e92e941e3d0cfa636ee61
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 4 19:34:41 2012 +0000

    sna: Mark tiny CPU pixmaps as completely dirty
    
    Avoid the overhead of tracking damage on small pixmaps when using CPU
    rasterisation; the extra cost of sending the whole pixmap, rather than
    just the damaged region, is negligible should it ever be required on
    the GPU.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
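
The mechanism, restated as a self-contained sketch: the structure and
flag below are illustrative stand-ins (the real definitions live in the
driver's headers), and stride == 0 is read, from context, as "the pixmap
has no separate CPU shadow allocation, i.e. it is tiny".

    #include <stdbool.h>
    #include <stddef.h>

    struct pixmap_priv {          /* illustrative stand-in */
            unsigned stride;      /* 0: no dedicated CPU shadow buffer */
            void *gpu_bo;         /* NULL: nothing resident on the GPU */
    };

    #define MOVE_WRITE 0x1        /* assumed value, for illustration only */

    /* For such a pixmap, recording which region a write dirtied costs
     * more than re-sending the whole pixmap ever would, so the write is
     * promoted to a whole-pixmap move that marks it completely dirty. */
    static bool skip_region_tracking(const struct pixmap_priv *priv,
                                     unsigned flags)
    {
            return priv->stride == 0 &&
                   priv->gpu_bo == NULL &&
                   (flags & MOVE_WRITE);
    }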

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index da13efa..68c10a5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -897,6 +897,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		return true;
 	}
 
+	if (priv->stride == 0 && priv->gpu_bo == NULL && flags & MOVE_WRITE)
+		return _sna_pixmap_move_to_cpu(pixmap, flags);
+
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 	DBG(("%s: delta=(%d, %d)\n", __FUNCTION__, dx, dy));
 	if (dx | dy)
commit fc14e63256a9b756c7c77ffe73f2f13784396c5a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 4 19:33:30 2012 +0000

    sna: Only request page-sized allocations for the replacement buffer
    
    A precondition of bo creation is that the size must be page-aligned.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
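
For reference, ALIGN() is the usual power-of-two round-up. Its exact
definition in kgem.c is not part of this diff, so the macro below, and
PAGE_SIZE being 4096, are assumptions made for illustration.

    #include <assert.h>

    #define PAGE_SIZE 4096
    #define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            assert(ALIGN(1, PAGE_SIZE) == PAGE_SIZE);          /* rounds up */
            assert(ALIGN(PAGE_SIZE, PAGE_SIZE) == PAGE_SIZE);  /* unchanged */
            assert(ALIGN(PAGE_SIZE + 1, PAGE_SIZE) == 2 * PAGE_SIZE);
            return 0;
    }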

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d34fbd5..51b56eb 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3131,6 +3131,7 @@ kgem_replace_bo(struct kgem *kgem,
 	assert(src->tiling == I915_TILING_NONE);
 
 	size = height * pitch;
+	size = ALIGN(size, PAGE_SIZE);
 
 	dst = search_linear_cache(kgem, size, 0);
 	if (dst == NULL)

