xf86-video-intel: 2 commits - src/intel_list.h src/sna/gen4_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Wed Mar 7 10:57:25 PST 2012


 src/intel_list.h      |    5 ++-
 src/sna/gen4_render.c |    5 ++-
 src/sna/kgem.c        |   70 +++++++++++++++++++++++++++++++++++++++++++-------
 src/sna/kgem.h        |    1 
 src/sna/sna_render.c  |   21 ++++++++++-----
 5 files changed, 83 insertions(+), 19 deletions(-)

New commits:
commit 72a7538d4e7bcf0bd7455d9e67d8751e17739e6c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 15:45:21 2012 +0000

    sna: Convolution filter fixes
    
    A couple of typos made the convolution filter explode rather than
    convolve.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 404e12b..c3a82a3 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -681,7 +681,10 @@ static uint32_t gen4_check_filter(PicturePtr picture)
 	case PictFilterBilinear:
 		return TRUE;
 	default:
-		DBG(("%s: unknown filter: %d\n", __FUNCTION__, picture->filter));
+		DBG(("%s: unknown filter: %s [%d]\n",
+		     __FUNCTION__,
+		     PictureGetFilterName(picture->filter),
+		     picture->filter));
 		return FALSE;
 	}
 }
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index a345962..74c04af 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1234,15 +1234,18 @@ sna_render_picture_convolve(struct sna *sna,
 	PixmapPtr pixmap;
 	PicturePtr tmp;
 	pixman_fixed_t *params = picture->filter_params;
-	int x_off = (params[0] - pixman_fixed_1) >> 1;
-	int y_off = (params[1] - pixman_fixed_1) >> 1;
+	int x_off = -pixman_fixed_to_int((params[0] - pixman_fixed_1) >> 1);
+	int y_off = -pixman_fixed_to_int((params[1] - pixman_fixed_1) >> 1);
 	int cw = pixman_fixed_to_int(params[0]);
 	int ch = pixman_fixed_to_int(params[1]);
 	int i, j, error, depth;
+	struct kgem_bo *bo;
 
 	/* Lame multi-pass accumulation implementation of a general convolution
 	 * that works everywhere.
 	 */
+	DBG(("%s: origin=(%d,%d) kernel=%dx%d, size=%dx%d\n",
+	     __FUNCTION__, x_off, y_off, cw, ch, w, h));
 
 	assert(picture->pDrawable);
 	assert(picture->filter == PictFilterConvolution);
@@ -1267,8 +1270,10 @@ sna_render_picture_convolve(struct sna *sna,
 	if (tmp == NULL)
 		return 0;
 
-	if (!sna->render.fill_one(sna, pixmap, sna_pixmap_get_bo(pixmap), 0,
-				  0, 0, w, h, GXclear)) {
+	ValidatePicture(tmp);
+
+	bo = sna_pixmap_get_bo(pixmap);
+	if (!sna->render.clear(sna, pixmap, bo)) {
 		FreePicture(tmp, 0);
 		return 0;
 	}
@@ -1282,6 +1287,8 @@ sna_render_picture_convolve(struct sna *sna,
 
 			color.alpha = *params++;
 			color.red = color.green = color.blue = 0;
+			DBG(("%s: (%d, %d), alpha=%x\n",
+			     __FUNCTION__, i,j, color.alpha));
 
 			if (color.alpha <= 0x00ff)
 				continue;
@@ -1291,7 +1298,7 @@ sna_render_picture_convolve(struct sna *sna,
 				sna_composite(PictOpAdd, picture, alpha, tmp,
 					      x, y,
 					      0, 0,
-					      x_off-i, y_off-j,
+					      x_off+i, y_off+j,
 					      w, h);
 				FreePicture(alpha, 0);
 			}
@@ -1309,7 +1316,7 @@ sna_render_picture_convolve(struct sna *sna,
 	channel->scale[1] = 1.f / h;
 	channel->offset[0] = -dst_x;
 	channel->offset[1] = -dst_y;
-	channel->bo = kgem_bo_reference(sna_pixmap_get_bo(pixmap));
+	channel->bo = kgem_bo_reference(bo); /* transfer ownership */
 	FreePicture(tmp, 0);
 
 	return 1;
@@ -1423,7 +1430,7 @@ sna_render_picture_fixup(struct sna *sna,
 		DBG(("%s: convolution\n", __FUNCTION__));
 		if (picture->pDrawable && is_gpu(picture->pDrawable)) {
 			return sna_render_picture_convolve(sna, picture, channel,
-							   x, y, w, y, dst_x, dst_y);
+							   x, y, w, h, dst_x, dst_y);
 		}
 
 		goto do_fixup;
commit 34fe3cbb316c36c7022735cf9b03d8b655e04434
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 17:49:01 2012 +0000

    sna: Avoid recursive calls to kgem_retire_partials()
    
    Whilst iterating the partial list and uploading the buffers, we need to
    avoid trigger a recursive call into retire should we attempt to shrink a
    buffer. Such a recursive call will modify the list beneath us so that we
    chase a stale pointer and wreak havoc with memory corruption.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47061
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_list.h b/src/intel_list.h
index 366b9e8..cbadebf 100644
--- a/src/intel_list.h
+++ b/src/intel_list.h
@@ -207,8 +207,9 @@ list_append(struct list *entry, struct list *head)
 static inline void
 __list_del(struct list *prev, struct list *next)
 {
-    next->prev = prev;
-    prev->next = next;
+	assert(next->prev == prev->next);
+	next->prev = prev;
+	prev->next = next;
 }
 
 static inline void
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 279face..e4dcbb2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -121,22 +121,47 @@ static bool validate_partials(struct kgem *kgem)
 
 	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
 		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.refcnt >= 1);
 		assert(bo->base.io);
-		if (bo->base.list.next == &kgem->active_partials)
-			return true;
+
+		if (&next->base.list == &kgem->active_partials)
+			break;
+
 		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
-			ErrorF("this rem: %d, next rem: %d\n",
+			ErrorF("active error: this rem: %d, next rem: %d\n",
 			       bytes(&bo->base) - bo->used,
 			       bytes(&next->base) - next->used);
 			goto err;
 		}
 	}
+
+	list_for_each_entry_safe(bo, next, &kgem->inactive_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+		assert(bo->base.refcnt == 1);
+
+		if (&next->base.list == &kgem->inactive_partials)
+			break;
+
+		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
+			ErrorF("inactive error: this rem: %d, next rem: %d\n",
+			       bytes(&bo->base) - bo->used,
+			       bytes(&next->base) - next->used);
+			goto err;
+		}
+	}
+
 	return true;
 
 err:
+	ErrorF("active partials:\n");
 	list_for_each_entry(bo, &kgem->active_partials, base.list)
-		ErrorF("bo: used=%d / %d, rem=%d\n",
-		       bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
+		ErrorF("bo handle=%d: used=%d / %d, rem=%d\n",
+		       bo->base.handle, bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
+	ErrorF("inactive partials:\n");
+	list_for_each_entry(bo, &kgem->inactive_partials, base.list)
+		ErrorF("bo handle=%d: used=%d / %d, rem=%d\n",
+		       bo->base.handle, bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
 	return false;
 }
 #else
@@ -1220,7 +1245,13 @@ static void kgem_retire_partials(struct kgem *kgem)
 		     bo->base.handle, bo->used, bytes(&bo->base)));
 
 		assert(bo->base.refcnt == 1);
-		assert(bo->mmapped);
+		assert(bo->base.exec == NULL);
+		if (!bo->mmapped) {
+			list_del(&bo->base.list);
+			kgem_bo_unref(kgem, &bo->base);
+			continue;
+		}
+
 		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
 		bo->base.dirty = false;
 		bo->base.needs_flush = false;
@@ -1229,6 +1260,7 @@ static void kgem_retire_partials(struct kgem *kgem)
 		list_move_tail(&bo->base.list, &kgem->inactive_partials);
 		bubble_sort_partial(&kgem->inactive_partials, bo);
 	}
+	assert(validate_partials(kgem));
 }
 
 bool kgem_retire(struct kgem *kgem)
@@ -1423,12 +1455,23 @@ static void kgem_finish_partials(struct kgem *kgem)
 	struct kgem_partial_bo *bo, *next;
 
 	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		DBG(("%s: partial handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
+		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
+		     bo->write, bo->mmapped));
+
 		assert(next->base.list.prev == &bo->base.list);
 		assert(bo->base.io);
 		assert(bo->base.refcnt >= 1);
 
-		if (!bo->base.exec)
+		if (!bo->base.exec) {
+			if (bo->base.refcnt == 1 && bo->used) {
+				bo->used = 0;
+				bubble_sort_partial(&kgem->active_partials, bo);
+			}
+			DBG(("%s: skipping unattached handle=%d, used=%d\n",
+			     __FUNCTION__, bo->base.handle, bo->used));
 			continue;
+		}
 
 		if (!bo->write) {
 			assert(bo->base.exec || bo->base.refcnt > 1);
@@ -1458,12 +1501,14 @@ static void kgem_finish_partials(struct kgem *kgem)
 		assert(bo->base.rq == kgem->next_request);
 		assert(bo->base.domain != DOMAIN_GPU);
 
-		if (bo->base.refcnt == 1 && bo->used < bytes(&bo->base) / 2) {
+		if (bo->base.refcnt == 1 &&
+		    bo->base.size.pages.count > 1 &&
+		    bo->used < bytes(&bo->base) / 2) {
 			struct kgem_bo *shrink;
 
 			shrink = search_linear_cache(kgem,
 						     PAGE_ALIGN(bo->used),
-						     CREATE_INACTIVE);
+						     CREATE_INACTIVE | CREATE_NO_RETIRE);
 			if (shrink) {
 				int n;
 
@@ -1513,6 +1558,8 @@ static void kgem_finish_partials(struct kgem *kgem)
 		bo->need_io = 0;
 
 decouple:
+		DBG(("%s: releasing handle=%d\n",
+		     __FUNCTION__, bo->base.handle));
 		list_del(&bo->base.list);
 		kgem_bo_unref(kgem, &bo->base);
 	}
@@ -2018,6 +2065,11 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		DBG(("%s: inactive and cache bucket empty\n",
 		     __FUNCTION__));
 
+		if (flags & CREATE_NO_RETIRE) {
+			DBG(("%s: can not retire\n", __FUNCTION__));
+			return NULL;
+		}
+
 		if (!kgem->need_retire || !kgem_retire(kgem)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
 			return NULL;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 9abb72a..96d945e 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -222,6 +222,7 @@ enum {
 	CREATE_GTT_MAP = 0x8,
 	CREATE_SCANOUT = 0x10,
 	CREATE_TEMPORARY = 0x20,
+	CREATE_NO_RETIRE = 0x40,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,


More information about the xorg-commit mailing list