xf86-video-intel: 2 commits - src/sna/gen2_render.c src/sna/gen5_render.c src/sna/gen6_render.c src/sna/gen7_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Mon Dec 17 15:29:06 PST 2012


 src/sna/gen2_render.c |    2 -
 src/sna/gen5_render.c |    2 -
 src/sna/gen6_render.c |   13 ++++-------
 src/sna/gen7_render.c |    7 +-----
 src/sna/kgem.c        |   58 ++++++++++++++++++++++++++++++--------------------
 src/sna/kgem.h        |    2 -
 src/sna/sna_accel.c   |    4 +--
 src/sna/sna_render.c  |    3 --
 8 files changed, 48 insertions(+), 43 deletions(-)

New commits:
commit f522fbe7c98ffad86126c3666b2d9f7e616480b8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Dec 17 23:04:25 2012 +0000

    sna: Refine check for an unset context switch
    
    So it appears that we end up performing a context switch on an empty
    batch that nevertheless already has a mode set. This is caught later,
    too late, by assertions. However, we can change the guards slightly
    to prevent those assertions from firing without altering the code too
    greatly. I can then think about how to detect where we are setting a
    mode on the batch but doing no work - which is likely masking a
    bigger bug.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
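
A minimal, self-contained sketch of the refined guard - not the driver
code itself; the struct and the submit callback are stand-ins for the
real kgem types. The point is that "mode" merely records the intended
batch mode and can be set while the batch is still empty, whereas
"nbatch" counts the words actually emitted, so it is the reliable test
for pending work:

    struct kgem_sketch {
        int mode;    /* intended mode; may be set on an empty batch */
        int nbatch;  /* dwords actually emitted into the batch */
        int ring;    /* currently selected ring */
    };

    static void context_switch(struct kgem_sketch *kgem,
                               void (*submit)(struct kgem_sketch *),
                               int new_mode)
    {
        /* The old guard, "if (!kgem->mode) return;", let an empty batch
         * that already had a mode fall through and trip the later
         * assertions on submission.  Testing nbatch instead skips the
         * flush whenever no real work has been emitted. */
        if (kgem->nbatch)
            submit(kgem);

        kgem->ring = new_mode;
    }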

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 8f92338..8e26ecd 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -3214,7 +3214,7 @@ gen2_render_context_switch(struct kgem *kgem,
 {
 	struct sna *sna = container_of(kgem, struct sna, kgem);
 
-	if (!kgem->mode)
+	if (!kgem->nbatch)
 		return;
 
 	/* Reload BLT registers following a lost context */
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 9b38878..7f0e8fb 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3571,7 +3571,7 @@ static void
 gen5_render_context_switch(struct kgem *kgem,
 			   int new_mode)
 {
-	if (!kgem->mode)
+	if (!kgem->nbatch)
 		return;
 
 	/* WaNonPipelinedStateCommandFlush
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index bb3bbba..7f17c71 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2405,7 +2405,7 @@ static bool prefer_blt_ring(struct sna *sna)
 	return sna->kgem.ring != KGEM_RENDER;
 }
 
-static bool can_switch_to_blt(struct sna *sna)
+inline static bool can_switch_to_blt(struct sna *sna)
 {
 	if (sna->kgem.ring != KGEM_RENDER)
 		return true;
@@ -4191,13 +4191,10 @@ static void
 gen6_render_context_switch(struct kgem *kgem,
 			   int new_mode)
 {
-	if (!new_mode)
-		return;
-
-	 DBG(("%s: from %d to %d\n", __FUNCTION__, kgem->mode, new_mode));
-
-	if (kgem->mode)
-		kgem_submit(kgem);
+	if (kgem->nbatch) {
+		DBG(("%s: from %d to %d\n", __FUNCTION__, kgem->mode, new_mode));
+		_kgem_submit(kgem);
+	}
 
 	kgem->ring = new_mode;
 }
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 06bef75..f74f56e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -4252,13 +4252,10 @@ static void
 gen7_render_context_switch(struct kgem *kgem,
 			   int new_mode)
 {
-	if (!new_mode)
-		return;
-
-	if (kgem->mode) {
+	if (kgem->nbatch) {
 		DBG(("%s: switch rings %d -> %d\n",
 		     __FUNCTION__, kgem->mode, new_mode));
-		kgem_submit(kgem);
+		_kgem_submit(kgem);
 	}
 
 	kgem->ring = new_mode;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index c23b9e3..c5199c2 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -354,7 +354,7 @@ static inline void kgem_set_mode(struct kgem *kgem,
 #endif
 
 	if (kgem->nexec && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring))
-		kgem_submit(kgem);
+		_kgem_submit(kgem);
 
 	if (kgem->mode == mode)
 		return;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 994f0a6..e92c453 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -246,7 +246,7 @@ static void
 no_render_context_switch(struct kgem *kgem,
 			 int new_mode)
 {
-	if (!kgem->mode)
+	if (!kgem->nbatch)
 		return;
 
 	if (kgem_ring_is_idle(kgem, kgem->ring)) {
@@ -254,7 +254,6 @@ no_render_context_switch(struct kgem *kgem,
 		_kgem_submit(kgem);
 	}
 
-	(void)kgem;
 	(void)new_mode;
 }
 
commit 6c50cf4809816dbbd93d54f589a79b0dab996180
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Dec 17 22:27:14 2012 +0000

    sna: Untangle the confusion of caching large LLC bo
    
    We only use a single cache for very large buffers, so we need to be
    careful to set the tiling on them correctly. Moreover, we need to
    take extra care when allocating a large CPU bo from that cache, to
    be sure that it is untiled and that its flags are accurate.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
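
The hunks below repeat one pattern, sketched here for clarity (names
as in the diff, shown as it appears inside the cache-search loops):
gem_set_tiling() now reports whether the SET_TILING ioctl succeeded
instead of echoing the kernel's resulting tiling mode, so each caller
updates the cached bo state itself, and only on success. This removes
the ambiguity of the old convention, in which the echoed mode could
match the request even though the ioctl had failed.

    if (bo->tiling != tiling ||
        (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
        /* On failure, skip this bo and leave its cached state
         * untouched; on success, bring the cached values back into
         * sync with what the kernel now believes. */
        if (!gem_set_tiling(kgem->fd, bo->handle, tiling, pitch))
            continue;

        bo->tiling = tiling;
        bo->pitch = pitch;
    }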

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 67694ff..2933ec4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -194,13 +194,13 @@ static void kgem_sna_flush(struct kgem *kgem)
 		sna_render_flush_solid(sna);
 }
 
-static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
+static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 {
 	struct drm_i915_gem_set_tiling set_tiling;
 	int ret;
 
 	if (DBG_NO_TILING)
-		return I915_TILING_NONE;
+		return false;
 
 	VG_CLEAR(set_tiling);
 	do {
@@ -210,7 +210,7 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 
 		ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
 	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
-	return set_tiling.tiling_mode;
+	return ret == 0;
 }
 
 static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
@@ -2913,8 +2913,8 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			}
 
 			if (I915_TILING_NONE != bo->tiling &&
-			    gem_set_tiling(kgem->fd, bo->handle,
-					   I915_TILING_NONE, 0) != I915_TILING_NONE)
+			    !gem_set_tiling(kgem->fd, bo->handle,
+					    I915_TILING_NONE, 0))
 				continue;
 
 			kgem_bo_remove_from_inactive(kgem, bo);
@@ -2961,11 +2961,12 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			if (first)
 				continue;
 
-			if (gem_set_tiling(kgem->fd, bo->handle,
-					   I915_TILING_NONE, 0) != I915_TILING_NONE)
+			if (!gem_set_tiling(kgem->fd, bo->handle,
+					    I915_TILING_NONE, 0))
 				continue;
 
 			bo->tiling = I915_TILING_NONE;
+			bo->pitch = 0;
 		}
 
 		if (bo->map) {
@@ -3380,6 +3381,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			assert(!bo->purged);
 			assert(bo->refcnt == 0);
 			assert(bo->reusable);
+			assert(bo->flush == true);
 
 			if (kgem->gen < 040) {
 				if (bo->pitch < pitch) {
@@ -3396,11 +3398,12 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 					continue;
 
 				if (bo->pitch != pitch || bo->tiling != tiling) {
-					if (gem_set_tiling(kgem->fd, bo->handle,
-							   tiling, pitch) != tiling)
+					if (!gem_set_tiling(kgem->fd, bo->handle,
+							    tiling, pitch))
 						continue;
 
 					bo->pitch = pitch;
+					bo->tiling = tiling;
 				}
 			}
 
@@ -3425,10 +3428,12 @@ large_inactive:
 
 			if (bo->tiling != tiling ||
 			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-				if (tiling != gem_set_tiling(kgem->fd,
-							     bo->handle,
-							     tiling, pitch))
+				if (!gem_set_tiling(kgem->fd, bo->handle,
+						    tiling, pitch))
 					continue;
+
+				bo->tiling = tiling;
+				bo->pitch = pitch;
 			}
 
 			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
@@ -3467,6 +3472,7 @@ large_inactive:
 				assert(IS_CPU_MAP(bo->map) == for_cpu);
 				assert(bo->rq == NULL);
 				assert(list_is_empty(&bo->request));
+				assert(bo->flush == false);
 
 				if (size > num_pages(bo)) {
 					DBG(("inactive too small: %d < %d\n",
@@ -3522,6 +3528,7 @@ search_again:
 			assert(bucket(bo) == bucket);
 			assert(bo->reusable);
 			assert(bo->tiling == tiling);
+			assert(bo->flush == false);
 
 			if (kgem->gen < 040) {
 				if (bo->pitch < pitch) {
@@ -3538,9 +3545,10 @@ search_again:
 					continue;
 
 				if (bo->pitch != pitch) {
-					gem_set_tiling(kgem->fd,
-						       bo->handle,
-						       tiling, pitch);
+					if (!gem_set_tiling(kgem->fd,
+							    bo->handle,
+							    tiling, pitch))
+						continue;
 
 					bo->pitch = pitch;
 				}
@@ -3563,6 +3571,7 @@ search_again:
 			assert(bo->refcnt == 0);
 			assert(bo->reusable);
 			assert(bo->tiling == tiling);
+			assert(bo->flush == false);
 
 			if (num_pages(bo) < size)
 				continue;
@@ -3591,13 +3600,14 @@ search_again:
 					assert(!bo->purged);
 					assert(bo->refcnt == 0);
 					assert(bo->reusable);
+					assert(bo->flush == false);
 
 					if (num_pages(bo) < size)
 						continue;
 
-					if (tiling != gem_set_tiling(kgem->fd,
-								     bo->handle,
-								     tiling, pitch))
+					if (!gem_set_tiling(kgem->fd,
+							    bo->handle,
+							    tiling, pitch))
 						continue;
 
 					kgem_bo_remove_from_active(kgem, bo);
@@ -3631,6 +3641,7 @@ search_again:
 				assert(!bo->purged);
 				assert(bo->refcnt == 0);
 				assert(bo->reusable);
+				assert(bo->flush == false);
 
 				if (bo->tiling) {
 					if (bo->pitch < pitch) {
@@ -3670,6 +3681,7 @@ search_inactive:
 	list_for_each_entry(bo, cache, list) {
 		assert(bucket(bo) == bucket);
 		assert(bo->reusable);
+		assert(bo->flush == false);
 
 		if (size > num_pages(bo)) {
 			DBG(("inactive too small: %d < %d\n",
@@ -3679,9 +3691,8 @@ search_inactive:
 
 		if (bo->tiling != tiling ||
 		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-			if (tiling != gem_set_tiling(kgem->fd,
-						     bo->handle,
-						     tiling, pitch))
+			if (!gem_set_tiling(kgem->fd, bo->handle,
+					    tiling, pitch))
 				continue;
 
 			if (bo->map)
@@ -3741,8 +3752,9 @@ create:
 	bo->domain = DOMAIN_CPU;
 	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->pitch = pitch;
-	if (tiling != I915_TILING_NONE)
-		bo->tiling = gem_set_tiling(kgem->fd, handle, tiling, pitch);
+	if (tiling != I915_TILING_NONE &&
+	    gem_set_tiling(kgem->fd, handle, tiling, pitch))
+		bo->tiling = tiling;
 	if (bucket >= NUM_CACHE_BUCKETS) {
 		DBG(("%s: marking large bo for automatic flushing\n",
 		     __FUNCTION__));
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2c6347d..b749e29 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -475,8 +475,8 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 		sna->debug_memory.cpu_bo_allocs--;
 		sna->debug_memory.cpu_bo_bytes -= kgem_bo_size(priv->cpu_bo);
 #endif
-		if (priv->cpu_bo->flush) {
-			assert(priv->cpu_bo->reusable == false);
+		if (!priv->cpu_bo->reusable) {
+			assert(priv->cpu_bo->flush == true);
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 			sna_accel_watch_flush(sna, -1);
 		}

