xf86-video-intel: 3 commits - src/sna/gen6_render.c src/sna/gen7_render.c src/sna/kgem.c src/sna/kgem.h

Chris Wilson ickle at kemper.freedesktop.org
Fri Dec 7 09:28:11 PST 2012


 src/sna/gen6_render.c |    3 
 src/sna/gen7_render.c |    3 
 src/sna/kgem.c        |  218 +++++++++++++++++++++++++++++---------------------
 src/sna/kgem.h        |   12 ++
 4 files changed, 142 insertions(+), 94 deletions(-)

New commits:
commit 52405b2aed492dc7f76fbf082122842f621e7c06
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Dec 7 17:24:42 2012 +0000

    sna: Only inspect the target ring for busyness
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 6e2242b..50df557 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2411,7 +2411,8 @@ static bool can_switch_to_blt(struct sna *sna)
 	if (!sna->kgem.has_semaphores)
 		return false;
 
-	return sna->kgem.mode == KGEM_NONE || kgem_is_idle(&sna->kgem);
+	return (sna->kgem.mode == KGEM_NONE ||
+		kgem_ring_is_idle(&sna->kgem, KGEM_BLT));
 }
 
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 575d67a..eb34ff2 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2514,7 +2514,8 @@ inline static bool can_switch_to_blt(struct sna *sna)
 	if (!sna->kgem.has_semaphores)
 		return false;
 
-	return sna->kgem.mode == KGEM_NONE || kgem_is_idle(&sna->kgem);
+	return (sna->kgem.mode == KGEM_NONE ||
+		kgem_ring_is_idle(&sna->kgem, KGEM_BLT));
 }
 
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 200c755..c497ce7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1736,116 +1736,126 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 	return retired;
 }
 
-static bool kgem_retire__requests(struct kgem *kgem)
+static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
 {
-	struct kgem_bo *bo;
 	bool retired = false;
-	int n;
 
-	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
-		while (!list_is_empty(&kgem->requests[n])) {
-			struct kgem_request *rq;
+	while (!list_is_empty(&kgem->requests[ring])) {
+		struct kgem_request *rq;
 
-			rq = list_first_entry(&kgem->requests[n],
-					      struct kgem_request,
-					      list);
-			if (kgem_busy(kgem, rq->bo->handle))
-				break;
+		rq = list_first_entry(&kgem->requests[ring],
+				      struct kgem_request,
+				      list);
+		if (kgem_busy(kgem, rq->bo->handle))
+			break;
 
-			DBG(("%s: request %d complete\n",
-			     __FUNCTION__, rq->bo->handle));
+		DBG(("%s: request %d complete\n",
+		     __FUNCTION__, rq->bo->handle));
 
-			while (!list_is_empty(&rq->buffers)) {
-				bo = list_first_entry(&rq->buffers,
-						      struct kgem_bo,
-						      request);
+		while (!list_is_empty(&rq->buffers)) {
+			struct kgem_bo *bo;
 
-				assert(bo->rq == rq);
-				assert(bo->exec == NULL);
-				assert(bo->domain == DOMAIN_GPU);
+			bo = list_first_entry(&rq->buffers,
+					      struct kgem_bo,
+					      request);
 
-				list_del(&bo->request);
+			assert(bo->rq == rq);
+			assert(bo->exec == NULL);
+			assert(bo->domain == DOMAIN_GPU);
+
+			list_del(&bo->request);
 
-				if (bo->needs_flush)
-					bo->needs_flush = kgem_busy(kgem, bo->handle);
+			if (bo->needs_flush)
+				bo->needs_flush = kgem_busy(kgem, bo->handle);
+			if (bo->needs_flush) {
+				DBG(("%s: moving %d to flushing\n",
+				     __FUNCTION__, bo->handle));
+				list_add(&bo->request, &kgem->flushing);
+				bo->rq = &_kgem_static_request;
+			} else {
+				bo->domain = DOMAIN_NONE;
+				bo->rq = NULL;
+			}
+
+			if (bo->refcnt)
+				continue;
+
+			if (bo->snoop) {
 				if (bo->needs_flush) {
-					DBG(("%s: moving %d to flushing\n",
-					     __FUNCTION__, bo->handle));
 					list_add(&bo->request, &kgem->flushing);
 					bo->rq = &_kgem_static_request;
 				} else {
-					bo->domain = DOMAIN_NONE;
-					bo->rq = NULL;
+					kgem_bo_move_to_snoop(kgem, bo);
 				}
+				continue;
+			}
 
-				if (bo->refcnt)
-					continue;
-
-				if (bo->snoop) {
-					if (bo->needs_flush) {
-						list_add(&bo->request, &kgem->flushing);
-						bo->rq = &_kgem_static_request;
-					} else {
-						kgem_bo_move_to_snoop(kgem, bo);
-					}
-					continue;
-				}
+			if (!bo->reusable) {
+				DBG(("%s: closing %d\n",
+				     __FUNCTION__, bo->handle));
+				kgem_bo_free(kgem, bo);
+				continue;
+			}
 
-				if (!bo->reusable) {
+			if (!bo->needs_flush) {
+				if (kgem_bo_set_purgeable(kgem, bo)) {
+					kgem_bo_move_to_inactive(kgem, bo);
+					retired = true;
+				} else {
 					DBG(("%s: closing %d\n",
 					     __FUNCTION__, bo->handle));
 					kgem_bo_free(kgem, bo);
-					continue;
-				}
-
-				if (!bo->needs_flush) {
-					if (kgem_bo_set_purgeable(kgem, bo)) {
-						kgem_bo_move_to_inactive(kgem, bo);
-						retired = true;
-					} else {
-						DBG(("%s: closing %d\n",
-						     __FUNCTION__, bo->handle));
-						kgem_bo_free(kgem, bo);
-					}
 				}
 			}
+		}
 
-			assert(rq->bo->rq == NULL);
-			assert(list_is_empty(&rq->bo->request));
+		assert(rq->bo->rq == NULL);
+		assert(list_is_empty(&rq->bo->request));
 
-			if (--rq->bo->refcnt == 0) {
-				if (kgem_bo_set_purgeable(kgem, rq->bo)) {
-					kgem_bo_move_to_inactive(kgem, rq->bo);
-					retired = true;
-				} else {
-					DBG(("%s: closing %d\n",
-					     __FUNCTION__, rq->bo->handle));
-					kgem_bo_free(kgem, rq->bo);
-				}
+		if (--rq->bo->refcnt == 0) {
+			if (kgem_bo_set_purgeable(kgem, rq->bo)) {
+				kgem_bo_move_to_inactive(kgem, rq->bo);
+				retired = true;
+			} else {
+				DBG(("%s: closing %d\n",
+				     __FUNCTION__, rq->bo->handle));
+				kgem_bo_free(kgem, rq->bo);
 			}
-
-			__kgem_request_free(rq);
-			kgem->num_requests--;
 		}
 
+		__kgem_request_free(rq);
+		kgem->num_requests--;
+	}
+
 #if HAS_DEBUG_FULL
-		{
-			int count = 0;
+	{
+		struct kgem_bo *bo;
+		int count = 0;
 
-			list_for_each_entry(bo, &kgem->requests[n], request)
-				count++;
+		list_for_each_entry(bo, &kgem->requests[ring], request)
+			count++;
 
-			bo = NULL;
-			if (!list_is_empty(&kgem->requests[n]))
-				bo = list_first_entry(&kgem->requests[n],
-						      struct kgem_request,
-						      list)->bo;
+		bo = NULL;
+		if (!list_is_empty(&kgem->requests[ring]))
+			bo = list_first_entry(&kgem->requests[ring],
+					      struct kgem_request,
+					      list)->bo;
 
-			ErrorF("%s: ring=%d, %d outstanding requests, oldest=%d\n",
-			       __FUNCTION__, n, count, bo ? bo->handle : 0);
-		}
-#endif
+		ErrorF("%s: ring=%d, %d outstanding requests, oldest=%d\n",
+		       __FUNCTION__, ring, count, bo ? bo->handle : 0);
 	}
+#endif
+
+	return retired;
+}
+
+static bool kgem_retire__requests(struct kgem *kgem)
+{
+	bool retired = false;
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++)
+		retired |= kgem_retire__requests_ring(kgem, n);
 
 #if HAS_DEBUG_FULL
 	{
@@ -1912,6 +1922,29 @@ bool __kgem_is_idle(struct kgem *kgem)
 	return true;
 }
 
+bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
+{
+	struct kgem_request *rq;
+
+	assert(kgem->num_requests);
+	assert(!list_is_empty(&kgem->requests[ring]));
+
+	rq = list_last_entry(&kgem->requests[ring],
+			     struct kgem_request, list);
+	if (kgem_busy(kgem, rq->bo->handle)) {
+		DBG(("%s: last requests handle=%d still busy\n",
+		     __FUNCTION__, rq->bo->handle));
+		return false;
+	}
+
+	DBG(("%s: ring=%d idle (handle=%d)\n",
+	     __FUNCTION__, ring, rq->bo->handle));
+
+	kgem_retire__requests_ring(kgem, ring);
+	assert(list_is_empty(&kgem->requests[ring]));
+	return true;
+}
+
 static void kgem_commit(struct kgem *kgem)
 {
 	struct kgem_request *rq = kgem->next_request;
@@ -3740,7 +3773,8 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	if (kgem_flush(kgem))
 		return false;
 
-	if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem)) {
+	if (kgem->aperture > kgem->aperture_low &&
+	    kgem_ring_is_idle(kgem, kgem->ring)) {
 		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
 		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
 		return false;
@@ -3789,7 +3823,8 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 	if (kgem->nexec >= KGEM_EXEC_SIZE(kgem) - 1)
 		return false;
 
-	if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem))
+	if (kgem->aperture > kgem->aperture_low &&
+	    kgem_ring_is_idle(kgem, kgem->ring))
 		return false;
 
 	if (kgem->aperture + num_pages(bo) > kgem->aperture_high)
@@ -3860,7 +3895,8 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 		if (kgem_flush(kgem))
 			return false;
 
-		if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem))
+		if (kgem->aperture > kgem->aperture_low &&
+		    kgem_ring_is_idle(kgem, kgem->ring))
 			return false;
 
 		if (num_pages + kgem->aperture > kgem->aperture_high)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8a3a4fa..ccefc48 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -274,6 +274,7 @@ int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);
 
 void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo);
 bool kgem_retire(struct kgem *kgem);
+
 bool __kgem_is_idle(struct kgem *kgem);
 static inline bool kgem_is_idle(struct kgem *kgem)
 {
@@ -285,6 +286,15 @@ static inline bool kgem_is_idle(struct kgem *kgem)
 	return __kgem_is_idle(kgem);
 }
 
+bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
+static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
+{
+	if (list_is_empty(&kgem->requests[ring]))
+		return true;
+
+	return __kgem_ring_is_idle(kgem, ring);
+}
+
 void _kgem_submit(struct kgem *kgem);
 static inline void kgem_submit(struct kgem *kgem)
 {
@@ -294,7 +304,7 @@ static inline void kgem_submit(struct kgem *kgem)
 
 static inline bool kgem_flush(struct kgem *kgem)
 {
-	return kgem->flush && list_is_empty(&kgem->requests[kgem->ring]);
+	return kgem->flush && kgem_ring_is_idle(kgem, kgem->ring);
 }
 
 static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
commit 4b7bbb2a23b03bac63f864c33f47fab88dedbf67
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Dec 7 16:43:32 2012 +0000

    sna: Only flush before adding fresh surfaces to the batch
    
    Previously, before every operation we would look to see if the GPU was
    idle and we were running under a DRI compositor. If the GPU was idle, we
    would flush the batch in the hope that we reduce the cost of the context
    switch and copy from the compositor (by completing the work earlier).
    However, we would complete the work far too early and as a result
    would need to flush the batch before every single operation resulting in
    extra overhead and reduced performance. For example, the gtkperf
    circles benchmark under gnome-shell/compiz would be 2x slower on
    Ivybridge.
    
    Reported-by: Michael Larabel <michael at phoronix.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2138f1a..200c755 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3716,9 +3716,6 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	int num_exec = 0;
 	int num_pages = 0;
 
-	if (kgem_flush(kgem))
-		return false;
-
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
 		if (bo->exec)
@@ -3740,6 +3737,9 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	if (!num_pages)
 		return true;
 
+	if (kgem_flush(kgem))
+		return false;
+
 	if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem)) {
 		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
 		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
@@ -3765,9 +3765,6 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 {
 	uint32_t size;
 
-	if (kgem_flush(kgem))
-		return false;
-
 	while (bo->proxy)
 		bo = bo->proxy;
 	if (bo->exec) {
@@ -3786,6 +3783,9 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 		return true;
 	}
 
+	if (kgem_flush(kgem))
+		return false;
+
 	if (kgem->nexec >= KGEM_EXEC_SIZE(kgem) - 1)
 		return false;
 
@@ -3820,9 +3820,6 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 	int num_pages = 0;
 	int fenced_size = 0;
 
-	if (kgem_flush(kgem))
-		return false;
-
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
 		while (bo->proxy)
@@ -3860,6 +3857,9 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 	}
 
 	if (num_pages) {
+		if (kgem_flush(kgem))
+			return false;
+
 		if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem))
 			return false;
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 2d04b53..8a3a4fa 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -294,7 +294,7 @@ static inline void kgem_submit(struct kgem *kgem)
 
 static inline bool kgem_flush(struct kgem *kgem)
 {
-	return kgem->flush && kgem_is_idle(kgem);
+	return kgem->flush && list_is_empty(&kgem->requests[kgem->ring]);
 }
 
 static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
commit 65a8c23ca1bc8e2ebd087027a30358704d4bf11c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Dec 7 14:56:18 2012 +0000

    sna: Only flush at the low aperture watermark if idle
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bf457ce..2138f1a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3789,7 +3789,7 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 	if (kgem->nexec >= KGEM_EXEC_SIZE(kgem) - 1)
 		return false;
 
-	if (kgem->aperture > kgem->aperture_low)
+	if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem))
 		return false;
 
 	if (kgem->aperture + num_pages(bo) > kgem->aperture_high)
@@ -3860,7 +3860,7 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 	}
 
 	if (num_pages) {
-		if (kgem->aperture > kgem->aperture_low)
+		if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem))
 			return false;
 
 		if (num_pages + kgem->aperture > kgem->aperture_high)


More information about the xorg-commit mailing list