xf86-video-intel: src/sna/kgem.c

Chris Wilson ickle at kemper.freedesktop.org
Thu Nov 12 01:26:10 PST 2015


 src/sna/kgem.c |   22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

New commits:
commit 7490b9ec263b87b3669096579ec0f0066ec328cb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Nov 12 09:17:17 2015 +0000

    sna: Wait upon the same ring when out-of-memory
    
    The out-of-memory allocation code was waiting upon the wrong ring:
    it indexed the request lists with the raw ring id instead of the
    request-list index - causing an issue if forced to wait upon the
    BLT ring. If we still cannot allocate after waiting, make sure that
    all caches are dropped before falling back to system memory.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=92911
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
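
For readers outside the driver: kgem keeps outstanding requests in a
two-element array, one list for the render ring and one for the BLT
ring, while the ring ids come from a wider enum, so the raw id must be
collapsed to a 0/1 index before use. A standalone sketch of the
distinction, assuming the enum ordering from kgem.h (the stub struct
and main() harness are invented for illustration):

/* Standalone illustration, not driver source: the enum names follow
 * kgem.h, but struct kgem_stub and main() are invented here. */
#include <assert.h>
#include <stdio.h>

enum { KGEM_NONE, KGEM_RENDER, KGEM_BSD, KGEM_BLT };	/* ring ids */

struct kgem_stub {
	unsigned ring;			/* ring id, so KGEM_BLT == 3 */
	const char *requests[2];	/* [0] render list, [1] BLT list */
};

int main(void)
{
	struct kgem_stub kgem = {
		.ring = KGEM_BLT,
		.requests = { "render requests", "blt requests" },
	};

	/* Buggy form: kgem.requests[kgem.ring] indexes with the ring id
	 * (3 for KGEM_BLT), reading past the two-element array. */

	/* Fixed form: collapse the id to the 0/1 request-list index. */
	int ring = kgem.ring == KGEM_BLT;
	assert(ring < 2);
	printf("ring id %u -> %s\n", kgem.ring, kgem.requests[ring]);
	return 0;
}

With the buggy form, a render-ring wait would consult the BLT list and
a BLT-ring wait would index past the array, which is the failure the
relocated assert would have tripped on in debug builds.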

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bc790c0..6d6e76a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1769,21 +1769,29 @@ restart:
 	if (kgem->batch_bo)
 		kgem->batch = kgem_bo_map__cpu(kgem, kgem->batch_bo);
 	if (kgem->batch == NULL) {
+		int ring = kgem->ring == KGEM_BLT;
+		assert(ring < ARRAY_SIZE(kgem->requests));
+
 		if (kgem->batch_bo) {
 			kgem_bo_destroy(kgem, kgem->batch_bo);
 			kgem->batch_bo = NULL;
 		}
 
-		assert(kgem->ring < ARRAY_SIZE(kgem->requests));
-		if (!list_is_empty(&kgem->requests[kgem->ring])) {
+		if (!list_is_empty(&kgem->requests[ring])) {
 			struct kgem_request *rq;
 
-			rq = list_first_entry(&kgem->requests[kgem->ring],
+			rq = list_first_entry(&kgem->requests[ring],
 					      struct kgem_request, list);
+			assert(rq->ring == ring);
+			assert(rq->bo);
+			assert(RQ(rq->bo->rq) == rq);
 			if (kgem_bo_wait(kgem, rq->bo) == 0)
 				goto restart;
 		}
 
+		if (kgem_cleanup_cache(kgem))
+			goto restart;
+
 		DBG(("%s: unable to map batch bo, mallocing(size=%d)\n",
 		     __FUNCTION__, sizeof(uint32_t)*kgem->batch_size));
 		if (posix_memalign((void **)&kgem->batch, PAGE_SIZE,
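
The hunk above turns batch allocation into a three-stage retry: wait
for the oldest request on the same (now correctly indexed) ring, then
drop every cache via the new kgem_cleanup_cache() call, and only then
fall back to plain system memory. A compressed standalone sketch of
that control flow (the stub returns and the simulated failure count
are invented; only the posix_memalign() fallback mirrors the driver):

/* Standalone sketch of the retry chain; the stubs and the simulated
 * failure count are hypothetical, not driver code. */
#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE  4096
#define BATCH_SIZE 1024		/* stand-in for kgem->batch_size */

static int map_attempts;

/* Pretend the CPU mapping only succeeds once enough has been freed. */
static void *try_map_batch(void)
{
	if (++map_attempts > 2)
		return malloc(BATCH_SIZE * sizeof(uint32_t));
	return NULL;
}

static int wait_for_oldest_request(void) { return 0; } /* nothing retired */
static int cleanup_cache(void) { return 1; }	/* freed something, retry */

static void *alloc_batch(void)
{
	void *batch;
restart:
	if ((batch = try_map_batch()))
		return batch;
	if (wait_for_oldest_request())	/* 1: wait upon the same ring */
		goto restart;
	if (cleanup_cache())		/* 2: drop all caches */
		goto restart;
	/* 3: last resort, page-aligned system memory */
	if (posix_memalign(&batch, PAGE_SIZE,
			   BATCH_SIZE * sizeof(uint32_t)))
		batch = NULL;
	return batch;
}

int main(void)
{
	void *batch = alloc_batch();
	printf("batch=%p after %d mapping attempts\n", batch, map_attempts);
	free(batch);
	return 0;
}

In the driver the restart path also recreates and re-maps batch_bo via
kgem_bo_map__cpu(); the stub collapses that into try_map_batch(), and
the real kgem_cleanup_cache() returns whether anything was freed, so
the retry loop terminates once the caches are empty.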
@@ -3159,6 +3167,8 @@ static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
 				      struct kgem_request,
 				      list);
 		assert(rq->ring == ring);
+		assert(rq->bo);
+		assert(RQ(rq->bo->rq) == rq);
 		if (__kgem_busy(kgem, rq->bo->handle))
 			break;
 
@@ -3252,6 +3262,8 @@ bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
 	rq = list_last_entry(&kgem->requests[ring],
 			     struct kgem_request, list);
 	assert(rq->ring == ring);
+	assert(rq->bo);
+	assert(RQ(rq->bo->rq) == rq);
 	if (__kgem_busy(kgem, rq->bo->handle)) {
 		DBG(("%s: last requests handle=%d still busy\n",
 		     __FUNCTION__, rq->bo->handle));
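
Most of the remaining hunks add the same pair of sanity checks before
dereferencing rq->bo: the request must still own its batch bo, and the
bo's back pointer must round-trip through RQ() to the request itself.
kgem stores that back pointer, bo->rq, as a tagged pointer with the
ring folded into the low bits, and RQ() strips the tag. A standalone
sketch, assuming the ~3 mask of the driver's RQ() macro (the stub type
and main() harness are invented):

/* Sketch of the tagged-pointer check behind the new asserts.  The ~3
 * mask follows the driver's RQ() macro; everything else is invented. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct kgem_request { int ring; };

#define RQ(rq)      ((struct kgem_request *)((uintptr_t)(rq) & ~(uintptr_t)3))
#define RQ_RING(rq) ((unsigned)((uintptr_t)(rq) & 3))

int main(void)
{
	static struct kgem_request req = { .ring = 1 };
	unsigned tag = 3;	/* e.g. a BLT ring id in the low bits */

	/* What a bo's rq field would hold: 4-byte alignment keeps the
	 * two low bits free for the tag. */
	void *tagged = (void *)((uintptr_t)&req | tag);

	assert(RQ(tagged) == &req);	/* the shape of the new asserts */
	assert(RQ_RING(tagged) == tag);
	printf("rq=%p ring tag=%u\n", (void *)RQ(tagged), RQ_RING(tagged));
	return 0;
}

If a bo were left pointing at a stale or foreign request, RQ() would
no longer return the request at the head of the list, so these asserts
catch request-list corruption early in debug builds.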
@@ -3419,6 +3431,7 @@ static void kgem_commit(struct kgem *kgem)
 	} else {
 		assert(rq != (struct kgem_request *)kgem);
 		assert(rq->ring < ARRAY_SIZE(kgem->requests));
+		assert(rq->bo);
 		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
 		kgem->need_throttle = kgem->need_retire = 1;
 
@@ -4442,6 +4455,9 @@ bool kgem_cleanup_cache(struct kgem *kgem)
 					     list);
 
 			DBG(("%s: sync on cleanup\n", __FUNCTION__));
+			assert(rq->ring == n);
+			assert(rq->bo);
+			assert(RQ(rq->bo->rq) == rq);
 			kgem_bo_wait(kgem, rq->bo);
 		}
 		assert(list_is_empty(&kgem->requests[n]));

