xf86-video-intel: src/sna/kgem.c

Chris Wilson ickle at kemper.freedesktop.org
Fri Nov 6 12:33:56 PST 2015


 src/sna/kgem.c |   60 +++++++++++++++++++++++++++++----------------------------
 1 file changed, 31 insertions(+), 29 deletions(-)

New commits:
commit 198246201fe9a07a60b4e1084dcf9ba2e06b5ef5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 6 19:52:24 2015 +0000

    sna: Avoid handing back a cached pinned batch
    
    A few places hold on to the request->bo as a means for checking the
    fence completion. This means that it can have an elevated refcnt and so
    we have to be careful to double check that our cache of batch buffers
    not only are idle, but also not being used by anybody else. For example,
    in the DRI2 code, it can happen that the fence is shared between two
    windows and therefore the second window thinks that its fence is still
    busy as the first issues a new request touching the old fence.
    
    Reported-by: Jan Kundrát
    Reported-by: Ville Syrjälä <ville.syrjala at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 181a49f..7acc69e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3838,6 +3838,32 @@ static int compact_batch_surface(struct kgem *kgem, int *shrink)
 	return size * sizeof(uint32_t);
 }
 
+static struct kgem_bo *first_available(struct kgem *kgem, struct list *list)
+{
+	struct kgem_bo *bo;
+
+	list_for_each_entry(bo, list, list) {
+		assert(bo->refcnt > 0);
+
+		if (bo->rq) {
+			assert(RQ(bo->rq)->bo == bo);
+			if (__kgem_busy(kgem, bo->handle))
+				break;
+
+			__kgem_retire_rq(kgem, RQ(bo->rq));
+			assert(bo->rq == NULL);
+		}
+
+		if (bo->refcnt > 1)
+			continue;
+
+		list_move_tail(&bo->list, list);
+		return kgem_bo_reference(bo);
+	}
+
+	return NULL;
+}
+
 static struct kgem_bo *
 kgem_create_batch(struct kgem *kgem)
 {
@@ -3851,40 +3877,15 @@ kgem_create_batch(struct kgem *kgem)
 		size = kgem->nbatch * sizeof(uint32_t);
 
 	if (size <= 4096) {
-		bo = list_first_entry(&kgem->pinned_batches[0],
-				      struct kgem_bo,
-				      list);
-		if (!bo->rq) {
-out_4096:
-			assert(bo->refcnt > 0);
-			list_move_tail(&bo->list, &kgem->pinned_batches[0]);
-			bo = kgem_bo_reference(bo);
+		bo = first_available(kgem, &kgem->pinned_batches[0]);
+		if (bo)
 			goto write;
-		}
-
-		if (!__kgem_busy(kgem, bo->handle)) {
-			assert(RQ(bo->rq)->bo == bo);
-			__kgem_retire_rq(kgem, RQ(bo->rq));
-			goto out_4096;
-		}
 	}
 
 	if (size <= 16384) {
-		bo = list_first_entry(&kgem->pinned_batches[1],
-				      struct kgem_bo,
-				      list);
-		if (!bo->rq) {
-out_16384:
-			assert(bo->refcnt > 0);
-			list_move_tail(&bo->list, &kgem->pinned_batches[1]);
-			bo = kgem_bo_reference(bo);
+		bo = first_available(kgem, &kgem->pinned_batches[1]);
+		if (bo)
 			goto write;
-		}
-
-		if (!__kgem_busy(kgem, bo->handle)) {
-			__kgem_retire_rq(kgem, RQ(bo->rq));
-			goto out_16384;
-		}
 	}
 
 	if (kgem->gen == 020) {
@@ -4069,6 +4070,7 @@ void _kgem_submit(struct kgem *kgem)
 	if (rq->bo) {
 		struct drm_i915_gem_execbuffer2 execbuf;
 
+		assert(rq->bo->refcnt == 1);
 		assert(!rq->bo->needs_flush);
 
 		i = kgem->nexec++;


More information about the xorg-commit mailing list