xf86-video-intel: 3 commits - src/sna/kgem.c

Chris Wilson ickle at kemper.freedesktop.org
Fri Aug 7 02:54:41 PDT 2015


 src/sna/kgem.c |  215 ++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 129 insertions(+), 86 deletions(-)

New commits:
commit ca71199679cac9cc161c84cb09d12f133abf8a64
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 7 10:40:30 2015 +0100

    sna: Harden batch allocation against resource starvation
    
    If the batch allocation fails, retire the oldest request and try again.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=91577
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 251c9cc..44083b4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1689,6 +1689,36 @@ static void kgem_fixup_relocs(struct kgem *kgem, struct kgem_bo *bo, int shrink)
 	}
 }
 
+static int kgem_bo_wait(struct kgem *kgem, struct kgem_bo *bo)
+{
+	struct drm_i915_gem_wait wait;
+	int ret;
+
+	if (bo->rq == NULL)
+		return 0;
+
+	VG_CLEAR(wait);
+	wait.bo_handle = bo->handle;
+	wait.timeout_ns = -1;
+	ret = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+	if (ret) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+		ret = do_ioctl(kgem->fd,
+			       DRM_IOCTL_I915_GEM_SET_DOMAIN,
+			       &set_domain);
+	}
+
+	if (ret == 0)
+		__kgem_retire_requests_upto(kgem, bo);
+
+	return ret;
+}
+
 static struct kgem_bo *kgem_new_batch(struct kgem *kgem)
 {
 	struct kgem_bo *last;
@@ -1709,6 +1739,7 @@ static struct kgem_bo *kgem_new_batch(struct kgem *kgem)
 	if (!kgem->has_llc)
 		flags |= CREATE_UNCACHED;
 
+restart:
 	kgem->batch_bo = kgem_create_linear(kgem,
 					    sizeof(uint32_t)*kgem->batch_size,
 					    flags);
@@ -1723,6 +1754,15 @@ static struct kgem_bo *kgem_new_batch(struct kgem *kgem)
 			kgem->batch_bo = NULL;
 		}
 
+		if (!list_is_empty(&kgem->requests[kgem->ring])) {
+			struct kgem_request *rq;
+
+			rq = list_first_entry(&kgem->requests[kgem->ring],
+					      struct kgem_request, list);
+			if (kgem_bo_wait(kgem, rq->bo) == 0)
+				goto restart;
+		}
+
 		if (posix_memalign((void **)&kgem->batch, PAGE_SIZE,
 				   ALIGN(sizeof(uint32_t) * kgem->batch_size, PAGE_SIZE))) {
 			ERR(("%s: batch allocation failed, disabling acceleration\n", __FUNCTION__));
@@ -3777,24 +3817,14 @@ out_16384:
 		}
 
 		if (size < 16384) {
-			struct drm_i915_gem_set_domain set_domain;
-
 			bo = list_first_entry(&kgem->pinned_batches[size > 4096],
 					      struct kgem_bo,
 					      list);
 			list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);
 
 			DBG(("%s: syncing due to busy batches\n", __FUNCTION__));
-
-			VG_CLEAR(set_domain);
-			set_domain.handle = bo->handle;
-			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-			if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
-				DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
-				kgem_throttle(kgem);
+			if (kgem_bo_wait(kgem, bo))
 				return NULL;
-			}
 
 			kgem_retire(kgem);
 			assert(bo->rq == NULL);
commit 8c465d0fbf84b1d29c54d620f09063d2b7ccfeb8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 7 10:15:42 2015 +0100

    sna: Fallback after a bo allocation failure for the batch
    
    If we fail to allocate the next bo to use for the next_request, we can
    just fall back to the delayed allocation used by !llc.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91577
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a8b96a4..251c9cc 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3716,12 +3716,10 @@ static int compact_batch_surface(struct kgem *kgem, int *shrink)
 static struct kgem_bo *
 kgem_create_batch(struct kgem *kgem)
 {
-#if !DBG_NO_SHRINK_BATCHES
-	struct drm_i915_gem_set_domain set_domain;
 	struct kgem_bo *bo;
-	int shrink = 0;
-	int size;
+	int size, shrink = 0;
 
+#if !DBG_NO_SHRINK_BATCHES
 	if (kgem->surface != kgem->batch_size)
 		size = compact_batch_surface(kgem, &shrink);
 	else
@@ -3779,6 +3777,8 @@ out_16384:
 		}
 
 		if (size < 16384) {
+			struct drm_i915_gem_set_domain set_domain;
+
 			bo = list_first_entry(&kgem->pinned_batches[size > 4096],
 					      struct kgem_bo,
 					      list);
@@ -3802,8 +3802,14 @@ out_16384:
 			goto write;
 		}
 	}
+#else
+	if (kgem->surface != kgem->batch_size)
+		size = kgem->batch_size * sizeof(uint32_t);
+	else
+		size = kgem->nbatch * sizeof(uint32_t);
+#endif
 
-	if (!kgem->has_llc) {
+	if (!kgem->batch_bo) {
 		bo = kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
 		if (bo) {
 write:
@@ -3815,7 +3821,7 @@ write:
 			return bo;
 		}
 	}
-#endif
+
 	return kgem_new_batch(kgem);
 }
 
commit 69d8edc11173df021aa2e158b2530257113141fd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 7 10:08:17 2015 +0100

    sna: Handle batch allocation failure
    
    Whilst we currently do not try to submit a failed batch buffer
    allocation, we still treat it as a valid request. This explodes much
    later when we inspect the NULL rq->bo.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=91577
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index dc54a53..a8b96a4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3912,6 +3912,7 @@ void _kgem_submit(struct kgem *kgem)
 {
 	struct kgem_request *rq;
 	uint32_t batch_end;
+	int i, ret;
 
 	assert(!DBG_NO_HW);
 	assert(!kgem->wedged);
@@ -3946,7 +3947,6 @@ void _kgem_submit(struct kgem *kgem)
 	rq->bo = kgem_create_batch(kgem);
 	if (rq->bo) {
 		struct drm_i915_gem_execbuffer2 execbuf;
-		int i, ret;
 
 		assert(!rq->bo->needs_flush);
 
@@ -3984,90 +3984,97 @@ void _kgem_submit(struct kgem *kgem)
 		}
 
 		ret = do_execbuf(kgem, &execbuf);
-		if (DEBUG_SYNC && ret == 0) {
-			struct drm_i915_gem_set_domain set_domain;
-
-			VG_CLEAR(set_domain);
-			set_domain.handle = rq->bo->handle;
-			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
-			ret = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+	} else
+		ret = -ENOMEM;
+
+	if (ret < 0) {
+		kgem_throttle(kgem);
+		if (!kgem->wedged) {
+			xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
+				   "Failed to submit rendering commands (%s), disabling acceleration.\n",
+				   strerror(-ret));
+			__kgem_set_wedged(kgem);
 		}
-		if (ret < 0) {
-			kgem_throttle(kgem);
-			if (!kgem->wedged) {
-				xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
-					   "Failed to submit rendering commands, disabling acceleration.\n");
-				__kgem_set_wedged(kgem);
-			}
 
 #if !NDEBUG
-			ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d, fenced=%d, high=%d,%d: errno=%d\n",
-			       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
-			       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, kgem->aperture_fenced, kgem->aperture_high, kgem->aperture_total, -ret);
+		ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d, fenced=%d, high=%d,%d: errno=%d\n",
+		       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
+		       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, kgem->aperture_fenced, kgem->aperture_high, kgem->aperture_total, -ret);
 
-			for (i = 0; i < kgem->nexec; i++) {
-				struct kgem_bo *bo, *found = NULL;
+		for (i = 0; i < kgem->nexec; i++) {
+			struct kgem_bo *bo, *found = NULL;
 
-				list_for_each_entry(bo, &kgem->next_request->buffers, request) {
-					if (bo->handle == kgem->exec[i].handle) {
-						found = bo;
-						break;
-					}
+			list_for_each_entry(bo, &kgem->next_request->buffers, request) {
+				if (bo->handle == kgem->exec[i].handle) {
+					found = bo;
+					break;
 				}
-				ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
-				       i,
-				       kgem->exec[i].handle,
-				       (int)kgem->exec[i].offset,
-				       found ? kgem_bo_size(found) : -1,
-				       found ? found->tiling : -1,
-				       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
-				       found ? found->snoop : -1,
-				       found ? found->purged : -1);
 			}
-			for (i = 0; i < kgem->nreloc; i++) {
-				ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
-				       i,
-				       (int)kgem->reloc[i].offset,
-				       kgem->reloc[i].target_handle,
-				       kgem->reloc[i].delta,
-				       kgem->reloc[i].read_domains,
-				       kgem->reloc[i].write_domain,
-				       (int)kgem->reloc[i].presumed_offset);
+			ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
+			       i,
+			       kgem->exec[i].handle,
+			       (int)kgem->exec[i].offset,
+			       found ? kgem_bo_size(found) : -1,
+			       found ? found->tiling : -1,
+			       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
+			       found ? found->snoop : -1,
+			       found ? found->purged : -1);
+		}
+		for (i = 0; i < kgem->nreloc; i++) {
+			ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
+			       i,
+			       (int)kgem->reloc[i].offset,
+			       kgem->reloc[i].target_handle,
+			       kgem->reloc[i].delta,
+			       kgem->reloc[i].read_domains,
+			       kgem->reloc[i].write_domain,
+			       (int)kgem->reloc[i].presumed_offset);
+		}
+
+		{
+			struct drm_i915_gem_get_aperture aperture;
+			if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
+				ErrorF("Aperture size %lld, available %lld\n",
+				       (long long)aperture.aper_size,
+				       (long long)aperture.aper_available_size);
+		}
+
+		if (ret == -ENOSPC)
+			dump_gtt_info(kgem);
+		if (ret == -EDEADLK)
+			dump_fence_regs(kgem);
+
+		if (DEBUG_SYNC) {
+			int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
+			if (fd != -1) {
+				int ignored = write(fd, kgem->batch, batch_end*sizeof(uint32_t));
+				assert(ignored == batch_end*sizeof(uint32_t));
+				close(fd);
 			}
 
-			{
-				struct drm_i915_gem_get_aperture aperture;
-				if (do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
-					ErrorF("Aperture size %lld, available %lld\n",
-					       (long long)aperture.aper_size,
-					       (long long)aperture.aper_available_size);
-			}
+			FatalError("SNA: failed to submit batchbuffer, errno=%d\n", -ret);
+		}
+#endif
+	} else {
+		if (DEBUG_SYNC) {
+			struct drm_i915_gem_set_domain set_domain;
 
-			if (ret == -ENOSPC)
-				dump_gtt_info(kgem);
-			if (ret == -EDEADLK)
-				dump_fence_regs(kgem);
-
-			if (DEBUG_SYNC) {
-				int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
-				if (fd != -1) {
-					int ignored = write(fd, kgem->batch, batch_end*sizeof(uint32_t));
-					assert(ignored == batch_end*sizeof(uint32_t));
-					close(fd);
-				}
+			VG_CLEAR(set_domain);
+			set_domain.handle = rq->bo->handle;
+			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
 
-				FatalError("SNA: failed to submit batchbuffer, errno=%d\n", -ret);
-			}
-#endif
+			ret = do_ioctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 		}
-	}
+
 #if SHOW_BATCH_AFTER
-	if (gem_read(kgem->fd, rq->bo->handle, kgem->batch, 0, batch_end*sizeof(uint32_t)) == 0)
-		__kgem_batch_debug(kgem, batch_end);
+		if (gem_read(kgem->fd, rq->bo->handle, kgem->batch, 0, batch_end*sizeof(uint32_t)) == 0)
+			__kgem_batch_debug(kgem, batch_end);
 #endif
-	kgem_commit(kgem);
+
+		kgem_commit(kgem);
+	}
+
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
 


More information about the xorg-commit mailing list