xf86-video-intel: 9 commits - configure.ac src/intel_module.c src/sna/kgem.c

Chris Wilson ickle at kemper.freedesktop.org
Wed Feb 6 14:24:39 PST 2013


 configure.ac       |    6 +--
 src/intel_module.c |    2 -
 src/sna/kgem.c     |   94 +++++++++++++++++++++++++++++++++++------------------
 3 files changed, 66 insertions(+), 36 deletions(-)

New commits:
commit 974b6a97d78dadf09be8a2c4f61020f15d80d558
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 17:02:27 2013 +0000

    sna: Fall back to non-LLC paths after an allocation failure for an LLC buffer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a02eb39..d2be862 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -5113,7 +5113,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (kgem->has_llc) {
 		bo = buffer_alloc();
 		if (bo == NULL)
-			return NULL;
+			goto skip_llc;
 
 		old = NULL;
 		if ((flags & KGEM_BUFFER_WRITE) == 0)
@@ -5131,7 +5131,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			uint32_t handle = gem_create(kgem->fd, alloc);
 			if (handle == 0) {
 				free(bo);
-				return NULL;
+				goto skip_llc;
 			}
 			__kgem_bo_init(&bo->base, handle, alloc);
 			DBG(("%s: created LLC handle=%d for buffer\n",
@@ -5153,6 +5153,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			kgem_bo_free(kgem, &bo->base);
 		}
 	}
+skip_llc:
 
 	if (PAGE_SIZE * alloc > kgem->aperture_mappable / 4)
 		flags &= ~KGEM_BUFFER_INPLACE;
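
As a rough illustration of the change (with hypothetical allocator names, not the driver's API): when the preferred LLC-style allocation fails, the goto now skips past that block so the ordinary allocation paths below still run, instead of aborting the whole request with NULL.

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-ins for the LLC and generic buffer allocators */
    static void *alloc_llc_style(size_t size) { (void)size; return NULL; /* simulate failure */ }
    static void *alloc_generic(size_t size)   { return malloc(size); }

    static void *create_buffer(size_t size, int has_llc)
    {
        void *buf;

        if (has_llc) {
            buf = alloc_llc_style(size);
            if (buf == NULL)
                goto skip_llc;      /* previously this was "return NULL" */
            return buf;
        }
    skip_llc:
        return alloc_generic(size);
    }

    int main(void)
    {
        void *buf = create_buffer(4096, 1);
        printf("buffer %s\n", buf ? "allocated via the fallback path" : "allocation failed");
        free(buf);
        return 0;
    }
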
commit 5c8084ef04cb0a7da064fb1e13c8ef7dae528b1b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 16:39:31 2013 +0000

    intel: Be careful not to match UMS against future generations
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index da078f8..141f77a 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -488,7 +488,7 @@ intel_scrn_create(DriverPtr		driver,
 	xf86AddEntityToScreen(scrn, entity_num);
 
 #if !KMS_ONLY
-	if (((struct intel_device_info *)match_data)->gen < 020)
+	if ((unsigned)((struct intel_device_info *)match_data)->gen < 020)
 		return lg_i810_init(scrn);
 #endif
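
Why the cast matters, assuming (as the change implies) that the catch-all entry for unknown future chips stores a negative gen such as -1: compared as signed, -1 is less than 020 (octal 16, i.e. gen 2), so the legacy UMS i810 path would wrongly match; cast to unsigned, -1 becomes a huge value and the test fails as intended. A tiny stand-alone check:

    #include <stdio.h>

    int main(void)
    {
        int gen = -1;   /* hypothetical sentinel for an unknown future generation */

        printf("signed:   gen < 020 -> %d\n", gen < 020);           /* 1: would take the UMS path */
        printf("unsigned: gen < 020 -> %d\n", (unsigned)gen < 020); /* 0: correctly skipped */
        return 0;
    }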
 
commit be241fb25ed0a8d41a642ea811253207f88d0962
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 16:38:12 2013 +0000

    sna: Free the handle after pwrite buffer allocation failure
    
    Having just allocated the handle, we need to free it if we then fail to
    allocate memory for the buffer.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index af4f0a8..a02eb39 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -5320,7 +5320,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		old = &bo->base;
 		bo = buffer_alloc_with_data(num_pages(old));
 		if (bo == NULL) {
-			free(old);
+			old->refcnt = 0;
+			kgem_bo_free(kgem, old);
 			return NULL;
 		}
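
The underlying pattern, sketched with hypothetical names rather than the kgem API: once the kernel handle exists, a later allocation failure has to release the handle as well, not just the wrapper struct, otherwise the handle leaks.

    #include <stdlib.h>

    struct handle { int id; };
    struct buffer { struct handle *h; void *mem; };

    /* hypothetical stand-ins for creating/closing the kernel handle */
    static struct handle *handle_create(void) { return calloc(1, sizeof(struct handle)); }
    static void handle_close(struct handle *h) { free(h); }

    static struct buffer *buffer_create(size_t size)
    {
        struct buffer *bo = calloc(1, sizeof(*bo));
        if (bo == NULL)
            return NULL;

        bo->h = handle_create();
        if (bo->h == NULL)
            goto err_bo;

        bo->mem = malloc(size);
        if (bo->mem == NULL)
            goto err_handle;    /* the bug being fixed: the handle must be released too */

        return bo;

    err_handle:
        handle_close(bo->h);
    err_bo:
        free(bo);
        return NULL;
    }

    int main(void)
    {
        struct buffer *bo = buffer_create(4096);
        if (bo) {
            free(bo->mem);
            handle_close(bo->h);
            free(bo);
        }
        return 0;
    }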
 
commit 4b3b25f0be33d3af3ccecfb3193fc2d365445fdf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 16:37:21 2013 +0000

    sna: Flush our caches if we fail to mmap an object
    
    The likely cause for a mmap failure is that we hold too many objects
    open or have exhausted our address space. In both cases, we need to trim
    our caches before continuing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ae7ca10..af4f0a8 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -292,6 +292,11 @@ retry_gtt:
 		if (kgem_expire_cache(kgem))
 			goto retry_gtt;
 
+		if (kgem->need_expire) {
+			kgem_cleanup_cache(kgem);
+			goto retry_gtt;
+		}
+
 		return NULL;
 	}
 
@@ -304,6 +309,11 @@ retry_mmap:
 		if (__kgem_throttle_retire(kgem, 0))
 			goto retry_mmap;
 
+		if (kgem->need_expire) {
+			kgem_cleanup_cache(kgem);
+			goto retry_mmap;
+		}
+
 		ptr = NULL;
 	}
 
@@ -4559,6 +4569,11 @@ retry:
 		if (__kgem_throttle_retire(kgem, 0))
 			goto retry;
 
+		if (kgem->need_expire) {
+			kgem_cleanup_cache(kgem);
+			goto retry;
+		}
+
 		return NULL;
 	}
 
@@ -4594,6 +4609,11 @@ retry:
 		if (__kgem_throttle_retire(kgem, 0))
 			goto retry;
 
+		if (kgem->need_expire) {
+			kgem_cleanup_cache(kgem);
+			goto retry;
+		}
+
 		return NULL;
 	}
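
A compact sketch of the escalating retry added here, with hypothetical helpers standing in for mmap, kgem_expire_cache() and kgem_cleanup_cache() (and assuming the full cleanup clears need_expire, so the loop terminates): on failure, try the cheap expiry first and retry; only if objects are still held does the expensive full cleanup run before one more attempt.

    #include <stdbool.h>
    #include <stddef.h>

    struct cache { bool need_expire; };

    /* hypothetical stand-ins for mmap and the two cache-trimming helpers */
    static void *try_map(struct cache *c)       { (void)c; return NULL; }
    static bool  expire_cache(struct cache *c)  { (void)c; return false; }
    static void  cleanup_cache(struct cache *c) { c->need_expire = false; }

    static void *map_object(struct cache *c)
    {
        void *ptr;

    retry:
        ptr = try_map(c);
        if (ptr == NULL) {
            if (expire_cache(c))        /* cheap: drop expired entries, then retry */
                goto retry;

            if (c->need_expire) {       /* expensive: drop everything, then retry once */
                cleanup_cache(c);
                goto retry;
            }
        }
        return ptr;
    }

    int main(void)
    {
        struct cache c = { .need_expire = true };
        return map_object(&c) == NULL ? 0 : 1;
    }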
 
commit daba1ae3e7f0532cc53d9a5178778dbaec203052
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 16:17:36 2013 +0000

    sna: Correctly handle failure to CPU map a new allocation
    
    If we fail to CPU map, we want to fall back to just using pwrite with
    normal memory.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4fbabba..ae7ca10 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -5290,9 +5290,10 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 		if (flags & KGEM_BUFFER_WRITE) {
 			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-			if (bo->mem != NULL)
+			if (bo->mem != NULL) {
 				kgem_bo_sync__cpu(kgem, &bo->base);
-			goto init;
+				goto init;
+			}
 		}
 
 		DBG(("%s: falling back to new pwrite buffer\n", __FUNCTION__));
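
The effect of the added braces, sketched with stand-in helpers rather than the kgem functions: the mapped path is taken only when the CPU map actually succeeded, so the pwrite fallback below it is reachable again; previously the goto ran unconditionally and a failed map fell through with no backing memory.

    #include <stdio.h>
    #include <stdbool.h>

    /* hypothetical stand-in for the CPU map: NULL models a map failure */
    static void *map_cpu(bool ok) { static char page[4096]; return ok ? page : NULL; }

    static const char *choose_upload(bool map_ok)
    {
        void *mem = map_cpu(map_ok);

        if (mem != NULL) {
            /* the sync and mapped write path would go here */
            return "write through the CPU map";
        }

        return "fall back to a pwrite buffer";   /* skipped by the old unbraced goto */
    }

    int main(void)
    {
        printf("%s\n", choose_upload(true));
        printf("%s\n", choose_upload(false));
        return 0;
    }
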
commit 0adb0b5e1ebcf3ddfeddae99d96912ec4c090832
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 16:02:30 2013 +0000

    sna: Handle mapped buffer allocation failure for LLC
    
    The presumption was that, with LLC, we would have allocated the buffer
    by that point; however, it was remotely possible to have fallen through,
    so we need to handle those cases.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1ed007c..4fbabba 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4909,8 +4909,6 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	struct kgem_buffer *bo;
 	uint32_t handle;
 
-	assert(!kgem->has_llc);
-
 	if (kgem->has_cacheing) {
 		struct kgem_bo *old;
 
@@ -5228,7 +5226,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if ((flags & KGEM_BUFFER_WRITE_INPLACE) != KGEM_BUFFER_WRITE_INPLACE) {
 			bo = create_snoopable_buffer(kgem, alloc);
 			if (bo) {
-				flags &= ~KGEM_BUFFER_INPLACE;
+				assert((flags & KGEM_BUFFER_INPLACE) == 0);
 				goto init;
 			}
 		}
commit f4cff22afae598f41adf36cd149223d1f7dd6b6e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 15:15:36 2013 +0000

    sna: Relax the buffer size assertion to only be larger than required
    
    Not all paths request alloc pages; a few request only enough pages for
    the original size, so we can only assert that the weaker condition is
    satisfied.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2b0723b..1ed007c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -5316,7 +5316,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 init:
 	bo->base.io = true;
 	assert(bo->base.refcnt == 1);
-	assert(num_pages(&bo->base) >= alloc);
 	assert(num_pages(&bo->base) >= NUM_PAGES(size));
 	assert(!bo->need_io || !bo->base.needs_flush);
 	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
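
To illustrate the relaxed invariant, assuming NUM_PAGES() rounds the byte size up to whole pages: every path provides at least enough pages for the requested size, but only some request the larger bucketed alloc count, so only the former can be asserted unconditionally.

    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define NUM_PAGES(v) (((v) + PAGE_SIZE - 1) / PAGE_SIZE)   /* assumed: round up to whole pages */

    int main(void)
    {
        unsigned size  = 12345;              /* bytes actually requested */
        unsigned alloc = 8;                  /* pages a bucketed path might prefer */
        unsigned pages = NUM_PAGES(size);    /* a minimal path provides just 4 pages */

        assert(pages >= NUM_PAGES(size));    /* holds on every allocation path */
        /* assert(pages >= alloc);              would trip on the minimal path */

        printf("size=%u bytes -> %u pages (bucketed alloc would be %u pages)\n",
               size, pages, alloc);
        return 0;
    }
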
commit 8bc593c732a2f1ccd1bdabc071c709a44222db61
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 15:11:00 2013 +0000

    sna: Make sure we always replace io buffers before inserting into the cache
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1086c3f..2b0723b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1527,6 +1527,32 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	}
 }
 
+static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
+{
+	struct kgem_bo *base;
+
+	if (!bo->io)
+		return bo;
+
+	assert(!bo->snoop);
+	base = malloc(sizeof(*base));
+	if (base) {
+		DBG(("%s: transferring io handle=%d to bo\n",
+		     __FUNCTION__, bo->handle));
+		/* transfer the handle to a minimum bo */
+		memcpy(base, bo, sizeof(*base));
+		base->io = false;
+		list_init(&base->list);
+		list_replace(&bo->request, &base->request);
+		list_replace(&bo->vma, &base->vma);
+		free(bo);
+		bo = base;
+	} else
+		bo->reusable = false;
+
+	return bo;
+}
+
 inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
 						struct kgem_bo *bo)
 {
@@ -1593,6 +1619,8 @@ static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->exec == NULL);
 	assert(bo->scanout);
 	assert(bo->delta);
+	assert(!bo->snoop);
+	assert(!bo->io);
 
 	DBG(("%s: moving %d [fb %d] to scanout cache\n", __FUNCTION__,
 	     bo->handle, bo->delta));
@@ -1703,31 +1731,13 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		return;
 	}
 
-	if (bo->io) {
-		struct kgem_bo *base;
-
-		assert(!bo->snoop);
-		base = malloc(sizeof(*base));
-		if (base) {
-			DBG(("%s: transferring io handle=%d to bo\n",
-			     __FUNCTION__, bo->handle));
-			/* transfer the handle to a minimum bo */
-			memcpy(base, bo, sizeof(*base));
-			base->io = false;
-			list_init(&base->list);
-			list_replace(&bo->request, &base->request);
-			list_replace(&bo->vma, &base->vma);
-			free(bo);
-			bo = base;
-		} else
-			bo->reusable = false;
-	}
-
 	if (bo->scanout) {
 		kgem_bo_move_to_scanout(kgem, bo);
 		return;
 	}
 
+	if (bo->io)
+		bo = kgem_bo_replace_io(bo);
 	if (!bo->reusable) {
 		DBG(("%s: handle=%d, not reusable\n",
 		     __FUNCTION__, bo->handle));
@@ -1860,7 +1870,8 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 			kgem_bo_move_to_snoop(kgem, bo);
 		} else if (bo->scanout) {
 			kgem_bo_move_to_scanout(kgem, bo);
-		} else if (bo->reusable && kgem_bo_set_purgeable(kgem, bo)) {
+		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
+			   kgem_bo_set_purgeable(kgem, bo)) {
 			kgem_bo_move_to_inactive(kgem, bo);
 			retired = true;
 		} else
@@ -1920,7 +1931,8 @@ static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
 			kgem_bo_move_to_snoop(kgem, bo);
 		} else if (bo->scanout) {
 			kgem_bo_move_to_scanout(kgem, bo);
-		} else if (bo->reusable && kgem_bo_set_purgeable(kgem, bo)) {
+		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
+			   kgem_bo_set_purgeable(kgem, bo)) {
 			kgem_bo_move_to_inactive(kgem, bo);
 			retired = true;
 		} else {
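
The shape of the refactor, reduced to hypothetical types rather than the kgem structures: the io conversion lives in a single helper and is applied on every path that can move a buffer into the cache, including the retire paths above, so no path can cache an io buffer unconverted.

    #include <stdbool.h>
    #include <stdio.h>

    struct obj { bool io; bool reusable; };

    /* hypothetical stand-in for the io-replacement helper */
    static struct obj *replace_io(struct obj *o)
    {
        if (o->io)
            o->io = false;  /* the real helper transfers the handle to a minimal bo */
        return o;
    }

    static void move_to_inactive(struct obj *o)
    {
        printf("cached: io=%d reusable=%d\n", o->io, o->reusable);
    }

    static void retire(struct obj *o)
    {
        /* same shape as the retire paths in the hunks above */
        if ((o = replace_io(o))->reusable)
            move_to_inactive(o);
    }

    int main(void)
    {
        struct obj o = { .io = true, .reusable = true };
        retire(&o);
        return 0;
    }
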
commit 5f72158919098dd5684d1c56d1ba643cc3be2c7d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 6 15:10:23 2013 +0000

    configure: XvMC support is optional, so make failure to find xcb non-fatal

diff --git a/configure.ac b/configure.ac
index 5ae4208..13a4b46 100644
--- a/configure.ac
+++ b/configure.ac
@@ -403,10 +403,8 @@ AM_CONDITIONAL(DRI2, test "x$DRI2" = xyes)
 AC_MSG_RESULT([$DRI2])
 
 if test "$XVMC" = yes; then
-	PKG_CHECK_MODULES(XVMCLIB,
-			  [xvmc dri2proto],
-			  [XVMC=yes], [XVMC=no])
-	PKG_CHECK_MODULES(XCB, [x11-xcb xcb-dri2 xcb-aux])
+	PKG_CHECK_MODULES(XVMCLIB, [xvmc dri2proto], [], [XVMC=no])
+	PKG_CHECK_MODULES(XCB, [x11-xcb xcb-dri2 xcb-aux], [], [XVMC=no])
 fi
 AC_MSG_CHECKING([whether to include XvMC support])
 AC_MSG_RESULT([$XVMC])

