xf86-video-intel: 2 commits - src/sna/kgem.c src/sna/sna_accel.c

Chris Wilson ickle at kemper.freedesktop.org
Tue Jul 17 03:23:33 PDT 2012


 src/sna/kgem.c      |  112 +++++++++++++++++++++++++++-------------------------
 src/sna/sna_accel.c |    3 -
 2 files changed, 61 insertions(+), 54 deletions(-)

New commits:
commit ed8c729ed02705fd03be1ab22a94b5aae13567c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 11:21:30 2012 +0100

    sna: Catch the short-circuit path for clearing the clear flag on move-to-gpu as well
    
    I thought the short-circuit path was only taken when the pixmap was
    already clear; I was wrong.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
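
The diff below moves the active: label above the reset of priv->clear, so
the short-circuit path (a goto active) now also drops the clear flag on
writes. A minimal standalone sketch of that control-flow change; the names
and the flag value are hypothetical stand-ins, not the driver's real
definitions from sna_accel.c:

    #include <stdbool.h>

    #define MOVE_WRITE 0x2              /* assumed value, for illustration */

    struct pixmap_priv { bool clear; };

    static int move_to_gpu(struct pixmap_priv *priv, unsigned flags,
                           bool short_circuit)
    {
            if (short_circuit)
                    goto active;        /* previously jumped past the reset */

            /* ... full migration path elided ... */

    active:
            if (flags & MOVE_WRITE)
                    priv->clear = false;        /* now runs on both paths */
            return 0;
    }

    int main(void)
    {
            struct pixmap_priv priv = { .clear = true };
            move_to_gpu(&priv, MOVE_WRITE, true);   /* short-circuit path */
            return priv.clear;                      /* 0: flag was reset */
    }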

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c4b6aba..ebf7a23 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2766,9 +2766,10 @@ done:
 			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
+
+active:
 	if (flags & MOVE_WRITE)
 		priv->clear = false;
-active:
 	assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
 	return sna_pixmap_mark_active(sna, priv);
 }
commit 359b9cc82de13b0ac89692896ac6104ff3be308b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 10:26:27 2012 +0100

    sna: Limit the use of snoopable buffers to read/write uploads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
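
The rewritten use_snoopable_buffer() below uses the all-bits-set test,
(flags & MASK) == MASK, so only buffers requesting both WRITE and INPLACE
qualify unconditionally; pure reads remain limited to gen3 and later, and a
plain write no longer qualifies at all. A self-contained sketch of that
predicate logic; the flag values, and the assumption that WRITE_INPLACE is
the union WRITE | INPLACE, are illustrative rather than taken from kgem.h:

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical flag values, for illustration only */
    #define BUF_WRITE         (1u << 0)
    #define BUF_INPLACE       (1u << 1)
    #define BUF_WRITE_INPLACE (BUF_WRITE | BUF_INPLACE)

    static bool use_snoopable(unsigned gen, unsigned flags)
    {
            /* both bits of the composite mask must be set */
            if ((flags & BUF_WRITE_INPLACE) == BUF_WRITE_INPLACE)
                    return true;
            /* pure reads: snoopable only pays off on gen3 and later */
            if ((flags & BUF_WRITE) == 0)
                    return gen >= 30;
            /* a plain write without INPLACE no longer qualifies */
            return false;
    }

    int main(void)
    {
            printf("%d\n", use_snoopable(30, BUF_WRITE));         /* 0 */
            printf("%d\n", use_snoopable(30, BUF_WRITE_INPLACE)); /* 1 */
            printf("%d\n", use_snoopable(20, 0));                 /* 0 */
            printf("%d\n", use_snoopable(30, 0));                 /* 1 */
            return 0;
    }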

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d30b8e7..4c6ca57 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3830,10 +3830,13 @@ use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
 {
 	assert(kgem->gen != 40);
 
-	if (kgem->gen < 30)
-		return flags & KGEM_BUFFER_WRITE;
+	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE)
+		return true;
 
-	return true;
+	if ((flags & KGEM_BUFFER_WRITE) == 0)
+		return kgem->gen >= 30;
+
+	return false;
 }
 
 struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
@@ -4077,71 +4080,74 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	/* Be more parsimonious with pwrite/pread buffers */
 	if ((flags & KGEM_BUFFER_INPLACE) == 0)
 		alloc = NUM_PAGES(size);
-	flags &= ~KGEM_BUFFER_INPLACE;
 
-	if (kgem->has_cache_level && use_snoopable_buffer(kgem, flags)) {
-		uint32_t handle;
+	if (use_snoopable_buffer(kgem, flags)) {
+		if (kgem->has_cache_level) {
+			uint32_t handle;
 
-		handle = gem_create(kgem->fd, alloc);
-		if (handle == 0)
-			return NULL;
+			handle = gem_create(kgem->fd, alloc);
+			if (handle == 0)
+				return NULL;
 
-		if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
-			gem_close(kgem->fd, handle);
-			return NULL;
-		}
+			if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
+				gem_close(kgem->fd, handle);
+				return NULL;
+			}
 
-		bo = malloc(sizeof(*bo));
-		if (bo == NULL) {
-			gem_close(kgem->fd, handle);
-			return NULL;
-		}
+			bo = malloc(sizeof(*bo));
+			if (bo == NULL) {
+				gem_close(kgem->fd, handle);
+				return NULL;
+			}
 
-		debug_alloc(kgem, alloc);
-		__kgem_bo_init(&bo->base, handle, alloc);
-		DBG(("%s: created handle=%d for buffer\n",
-		     __FUNCTION__, bo->base.handle));
+			debug_alloc(kgem, alloc);
+			__kgem_bo_init(&bo->base, handle, alloc);
+			DBG(("%s: created handle=%d for buffer\n",
+			     __FUNCTION__, bo->base.handle));
 
-		bo->base.reusable = false;
-		bo->base.vmap = true;
+			bo->base.reusable = false;
+			bo->base.vmap = true;
 
-		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem) {
-			bo->mmapped = true;
-			bo->need_io = false;
-			bo->base.io = true;
-			goto init;
-		} else {
-			bo->base.refcnt = 0; /* for valgrind */
-			kgem_bo_free(kgem, &bo->base);
-			bo = NULL;
+			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+			if (bo->mem) {
+				bo->mmapped = true;
+				bo->need_io = false;
+				bo->base.io = true;
+				goto init;
+			} else {
+				bo->base.refcnt = 0; /* for valgrind */
+				kgem_bo_free(kgem, &bo->base);
+				bo = NULL;
+			}
 		}
-	}
 
-	if (kgem->has_vmap && use_snoopable_buffer(kgem, flags)) {
-		bo = partial_bo_alloc(alloc);
-		if (bo) {
-			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
-						   alloc * PAGE_SIZE, false);
-			if (handle == 0 ||
-			    !__kgem_bo_init(&bo->base, handle, alloc)) {
-				free(bo);
-				bo = NULL;
-			} else {
-				DBG(("%s: created vmap handle=%d for buffer\n",
-				     __FUNCTION__, bo->base.handle));
+		if (kgem->has_vmap) {
+			bo = partial_bo_alloc(alloc);
+			if (bo) {
+				uint32_t handle = gem_vmap(kgem->fd, bo->mem,
+							   alloc * PAGE_SIZE, false);
+				if (handle == 0 ||
+				    !__kgem_bo_init(&bo->base, handle, alloc)) {
+					free(bo);
+					bo = NULL;
+				} else {
+					DBG(("%s: created vmap handle=%d for buffer\n",
+					     __FUNCTION__, bo->base.handle));
 
-				bo->base.io = true;
-				bo->base.vmap = true;
-				bo->base.map = MAKE_VMAP_MAP(bo);
-				bo->mmapped = true;
-				bo->need_io = false;
+					bo->base.io = true;
+					bo->base.vmap = true;
+					bo->base.map = MAKE_VMAP_MAP(bo);
+					bo->mmapped = true;
+					bo->need_io = false;
 
-				goto init;
+					goto init;
+				}
 			}
 		}
 	}
 
+	flags &= ~KGEM_BUFFER_INPLACE;
+
 	old = NULL;
 	if ((flags & KGEM_BUFFER_WRITE) == 0)
 		old = search_linear_cache(kgem, alloc, 0);

