xf86-video-intel: src/sna/gen4_render.c src/sna/kgem.c src/sna/kgem.h

Chris Wilson ickle at kemper.freedesktop.org
Mon Jul 16 11:43:12 PDT 2012


 src/sna/gen4_render.c |    2 ++
 src/sna/kgem.c        |   21 +++++++++++++++++----
 src/sna/kgem.h        |    6 ++++++
 3 files changed, 25 insertions(+), 4 deletions(-)

New commits:
commit 107feed2a4ca044313c70f83a62909187ff1f905
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 18:58:30 2012 +0100

    sna: Disable snoopable upload buffers for gen4
    
    The sampler really does not like using snoopable buffers...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c6fbddb..c985c8d 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -725,6 +725,8 @@ gen4_bind_bo(struct sna *sna,
 	uint32_t domains;
 	uint16_t offset;
 
+	assert(!kgem_bo_is_vmap(bo));
+
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
 		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 51fc29d..d6ed4e0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3813,6 +3813,18 @@ static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
 	return bo;
 }
 
+static inline bool
+use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
+{
+	if (kgem->gen == 40)
+		return false;
+
+	if (kgem->gen < 30)
+		return flags & KGEM_BUFFER_WRITE;
+
+	return true;
+}
+
 struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				   uint32_t size, uint32_t flags,
 				   void **ret)
@@ -4056,7 +4068,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		alloc = NUM_PAGES(size);
 	flags &= ~KGEM_BUFFER_INPLACE;
 
-	if (flags & KGEM_BUFFER_WRITE && kgem->has_cache_level) {
+	if (kgem->has_cache_level && use_snoopable_buffer(kgem, flags)) {
 		uint32_t handle;
 
 		handle = gem_create(kgem->fd, alloc);
@@ -4079,13 +4091,14 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		DBG(("%s: created handle=%d for buffer\n",
 		     __FUNCTION__, bo->base.handle));
 
+		bo->base.reusable = false;
+		bo->base.vmap = true;
+
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
 		if (bo->mem) {
 			bo->mmapped = true;
 			bo->need_io = false;
 			bo->base.io = true;
-			bo->base.reusable = false;
-			bo->base.vmap = true;
 			goto init;
 		} else {
 			bo->base.refcnt = 0; /* for valgrind */
@@ -4094,7 +4107,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 	}
 
-	if (flags & KGEM_BUFFER_WRITE && kgem->has_vmap) {
+	if (kgem->has_vmap && use_snoopable_buffer(kgem, flags)) {
 		bo = partial_bo_alloc(alloc);
 		if (bo) {
 			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8e9b006..63be218 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -500,6 +500,12 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
 }
 
+static inline bool kgem_bo_is_vmap(struct kgem_bo *bo)
+{
+	while (bo->proxy)
+		bo = bo->proxy;
+	return bo->vmap;
+}
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 {


More information about the xorg-commit mailing list