xf86-video-intel: 6 commits - src/intel_list.h src/sna/gen3_render.c src/sna/gen5_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna.h

Chris Wilson ickle at kemper.freedesktop.org
Thu Mar 1 13:26:06 PST 2012


 src/intel_list.h      |   14 +
 src/sna/gen3_render.c |   68 ++++++++-
 src/sna/gen5_render.c |    6 
 src/sna/kgem.c        |  374 ++++++++++++++++++++++++++++++++++++--------------
 src/sna/kgem.h        |   12 +
 src/sna/sna.h         |    8 -
 src/sna/sna_accel.c   |  265 ++++++++++++++++++++---------------
 src/sna/sna_blt.c     |   52 ++----
 8 files changed, 536 insertions(+), 263 deletions(-)

New commits:
commit 392593e61dac3ac65ee8e32de492c4439413ee85
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 1 17:54:51 2012 +0000

    sna/gen5: Help the compiler avoid an uncached read
    
    Debug builds are excruciatingly slow as the compiler doesn't store the
    temporary in a register but uses an uncached readback instead. Maybe
    this will help...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
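
A minimal sketch of the pattern being applied (hypothetical names; vb stands in
for the vertex array living in write-combining memory):

    /* Duplicating an element by reading it back forces an uncached load
     * whenever the compiler does not keep the value in a register: */
    vb[2] = (sy + h) * sf1;
    vb[5] = vb[2];                  /* readback from WC memory */

    /* Chaining the stores reuses the register-held temporary instead: */
    vb[5] = vb[2] = (sy + h) * sf1;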

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index bcba0d8..bccd343 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -849,16 +849,14 @@ gen5_emit_composite_primitive_identity_source(struct sna *sna,
 	dst.p.y = r->dst.y + r->height;
 	v[0] = dst.f;
 	v[1] = (sx + r->width) * sf[0];
-	v[2] = (sy + r->height) * sf[1];
+	v[5] = v[2] = (sy + r->height) * sf[1];
 
 	dst.p.x = r->dst.x;
 	v[3] = dst.f;
-	v[4] = sx * sf[0];
-	v[5] = v[2];
+	v[7] = v[4] = sx * sf[0];
 
 	dst.p.y = r->dst.y;
 	v[6] = dst.f;
-	v[7] = v[4];
 	v[8] = sy * sf[1];
 }
 
commit 9c0c04cac245db046ef17ff24c32e6ab93535f48
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 1 14:52:39 2012 +0000

    sna: Split storage of inactive partials
    
    As we now attempt to retain partial buffers after execution, we can
    end up with lots of inactive buffers sitting on the partial buffer list.
    In any one batch, we wish to minimise the number of buffers used, so
    keep all the inactive buffers on a separate list and only pull from them
    as required.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
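
Condensed, the new flow looks like the sketch below (types and list helpers as
declared in kgem.h/intel_list.h; the helper names park_partial/revive_partial
are hypothetical, and the real allocation path also filters on the
write/INPLACE flags):

    /* On retire: an idle, unreferenced partial bo leaves the allocation
     * path and is parked on the inactive list. */
    static void park_partial(struct kgem *kgem, struct kgem_partial_bo *bo)
    {
            bo->used = 0;
            list_move_tail(&bo->base.list, &kgem->inactive_partials);
    }

    /* On allocation: only when nothing on active_partials fits is an
     * inactive buffer revived and moved back. */
    static struct kgem_partial_bo *
    revive_partial(struct kgem *kgem, int size)
    {
            struct kgem_partial_bo *bo;

            list_for_each_entry_reverse(bo, &kgem->inactive_partials, base.list) {
                    if (size > bytes(&bo->base))
                            continue;
                    bo->used = size;
                    list_move(&bo->base.list, &kgem->active_partials);
                    return bo;
            }
            return NULL;
    }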

diff --git a/src/intel_list.h b/src/intel_list.h
index 8e8c612..366b9e8 100644
--- a/src/intel_list.h
+++ b/src/intel_list.h
@@ -214,6 +214,8 @@ __list_del(struct list *prev, struct list *next)
 static inline void
 _list_del(struct list *entry)
 {
+    assert(entry->prev->next == entry);
+    assert(entry->next->prev == entry);
     __list_del(entry->prev, entry->next);
 }
 
@@ -327,6 +329,11 @@ list_is_empty(struct list *head)
 	 &pos->member != (head);					\
 	 pos = __container_of(pos->member.next, pos, member))
 
+#define list_for_each_entry_reverse(pos, head, member)				\
+    for (pos = __container_of((head)->prev, pos, member);		\
+	 &pos->member != (head);					\
+	 pos = __container_of(pos->member.prev, pos, member))
+
 /**
  * Loop through the list, keeping a backup pointer to the element. This
  * macro allows for the deletion of a list element while looping through the
@@ -353,6 +360,8 @@ list_add_tail(struct list *entry, struct list *head)
 static inline void
 _list_del(struct list *entry)
 {
+    assert(entry->prev->next == entry);
+    assert(entry->next->prev == entry);
     __list_del(entry->prev, entry->next);
 }
 
@@ -382,6 +391,11 @@ static inline void list_move_tail(struct list *list, struct list *head)
 #define list_last_entry(ptr, type, member) \
     list_entry((ptr)->prev, type, member)
 
+#define list_for_each_entry_reverse(pos, head, member)				\
+    for (pos = __container_of((head)->prev, pos, member);		\
+	 &pos->member != (head);					\
+	 pos = __container_of(pos->member.prev, pos, member))
+
 #endif
 
 #undef container_of
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4fcdf37..e4ff6a7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -119,8 +119,10 @@ static bool validate_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
-		if (bo->base.list.next == &kgem->partial)
+	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+		if (bo->base.list.next == &kgem->active_partials)
 			return true;
 		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
 			ErrorF("this rem: %d, next rem: %d\n",
@@ -132,7 +134,7 @@ static bool validate_partials(struct kgem *kgem)
 	return true;
 
 err:
-	list_for_each_entry(bo, &kgem->partial, base.list)
+	list_for_each_entry(bo, &kgem->active_partials, base.list)
 		ErrorF("bo: used=%d / %d, rem=%d\n",
 		       bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
 	return false;
@@ -573,7 +575,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
 
-	list_init(&kgem->partial);
+	list_init(&kgem->active_partials);
+	list_init(&kgem->inactive_partials);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->sync_list);
@@ -1173,11 +1176,11 @@ static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
 		__kgem_bo_destroy(kgem, bo);
 }
 
-static void bubble_sort_partial(struct kgem *kgem, struct kgem_partial_bo *bo)
+static void bubble_sort_partial(struct list *head, struct kgem_partial_bo *bo)
 {
 	int remain = bytes(&bo->base) - bo->used;
 
-	while (bo->base.list.prev != &kgem->partial) {
+	while (bo->base.list.prev != head) {
 		struct kgem_partial_bo *p;
 
 		p = list_entry(bo->base.list.prev,
@@ -1204,21 +1207,25 @@ static void kgem_retire_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
-		if (bo->used == 0 || !bo->mmapped)
-			continue;
+	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+
 		if (bo->base.refcnt != 1 || bo->base.rq)
 			continue;
 
 		DBG(("%s: handle=%d, used %d/%d\n", __FUNCTION__,
 		     bo->base.handle, bo->used, bytes(&bo->base)));
 
+		assert(bo->base.refcnt == 1);
+		assert(bo->mmapped);
 		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
 		bo->base.dirty = false;
 		bo->base.needs_flush = false;
 		bo->used = 0;
 
-		bubble_sort_partial(kgem, bo);
+		list_move_tail(&bo->base.list, &kgem->inactive_partials);
+		bubble_sort_partial(&kgem->inactive_partials, bo);
 	}
 }
 
@@ -1343,6 +1350,8 @@ static void kgem_commit(struct kgem *kgem)
 	struct kgem_bo *bo, *next;
 
 	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
+		assert(next->request.prev == &bo->request);
+
 		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d -> offset=%x\n",
 		     __FUNCTION__, bo->handle, bo->proxy != NULL,
 		     bo->dirty, bo->needs_flush, (unsigned)bo->exec->offset));
@@ -1411,15 +1420,18 @@ static void kgem_finish_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
+	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+
+		if (!bo->base.exec)
+			continue;
+
 		if (!bo->write) {
 			assert(bo->base.exec || bo->base.refcnt > 1);
 			goto decouple;
 		}
 
-		if (!bo->base.exec)
-			continue;
-
 		if (bo->mmapped) {
 			assert(!bo->need_io);
 			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
@@ -1439,9 +1451,9 @@ static void kgem_finish_partials(struct kgem *kgem)
 			goto decouple;
 		}
 
+		assert(bo->need_io);
 		assert(bo->base.rq == kgem->next_request);
 		assert(bo->base.domain != DOMAIN_GPU);
-		assert(bo->need_io);
 
 		if (bo->base.refcnt == 1 && bo->used < bytes(&bo->base) / 2) {
 			struct kgem_bo *shrink;
@@ -1485,8 +1497,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 				bo->base.needs_flush = false;
 				bo->used = 0;
 
-				bubble_sort_partial(kgem, bo);
-				continue;
+				goto decouple;
 			}
 		}
 
@@ -1508,16 +1519,6 @@ decouple:
 
 static void kgem_cleanup(struct kgem *kgem)
 {
-	while (!list_is_empty(&kgem->partial)) {
-		struct kgem_bo *bo;
-
-		bo = list_first_entry(&kgem->partial,
-				      struct kgem_bo,
-				      list);
-		list_del(&bo->list);
-		kgem_bo_unref(kgem, bo);
-	}
-
 	while (!list_is_empty(&kgem->requests)) {
 		struct kgem_request *rq;
 
@@ -1844,14 +1845,17 @@ void kgem_throttle(struct kgem *kgem)
 
 static void kgem_expire_partial(struct kgem *kgem)
 {
-	struct kgem_partial_bo *bo, *next;
-
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
-		if (bo->base.refcnt > 1 || bo->base.rq)
-			continue;
-
-		DBG(("%s: discarding unused partial buffer: %d/%d, write? %d\n",
-		     __FUNCTION__, bo->used, bytes(&bo->base), bo->write));
+	while (!list_is_empty(&kgem->inactive_partials)) {
+		struct kgem_partial_bo *bo =
+			list_first_entry(&kgem->inactive_partials,
+					 struct kgem_partial_bo,
+					 base.list);
+
+		DBG(("%s: discarding unused partial buffer: %d, last write? %d\n",
+		     __FUNCTION__, bytes(&bo->base), bo->write));
+		assert(bo->base.list.prev == &kgem->inactive_partials);
+		assert(bo->base.io);
+		assert(bo->base.refcnt == 1);
 		list_del(&bo->base.list);
 		kgem_bo_unref(kgem, &bo->base);
 	}
@@ -2785,7 +2789,7 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 
 	if (bo->delta + bo->size.bytes == io->used) {
 		io->used = bo->delta;
-		bubble_sort_partial(kgem, io);
+		bubble_sort_partial(&kgem->active_partials, io);
 	}
 }
 
@@ -3210,10 +3214,16 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	return bo;
 }
 #else
+static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
+{
+	assert(0);
+	return 0;
+}
 struct kgem_bo *kgem_create_map(struct kgem *kgem,
 				void *ptr, uint32_t size,
 				bool read_only)
 {
+	assert(0);
 	return 0;
 }
 #endif
@@ -3243,6 +3253,7 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(!bo->reusable);
+	assert(list_is_empty(&bo->list));
 	list_add(&bo->list, &kgem->sync_list);
 	bo->sync = true;
 }
@@ -3342,17 +3353,16 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	     !!(flags & KGEM_BUFFER_LAST)));
 	assert(size);
 	/* we should never be asked to create anything TOO large */
-	assert(size <= kgem->max_cpu_size);
+	assert(size <= kgem->max_object_size);
 
 	if (kgem->has_llc)
 		flags &= ~KGEM_BUFFER_WRITE_INPLACE;
 
-	list_for_each_entry(bo, &kgem->partial, base.list) {
+	list_for_each_entry(bo, &kgem->active_partials, base.list) {
 		/* We can reuse any write buffer which we can fit */
 		if (flags == KGEM_BUFFER_LAST &&
 		    bo->write == KGEM_BUFFER_WRITE &&
-		    bo->base.exec && !bo->mmapped &&
-		    size <= bytes(&bo->base)) {
+		    !bo->mmapped && size <= bytes(&bo->base)) {
 			assert(bo->base.refcnt == 1);
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
@@ -3362,7 +3372,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->write = 0;
 			offset = 0;
 			bo->used = size;
-			bubble_sort_partial(kgem, bo);
+			bubble_sort_partial(&kgem->active_partials, bo);
 			goto done;
 		}
 
@@ -3395,6 +3405,27 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		break;
 	}
 
+	if (flags & KGEM_BUFFER_WRITE) {
+		list_for_each_entry_reverse(bo, &kgem->inactive_partials, base.list) {
+			if (size > bytes(&bo->base))
+				continue;
+
+			if (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
+			    !bo->base.vmap) {
+				DBG(("%s: skip write %x buffer, need %x\n",
+				     __FUNCTION__, bo->write, flags));
+				continue;
+			}
+
+			DBG(("%s: reusing inactive partial buffer? size=%d, total=%d\n",
+			     __FUNCTION__, size, bytes(&bo->base)));
+			offset = 0;
+			bo->used = size;
+			list_move(&bo->base.list, &kgem->active_partials);
+			goto done;
+		}
+	}
+
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
 	alloc = ALIGN(2*size, kgem->partial_buffer_size);
@@ -3656,7 +3687,8 @@ init:
 	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
 	offset = 0;
 
-	list_add(&bo->base.list, &kgem->partial);
+	assert(list_is_empty(&bo->base.list));
+	list_add(&bo->base.list, &kgem->active_partials);
 	DBG(("%s(pages=%d) new handle=%d\n",
 	     __FUNCTION__, alloc, bo->base.handle));
 
@@ -3669,7 +3701,7 @@ done:
 		first = p = list_first_entry(&bo->base.list,
 					     struct kgem_partial_bo,
 					     base.list);
-		while (&p->base.list != &kgem->partial &&
+		while (&p->base.list != &kgem->active_partials &&
 		       alloc < bytes(&p->base) - p->used) {
 			DBG(("%s: this=%d, right=%d\n",
 			     __FUNCTION__, alloc, bytes(&p->base) -p->used));
@@ -3720,7 +3752,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		if (io->used)
 			io->used -= stride;
 		bo->size.bytes -= stride;
-		bubble_sort_partial(kgem, io);
+		bubble_sort_partial(&kgem->active_partials, io);
 	}
 
 	bo->pitch = stride;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 58316dc..446ac68 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -125,7 +125,7 @@ struct kgem {
 	struct list large;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
-	struct list partial;
+	struct list active_partials, inactive_partials;
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
commit a438e4ac9ba162e870fb22bc54024d35daa2121e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 28 19:15:34 2012 +0000

    sna: Revamp vmap support
    
    Dust off the kernel patches and update to reflect the changes made to
    support LLC CPU bo, in particular to support the unsynchronized shadow
    buffers.
    
    However, due to the forced synchronisation required for strict client
    coherency we prefer not to use the vmap for shared pixmaps unless we are
    already busy (i.e. sync afterwards rather than before in the hope that
    we can squash a few operations into one). Being able to block the reply
    to the client until the request is actually complete and so avoid the
    sync remains a dream.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
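
The userspace half of the vmap path reduces to handing a page-aligned
allocation to the (out-of-tree) vmap ioctl and treating the returned handle as
an ordinary bo. A sketch using the definitions added in the diff below; error
handling is abbreviated and the surrounding kgem bookkeeping is omitted:

    void *ptr;
    struct drm_i915_gem_vmap vmap;

    if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
            return NULL;

    VG_CLEAR(vmap);
    vmap.user_ptr = (uintptr_t)ptr;
    vmap.user_size = size;
    vmap.flags = read_only ? I915_VMAP_READ_ONLY : 0;
    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap))
            return NULL;        /* kernel without vmap support */

    /* vmap.handle now names a bo backed directly by ptr.  The pages must
     * outlive any GPU use, which is why such bos are marked !reusable and
     * why kgem_sync() must wait on them before the client touches ptr. */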

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3ba2ec3..4fcdf37 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -76,8 +76,23 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define MAX_CPU_VMA_CACHE INT16_MAX
 #define MAP_PRESERVE_TIME 10
 
-#define CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) & ~1))
+#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 #define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
+#define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
+#define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
+
+#if defined(USE_VMAP) && !defined(I915_PARAM_HAS_VMAP)
+#define DRM_I915_GEM_VMAP       0x2c
+#define DRM_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap)
+#define I915_PARAM_HAS_VMAP              18
+struct drm_i915_gem_vmap {
+	uint64_t user_ptr;
+	uint32_t user_size;
+	uint32_t flags;
+#define I915_VMAP_READ_ONLY 0x1
+	uint32_t handle;
+};
+#endif
 
 struct kgem_partial_bo {
 	struct kgem_bo base;
@@ -561,6 +576,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->partial);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
+	list_init(&kgem->sync_list);
 	list_init(&kgem->large);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
@@ -577,7 +593,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->next_request = __kgem_request_alloc();
 
-#if defined(USE_VMAP) && defined(I915_PARAM_HAS_VMAP)
+#if defined(USE_VMAP)
 	if (!DBG_NO_VMAP)
 		kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
 #endif
@@ -605,9 +621,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		}
 		kgem->has_llc = has_llc;
 	}
-	kgem->has_cpu_bo = kgem->has_llc;
-	DBG(("%s: cpu bo enabled %d: llc? %d\n", __FUNCTION__,
-	     kgem->has_cpu_bo, kgem->has_llc));
+	DBG(("%s: cpu bo enabled %d: llc? %d, vmap? %d\n", __FUNCTION__,
+	     kgem->has_llc | kgem->has_vmap, kgem->has_llc, kgem->has_vmap));
 
 	kgem->has_semaphores = false;
 	if (gen >= 60 && semaphores_enabled())
@@ -688,10 +703,13 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->max_upload_tile_size = half_gpu_max;
 
 	kgem->large_object_size = MAX_CACHE_SIZE;
-	if (kgem->large_object_size > kgem->max_cpu_size)
-		kgem->large_object_size = kgem->max_cpu_size;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
+	if (kgem->has_llc | kgem->has_vmap) {
+		if (kgem->large_object_size > kgem->max_cpu_size)
+			kgem->large_object_size = kgem->max_cpu_size;
+	} else
+		kgem->max_cpu_size = 0;
 
 	DBG(("%s: large object thresold=%d\n",
 	     __FUNCTION__, kgem->large_object_size));
@@ -887,8 +905,10 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 
 	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
-	kgem->sync |= bo->sync;
 	kgem->scanout |= bo->scanout;
+
+	if (bo->sync)
+		kgem->sync = kgem->next_request;
 }
 
 static uint32_t kgem_end_batch(struct kgem *kgem)
@@ -932,12 +952,14 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	int type = IS_CPU_MAP(bo->map);
 
+	assert(!IS_VMAP_MAP(bo->map));
+
 	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
 	     __FUNCTION__, type ? "CPU" : "GTT",
 	     bo->handle, kgem->vma[type].count));
 
-	VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0));
-	munmap(CPU_MAP(bo->map), bytes(bo));
+	VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0));
+	munmap(MAP(bo->map), bytes(bo));
 	bo->map = NULL;
 
 	if (!list_is_empty(&bo->vma)) {
@@ -951,9 +973,15 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 	assert(bo->refcnt == 0);
 	assert(bo->exec == NULL);
+	assert(!bo->vmap || bo->rq == NULL);
 
 	kgem_bo_binding_free(kgem, bo);
 
+	if (IS_VMAP_MAP(bo->map)) {
+		assert(bo->rq == NULL);
+		free(MAP(bo->map));
+		bo->map = NULL;
+	}
 	if (bo->map)
 		kgem_bo_release_map(kgem, bo);
 	assert(list_is_empty(&bo->vma));
@@ -978,6 +1006,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	assert(!bo->needs_flush);
 	assert(bo->rq == NULL);
 	assert(bo->domain != DOMAIN_GPU);
+	assert(bo->reusable);
 
 	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
 		kgem_bo_free(kgem, bo);
@@ -990,7 +1019,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
 		    (!type && !kgem_bo_is_mappable(kgem, bo))) {
 			list_del(&bo->vma);
-			munmap(CPU_MAP(bo->map), bytes(bo));
+			munmap(MAP(bo->map), bytes(bo));
 			bo->map = NULL;
 		}
 		if (bo->map) {
@@ -1035,6 +1064,19 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (NO_CACHE)
 		goto destroy;
 
+	if (bo->vmap) {
+		if (bo->rq == NULL) {
+			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
+				list_add(&bo->request, &kgem->flushing);
+				bo->rq = &_kgem_static_request;
+			} else
+				kgem_bo_free(kgem, bo);
+		} else {
+			assert(!bo->sync);
+		}
+		return;
+	}
+
 	if (bo->io) {
 		struct kgem_bo *base;
 
@@ -1065,7 +1107,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 
 	assert(list_is_empty(&bo->vma));
 	assert(list_is_empty(&bo->list));
-	assert(bo->vmap == false && bo->sync == false);
+	assert(bo->vmap == false);
 	assert(bo->io == false);
 	assert(bo->scanout == false);
 	assert(bo->flush == false);
@@ -1197,7 +1239,7 @@ bool kgem_retire(struct kgem *kgem)
 
 		DBG(("%s: moving %d from flush to inactive\n",
 		     __FUNCTION__, bo->handle));
-		if (kgem_bo_set_purgeable(kgem, bo)) {
+		if (bo->reusable && kgem_bo_set_purgeable(kgem, bo)) {
 			bo->needs_flush = false;
 			bo->domain = DOMAIN_NONE;
 			bo->rq = NULL;
@@ -1278,6 +1320,9 @@ bool kgem_retire(struct kgem *kgem)
 			kgem_bo_free(kgem, rq->bo);
 		}
 
+		if (kgem->sync == rq)
+			kgem->sync = NULL;
+
 		_list_del(&rq->list);
 		free(rq);
 	}
@@ -1308,7 +1353,7 @@ static void kgem_commit(struct kgem *kgem)
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
 
-		if (!bo->refcnt && !bo->reusable) {
+		if (!bo->refcnt && !bo->reusable && !bo->vmap) {
 			kgem_bo_free(kgem, bo);
 			continue;
 		}
@@ -1708,8 +1753,10 @@ void _kgem_submit(struct kgem *kgem)
 #if !NDEBUG
 			if (ret < 0) {
 				int i;
-				ErrorF("batch (end=%d, size=%d) submit failed: %d\n",
-				       batch_end, size, errno);
+
+				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
+				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
+				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);
 
 				i = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
 				if (i != -1) {
@@ -1746,8 +1793,7 @@ void _kgem_submit(struct kgem *kgem)
 					       kgem->reloc[i].write_domain,
 					       (int)kgem->reloc[i].presumed_offset);
 				}
-				FatalError("SNA: failed to submit batchbuffer: ret=%d\n",
-					   errno);
+				FatalError("SNA: failed to submit batchbuffer\n");
 			}
 #endif
 
@@ -2594,6 +2640,7 @@ search_inactive:
 	cache = &kgem->inactive[bucket];
 	list_for_each_entry_safe(bo, next, cache, list) {
 		assert(bucket(bo) == bucket);
+		assert(bo->reusable);
 
 		if (size > num_pages(bo)) {
 			DBG(("inactive too small: %d < %d\n",
@@ -2673,6 +2720,59 @@ create:
 	return bo;
 }
 
+struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
+				   int width,
+				   int height,
+				   int bpp,
+				   uint32_t flags)
+{
+	struct kgem_bo *bo;
+
+	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
+
+	if (kgem->has_llc) {
+		bo = kgem_create_2d(kgem, width, height, bpp,
+				    I915_TILING_NONE, flags);
+		if (bo == NULL)
+			return bo;
+
+		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
+			_kgem_bo_destroy(kgem, bo);
+			return NULL;
+		}
+
+		return bo;
+	}
+
+	if (kgem->has_vmap) {
+		int stride, size;
+		void *ptr;
+
+		stride = ALIGN(width, 2) * bpp >> 3;
+		stride = ALIGN(stride, 4);
+		size = ALIGN(height, 2) * stride;
+
+		assert(size >= PAGE_SIZE);
+
+		/* XXX */
+		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
+		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
+			return NULL;
+
+		bo = kgem_create_map(kgem, ptr, size, false);
+		if (bo == NULL) {
+			free(ptr);
+			return NULL;
+		}
+
+		bo->map = MAKE_VMAP_MAP(ptr);
+		bo->pitch = stride;
+		return bo;
+	}
+
+	return NULL;
+}
+
 static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
@@ -2702,9 +2802,6 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		return;
 	}
 
-	if (bo->vmap)
-		kgem_bo_sync__cpu(kgem, bo);
-
 	__kgem_bo_destroy(kgem, bo);
 }
 
@@ -2915,8 +3012,8 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 		assert(bo->map);
 		assert(bo->rq == NULL);
 
-		VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0));
-		munmap(CPU_MAP(bo->map), bytes(bo));
+		VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0));
+		munmap(MAP(bo->map), bytes(bo));
 		bo->map = NULL;
 		list_del(&bo->vma);
 		kgem->vma[type].count--;
@@ -2996,7 +3093,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (bo->map)
-		return CPU_MAP(bo->map);
+		return MAP(bo->map);
 
 	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 	return bo->map = gem_mmap(kgem->fd, bo->handle, bytes(bo),
@@ -3012,7 +3109,7 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	assert(list_is_empty(&bo->list));
 
 	if (IS_CPU_MAP(bo->map))
-		return CPU_MAP(bo->map);
+		return MAP(bo->map);
 
 	if (bo->map)
 		kgem_bo_release_map(kgem, bo);
@@ -3067,7 +3164,7 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 	return flink.name;
 }
 
-#if defined(USE_VMAP) && defined(I915_PARAM_HAS_VMAP)
+#if defined(USE_VMAP)
 static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 {
 	struct drm_i915_gem_vmap vmap;
@@ -3095,14 +3192,11 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	if (!kgem->has_vmap)
 		return NULL;
 
-	if (size >= MAX_CACHE_SIZE)
-		return NULL;
-
 	handle = gem_vmap(kgem->fd, ptr, size, read_only);
 	if (handle == 0)
 		return NULL;
 
-	bo = __kgem_bo_alloc(handle, size);
+	bo = __kgem_bo_alloc(handle, NUM_PAGES(size));
 	if (bo == NULL) {
 		gem_close(kgem->fd, handle);
 		return NULL;
@@ -3110,10 +3204,9 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 
 	bo->reusable = false;
 	bo->vmap = true;
-	bo->sync = true;
 
-	DBG(("%s(ptr=%p, size=%d, read_only=%d) => handle=%d\n",
-	     __FUNCTION__, ptr, size, read_only, handle));
+	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d\n",
+	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle));
 	return bo;
 }
 #else
@@ -3129,7 +3222,6 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
 	kgem_bo_submit(kgem, bo);
 
-	/* XXX assumes bo is snoopable */
 	if (bo->domain != DOMAIN_CPU) {
 		struct drm_i915_gem_set_domain set_domain;
 
@@ -3148,28 +3240,40 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
+void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
+{
+	assert(!bo->reusable);
+	list_add(&bo->list, &kgem->sync_list);
+	bo->sync = true;
+}
+
 void kgem_sync(struct kgem *kgem)
 {
+	struct drm_i915_gem_set_domain set_domain;
+	struct kgem_request *rq;
+	struct kgem_bo *bo;
+
 	DBG(("%s\n", __FUNCTION__));
 
-	if (!list_is_empty(&kgem->requests)) {
-		struct drm_i915_gem_set_domain set_domain;
-		struct kgem_request *rq;
+	rq = kgem->sync;
+	if (rq == NULL)
+		return;
 
-		rq = list_first_entry(&kgem->requests,
-				      struct kgem_request,
-				      list);
+	if (rq == kgem->next_request)
+		_kgem_submit(kgem);
 
-		VG_CLEAR(set_domain);
-		set_domain.handle = rq->bo->handle;
-		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+	VG_CLEAR(set_domain);
+	set_domain.handle = rq->bo->handle;
+	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
 
-		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-		kgem_retire(kgem);
-	}
+	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+	kgem_retire(kgem);
+
+	list_for_each_entry(bo, &kgem->sync_list, list)
+		kgem_bo_sync__cpu(kgem, bo);
 
-	kgem->sync = false;
+	assert (kgem->sync == NULL);
 }
 
 void kgem_clear_dirty(struct kgem *kgem)
@@ -3262,11 +3366,20 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			goto done;
 		}
 
-		if ((bo->write & KGEM_BUFFER_WRITE) != (flags & KGEM_BUFFER_WRITE) ||
-		    (bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
-			DBG(("%s: skip write %x buffer, need %x\n",
-			     __FUNCTION__, bo->write, flags));
-			continue;
+		if (flags & KGEM_BUFFER_WRITE) {
+			if ((bo->write & KGEM_BUFFER_WRITE) == 0 ||
+			    (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
+			     !bo->base.vmap)) {
+				DBG(("%s: skip write %x buffer, need %x\n",
+				     __FUNCTION__, bo->write, flags));
+				continue;
+			}
+		} else {
+			if (bo->write & KGEM_BUFFER_WRITE) {
+				DBG(("%s: skip write %x buffer, need %x\n",
+				     __FUNCTION__, bo->write, flags));
+				continue;
+			}
 		}
 
 		if (bo->used + size <= bytes(&bo->base)) {
@@ -3290,7 +3403,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (alloc > MAX_CACHE_SIZE)
 		alloc = PAGE_ALIGN(size);
 	alloc /= PAGE_SIZE;
-	if (kgem->has_cpu_bo) {
+	if (kgem->has_llc) {
 		bo = malloc(sizeof(*bo));
 		if (bo == NULL)
 			return NULL;
@@ -3420,6 +3533,29 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		alloc = PAGE_ALIGN(size) / PAGE_SIZE;
 	flags &= ~KGEM_BUFFER_INPLACE;
 
+	if (kgem->has_vmap) {
+		bo = partial_bo_alloc(alloc);
+		if (bo) {
+			if (!__kgem_bo_init(&bo->base,
+					    gem_vmap(kgem->fd, bo->mem,
+						     alloc * PAGE_SIZE, false),
+					    alloc)) {
+				free(bo);
+				return NULL;
+			}
+
+			DBG(("%s: created vmap handle=%d for buffer\n",
+			     __FUNCTION__, bo->base.handle));
+
+			bo->need_io = false;
+			bo->base.io = true;
+			bo->base.vmap = true;
+			bo->mmapped = true;
+
+			goto init;
+		}
+	}
+
 	old = NULL;
 	if ((flags & KGEM_BUFFER_WRITE) == 0)
 		old = search_linear_cache(kgem, alloc, 0);
@@ -3561,7 +3697,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 	assert(width > 0 && height > 0);
 	assert(ret != NULL);
 	stride = ALIGN(width, 2) * bpp >> 3;
-	stride = ALIGN(stride, kgem->min_alignment);
+	stride = ALIGN(stride, 4);
 
 	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
 	     __FUNCTION__, width, height, bpp, stride));
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 30303ce..58316dc 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -127,7 +127,9 @@ struct kgem {
 	struct list inactive[NUM_CACHE_BUCKETS];
 	struct list partial;
 	struct list requests;
+	struct list sync_list;
 	struct kgem_request *next_request;
+	struct kgem_request *sync;
 
 	struct {
 		struct list inactive[NUM_CACHE_BUCKETS];
@@ -142,7 +144,6 @@ struct kgem {
 	uint16_t max_batch_size;
 
 	uint32_t flush:1;
-	uint32_t sync:1;
 	uint32_t need_expire:1;
 	uint32_t need_purge:1;
 	uint32_t need_retire:1;
@@ -154,7 +155,6 @@ struct kgem {
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_semaphores :1;
 	uint32_t has_llc :1;
-	uint32_t has_cpu_bo :1;
 
 	uint16_t fence_max;
 	uint16_t half_cpu_cache_pages;
@@ -229,6 +229,11 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int bpp,
 			       int tiling,
 			       uint32_t flags);
+struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
+				   int width,
+				   int height,
+				   int bpp,
+				   uint32_t flags);
 
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
 void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
@@ -359,6 +364,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
 Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 00fc80a..c772d7d 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -115,11 +115,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "sna_damage.h"
 #include "sna_render.h"
 
-#ifndef CREATE_PIXMAP_USAGE_SCRATCH_HEADER
-#define FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER 1
-#define CREATE_PIXMAP_USAGE_SCRATCH_HEADER (unsigned)-1
-#endif
-
 #define SNA_CURSOR_X			64
 #define SNA_CURSOR_Y			SNA_CURSOR_X
 
@@ -226,13 +221,14 @@ struct sna {
 #define SNA_NO_THROTTLE		0x1
 #define SNA_NO_DELAYED_FLUSH	0x2
 
+	unsigned flush;
+
 	int timer[NUM_TIMERS];
 	uint16_t timer_active;
 	uint16_t timer_ready;
 
 	int vblank_interval;
 
-	struct list deferred_free;
 	struct list dirty_pixmaps;
 	struct list active_pixmaps;
 	struct list inactive_clock[2];
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8fa59a6..52e75c7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -44,6 +44,7 @@
 #include <fbpict.h>
 #endif
 #include <miline.h>
+#include <shmint.h>
 
 #include <sys/time.h>
 #include <sys/mman.h>
@@ -61,7 +62,8 @@
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 1 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
-#define USE_BO_FOR_SCRATCH_PIXMAP 1
+#define USE_SHM_VMAP 0
+#define PREFER_VMAP 0
 
 #define MIGRATE_ALL 0
 
@@ -302,27 +304,21 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
 	assert(priv->stride);
 
-	if ((sna->kgem.has_cpu_bo || (priv->create & KGEM_CAN_CREATE_GPU) == 0) &&
-	    (priv->create & KGEM_CAN_CREATE_CPU)) {
+	if (priv->create & KGEM_CAN_CREATE_CPU) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
 
-		priv->cpu_bo = kgem_create_2d(&sna->kgem,
-					      pixmap->drawable.width,
-					      pixmap->drawable.height,
-					      pixmap->drawable.bitsPerPixel,
-					      I915_TILING_NONE,
-					      from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
+		priv->cpu_bo = kgem_create_cpu_2d(&sna->kgem,
+						  pixmap->drawable.width,
+						  pixmap->drawable.height,
+						  pixmap->drawable.bitsPerPixel,
+						  from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
 		DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__,
 		     priv->cpu_bo->handle));
 
 		if (priv->cpu_bo) {
 			priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
-			if (priv->ptr == NULL) {
-				kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-				priv->cpu_bo = NULL;
-			} else
-				priv->stride = priv->cpu_bo->pitch;
+			priv->stride = priv->cpu_bo->pitch;
 		}
 	}
 
@@ -349,7 +345,8 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
 		     __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo)));
-
+		if (priv->cpu_bo->sync)
+			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
 		priv->cpu_bo = NULL;
 	} else
@@ -377,14 +374,6 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
 	if (priv->ptr)
 		sna_pixmap_free_cpu(sna, priv);
 
-	if (priv->cpu_bo) {
-		if (priv->cpu_bo->vmap && kgem_bo_is_busy(priv->cpu_bo)) {
-			list_add_tail(&priv->list, &sna->deferred_free);
-			return false;
-		}
-		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-	}
-
 	if (!sna->freed_pixmap && priv->header) {
 		sna->freed_pixmap = pixmap;
 		assert(priv->ptr == NULL);
@@ -524,7 +513,6 @@ _sna_pixmap_reset(PixmapPtr pixmap)
 
 	assert(pixmap->drawable.type == DRAWABLE_PIXMAP);
 	assert(pixmap->drawable.class == 0);
-	assert(pixmap->drawable.id == 0);
 	assert(pixmap->drawable.x == 0);
 	assert(pixmap->drawable.y == 0);
 
@@ -561,11 +549,6 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
 	     pixmap->usage_hint));
 
 	switch (pixmap->usage_hint) {
-	case CREATE_PIXMAP_USAGE_SCRATCH_HEADER:
-#if !FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER
-		if (sna->kgem.has_vmap)
-			break;
-#endif
 	case CREATE_PIXMAP_USAGE_GLYPH_PICTURE:
 		DBG(("%s: not attaching due to crazy usage: %d\n",
 		     __FUNCTION__, pixmap->usage_hint));
@@ -594,15 +577,6 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
 		       pixmap->drawable.width,
 		       pixmap->drawable.height);
 
-	if (pixmap->usage_hint == CREATE_PIXMAP_USAGE_SCRATCH_HEADER) {
-		priv->cpu_bo = kgem_create_map(&sna->kgem,
-					       pixmap->devPrivate.ptr,
-					       pixmap_size(pixmap),
-					       0);
-		if (priv->cpu_bo)
-			priv->cpu_bo->pitch = pixmap->devKind;
-	}
-
 	return priv;
 }
 
@@ -630,6 +604,75 @@ create_pixmap(struct sna *sna, ScreenPtr screen,
 }
 
 static PixmapPtr
+sna_pixmap_create_shm(ScreenPtr screen,
+		      int width, int height, int depth,
+		      char *addr)
+{
+	struct sna *sna = to_sna_from_screen(screen);
+	int bpp = BitsPerPixel(depth);
+	int pitch = PixmapBytePad(width, depth);
+	struct sna_pixmap *priv;
+	PixmapPtr pixmap;
+
+	DBG(("%s(%d, %d, %d)\n", __FUNCTION__,
+	     width, height, depth));
+
+	if (sna->freed_pixmap) {
+		pixmap = sna->freed_pixmap;
+		sna->freed_pixmap = NULL;
+
+		pixmap->usage_hint = -1;
+		pixmap->refcnt = 1;
+
+		pixmap->drawable.width = width;
+		pixmap->drawable.height = height;
+		pixmap->drawable.depth = depth;
+		pixmap->drawable.bitsPerPixel = bpp;
+		pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER;
+
+		DBG(("%s: serial=%ld, %dx%d\n",
+		     __FUNCTION__,
+		     pixmap->drawable.serialNumber,
+		     pixmap->drawable.width,
+		     pixmap->drawable.height));
+
+		priv = _sna_pixmap_reset(pixmap);
+	} else {
+		pixmap = create_pixmap(sna, screen, 0, 0, depth, -1);
+		if (pixmap == NullPixmap)
+			return NullPixmap;
+
+		pixmap->drawable.width = width;
+		pixmap->drawable.height = height;
+		pixmap->drawable.depth = depth;
+		pixmap->drawable.bitsPerPixel = bpp;
+
+		priv = __sna_pixmap_attach(sna, pixmap);
+		if (!priv) {
+			fbDestroyPixmap(pixmap);
+			return NullPixmap;
+		}
+	}
+
+	priv->cpu_bo = kgem_create_map(&sna->kgem, addr, pitch*height, false);
+	if (priv->cpu_bo == NULL) {
+		free(priv);
+		fbDestroyPixmap(pixmap);
+		return GetScratchPixmapHeader(screen, width, height, depth,
+					      bpp, pitch, addr);
+	}
+	kgem_bo_set_sync(&sna->kgem, priv->cpu_bo);
+	priv->cpu_bo->pitch = pitch;
+
+	priv->header = true;
+	sna_damage_all(&priv->cpu_damage, width, height);
+
+	pixmap->devKind = pitch;
+	pixmap->devPrivate.ptr = addr;
+	return pixmap;
+}
+
+static PixmapPtr
 sna_pixmap_create_scratch(ScreenPtr screen,
 			  int width, int height, int depth,
 			  uint32_t tiling)
@@ -723,6 +766,11 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
+	if ((width|height) == 0) {
+		usage = -1;
+		goto fallback;
+	}
+
 	if (!sna->have_render)
 		goto fallback;
 
@@ -733,11 +781,6 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		goto fallback;
 	}
 
-#if FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER
-	if (width == 0 || height == 0)
-		goto fallback;
-#endif
-
 	if (usage == CREATE_PIXMAP_USAGE_SCRATCH) {
 		if (flags & KGEM_CAN_CREATE_GPU)
 			return sna_pixmap_create_scratch(screen,
@@ -919,14 +962,16 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			sna_damage_destroy(&priv->cpu_damage);
 			priv->undamaged = false;
 			list_del(&priv->list);
-			if (priv->cpu_bo)
+			if (priv->cpu_bo) {
+				assert(!priv->cpu_bo->sync);
 				sna_pixmap_free_cpu(sna, priv);
+			}
 
 			return true;
 		}
 
 skip_inplace_map:
-		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
+		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo)) {
 			if (priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
@@ -941,6 +986,7 @@ skip_inplace_map:
 					list_del(&priv->list);
 					priv->undamaged = false;
 				}
+				assert(!priv->cpu_bo->sync);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -982,7 +1028,7 @@ skip_inplace_map:
 	}
 
 	if (priv->clear) {
-		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo))
 			sna_pixmap_free_cpu(sna, priv);
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->undamaged = true;
@@ -1282,7 +1328,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 		}
 
-		if (priv->cpu_bo && !priv->cpu_bo->vmap) {
+		if (priv->cpu_bo && !priv->cpu_bo->sync) {
 			if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 			if (sync_will_stall(priv->cpu_bo)) {
@@ -1372,7 +1418,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
 			ok = FALSE;
-			if (priv->cpu_bo && sna->kgem.gen >= 60)
+			if (priv->cpu_bo && sna->kgem.gen >= 30)
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
@@ -1910,6 +1956,9 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
+	if (priv->cpu_bo->sync && !kgem_bo_is_busy(priv->cpu_bo))
+		return NULL;
+
 	/* Continue to use the shadow pixmap once mapped */
 	if (pixmap->devPrivate.ptr) {
 		/* But only if we do not need to sync the CPU bo */
@@ -2084,6 +2133,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		sna_damage_destroy(&priv->cpu_damage);
 		priv->undamaged = false;
 		list_del(&priv->list);
+		assert(!priv->cpu_bo->sync);
 		sna_pixmap_free_cpu(to_sna_from_pixmap(pixmap), priv);
 	}
 
@@ -2098,8 +2148,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	BoxPtr box;
 	int n;
 
-	assert(pixmap->usage_hint != CREATE_PIXMAP_USAGE_SCRATCH_HEADER);
-
 	DBG(("%s(pixmap=%ld, usage=%d)\n",
 	     __FUNCTION__, pixmap->drawable.serialNumber, pixmap->usage_hint));
 
@@ -2209,8 +2257,10 @@ done:
 			      pixmap->drawable.height);
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		priv->undamaged = false;
-		if (priv->ptr)
+		if (priv->ptr) {
+			assert(!priv->cpu_bo->sync);
 			sna_pixmap_free_cpu(sna, priv);
+		}
 	}
 active:
 	return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv);
@@ -2410,8 +2460,6 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	struct kgem_bo *src_bo;
-	Bool ok = FALSE;
 	BoxPtr box;
 	int nbox;
 	int16_t dx, dy;
@@ -2423,14 +2471,16 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	     __FUNCTION__, nbox,
 	     box->x1, box->y1, box->x2, box->y2));
 
+	if (gc->alu != GXcopy)
+		return FALSE;
+
 	if (priv->gpu_bo == NULL &&
 	    !sna_pixmap_create_mappable_gpu(pixmap))
 		return FALSE;
 
 	assert(priv->gpu_bo);
 
-	if (gc->alu == GXcopy &&
-	    !priv->pinned && nbox == 1 &&
+	if (!priv->pinned && nbox == 1 &&
 	    box->x1 <= 0 && box->y1 <= 0 &&
 	    box->x2 >= pixmap->drawable.width &&
 	    box->y2 >= pixmap->drawable.height)
@@ -2440,25 +2490,10 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	x += dx + drawable->x;
 	y += dy + drawable->y;
 
-	src_bo = kgem_create_map(&sna->kgem, bits, stride*h, 1);
-	if (src_bo) {
-		src_bo->pitch = stride;
-		ok = sna->render.copy_boxes(sna, gc->alu,
-					    pixmap, src_bo, -x, -y,
-					    pixmap, priv->gpu_bo, 0, 0,
-					    box, nbox);
-		kgem_bo_destroy(&sna->kgem, src_bo);
-	}
-
-	if (!ok && gc->alu == GXcopy)
-		ok = sna_write_boxes(sna, pixmap,
-				     priv->gpu_bo, 0, 0,
-				     bits,
-				     stride,
-				     -x, -y,
-				     box, nbox);
-
-	return ok;
+	return sna_write_boxes(sna, pixmap,
+			       priv->gpu_bo, 0, 0,
+			       bits, stride, -x, -y,
+			       box, nbox);
 }
 
 static bool upload_inplace(struct sna *sna,
@@ -2561,7 +2596,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 			kgem_retire(&sna->kgem);
 		if (sync_will_stall(priv->cpu_bo)) {
-			if (priv->cpu_bo->vmap) {
+			if (priv->cpu_bo->sync) {
 				if (sna_put_image_upload_blt(drawable, gc, region,
 							     x, y, w, h, bits, stride)) {
 					if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
@@ -2603,6 +2638,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 					list_del(&priv->list);
 					priv->undamaged = false;
 				}
+				assert(!priv->cpu_bo->sync);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -3236,6 +3272,22 @@ static bool copy_use_gpu_bo(struct sna *sna,
 	return kgem_bo_is_busy(priv->cpu_bo);
 }
 
+static bool
+copy_use_cpu_bo(struct sna_pixmap *priv, struct kgem_bo *dst_bo)
+{
+	if (priv == NULL || priv->cpu_bo == NULL)
+		return false;
+
+	if (PREFER_VMAP) {
+		return true;
+	} else {
+		if (kgem_bo_is_busy(priv->cpu_bo) || kgem_bo_is_busy(dst_bo))
+			return true;
+
+		return !priv->cpu_bo->sync;
+	}
+}
+
 static void
 sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	       BoxPtr box, int n,
@@ -3433,7 +3485,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					RegionTranslate(&region, -dst_dx, -dst_dy);
 				}
 			}
-		} else if (src_priv && src_priv->cpu_bo) {
+		} else if (copy_use_cpu_bo(src_priv, dst_priv->gpu_bo)) {
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->cpu_bo, src_dx, src_dy,
 						    dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy,
@@ -11445,17 +11497,30 @@ static Bool sna_change_window_attributes(WindowPtr win, unsigned long mask)
 }
 
 static void
+sna_accel_reply_callback(CallbackListPtr *list,
+			 pointer user_data, pointer call_data)
+{
+	struct sna *sna = user_data;
+	ReplyInfoRec *info = call_data;
+
+	if (sna->flush || !info->startOfReply)
+		return;
+
+	sna->flush = sna->kgem.flush || sna->kgem.sync;
+}
+
+static void
 sna_accel_flush_callback(CallbackListPtr *list,
 			 pointer user_data, pointer call_data)
 {
 	struct sna *sna = user_data;
 	struct list preserve;
 
-	if ((sna->kgem.sync|sna->kgem.flush) == 0 &&
-	    list_is_empty(&sna->dirty_pixmaps))
+	if (!sna->flush)
 		return;
 
-	DBG(("%s\n", __FUNCTION__));
+	DBG(("%s: need_sync=%d, need_flush=%d, dirty? %d\n", __FUNCTION__,
+	     sna->kgem.sync!=NULL, sna->kgem.flush, !list_is_empty(&sna->dirty_pixmaps)));
 
 	/* flush any pending damage from shadow copies to tfp clients */
 	list_init(&preserve);
@@ -11476,35 +11541,9 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	kgem_submit(&sna->kgem);
 	sna->kgem.flush_now = 0;
 
-	if (sna->kgem.sync) {
-		kgem_sync(&sna->kgem);
+	kgem_sync(&sna->kgem);
 
-		while (!list_is_empty(&sna->deferred_free)) {
-			struct sna_pixmap *priv =
-				list_first_entry(&sna->deferred_free,
-						 struct sna_pixmap,
-						 list);
-			list_del(&priv->list);
-			kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-			fbDestroyPixmap(priv->pixmap);
-			free(priv);
-		}
-	}
-}
-
-static void sna_deferred_free(struct sna *sna)
-{
-	struct sna_pixmap *priv, *next;
-
-	list_for_each_entry_safe(priv, next, &sna->deferred_free, list) {
-		if (kgem_bo_is_busy(priv->cpu_bo))
-			continue;
-
-		list_del(&priv->list);
-		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-		fbDestroyPixmap(priv->pixmap);
-		free(priv);
-	}
+	sna->flush = false;
 }
 
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
@@ -11768,6 +11807,7 @@ static void sna_accel_inactive(struct sna *sna)
 			sna_damage_destroy(&priv->cpu_damage);
 			list_del(&priv->list);
 
+			assert(!priv->cpu_bo->sync);
 			sna_pixmap_free_cpu(sna, priv);
 			priv->undamaged = false;
 
@@ -11819,10 +11859,14 @@ Bool sna_accel_pre_init(struct sna *sna)
 	return TRUE;
 }
 
+static ShmFuncs shm_funcs = { sna_pixmap_create_shm, NULL };
+
 Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 {
 	const char *backend;
 
+	if (!AddCallback(&ReplyCallback, sna_accel_reply_callback, sna))
+		return FALSE;
 	if (!AddCallback(&FlushCallback, sna_accel_flush_callback, sna))
 		return FALSE;
 
@@ -11830,7 +11874,6 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	screen->RealizeFont = sna_realize_font;
 	screen->UnrealizeFont = sna_unrealize_font;
 
-	list_init(&sna->deferred_free);
 	list_init(&sna->dirty_pixmaps);
 	list_init(&sna->active_pixmaps);
 	list_init(&sna->inactive_clock[0]);
@@ -11866,6 +11909,9 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	}
 #endif
 
+	if (USE_SHM_VMAP && sna->kgem.has_vmap)
+		ShmRegisterFuncs(screen, &shm_funcs);
+
 	backend = "no";
 	sna->have_render = false;
 	sna->default_tiling = I915_TILING_X;
@@ -11933,6 +11979,7 @@ void sna_accel_close(struct sna *sna)
 	sna_glyphs_close(sna);
 
 	DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
+	DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna);
 
 	kgem_cleanup_cache(&sna->kgem);
 }
@@ -11976,8 +12023,6 @@ void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
 	for (id = 0; id < NUM_TIMERS; id++)
 		if (active & (1 << id) && FD_ISSET(sna->timer[id], ready))
 			sna->timer_ready |= 1 << id;
-
-	sna_deferred_free(sna);
 }
 
 void sna_accel_free(struct sna *sna)
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index a9ec899..8c51a77 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -907,7 +907,7 @@ prepare_blt_clear(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
-	op->blt   = blt_composite_fill;
+	op->blt = blt_composite_fill;
 	if (op->dst.x|op->dst.y) {
 		op->box   = blt_composite_fill_box;
 		op->boxes = blt_composite_fill_boxes;
@@ -915,7 +915,7 @@ prepare_blt_clear(struct sna *sna,
 		op->box   = blt_composite_fill_box_no_offset;
 		op->boxes = blt_composite_fill_boxes_no_offset;
 	}
-	op->done  = nop_done;
+	op->done = nop_done;
 
 	return sna_blt_fill_init(sna, &op->u.blt,
 				 op->dst.bo,
@@ -930,7 +930,7 @@ prepare_blt_fill(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
-	op->blt   = blt_composite_fill;
+	op->blt = blt_composite_fill;
 	if (op->dst.x|op->dst.y) {
 		op->box   = blt_composite_fill_box;
 		op->boxes = blt_composite_fill_boxes;
@@ -938,7 +938,7 @@ prepare_blt_fill(struct sna *sna,
 		op->box   = blt_composite_fill_box_no_offset;
 		op->boxes = blt_composite_fill_boxes_no_offset;
 	}
-	op->done  = nop_done;
+	op->done = nop_done;
 
 	return sna_blt_fill_init(sna, &op->u.blt, op->dst.bo,
 				 op->dst.pixmap->drawable.bitsPerPixel,
@@ -1126,9 +1126,9 @@ prepare_blt_copy(struct sna *sna,
 	DBG(("%s\n", __FUNCTION__));
 
 	if (sna->kgem.gen >= 60)
-		op->done  = gen6_blt_copy_done;
+		op->done = gen6_blt_copy_done;
 	else
-		op->done  = nop_done;
+		op->done = nop_done;
 
 	if (alpha_fixup) {
 		op->blt   = blt_composite_copy_with_alpha;
@@ -1153,14 +1153,6 @@ prepare_blt_copy(struct sna *sna,
 	}
 }
 
-static void blt_vmap_done(struct sna *sna, const struct sna_composite_op *op)
-{
-	struct kgem_bo *bo = (struct kgem_bo *)op->u.blt.src_pixmap;
-
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
-}
-
 fastcall static void
 blt_put_composite(struct sna *sna,
 		  const struct sna_composite_op *op,
@@ -1395,26 +1387,18 @@ prepare_blt_put(struct sna *sna,
 		uint32_t alpha_fixup)
 {
 	PixmapPtr src = op->u.blt.src_pixmap;
-	struct sna_pixmap *priv = sna_pixmap_attach(src);
-	struct kgem_bo *src_bo = NULL;
-	struct kgem_bo *free_bo = NULL;
+	struct sna_pixmap *priv;
+	struct kgem_bo *src_bo;
 
 	DBG(("%s\n", __FUNCTION__));
 
-	if (priv) {
+	op->done = nop_done;
+
+	src_bo = NULL;
+	priv = _sna_pixmap_attach(src);
+	if (priv)
 		src_bo = priv->cpu_bo;
-	} else {
-		src_bo = kgem_create_map(&sna->kgem,
-					 src->devPrivate.ptr,
-					 pixmap_size(src),
-					 0);
-		free_bo = src_bo;
-	}
 	if (src_bo) {
-		op->u.blt.src_pixmap = (void *)free_bo;
-		op->done = blt_vmap_done;
-
-		src_bo->pitch = src->devKind;
 		if (alpha_fixup) {
 			op->blt   = blt_composite_copy_with_alpha;
 			op->box   = blt_composite_copy_box_with_alpha;
@@ -1435,12 +1419,15 @@ prepare_blt_put(struct sna *sna,
 						 GXcopy);
 		}
 	} else {
-		if (alpha_fixup)
-			return FALSE; /* XXX */
-
 		if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
 			return FALSE;
 
+		assert(src->devKind);
+		assert(src->devPrivate.ptr);
+
+		if (alpha_fixup)
+			return FALSE; /* XXX */
+
 		if (alpha_fixup) {
 			op->u.blt.pixel = alpha_fixup;
 			op->blt   = blt_put_composite_with_alpha;
@@ -1451,7 +1438,6 @@ prepare_blt_put(struct sna *sna,
 			op->box   = blt_put_composite_box;
 			op->boxes = blt_put_composite_boxes;
 		}
-		op->done  = nop_done;
 	}
 
 	return TRUE;
commit 272f5d9f8407d8084846b429c1722bddb3e861e9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 1 11:10:03 2012 +0000

    sna: Discard use of inplace GTT uploads on LLC architectures
    
    As the buffer is cache-coherent, we can read as well as write to any
    partial buffer so the distinction is irrelevant.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
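
The change itself is a single mask at buffer creation; the point is that on an
LLC machine the CPU map is already coherent, so the in-place (GTT) write path
buys nothing:

    /* CPU and GPU caches are coherent on LLC parts: reads through the CPU
     * mapping are cheap, so the in-place hint is simply dropped. */
    if (kgem->has_llc)
            flags &= ~KGEM_BUFFER_WRITE_INPLACE;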

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ee81c1b..3ba2ec3 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1171,7 +1171,6 @@ static void kgem_retire_partials(struct kgem *kgem)
 		DBG(("%s: handle=%d, used %d/%d\n", __FUNCTION__,
 		     bo->base.handle, bo->used, bytes(&bo->base)));
 
-		assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
 		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
 		bo->base.dirty = false;
 		bo->base.needs_flush = false;
@@ -1377,7 +1376,6 @@ static void kgem_finish_partials(struct kgem *kgem)
 			continue;
 
 		if (bo->mmapped) {
-			assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
 			assert(!bo->need_io);
 			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
 				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
@@ -3242,10 +3240,14 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	/* we should never be asked to create anything TOO large */
 	assert(size <= kgem->max_cpu_size);
 
+	if (kgem->has_llc)
+		flags &= ~KGEM_BUFFER_WRITE_INPLACE;
+
 	list_for_each_entry(bo, &kgem->partial, base.list) {
 		/* We can reuse any write buffer which we can fit */
 		if (flags == KGEM_BUFFER_LAST &&
-		    bo->write == KGEM_BUFFER_WRITE && bo->base.exec &&
+		    bo->write == KGEM_BUFFER_WRITE &&
+		    bo->base.exec && !bo->mmapped &&
 		    size <= bytes(&bo->base)) {
 			assert(bo->base.refcnt == 1);
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
commit 43b1a717bae047c7ebbf99e6fa4c03b7a67896b8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 28 13:37:14 2012 +0000

    sna: Sort the partial buffers after stealing a write buffer
    
    It will be decoupled and not used again, but this keeps the sanity
    checks happy.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
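
The ordering being restored is the one validate_partials() enforces: walking
the partial list, the free space of each buffer is at least that of its
successor. A sketch of the re-sort (hypothetical name resort_partial; the
driver's bubble_sort_partial performs the equivalent walk):

    static void resort_partial(struct list *head, struct kgem_partial_bo *bo)
    {
            int remain = bytes(&bo->base) - bo->used;

            while (bo->base.list.prev != head) {
                    struct kgem_partial_bo *p =
                            list_entry(bo->base.list.prev,
                                       struct kgem_partial_bo, base.list);

                    if (remain <= bytes(&p->base) - p->used)
                            break;          /* ordering already holds */

                    /* bo has more room free than its predecessor:
                     * re-insert it immediately before p and keep walking. */
                    list_move_tail(&bo->base.list, &p->base.list);
            }
    }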

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3dd7863..ee81c1b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3254,7 +3254,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				  0, bo->used, bo->mem);
 			bo->need_io = 0;
 			bo->write = 0;
-			offset = bo->used = 0;
+			offset = 0;
+			bo->used = size;
+			bubble_sort_partial(kgem, bo);
 			goto done;
 		}
 
commit 8198e5872c3771e2aefabe1e3e93afa94d2ea0ec
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 28 10:42:19 2012 +0000

    sna/gen3: Tweak glyph rendering fast paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 8f597cf..bd1eddd 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -524,6 +524,31 @@ gen3_emit_composite_primitive_identity_source(struct sna *sna,
 }
 
 fastcall static void
+gen3_emit_composite_primitive_identity_source_no_offset(struct sna *sna,
+							const struct sna_composite_op *op,
+							const struct sna_composite_rectangles *r)
+{
+	float w = r->width;
+	float h = r->height;
+	float *v;
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 12;
+
+	v[8] = v[4] = r->dst.x;
+	v[9] = r->dst.y;
+
+	v[0] = v[4] + w;
+	v[5] = v[1] = v[9] + h;
+
+	v[10] = v[6] = r->src.x * op->src.scale[0];
+	v[11] = r->src.y * op->src.scale[1];
+
+	v[2] = v[6] + w * op->src.scale[0];
+	v[7] = v[3] = v[11] + h * op->src.scale[1];
+}
+
+fastcall static void
 gen3_emit_composite_primitive_affine_source(struct sna *sna,
 					    const struct sna_composite_op *op,
 					    const struct sna_composite_rectangles *r)
@@ -584,6 +609,31 @@ gen3_emit_composite_primitive_constant_identity_mask(struct sna *sna,
 }
 
 fastcall static void
+gen3_emit_composite_primitive_constant_identity_mask_no_offset(struct sna *sna,
+							       const struct sna_composite_op *op,
+							       const struct sna_composite_rectangles *r)
+{
+	float w = r->width;
+	float h = r->height;
+	float *v;
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 12;
+
+	v[8] = v[4] = r->dst.x;
+	v[9] = r->dst.y;
+
+	v[0] = v[4] + w;
+	v[5] = v[1] = v[9] + h;
+
+	v[10] = v[6] = r->mask.x * op->mask.scale[0];
+	v[11] = r->mask.y * op->mask.scale[1];
+
+	v[2] = v[6] + w * op->mask.scale[0];
+	v[7] = v[3] = v[11] + h * op->mask.scale[1];
+}
+
+fastcall static void
 gen3_emit_composite_primitive_identity_source_mask(struct sna *sna,
 						   const struct sna_composite_op *op,
 						   const struct sna_composite_rectangles *r)
@@ -2831,17 +2881,23 @@ gen3_render_composite(struct sna *sna,
 				tmp->prim_emit = gen3_emit_composite_primitive_affine_gradient;
 			break;
 		case SHADER_TEXTURE:
-			if (tmp->src.transform == NULL)
-				tmp->prim_emit = gen3_emit_composite_primitive_identity_source;
-			else if (tmp->src.is_affine)
+			if (tmp->src.transform == NULL) {
+				if ((tmp->src.offset[0]|tmp->src.offset[1]|tmp->dst.x|tmp->dst.y) == 0)
+					tmp->prim_emit = gen3_emit_composite_primitive_identity_source_no_offset;
+				else
+					tmp->prim_emit = gen3_emit_composite_primitive_identity_source;
+			} else if (tmp->src.is_affine)
 				tmp->prim_emit = gen3_emit_composite_primitive_affine_source;
 			break;
 		}
 	} else if (tmp->mask.u.gen3.type == SHADER_TEXTURE) {
 		if (tmp->mask.transform == NULL) {
-			if (is_constant_ps(tmp->src.u.gen3.type))
-				tmp->prim_emit = gen3_emit_composite_primitive_constant_identity_mask;
-			else if (tmp->src.transform == NULL)
+			if (is_constant_ps(tmp->src.u.gen3.type)) {
+				if ((tmp->mask.offset[0]|tmp->mask.offset[1]|tmp->dst.x|tmp->dst.y) == 0)
+					tmp->prim_emit = gen3_emit_composite_primitive_constant_identity_mask_no_offset;
+				else
+					tmp->prim_emit = gen3_emit_composite_primitive_constant_identity_mask;
+			} else if (tmp->src.transform == NULL)
 				tmp->prim_emit = gen3_emit_composite_primitive_identity_source_mask;
 			else if (tmp->src.is_affine)
 				tmp->prim_emit = gen3_emit_composite_primitive_affine_source_mask;

