xf86-video-intel: 17 commits - src/sna/gen2_render.c src/sna/gen3_render.c src/sna/gen4_render.c src/sna/gen5_render.c src/sna/gen6_render.c src/sna/gen7_render.c src/sna/kgem.c src/sna/kgem.h src/sna/Makefile.am src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_dri.c src/sna/sna_glyphs.c src/sna/sna.h src/sna/sna_io.c src/sna/sna_render.c src/sna/sna_render.h src/sna/sna_render_inline.h src/sna/sna_trapezoids.c

Chris Wilson ickle at kemper.freedesktop.org
Sat Jan 14 14:11:19 PST 2012


 src/sna/Makefile.am         |    2 
 src/sna/gen2_render.c       |   83 ++++++---------
 src/sna/gen3_render.c       |  222 ++++++++++++++++++++++++----------------
 src/sna/gen4_render.c       |  182 +++++++++++++++++++++------------
 src/sna/gen5_render.c       |  201 +++++++++++++++++++++++++------------
 src/sna/gen6_render.c       |  239 ++++++++++++++++++++++++++++----------------
 src/sna/gen7_render.c       |  221 +++++++++++++++++++++++++++-------------
 src/sna/kgem.c              |  238 +++++++++++++++++++++++++++++++------------
 src/sna/kgem.h              |    9 +
 src/sna/sna.h               |    4 
 src/sna/sna_accel.c         |   38 +++---
 src/sna/sna_blt.c           |   19 ---
 src/sna/sna_dri.c           |    3 
 src/sna/sna_glyphs.c        |   39 ++++---
 src/sna/sna_io.c            |   15 +-
 src/sna/sna_render.c        |    7 -
 src/sna/sna_render.h        |    7 -
 src/sna/sna_render_inline.h |   24 +++-
 src/sna/sna_trapezoids.c    |  162 ++++++++++++++++-------------
 19 files changed, 1093 insertions(+), 622 deletions(-)

New commits:
commit 5d5da35c9fab4e7154921861ecbb83befb8840e5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 18:28:24 2012 +0000

    sna/gen[23]: Check for room in the batch before emitting pipeline flushes
    
    Use a single idiom and reuse the check built into the state emission
    for both the spans and boxes paths.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
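
    A minimal sketch of the single idiom, condensed from the hunks below
    (illustrative only, not an additional change): the state emission is moved
    first, so its built-in gen2_get_batch()/gen3_get_batch() space check also
    covers the pipeline flush that may follow, and the separate
    gen2_check_batch()/kgem_submit() fallback becomes unnecessary.

        gen2_emit_composite_state(sna, tmp);
        if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) {
                if (tmp->src.bo == tmp->dst.bo || tmp->mask.bo == tmp->dst.bo) {
                        kgem_emit_flush(&sna->kgem);
                } else {
                        /* room already guaranteed by the state emission */
                        BATCH(_3DSTATE_MODES_5_CMD |
                              PIPELINE_FLUSH_RENDER_CACHE |
                              PIPELINE_FLUSH_TEXTURE_CACHE);
                        kgem_clear_dirty(&sna->kgem);
                }
                assert(sna->kgem.mode == KGEM_RENDER);
        }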

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 2513307..b448145 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -492,14 +492,6 @@ static void gen2_emit_invariant(struct sna *sna)
 	sna->render_state.gen2.need_invariant = FALSE;
 }
 
-static bool
-gen2_check_batch(struct sna *sna)
-{
-	return (kgem_check_batch(&sna->kgem, 30+40) &&
-		kgem_check_reloc(&sna->kgem, 3) &&
-		kgem_check_exec(&sna->kgem, 3));
-}
-
 static void
 gen2_get_batch(struct sna *sna)
 {
@@ -1780,19 +1772,18 @@ gen2_render_composite(struct sna *sna,
 			   NULL))
 		kgem_submit(&sna->kgem);
 
+	gen2_emit_composite_state(sna, tmp);
 	if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) {
 		if (tmp->src.bo == tmp->dst.bo || tmp->mask.bo == tmp->dst.bo) {
 			kgem_emit_flush(&sna->kgem);
-		} else if (gen2_check_batch(sna)) {
+		} else {
 			BATCH(_3DSTATE_MODES_5_CMD |
 			      PIPELINE_FLUSH_RENDER_CACHE |
 			      PIPELINE_FLUSH_TEXTURE_CACHE);
 			kgem_clear_dirty(&sna->kgem);
-		} else
-			kgem_submit(&sna->kgem);
+		}
+		assert(sna->kgem.mode == KGEM_RENDER);
 	}
-
-	gen2_emit_composite_state(sna, tmp);
 	return TRUE;
 
 cleanup_src:
@@ -2190,17 +2181,6 @@ gen2_render_composite_spans(struct sna *sna,
 			tmp->prim_emit = gen2_emit_composite_spans_primitive_identity_source;
 		else if (tmp->base.src.is_affine)
 			tmp->prim_emit = gen2_emit_composite_spans_primitive_affine_source;
-
-		if (kgem_bo_is_dirty(tmp->base.src.bo)) {
-			if (tmp->base.src.bo == tmp->base.dst.bo) {
-				kgem_emit_flush(&sna->kgem);
-			} else {
-				BATCH(_3DSTATE_MODES_5_CMD |
-				      PIPELINE_FLUSH_RENDER_CACHE |
-				      PIPELINE_FLUSH_TEXTURE_CACHE);
-				kgem_clear_dirty(&sna->kgem);
-			}
-		}
 	}
 	tmp->base.floats_per_rect = 3*tmp->base.floats_per_vertex;
 
@@ -2214,6 +2194,17 @@ gen2_render_composite_spans(struct sna *sna,
 		kgem_submit(&sna->kgem);
 
 	gen2_emit_composite_spans_state(sna, tmp);
+	if (kgem_bo_is_dirty(tmp->base.src.bo)) {
+		if (tmp->base.src.bo == tmp->base.dst.bo) {
+			kgem_emit_flush(&sna->kgem);
+		} else {
+			BATCH(_3DSTATE_MODES_5_CMD |
+			      PIPELINE_FLUSH_RENDER_CACHE |
+			      PIPELINE_FLUSH_TEXTURE_CACHE);
+			kgem_clear_dirty(&sna->kgem);
+		}
+		assert(sna->kgem.mode == KGEM_RENDER);
+	}
 	return TRUE;
 
 cleanup_dst:
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 8b7dbbd..a3db4bb 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1200,14 +1200,6 @@ static void gen3_emit_invariant(struct sna *sna)
 
 #define MAX_OBJECTS 3 /* worst case: dst + src + mask  */
 
-static bool
-gen3_check_batch(struct sna *sna)
-{
-	return (kgem_check_batch(&sna->kgem, 200) &&
-		kgem_check_reloc(&sna->kgem, MAX_OBJECTS) &&
-		kgem_check_exec(&sna->kgem, MAX_OBJECTS));
-}
-
 static void
 gen3_get_batch(struct sna *sna)
 {
@@ -2752,19 +2744,19 @@ gen3_render_composite(struct sna *sna,
 			   NULL))
 		kgem_submit(&sna->kgem);
 
+	gen3_emit_composite_state(sna, tmp);
 	if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) {
 		if (tmp->src.bo == tmp->dst.bo || tmp->mask.bo == tmp->dst.bo) {
 			kgem_emit_flush(&sna->kgem);
-		} else if (gen3_check_batch(sna)) {
+		} else {
 			OUT_BATCH(_3DSTATE_MODES_5_CMD |
 				  PIPELINE_FLUSH_RENDER_CACHE |
 				  PIPELINE_FLUSH_TEXTURE_CACHE);
 			kgem_clear_dirty(&sna->kgem);
-		} else
-			kgem_submit(&sna->kgem);
+		}
+		assert(sna->kgem.mode == KGEM_RENDER);
 	}
 
-	gen3_emit_composite_state(sna, tmp);
 	gen3_align_vertex(sna, tmp);
 	return TRUE;
 
@@ -3193,6 +3185,7 @@ gen3_render_composite_spans(struct sna *sna,
 			   NULL))
 		kgem_submit(&sna->kgem);
 
+	gen3_emit_composite_state(sna, &tmp->base);
 	if (kgem_bo_is_dirty(tmp->base.src.bo)) {
 		if (tmp->base.src.bo == tmp->base.dst.bo) {
 			kgem_emit_flush(&sna->kgem);
@@ -3202,9 +3195,9 @@ gen3_render_composite_spans(struct sna *sna,
 				  PIPELINE_FLUSH_TEXTURE_CACHE);
 			kgem_clear_dirty(&sna->kgem);
 		}
+		assert(sna->kgem.mode == KGEM_RENDER);
 	}
 
-	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
 	return TRUE;
 
commit f7e4799687d5010ea0b56aeb6fbbb2e854b1b363
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 16:53:38 2012 +0000

    sna/gen6: Allow greater use of BLT
    
    Typically we will be bound to the RENDER ring since, once engaged, we try
    not to switch. However, with semaphores enabled we may switch more freely,
    and then it is advantageous to use as much of the faster BLT as is
    feasible.
    
    The most contentious point here is the choice of whether to use the BLT
    for copies by default. Microbenchmarks (compwinwin) benefit from the
    coalescing performed in the render batch, but the more complex traces
    seem to prefer utilizing the blitter. The debate will continue...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
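
    The relaxed test, as used in the hunks below (shown here only to highlight
    the change of idiom): rather than requiring that the blitter already be
    engaged, any state other than "currently on the render ring" now counts in
    favour of the BLT.

        static inline bool prefer_blt_copy(struct sna *sna,
                                           struct kgem_bo *src_bo,
                                           struct kgem_bo *dst_bo)
        {
                /* was: sna->kgem.ring == KGEM_BLT */
                return (sna->kgem.ring != KGEM_RENDER ||
                        untiled_tlb_miss(src_bo) ||
                        untiled_tlb_miss(dst_bo));
        }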

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d1dd1ba..d25ece1 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -412,14 +412,6 @@ gen6_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
 }
 
 static void
-gen6_emit_sip(struct sna *sna)
-{
-	/* Set system instruction pointer */
-	OUT_BATCH(GEN6_STATE_SIP | 0);
-	OUT_BATCH(0);
-}
-
-static void
 gen6_emit_urb(struct sna *sna)
 {
 	OUT_BATCH(GEN6_3DSTATE_URB | (3 - 2));
@@ -555,7 +547,6 @@ gen6_emit_invariant(struct sna *sna)
 	OUT_BATCH(GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
 	OUT_BATCH(1);
 
-	gen6_emit_sip(sna);
 	gen6_emit_urb(sna);
 
 	gen6_emit_state_base_address(sna);
@@ -2185,7 +2176,7 @@ try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
-	if (sna->kgem.ring == KGEM_BLT) {
+	if (sna->kgem.ring != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
 		return TRUE;
 	}
@@ -3002,7 +2993,7 @@ static inline bool prefer_blt_copy(struct sna *sna,
 				   struct kgem_bo *src_bo,
 				   struct kgem_bo *dst_bo)
 {
-	return (sna->kgem.ring == KGEM_BLT ||
+	return (sna->kgem.ring != KGEM_RENDER ||
 		untiled_tlb_miss(src_bo) ||
 		untiled_tlb_miss(dst_bo));
 }
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 7334dfb..9de7faa 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -440,9 +440,6 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		 * as well).
 		 */
 		kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	} else if (sna->kgem.mode == KGEM_NONE) {
-		/* Otherwise employ the BLT unless it means a context switch */
-		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 
 	damage(pixmap, region);
commit c1ce34d4509a3f3e963d82ac0569a21706892f8e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 15:08:18 2012 +0000

    sna/gen6: Tidy markup for when using the BLT is truly preferable
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
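
    The helper introduced below, for reference: fills prefer the BLT unless we
    are already committed to the render ring, or when the untiled target would
    suffer TLB misses, whereas clears keep the stricter "only if already
    engaged" test.

        static inline bool prefer_blt_fill(struct sna *sna, struct kgem_bo *bo)
        {
                return sna->kgem.ring != KGEM_RENDER || untiled_tlb_miss(bo);
        }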

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index c6fa134..d1dd1ba 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3315,6 +3315,12 @@ gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 	gen6_emit_state(sna, op, offset);
 }
 
+static inline bool prefer_blt_fill(struct sna *sna,
+				   struct kgem_bo *bo)
+{
+	return sna->kgem.ring != KGEM_RENDER || untiled_tlb_miss(bo);
+}
+
 static Bool
 gen6_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
@@ -3336,7 +3342,7 @@ gen6_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (sna->kgem.ring != KGEM_RENDER ||
+	if (prefer_blt_fill(sna, dst_bo) ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
 	    !gen6_check_dst_format(format)) {
 		uint8_t alu = -1;
@@ -3558,7 +3564,7 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 			    op);
 #endif
 
-	if (sna->kgem.ring != KGEM_RENDER &&
+	if (prefer_blt_fill(sna, dst_bo) &&
 	    sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
@@ -3652,7 +3658,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 #endif
 
 	/* Prefer to use the BLT if already engaged */
-	if (sna->kgem.ring != KGEM_RENDER &&
+	if (prefer_blt_fill(sna, bo) &&
 	    gen6_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
 		return TRUE;
@@ -3756,7 +3762,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	     dst->drawable.width,
 	     dst->drawable.height));
 
-	/* Prefer to use the BLT if already engaged */
+	/* Prefer to use the BLT if, and only if, already engaged */
 	if (sna->kgem.ring == KGEM_BLT &&
 	    gen6_render_clear_try_blt(sna, dst, bo))
 		return TRUE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index a2ec715..22038ea 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3280,6 +3280,12 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 	gen7_emit_state(sna, op, offset);
 }
 
+static inline bool prefer_blt_fill(struct sna *sna,
+				   struct kgem_bo *bo)
+{
+	return sna->kgem.ring != KGEM_RENDER || untiled_tlb_miss(bo);
+}
+
 static Bool
 gen7_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
@@ -3301,7 +3307,7 @@ gen7_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (sna->kgem.ring != KGEM_RENDER ||
+	if (prefer_blt_fill(sna, dst_bo) ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
 	    !gen7_check_dst_format(format)) {
 		uint8_t alu = -1;
@@ -3524,7 +3530,7 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 			    op);
 #endif
 
-	if (sna->kgem.ring != KGEM_RENDER &&
+	if (prefer_blt_fill(sna, dst_bo) &&
 	    sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
@@ -3618,7 +3624,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 #endif
 
 	/* Prefer to use the BLT if already engaged */
-	if (sna->kgem.ring != KGEM_RENDER &&
+	if (prefer_blt_fill(sna, bo) &&
 	    gen7_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
 		return TRUE;
commit b64751dbdb1b88b91ad97aaf995b4261876cf860
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 13:50:50 2012 +0000

    sna: Be more lenient wrt switching rings if the kernel supports semaphores
    
    If the kernel uses GPU semaphores for its coherency mechanism between
    rings rather than CPU waits, allow the ring to be chosen on the basis
    of the operation that follows a batch submission. (However, since
    batches are likely to be submitted in the middle of a draw, the
    likelihood is that the DDX will remain on one ring until forced to
    switch for an operation or until idle, which is the same situation as
    before, so the difference is minuscule.)
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
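
    Sketch of the two pieces involved (condensed from the kgem.c hunks below):
    semaphore support is probed once from the i915 module parameter, and when
    present the ring binding is dropped on every batch reset so the next
    operation is free to choose its ring.

        static bool semaphores_enabled(void)
        {
                FILE *file = fopen("/sys/module/i915/parameters/semaphores", "r");
                bool detected = false;

                if (file) {
                        int value;
                        if (fscanf(file, "%d", &value) == 1)
                                detected = value > 0;
                        fclose(file);
                }
                return detected;
        }

        /* in kgem_reset(): */
        if (kgem->has_semaphores)
                kgem->ring = KGEM_NONE;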

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5503484..43feadb 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -85,6 +85,7 @@ static inline void list_replace(struct list *old,
 #define DBG_NO_HW 0
 #define DBG_NO_TILING 0
 #define DBG_NO_VMAP 0
+#define DBG_NO_SEMAPHORES 0
 #define DBG_NO_MADV 0
 #define DBG_NO_MAP_UPLOAD 0
 #define DBG_NO_RELAXED_FENCING 0
@@ -531,6 +532,25 @@ static int gem_param(struct kgem *kgem, int name)
 	return v;
 }
 
+static bool semaphores_enabled(void)
+{
+	FILE *file;
+	bool detected = false;
+
+	if (DBG_NO_SEMAPHORES)
+		return false;
+
+	file = fopen("/sys/module/i915/parameters/semaphores", "r");
+	if (file) {
+		int value;
+		if (fscanf(file, "%d", &value) == 1)
+			detected = value > 0;
+		fclose(file);
+	}
+
+	return detected;
+}
+
 void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
@@ -579,9 +599,15 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		}
 	} else
 		kgem->has_relaxed_fencing = 1;
-	DBG(("%s: has relaxed fencing=%d\n", __FUNCTION__,
+	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
 	     kgem->has_relaxed_fencing));
 
+	kgem->has_semaphores = false;
+	if (gen >= 60 && semaphores_enabled())
+		kgem->has_semaphores = true;
+	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
+	     kgem->has_semaphores));
+
 	VG_CLEAR(aperture);
 	aperture.aper_size = 64*1024*1024;
 	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
@@ -1380,6 +1406,8 @@ void kgem_reset(struct kgem *kgem)
 	kgem->nbatch = 0;
 	kgem->surface = kgem->max_batch_size;
 	kgem->mode = KGEM_NONE;
+	if (kgem->has_semaphores)
+		kgem->ring = KGEM_NONE;
 	kgem->flush = 0;
 	kgem->scanout = 0;
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 363c7f9..d9fdd68 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -143,6 +143,7 @@ struct kgem {
 
 	uint32_t has_vmap :1;
 	uint32_t has_relaxed_fencing :1;
+	uint32_t has_semaphores :1;
 
 	uint16_t fence_max;
 	uint16_t half_cpu_cache_pages;
commit 295a22d2709b2442b5254968437f897dac22a0ec
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 14:12:25 2012 +0000

    sna: Ensure that the batch mode is always declared before emitting dwords
    
    Initially, the batch->mode was only set upon an actual mode switch;
    batch submission did not reset the mode. However, to facilitate fast
    ring switching with semaphores, resetting the mode upon batch submission
    is desired, which means that if we submit the batch in the middle of an
    operation we must redeclare its mode before continuing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
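
    The resulting idiom, repeated throughout the hunks below (illustrative
    summary only): a mid-operation submission resets kgem->mode to KGEM_NONE,
    so the mode must be redeclared before any further dwords are emitted, and
    batch_emit() now asserts exactly that.

        kgem_set_mode(&sna->kgem, KGEM_RENDER);
        if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
                kgem_submit(&sna->kgem);                 /* leaves mode == KGEM_NONE */
                _kgem_set_mode(&sna->kgem, KGEM_RENDER); /* redeclare before emitting */
        }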

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index ef32f01..2513307 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -976,6 +976,7 @@ inline static int gen2_get_rectangles(struct sna *sna,
 	if (rem < need + size) {
 		gen2_vertex_flush(sna, op);
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 		return 0;
 	}
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 883e622..8b7dbbd 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3540,6 +3540,7 @@ gen3_video_get_batch(struct sna *sna)
 		     __FUNCTION__,
 		     batch_space(sna), 120));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->kgem.nreloc + 4 > KGEM_RELOC_SIZE(&sna->kgem)) {
@@ -3548,6 +3549,7 @@ gen3_video_get_batch(struct sna *sna)
 		     sna->kgem.nreloc + 4,
 		     (int)KGEM_RELOC_SIZE(&sna->kgem)));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->kgem.nexec + 2 > KGEM_EXEC_SIZE(&sna->kgem)) {
@@ -3556,6 +3558,7 @@ gen3_video_get_batch(struct sna *sna)
 		     sna->kgem.nexec + 2,
 		     (int)KGEM_EXEC_SIZE(&sna->kgem)));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->render_state.gen3.need_invariant)
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1e9d930..c6fa134 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1939,8 +1939,10 @@ gen6_render_video(struct sna *sna,
 	tmp.u.gen6.ve_id = 1;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(frame->bo))
 		kgem_emit_flush(&sna->kgem);
@@ -2539,8 +2541,10 @@ gen6_render_composite(struct sna *sna,
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo))
 		kgem_emit_flush(&sna->kgem);
@@ -2939,8 +2943,10 @@ gen6_render_composite_spans(struct sna *sna,
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->base.dst.bo, tmp->base.src.bo,
-			   NULL))
+			   NULL)){
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(tmp->base.src.bo))
 		kgem_emit_flush(&sna->kgem);
@@ -3113,8 +3119,10 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 	tmp.u.gen6.ve_id = 1;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(src_bo))
 		kgem_emit_flush(&sna->kgem);
@@ -3260,8 +3268,10 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 	op->base.u.gen6.ve_id = 1;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(src_bo))
 		kgem_emit_flush(&sna->kgem);
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index b14bc70..a2ec715 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2031,8 +2031,10 @@ gen7_render_video(struct sna *sna,
 	tmp.u.gen7.ve_id = 1;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(frame->bo))
 		kgem_emit_flush(&sna->kgem);
@@ -2624,8 +2626,10 @@ gen7_render_composite(struct sna *sna,
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo))
 		kgem_emit_flush(&sna->kgem);
@@ -2935,8 +2939,10 @@ gen7_render_composite_spans(struct sna *sna,
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->base.dst.bo, tmp->base.src.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(tmp->base.src.bo))
 		kgem_emit_flush(&sna->kgem);
@@ -3072,8 +3078,10 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	tmp.u.gen7.ve_id = 1;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(src_bo))
 		kgem_emit_flush(&sna->kgem);
@@ -3219,8 +3227,10 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 	op->base.u.gen7.ve_id = 1;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
 
 	if (kgem_bo_is_dirty(src_bo))
 		kgem_emit_flush(&sna->kgem);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index a455c6a..363c7f9 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -285,6 +285,8 @@ static inline void kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
 
 static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
 {
+	assert(kgem->mode == KGEM_NONE);
+	kgem->context_switch(kgem, mode);
 	kgem->mode = mode;
 }
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index eda952f..6607b11 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -96,14 +96,6 @@ static void nop_done(struct sna *sna, const struct sna_composite_op *op)
 	(void)op;
 }
 
-static void blt_done(struct sna *sna, const struct sna_composite_op *op)
-{
-	struct kgem *kgem = &sna->kgem;
-
-	_kgem_set_mode(kgem, KGEM_BLT);
-	(void)op;
-}
-
 static void gen6_blt_copy_done(struct sna *sna, const struct sna_composite_op *op)
 {
 	struct kgem *kgem = &sna->kgem;
@@ -114,8 +106,6 @@ static void gen6_blt_copy_done(struct sna *sna, const struct sna_composite_op *o
 		b[1] = b[2] = 0;
 		kgem->nbatch += 3;
 	}
-
-	_kgem_set_mode(kgem, KGEM_BLT);
 	(void)op;
 }
 
@@ -919,7 +909,7 @@ prepare_blt_clear(struct sna *sna,
 		op->box   = blt_composite_fill_box_no_offset;
 		op->boxes = blt_composite_fill_boxes_no_offset;
 	}
-	op->done  = blt_done;
+	op->done  = nop_done;
 
 	return sna_blt_fill_init(sna, &op->u.blt,
 				 op->dst.bo,
@@ -942,7 +932,7 @@ prepare_blt_fill(struct sna *sna,
 		op->box   = blt_composite_fill_box_no_offset;
 		op->boxes = blt_composite_fill_boxes_no_offset;
 	}
-	op->done  = blt_done;
+	op->done  = nop_done;
 
 	return sna_blt_fill_init(sna, &op->u.blt, op->dst.bo,
 				 op->dst.pixmap->drawable.bitsPerPixel,
@@ -1129,7 +1119,7 @@ prepare_blt_copy(struct sna *sna,
 	if (sna->kgem.gen >= 60)
 		op->done  = gen6_blt_copy_done;
 	else
-		op->done  = blt_done;
+		op->done  = nop_done;
 
 	if (alpha_fixup) {
 		op->blt   = blt_composite_copy_with_alpha;
@@ -1158,7 +1148,6 @@ static void blt_vmap_done(struct sna *sna, const struct sna_composite_op *op)
 {
 	struct kgem_bo *bo = (struct kgem_bo *)op->u.blt.src_pixmap;
 
-	blt_done(sna, op);
 	if (bo)
 		kgem_bo_destroy(&sna->kgem, bo);
 }
@@ -1740,7 +1729,6 @@ fastcall static void sna_blt_fill_op_boxes(struct sna *sna,
 static void sna_blt_fill_op_done(struct sna *sna,
 				 const struct sna_fill_op *fill)
 {
-	blt_done(sna, &fill->base);
 }
 
 bool sna_blt_fill(struct sna *sna, uint8_t alu,
@@ -1786,7 +1774,6 @@ static void sna_blt_copy_op_blt(struct sna *sna,
 static void sna_blt_copy_op_done(struct sna *sna,
 				 const struct sna_copy_op *op)
 {
-	blt_done(sna, &op->base);
 }
 
 static void gen6_blt_copy_op_done(struct sna *sna,
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 27f4909..c9d2b5f 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -52,6 +52,7 @@ static inline int batch_space(struct sna *sna)
 
 static inline void batch_emit(struct sna *sna, uint32_t dword)
 {
+	assert(sna->kgem.mode != KGEM_NONE);
 	assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
 	sna->kgem.batch[sna->kgem.nbatch++] = dword;
 }
commit 0d2a50772200d868d094f90dc1d30c1b4d7930ba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 11:03:55 2012 +0000

    sna/glyphs: Cache the glyph image on the fallback path as well
    
    The glyph cache grew to accommodate the fallback pixman image for mask
    generation, and is equally applicable along the full fallback path.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
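
    The caching pattern applied to the fallback path (condensed from the hunk
    below): the pixman image is created at most once per glyph and kept in the
    per-glyph private, instead of being rebuilt and freed for every glyph drawn.

        glyph_image = sna_glyph(g)->image;
        if (glyph_image == NULL) {
                PicturePtr picture = GlyphPicture(g)[screen];
                if (picture == NULL)
                        goto next_glyph;

                glyph_image = image_from_pict(picture, FALSE, &dx, &dy);
                if (!glyph_image)
                        goto next_glyph;

                assert(dx == 0 && dy == 0);
                sna_glyph(g)->image = glyph_image;  /* owned by the cache from now on */
        }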

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 95cc8c0..6efe801 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1106,19 +1106,29 @@ glyphs_fallback(CARD8 op,
 		y += list->yOff;
 		while (n--) {
 			GlyphPtr g = *glyphs++;
-			PicturePtr picture;
 			pixman_image_t *glyph_image;
 
 			if (g->info.width == 0 || g->info.height == 0)
 				goto next_glyph;
 
-			picture = GlyphPicture(g)[screen];
-			if (picture == NULL)
-				goto next_glyph;
+			glyph_image = sna_glyph(g)->image;
+			if (glyph_image == NULL) {
+				PicturePtr picture;
+				int dx, dy;
 
-			glyph_image = image_from_pict(picture, FALSE, &dx, &dy);
-			if (!glyph_image)
-				goto next_glyph;
+				picture = GlyphPicture(g)[screen];
+				if (picture == NULL)
+					goto next_glyph;
+
+				glyph_image = image_from_pict(picture,
+							      FALSE,
+							      &dx, &dy);
+				if (!glyph_image)
+					goto next_glyph;
+
+				assert(dx == 0 && dy == 0);
+				sna_glyph(g)->image = glyph_image;
+			}
 
 			if (mask_format) {
 				DBG(("%s: glyph+(%d,%d) to mask (%d, %d)x(%d, %d)\n",
@@ -1163,8 +1173,6 @@ glyphs_fallback(CARD8 op,
 						       g->info.width,
 						       g->info.height);
 			}
-			free_pixman_pict(picture, glyph_image);
-
 next_glyph:
 			x += g->info.xOff;
 			y += g->info.yOff;
commit f3e0ba4f65db2b85f89aa3868d153434bc41c811
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 10:17:27 2012 +0000

    sna/gen5: Disable render glyphs_to_dst
    
    Processing more than a single rectangle using the CA path on ILK is
    extremely hit-or-miss, often resulting in the absence of the second
    primitive (i.e. the glyphs are cleared but not added). This is
    reminiscent of the complete breakage of the BRW shaders, none of which
    can handle more than a single rectangle.
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
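
    The workaround in the hunk below, for reference: on gen5 a mask format is
    forced even when none was requested, so glyph rendering always takes the
    single-rectangle-safe mask path rather than compositing glyphs directly to
    the destination.

        _mask = mask;
        if (sna->kgem.gen == 50 && !_mask)
                _mask = list[0].format;  /* avoid the broken CA-to-dst path on ILK */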

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 71625b8..95cc8c0 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1233,11 +1233,16 @@ sna_glyphs(CARD8 op,
 		goto fallback;
 	}
 
+	_mask = mask;
 	/* XXX discard the mask for non-overlapping glyphs? */
 
-	if (!mask ||
+	/* XXX more shader breakage?: CA to dst is fubar on ilk */
+	if (sna->kgem.gen == 50 && !_mask)
+		_mask = list[0].format;
+
+	if (!_mask ||
 	    (((nlist == 1 && list->len == 1) || op == PictOpAdd) &&
-	     dst->format == (mask->depth << 24 | mask->format))) {
+	     dst->format == (_mask->depth << 24 | _mask->format))) {
 		if (glyphs_to_dst(sna, op,
 				  src, dst,
 				  src_x, src_y,
@@ -1245,7 +1250,6 @@ sna_glyphs(CARD8 op,
 			return;
 	}
 
-	_mask = mask;
 	if (!_mask)
 		_mask = glyphs_format(nlist, list, glyphs);
 	if (_mask) {
commit fb92818ba4dc81ce62d58a87b5af6cb1e3a96708
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 14 01:48:11 2012 +0000

    sna: Pass render operation to flush and avoid the implicit flush-on-batch-end
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
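
    The new calling convention, sketched for gen2 (condensed from the hunks
    below): the composite op is handed to the vertex flush by its caller, so
    the CA fixup no longer relies on the removed sna->render.op field and a
    flush happens only where an operation is genuinely in progress.

        static void gen2_vertex_flush(struct sna *sna,
                                      const struct sna_composite_op *op)
        {
                if (sna->render.vertex_index == 0)
                        return;

                sna->kgem.batch[sna->render_state.gen2.vertex_offset] |=
                        sna->render.vertex_index - 1;
                gen2_magic_ca_pass(sna, op);  /* op supplied by the caller */

                sna->render_state.gen2.vertex_offset = 0;
                sna->render.vertex_index = 0;
        }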

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 87f9e8c..ef32f01 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -510,6 +510,7 @@ gen2_get_batch(struct sna *sna)
 		     __FUNCTION__, 30+40,
 		     sna->kgem.surface-sna->kgem.nbatch));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->kgem.nreloc + 3 > KGEM_RELOC_SIZE(&sna->kgem)) {
@@ -518,6 +519,7 @@ gen2_get_batch(struct sna *sna)
 		     sna->kgem.nreloc + 3,
 		     (int)KGEM_RELOC_SIZE(&sna->kgem)));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->kgem.nexec + 3 > KGEM_EXEC_SIZE(&sna->kgem)) {
@@ -526,6 +528,7 @@ gen2_get_batch(struct sna *sna)
 		     sna->kgem.nexec + 1,
 		     (int)KGEM_EXEC_SIZE(&sna->kgem)));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->render_state.gen2.need_invariant)
@@ -936,7 +939,8 @@ static void gen2_magic_ca_pass(struct sna *sna,
 		*dst++ = *src++;
 }
 
-static void gen2_vertex_flush(struct sna *sna)
+static void gen2_vertex_flush(struct sna *sna,
+			      const struct sna_composite_op *op)
 {
 	if (sna->render.vertex_index == 0)
 		return;
@@ -944,8 +948,7 @@ static void gen2_vertex_flush(struct sna *sna)
 	sna->kgem.batch[sna->render_state.gen2.vertex_offset] |=
 		sna->render.vertex_index - 1;
 
-	if (sna->render.op)
-		gen2_magic_ca_pass(sna, sna->render.op);
+	gen2_magic_ca_pass(sna, op);
 
 	sna->render_state.gen2.vertex_offset = 0;
 	sna->render.vertex_index = 0;
@@ -971,7 +974,8 @@ inline static int gen2_get_rectangles(struct sna *sna,
 	DBG(("%s: want=%d, need=%d,size=%d, rem=%d\n",
 	     __FUNCTION__, want, need, size, rem));
 	if (rem < need + size) {
-		kgem_submit (&sna->kgem);
+		gen2_vertex_flush(sna, op);
+		kgem_submit(&sna->kgem);
 		return 0;
 	}
 
@@ -1073,9 +1077,7 @@ gen2_render_composite_boxes(struct sna *sna,
 static void gen2_render_composite_done(struct sna *sna,
 				       const struct sna_composite_op *op)
 {
-	gen2_vertex_flush(sna);
-	sna->render.op = NULL;
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	gen2_vertex_flush(sna, op);
 
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
@@ -1790,8 +1792,6 @@ gen2_render_composite(struct sna *sna,
 	}
 
 	gen2_emit_composite_state(sna, tmp);
-
-	sna->render.op = tmp;
 	return TRUE;
 
 cleanup_src:
@@ -2105,11 +2105,10 @@ fastcall static void
 gen2_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
-	gen2_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-
 	DBG(("%s()\n", __FUNCTION__));
 
+	gen2_vertex_flush(sna, &op->base);
+
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -2436,8 +2435,7 @@ gen2_render_fill_boxes(struct sna *sna,
 		} while (--n_this_time);
 	} while (n);
 
-	gen2_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	gen2_vertex_flush(sna, &tmp);
 	return TRUE;
 }
 
@@ -2540,8 +2538,7 @@ gen2_render_fill_op_boxes(struct sna *sna,
 static void
 gen2_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen2_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	gen2_vertex_flush(sna, &op->base);
 }
 
 static Bool
@@ -2677,7 +2674,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	VERTEX(y2);
 	VERTEX(x1);
 	VERTEX(y1);
-	gen2_vertex_flush(sna);
+	gen2_vertex_flush(sna, &tmp);
 
 	return TRUE;
 }
@@ -2862,8 +2859,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 		} while (--n_this_time);
 	} while (n);
 
-	gen2_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	gen2_vertex_flush(sna, &tmp);
 	return TRUE;
 }
 
@@ -2898,8 +2894,7 @@ gen2_render_copy_blt(struct sna *sna,
 static void
 gen2_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen2_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	gen2_vertex_flush(sna, &op->base);
 }
 
 static Bool
@@ -2984,7 +2979,7 @@ gen2_render_reset(struct sna *sna)
 static void
 gen2_render_flush(struct sna *sna)
 {
-	gen2_vertex_flush(sna);
+	assert(sna->render.vertex_index == 0);
 }
 
 static void
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index fc896d9..883e622 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1218,6 +1218,7 @@ gen3_get_batch(struct sna *sna)
 		     __FUNCTION__, 200,
 		     sna->kgem.surface-sna->kgem.nbatch));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - MAX_OBJECTS) {
@@ -1226,6 +1227,7 @@ gen3_get_batch(struct sna *sna)
 		     sna->kgem.nreloc,
 		     (int)KGEM_RELOC_SIZE(&sna->kgem) - MAX_OBJECTS));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - MAX_OBJECTS - 1) {
@@ -1234,6 +1236,7 @@ gen3_get_batch(struct sna *sna)
 		     sna->kgem.nexec,
 		     (int)KGEM_EXEC_SIZE(&sna->kgem) - MAX_OBJECTS - 1));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->render_state.gen3.need_invariant)
@@ -1492,9 +1495,7 @@ static void gen3_magic_ca_pass(struct sna *sna,
 
 static void gen3_vertex_flush(struct sna *sna)
 {
-	if (sna->render_state.gen3.vertex_offset == 0 ||
-	    sna->render.vertex_index == sna->render.vertex_start)
-		return;
+	assert(sna->render_state.gen3.vertex_offset);
 
 	DBG(("%s[%x] = %d\n", __FUNCTION__,
 	     4*sna->render_state.gen3.vertex_offset,
@@ -1506,9 +1507,6 @@ static void gen3_vertex_flush(struct sna *sna)
 	sna->kgem.batch[sna->render_state.gen3.vertex_offset + 1] =
 		sna->render.vertex_start;
 
-	if (sna->render.op)
-		gen3_magic_ca_pass(sna, sna->render.op);
-
 	sna->render_state.gen3.vertex_offset = 0;
 }
 
@@ -1516,9 +1514,7 @@ static int gen3_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 
-	gen3_vertex_flush(sna);
-	if (!sna->render.vertex_used)
-		return sna->render.vertex_size;
+	assert(sna->render.vertex_used);
 
 	bo = sna->render.vbo;
 	if (bo) {
@@ -1560,8 +1556,6 @@ static void gen3_vertex_close(struct sna *sna)
 	struct kgem_bo *bo;
 	int delta = 0;
 
-	gen3_vertex_flush(sna);
-
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1656,9 +1650,15 @@ static bool gen3_rectangle_begin(struct sna *sna,
 	return true;
 }
 
-static int gen3_get_rectangles__flush(struct sna *sna, bool ca)
+static int gen3_get_rectangles__flush(struct sna *sna,
+				      const struct sna_composite_op *op)
 {
-	if (!kgem_check_batch(&sna->kgem, ca ? 105: 5))
+	if (sna->render_state.gen3.vertex_offset) {
+		gen3_vertex_flush(sna);
+		gen3_magic_ca_pass(sna, op);
+	}
+
+	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 105: 5))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 2)
 		return 0;
@@ -1681,7 +1681,7 @@ inline static int gen3_get_rectangles(struct sna *sna,
 	if (op->floats_per_rect > rem) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
-		rem = gen3_get_rectangles__flush(sna, op->need_magic_ca_pass);
+		rem = gen3_get_rectangles__flush(sna, op);
 		if (rem == 0)
 			return 0;
 	}
@@ -1791,14 +1791,13 @@ static void
 gen3_render_composite_done(struct sna *sna,
 			   const struct sna_composite_op *op)
 {
-	assert(sna->render.op == op);
-
-	gen3_vertex_flush(sna);
-	sna->render.op = NULL;
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-
 	DBG(("%s()\n", __FUNCTION__));
 
+	if (sna->render_state.gen3.vertex_offset) {
+		gen3_vertex_flush(sna);
+		gen3_magic_ca_pass(sna, op);
+	}
+
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
 	if (op->src.bo)
@@ -2767,8 +2766,6 @@ gen3_render_composite(struct sna *sna,
 
 	gen3_emit_composite_state(sna, tmp);
 	gen3_align_vertex(sna, tmp);
-
-	sna->render.op = tmp;
 	return TRUE;
 
 cleanup_mask:
@@ -3069,8 +3066,8 @@ fastcall static void
 gen3_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
-	gen3_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen3.vertex_offset)
+		gen3_vertex_flush(sna);
 
 	DBG(("%s()\n", __FUNCTION__));
 
@@ -3828,7 +3825,6 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 	} while (n);
 
 	gen3_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -3863,8 +3859,8 @@ gen3_render_copy_blt(struct sna *sna,
 static void
 gen3_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen3_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen3.vertex_offset)
+		gen3_vertex_flush(sna);
 }
 
 static Bool
@@ -4103,7 +4099,6 @@ gen3_render_fill_boxes(struct sna *sna,
 	} while (n);
 
 	gen3_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -4175,8 +4170,8 @@ gen3_render_fill_op_boxes(struct sna *sna,
 static void
 gen3_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen3_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen3.vertex_offset)
+		gen3_vertex_flush(sna);
 }
 
 static Bool
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 942f8fb..12281e3 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -63,12 +63,12 @@
 #define NO_FILL_BOXES 0
 
 #if FLUSH_EVERY_VERTEX
-#define FLUSH() do { \
-	gen4_vertex_flush(sna); \
+#define FLUSH(OP) do { \
+	gen4_vertex_flush(sna, OP); \
 	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
 } while (0)
 #else
-#define FLUSH()
+#define FLUSH(OP)
 #endif
 
 #define GEN4_GRF_BLOCKS(nreg)    ((nreg + 15) / 16 - 1)
@@ -342,7 +342,8 @@ static void gen4_magic_ca_pass(struct sna *sna,
 	state->last_primitive = sna->kgem.nbatch;
 }
 
-static void gen4_vertex_flush(struct sna *sna)
+static void gen4_vertex_flush(struct sna *sna,
+			      const struct sna_composite_op *op)
 {
 	if (sna->render_state.gen4.vertex_offset == 0)
 		return;
@@ -354,8 +355,7 @@ static void gen4_vertex_flush(struct sna *sna)
 		sna->render.vertex_index - sna->render.vertex_start;
 	sna->render_state.gen4.vertex_offset = 0;
 
-	if (sna->render.op)
-		gen4_magic_ca_pass(sna, sna->render.op);
+	gen4_magic_ca_pass(sna, op);
 }
 
 static int gen4_vertex_finish(struct sna *sna)
@@ -363,9 +363,7 @@ static int gen4_vertex_finish(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i;
 
-	gen4_vertex_flush(sna);
-	if (!sna->render.vertex_used)
-		return sna->render.vertex_size;
+	assert(sna->render.vertex_used);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -417,7 +415,6 @@ static void gen4_vertex_close(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i, delta = 0;
 
-	gen4_vertex_flush(sna);
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1114,8 +1111,11 @@ static bool gen4_rectangle_begin(struct sna *sna,
 	return true;
 }
 
-static int gen4_get_rectangles__flush(struct sna *sna)
+static int gen4_get_rectangles__flush(struct sna *sna,
+				      const struct sna_composite_op *op)
 {
+	gen4_vertex_flush(sna, op);
+
 	if (!kgem_check_batch(&sna->kgem, 25))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
@@ -1135,7 +1135,7 @@ inline static int gen4_get_rectangles(struct sna *sna,
 	if (rem < 3*op->floats_per_vertex) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, 3*op->floats_per_vertex));
-		rem = gen4_get_rectangles__flush(sna);
+		rem = gen4_get_rectangles__flush(sna, op);
 		if (rem == 0)
 			return 0;
 	}
@@ -1262,6 +1262,7 @@ gen4_get_batch(struct sna *sna)
 		     __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
 		     150, 4*8));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->render_state.gen4.needs_invariant)
@@ -1513,7 +1514,7 @@ gen4_render_composite_blt(struct sna *sna,
 	op->prim_emit(sna, op, r);
 
 	/* XXX are the shaders fubar? */
-	FLUSH();
+	FLUSH(op);
 }
 
 fastcall static void
@@ -1751,7 +1752,7 @@ gen4_render_video(struct sna *sna,
 		OUT_VERTEX_F((box->x1 - dxo) * src_scale_x);
 		OUT_VERTEX_F((box->y1 - dyo) * src_scale_y);
 
-		FLUSH();
+		FLUSH(&tmp);
 
 		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
 			sna_damage_add_box(&priv->gpu_damage, &r);
@@ -1760,8 +1761,7 @@ gen4_render_video(struct sna *sna,
 		box++;
 	}
 
-	gen4_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	gen4_vertex_flush(sna, &tmp);
 	return TRUE;
 }
 
@@ -1876,12 +1876,10 @@ static void
 gen4_render_composite_done(struct sna *sna,
 			   const struct sna_composite_op *op)
 {
-	gen4_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	sna->render.op = NULL;
-
 	DBG(("%s()\n", __FUNCTION__));
 
+	gen4_vertex_flush(sna, op);
+
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
 	if (op->src.bo)
@@ -2274,8 +2272,6 @@ gen4_render_composite(struct sna *sna,
 
 	gen4_bind_surfaces(sna, tmp);
 	gen4_align_vertex(sna, tmp);
-
-	sna->render.op = tmp;
 	return TRUE;
 
 cleanup_src:
@@ -2367,7 +2363,7 @@ gen4_render_copy_one(struct sna *sna,
 	OUT_VERTEX_F(sx*op->src.scale[0]);
 	OUT_VERTEX_F(sy*op->src.scale[1]);
 
-	FLUSH();
+	FLUSH(op);
 }
 
 static Bool
@@ -2452,8 +2448,6 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 				     box->x1, box->y1);
 		box++;
 	} while (--n);
-
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -2470,8 +2464,7 @@ gen4_render_copy_blt(struct sna *sna,
 static void
 gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen4_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	gen4_vertex_flush(sna, &op->base);
 }
 
 static Bool
@@ -2599,7 +2592,7 @@ gen4_render_fill_rectangle(struct sna *sna,
 	OUT_VERTEX_F(0);
 	OUT_VERTEX_F(0);
 
-	FLUSH();
+	FLUSH(op);
 }
 
 static Bool
@@ -2706,7 +2699,6 @@ gen4_render_fill_boxes(struct sna *sna,
 	} while (--n);
 
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -2744,9 +2736,8 @@ gen4_render_fill_op_boxes(struct sna *sna,
 static void
 gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen4_vertex_flush(sna);
+	gen4_vertex_flush(sna, &op->base);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 }
 
 static Bool
@@ -2899,9 +2890,8 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen4_render_fill_rectangle(sna, &tmp, x1, y1, x2 - x1, y2 - y1);
 
-	gen4_vertex_flush(sna);
+	gen4_vertex_flush(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
 	return TRUE;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 573478d..560830e 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -312,6 +312,8 @@ static void gen5_magic_ca_pass(struct sna *sna,
 	if (!op->need_magic_ca_pass)
 		return;
 
+	assert(sna->render.vertex_index > sna->render.vertex_start);
+
 	DBG(("%s: CA fixup\n", __FUNCTION__));
 
 	gen5_emit_pipelined_pointers
@@ -335,8 +337,8 @@ static void gen5_magic_ca_pass(struct sna *sna,
 
 static void gen5_vertex_flush(struct sna *sna)
 {
-	if (sna->render_state.gen5.vertex_offset == 0)
-		return;
+	assert(sna->render_state.gen5.vertex_offset);
+	assert(sna->render.vertex_index > sna->render.vertex_start);
 
 	DBG(("%s[%x] = %d\n", __FUNCTION__,
 	     4*sna->render_state.gen5.vertex_offset,
@@ -344,9 +346,6 @@ static void gen5_vertex_flush(struct sna *sna)
 	sna->kgem.batch[sna->render_state.gen5.vertex_offset] =
 		sna->render.vertex_index - sna->render.vertex_start;
 	sna->render_state.gen5.vertex_offset = 0;
-
-	if (sna->render.op)
-		gen5_magic_ca_pass(sna, sna->render.op);
 }
 
 static int gen5_vertex_finish(struct sna *sna)
@@ -354,9 +353,7 @@ static int gen5_vertex_finish(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i;
 
-	gen5_vertex_flush(sna);
-	if (!sna->render.vertex_used)
-		return sna->render.vertex_size;
+	assert(sna->render.vertex_used);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -414,7 +411,6 @@ static void gen5_vertex_close(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i, delta = 0;
 
-	gen5_vertex_flush(sna);
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1145,13 +1141,19 @@ static bool gen5_rectangle_begin(struct sna *sna,
 	return true;
 }
 
-static int gen5_get_rectangles__flush(struct sna *sna)
+static int gen5_get_rectangles__flush(struct sna *sna,
+				      const struct sna_composite_op *op)
 {
-	if (!kgem_check_batch(&sna->kgem, 25))
+	if (sna->render_state.gen5.vertex_offset) {
+		gen5_vertex_flush(sna);
+		gen5_magic_ca_pass(sna, op);
+	}
+
+	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 20 : 6))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
+	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
 	return gen5_vertex_finish(sna);
@@ -1166,7 +1168,7 @@ inline static int gen5_get_rectangles(struct sna *sna,
 	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
-		rem = gen5_get_rectangles__flush(sna);
+		rem = gen5_get_rectangles__flush(sna, op);
 		if (rem == 0)
 			return 0;
 	}
@@ -1292,6 +1294,7 @@ gen5_get_batch(struct sna *sna)
 		     __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
 		     150, 4*8));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->render_state.gen5.needs_invariant)
@@ -1810,7 +1813,6 @@ gen5_render_video(struct sna *sna,
 	}
 
 	gen5_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -1919,9 +1921,10 @@ static void
 gen5_render_composite_done(struct sna *sna,
 			   const struct sna_composite_op *op)
 {
-	gen5_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	sna->render.op = NULL;
+	if (sna->render_state.gen5.vertex_offset) {
+		gen5_vertex_flush(sna);
+		gen5_magic_ca_pass(sna,op);
+	}
 
 	DBG(("%s()\n", __FUNCTION__));
 
@@ -2333,8 +2336,6 @@ gen5_render_composite(struct sna *sna,
 
 	gen5_bind_surfaces(sna, tmp);
 	gen5_align_vertex(sna, tmp);
-
-	sna->render.op = tmp;
 	return TRUE;
 
 cleanup_src:
@@ -2548,8 +2549,8 @@ fastcall static void
 gen5_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
-	gen5_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen5.vertex_offset)
+		gen5_vertex_flush(sna);
 
 	DBG(("%s()\n", __FUNCTION__));
 
@@ -2796,7 +2797,6 @@ gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 	} while (n);
 
 	gen5_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -2832,8 +2832,8 @@ static void
 gen5_render_copy_done(struct sna *sna,
 		      const struct sna_copy_op *op)
 {
-	gen5_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen5.vertex_offset)
+		gen5_vertex_flush(sna);
 
 	DBG(("%s()\n", __FUNCTION__));
 }
@@ -3071,7 +3071,6 @@ gen5_render_fill_boxes(struct sna *sna,
 
 	gen5_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -3164,9 +3163,9 @@ static void
 gen5_render_fill_op_done(struct sna *sna,
 			 const struct sna_fill_op *op)
 {
-	gen5_vertex_flush(sna);
+	if (sna->render_state.gen5.vertex_offset)
+		gen5_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
 	DBG(("%s()\n", __FUNCTION__));
 }
@@ -3340,7 +3339,6 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen5_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
 	return TRUE;
 }
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1ec6f06..1e9d930 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -896,8 +896,7 @@ static void gen6_magic_ca_pass(struct sna *sna,
 
 static void gen6_vertex_flush(struct sna *sna)
 {
-	if (sna->render_state.gen6.vertex_offset == 0)
-		return;
+	assert(sna->render_state.gen6.vertex_offset);
 
 	DBG(("%s[%x] = %d\n", __FUNCTION__,
 	     4*sna->render_state.gen6.vertex_offset,
@@ -905,9 +904,6 @@ static void gen6_vertex_flush(struct sna *sna)
 	sna->kgem.batch[sna->render_state.gen6.vertex_offset] =
 		sna->render.vertex_index - sna->render.vertex_start;
 	sna->render_state.gen6.vertex_offset = 0;
-
-	if (sna->render.op)
-		gen6_magic_ca_pass(sna, sna->render.op);
 }
 
 static int gen6_vertex_finish(struct sna *sna)
@@ -915,9 +911,7 @@ static int gen6_vertex_finish(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i;
 
-	gen6_vertex_flush(sna);
-	if (!sna->render.vertex_used)
-		return sna->render.vertex_size;
+	assert(sna->render.vertex_used);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -976,7 +970,6 @@ static void gen6_vertex_close(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i, delta = 0;
 
-	gen6_vertex_flush(sna);
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1541,13 +1534,19 @@ static bool gen6_rectangle_begin(struct sna *sna,
 	return true;
 }
 
-static int gen6_get_rectangles__flush(struct sna *sna, bool ca)
+static int gen6_get_rectangles__flush(struct sna *sna,
+				      const struct sna_composite_op *op)
 {
-	if (!kgem_check_batch(&sna->kgem, ca ? 65 : 5))
+	if (sna->render_state.gen6.vertex_offset) {
+		gen6_vertex_flush(sna);
+		gen6_magic_ca_pass(sna, op);
+	}
+
+	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 5))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
+	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
 	return gen6_vertex_finish(sna);
@@ -1562,7 +1561,7 @@ inline static int gen6_get_rectangles(struct sna *sna,
 	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
-		rem = gen6_get_rectangles__flush(sna, op->need_magic_ca_pass);
+		rem = gen6_get_rectangles__flush(sna, op);
 		if (rem == 0)
 			return 0;
 	}
@@ -1613,6 +1612,7 @@ gen6_get_batch(struct sna *sna)
 		     __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
 		     150, 4*8));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->render_state.gen6.needs_invariant)
@@ -1666,6 +1666,7 @@ gen6_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 	assert (sna->render_state.gen6.vertex_offset == 0);
 	if (op->floats_per_vertex != sna->render_state.gen6.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+			/* XXX propagate failure */
 			gen6_vertex_finish(sna);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
@@ -2000,7 +2001,6 @@ gen6_render_video(struct sna *sna,
 	}
 
 	gen6_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -2119,9 +2119,10 @@ static void gen6_composite_channel_convert(struct sna_composite_channel *channel
 static void gen6_render_composite_done(struct sna *sna,
 				       const struct sna_composite_op *op)
 {
-	gen6_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	sna->render.op = NULL;
+	if (sna->render_state.gen6.vertex_offset) {
+		gen6_vertex_flush(sna);
+		gen6_magic_ca_pass(sna, op);
+	}
 
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
@@ -2546,8 +2547,6 @@ gen6_render_composite(struct sna *sna,
 
 	gen6_emit_composite_state(sna, tmp);
 	gen6_align_vertex(sna, tmp);
-
-	sna->render.op = tmp;
 	return TRUE;
 
 cleanup_src:
@@ -2841,8 +2840,8 @@ fastcall static void
 gen6_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
-	gen6_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen6.vertex_offset)
+		gen6_vertex_flush(sna);
 
 	DBG(("%s()\n", __FUNCTION__));
 
@@ -3158,7 +3157,6 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 	} while (n);
 
 	gen6_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -3190,8 +3188,8 @@ gen6_render_copy_blt(struct sna *sna,
 static void
 gen6_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen6_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen6.vertex_offset)
+		gen6_vertex_flush(sna);
 }
 
 static Bool
@@ -3439,7 +3437,6 @@ gen6_render_fill_boxes(struct sna *sna,
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -3531,9 +3528,9 @@ gen6_render_op_fill_boxes(struct sna *sna,
 static void
 gen6_render_op_fill_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen6_vertex_flush(sna);
+	if (sna->render_state.gen6.vertex_offset)
+		gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 }
 
 static Bool
@@ -3716,7 +3713,6 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
 	return TRUE;
 }
@@ -3812,7 +3808,6 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
 	return TRUE;
 }
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 16c389c..b14bc70 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -999,8 +999,7 @@ static void gen7_magic_ca_pass(struct sna *sna,
 
 static void gen7_vertex_flush(struct sna *sna)
 {
-	if (sna->render_state.gen7.vertex_offset == 0)
-		return;
+	assert(sna->render_state.gen7.vertex_offset);
 
 	DBG(("%s[%x] = %d\n", __FUNCTION__,
 	     4*sna->render_state.gen7.vertex_offset,
@@ -1008,9 +1007,6 @@ static void gen7_vertex_flush(struct sna *sna)
 	sna->kgem.batch[sna->render_state.gen7.vertex_offset] =
 		sna->render.vertex_index - sna->render.vertex_start;
 	sna->render_state.gen7.vertex_offset = 0;
-
-	if (sna->render.op)
-		gen7_magic_ca_pass(sna, sna->render.op);
 }
 
 static int gen7_vertex_finish(struct sna *sna)
@@ -1018,9 +1014,7 @@ static int gen7_vertex_finish(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i;
 
-	gen7_vertex_flush(sna);
-	if (!sna->render.vertex_used)
-		return sna->render.vertex_size;
+	assert(sna->render.vertex_used);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -1079,7 +1073,6 @@ static void gen7_vertex_close(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i, delta = 0;
 
-	gen7_vertex_flush(sna);
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1636,13 +1629,19 @@ static bool gen7_rectangle_begin(struct sna *sna,
 	return true;
 }
 
-static int gen7_get_rectangles__flush(struct sna *sna, bool ca)
+static int gen7_get_rectangles__flush(struct sna *sna,
+				      const struct sna_composite_op *op)
 {
-	if (!kgem_check_batch(&sna->kgem, ca ? 65 : 5))
+	if (sna->render_state.gen7.vertex_offset) {
+		gen7_vertex_flush(sna);
+		gen7_magic_ca_pass(sna, op);
+	}
+
+	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 6))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
+	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
 	return gen7_vertex_finish(sna);
@@ -1657,7 +1656,7 @@ inline static int gen7_get_rectangles(struct sna *sna,
 	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
-		rem = gen7_get_rectangles__flush(sna, op->need_magic_ca_pass);
+		rem = gen7_get_rectangles__flush(sna, op);
 		if (rem == 0)
 			return 0;
 	}
@@ -1708,6 +1707,7 @@ gen7_get_batch(struct sna *sna)
 		     __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
 		     150, 4*8));
 		kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
 	if (sna->render_state.gen7.needs_invariant)
@@ -2093,7 +2093,6 @@ gen7_render_video(struct sna *sna,
 	}
 
 	gen7_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -2212,9 +2211,10 @@ static void gen7_composite_channel_convert(struct sna_composite_channel *channel
 static void gen7_render_composite_done(struct sna *sna,
 				       const struct sna_composite_op *op)
 {
-	gen7_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	sna->render.op = NULL;
+	if (sna->render_state.gen7.vertex_offset) {
+		gen7_vertex_flush(sna);
+		gen7_magic_ca_pass(sna, op);
+	}
 
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
@@ -2632,8 +2632,6 @@ gen7_render_composite(struct sna *sna,
 
 	gen7_emit_composite_state(sna, tmp);
 	gen7_align_vertex(sna, tmp);
-
-	sna->render.op = tmp;
 	return TRUE;
 
 cleanup_src:
@@ -2847,8 +2845,8 @@ fastcall static void
 gen7_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
-	gen7_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen7.vertex_offset)
+		gen7_vertex_flush(sna);
 
 	DBG(("%s()\n", __FUNCTION__));
 
@@ -3118,7 +3116,6 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	} while (n);
 
 	gen7_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -3150,8 +3147,8 @@ gen7_render_copy_blt(struct sna *sna,
 static void
 gen7_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen7_vertex_flush(sna);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	if (sna->render_state.gen7.vertex_offset)
+		gen7_vertex_flush(sna);
 }
 
 static Bool
@@ -3403,9 +3400,9 @@ gen7_render_fill_boxes(struct sna *sna,
 		} while (--n_this_time);
 	} while (n);
 
-	gen7_vertex_flush(sna);
+	if (sna->render_state.gen7.vertex_offset)
+		gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	return TRUE;
 }
 
@@ -3497,9 +3494,9 @@ gen7_render_fill_op_boxes(struct sna *sna,
 static void
 gen7_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen7_vertex_flush(sna);
+	if (sna->render_state.gen7.vertex_offset)
+		gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 }
 
 static Bool
@@ -3682,7 +3679,6 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
 	return TRUE;
 }
@@ -3778,7 +3774,6 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
 	return TRUE;
 }
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index abb19dc..04543de 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -278,7 +278,6 @@ struct sna_render {
 	float *vertices;
 
 	float vertex_data[1024];
-	const struct sna_composite_op *op;
 };
 
 struct gen2_render_state {
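
A condensed view of the idiom the gen7 done/flush paths above now share. It
restates the hunks rather than adding anything new, and assumes that
gen7_magic_ca_pass() returns without emitting commands when the op does not
need the component-alpha second pass (its body is not part of these hunks):

    if (sna->render_state.gen7.vertex_offset) {
        /* patch the open 3DPRIMITIVE with the final vertex count */
        gen7_vertex_flush(sna);
        /* second pass for component-alpha composites, when required */
        gen7_magic_ca_pass(sna, op);
    }

Paths that can never use component alpha (spans, copies, fills) keep only the
gen7_vertex_flush() call under the same guard, and gen7_vertex_flush() itself
now asserts that a primitive is actually open.
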
commit a62429a1f79b8fa4a5ddaf61b2bc80fc8dbe576c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 19:00:01 2012 +0000

    sna: Upload continuation vertices into mmapped buffers
    
    In the common case, we expect a very small number of vertices which will
    fit into the batch along with the commands. However, in full flow we
    overflow the on-stack buffer and are likely to fill several continuation buffers.
    Streaming those straight into the GTT seems like a good idea, with the
    usual caveats over aperture pressure. (Since these are linear we could
    use a snoopable bo on the architectures that support snooping for vertex
    buffers, if we had the kernel support.)
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
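
The continuation path added to each gen*_vertex_finish() below follows one
pattern; here it is condensed into a single sketch in gen3's single-relocation
form. The helper name vertex_finish_sketch is ours, but the kgem calls are the
ones the hunks use (gen6/gen7 map the bo with kgem_bo_map__cpu() and patch a
second relocation for the end of the buffer):

    static int vertex_finish_sketch(struct sna *sna)
    {
        struct kgem_bo *old = sna->render.vbo;

        if (old) {
            /* The vbo we have been filling is complete: point the
             * pending vertex-buffer relocation at it and drop our
             * reference. */
            sna->kgem.batch[sna->render.vertex_reloc[0]] =
                kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
                               old, I915_GEM_DOMAIN_VERTEX << 16, 0);
            sna->render.vertex_reloc[0] = 0;
            sna->render.vertex_used = 0;
            sna->render.vertex_index = 0;
            kgem_bo_destroy(&sna->kgem, old);
        }

        /* Stream further vertices straight into a mapping of a fresh
         * 256KiB linear bo instead of the small on-stack array. */
        sna->render.vertices = NULL;
        sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
        if (sna->render.vbo)
            sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
        if (sna->render.vertices == NULL) {
            if (sna->render.vbo)
                kgem_bo_destroy(&sna->kgem, sna->render.vbo);
            sna->render.vbo = NULL;
            return 0;    /* out of space: caller falls back and flushes */
        }

        /* Carry over whatever is still sitting in the on-stack array. */
        if (sna->render.vertex_used)
            memcpy(sna->render.vertices, sna->render.vertex_data,
                   sizeof(float) * sna->render.vertex_used);

        sna->render.vertex_size = 64*1024 - 1;
        return sna->render.vertex_size - sna->render.vertex_used;
    }

The emitters are switched from vertex_data to the render.vertices pointer and
vertex_space() now reports render.vertex_size, so they write into whichever
backing store is current without further changes.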

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 457e694..fc896d9 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -456,7 +456,7 @@ gen3_emit_composite_primitive_identity_source(struct sna *sna,
 	float h = r->height;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 12;
 
 	v[8] = v[4] = r->dst.x + op->dst.x;
@@ -516,7 +516,7 @@ gen3_emit_composite_primitive_constant_identity_mask(struct sna *sna,
 	float h = r->height;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 12;
 
 	v[8] = v[4] = r->dst.x + op->dst.x;
@@ -552,7 +552,7 @@ gen3_emit_composite_primitive_identity_source_mask(struct sna *sna,
 	w = r->width;
 	h = r->height;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 18;
 
 	v[0] = dst_x + w;
@@ -597,7 +597,7 @@ gen3_emit_composite_primitive_affine_source_mask(struct sna *sna,
 	w = r->width;
 	h = r->height;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 18;
 
 	v[0] = dst_x + w;
@@ -1512,54 +1512,107 @@ static void gen3_vertex_flush(struct sna *sna)
 	sna->render_state.gen3.vertex_offset = 0;
 }
 
-static void gen3_vertex_finish(struct sna *sna, Bool last)
+static int gen3_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
-	int delta;
-
-	DBG(("%s: last? %d\n", __FUNCTION__, last));
 
 	gen3_vertex_flush(sna);
 	if (!sna->render.vertex_used)
-		return;
+		return sna->render.vertex_size;
+
+	bo = sna->render.vbo;
+	if (bo) {
+		DBG(("%s: reloc = %d\n", __FUNCTION__,
+		     sna->render.vertex_reloc[0]));
+
+		sna->kgem.batch[sna->render.vertex_reloc[0]] =
+			kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
+				       bo, I915_GEM_DOMAIN_VERTEX << 16, 0);
+
+		sna->render.vertex_reloc[0] = 0;
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+
+		kgem_bo_destroy(&sna->kgem, bo);
+	}
+
+	sna->render.vertices = NULL;
+	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	if (sna->render.vbo)
+		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
+	if (sna->render.vertices == NULL) {
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		return 0;
+	}
 
-	if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
-		DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
-		     sna->render.vertex_used, sna->kgem.nbatch));
-		memcpy(sna->kgem.batch + sna->kgem.nbatch,
+	if (sna->render.vertex_used) {
+		memcpy(sna->render.vertices,
 		       sna->render.vertex_data,
-		       sna->render.vertex_used * 4);
-		delta = sna->kgem.nbatch * 4;
-		bo = NULL;
-		sna->kgem.nbatch += sna->render.vertex_used;
-	} else {
-		bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
-		if (bo && !kgem_bo_write(&sna->kgem, bo,
-					 sna->render.vertex_data,
-					 4*sna->render.vertex_used)) {
-			kgem_bo_destroy(&sna->kgem, bo);
-			return;
+		       sizeof(float)*sna->render.vertex_used);
+	}
+	sna->render.vertex_size = 64 * 1024 - 1;
+	return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen3_vertex_close(struct sna *sna)
+{
+	struct kgem_bo *bo;
+	int delta = 0;
+
+	gen3_vertex_flush(sna);
+
+	if (!sna->render.vertex_used) {
+		assert(sna->render.vbo == NULL);
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+		return;
+	}
+
+	DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+	bo = sna->render.vbo;
+	if (bo == NULL) {
+		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+			     sna->render.vertex_used, sna->kgem.nbatch));
+			memcpy(sna->kgem.batch + sna->kgem.nbatch,
+			       sna->render.vertex_data,
+			       sna->render.vertex_used * 4);
+			delta = sna->kgem.nbatch * 4;
+			bo = NULL;
+			sna->kgem.nbatch += sna->render.vertex_used;
+		} else {
+			bo = kgem_create_linear(&sna->kgem,
+						4*sna->render.vertex_used);
+			if (bo && !kgem_bo_write(&sna->kgem, bo,
+						 sna->render.vertex_data,
+						 4*sna->render.vertex_used)) {
+				kgem_bo_destroy(&sna->kgem, bo);
+				goto reset;
+			}
+			DBG(("%s: new vbo: %d\n", __FUNCTION__,
+			     sna->render.vertex_used));
 		}
-		delta = 0;
-		DBG(("%s: new vbo: %d\n", __FUNCTION__,
-		     sna->render.vertex_used));
 	}
 
 	DBG(("%s: reloc = %d\n", __FUNCTION__,
 	     sna->render.vertex_reloc[0]));
 
 	sna->kgem.batch[sna->render.vertex_reloc[0]] =
-		kgem_add_reloc(&sna->kgem,
-			       sna->render.vertex_reloc[0],
-			       bo,
-			       I915_GEM_DOMAIN_VERTEX << 16,
-			       delta);
+		kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
+			       bo, I915_GEM_DOMAIN_VERTEX << 16, delta);
+	if (bo)
+		kgem_bo_destroy(&sna->kgem, bo);
+
+reset:
 	sna->render.vertex_reloc[0] = 0;
 	sna->render.vertex_used = 0;
 	sna->render.vertex_index = 0;
 
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 }
 
 static bool gen3_rectangle_begin(struct sna *sna,
@@ -1612,10 +1665,7 @@ static int gen3_get_rectangles__flush(struct sna *sna, bool ca)
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	gen3_vertex_finish(sna, FALSE);
-	assert(sna->render.vertex_index == 0);
-	assert(sna->render.vertex_used == 0);
-	return ARRAY_SIZE(sna->render.vertex_data);
+	return gen3_vertex_finish(sna);
 }
 
 inline static int gen3_get_rectangles(struct sna *sna,
@@ -1647,7 +1697,7 @@ inline static int gen3_get_rectangles(struct sna *sna,
 	sna->render.vertex_index += 3*want;
 
 	assert(want);
-	assert(sna->render.vertex_index * op->floats_per_vertex <= ARRAY_SIZE(sna->render.vertex_data));
+	assert(sna->render.vertex_index * op->floats_per_vertex <= sna->render.vertex_size);
 	return want;
 }
 
@@ -2205,6 +2255,9 @@ gen3_align_vertex(struct sna *sna,
 		  struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen3.last_floats_per_vertex) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+			gen3_vertex_finish(sna);
+
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen3.last_floats_per_vertex,
 		     op->floats_per_vertex,
@@ -2747,7 +2800,7 @@ gen3_emit_composite_spans_primitive_zero(struct sna *sna,
 					 const BoxRec *box,
 					 float opacity)
 {
-	float *v = sna->render.vertex_data + sna->render.vertex_used;
+	float *v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 6;
 
 	v[0] = op->base.dst.x + box->x2;
@@ -2766,7 +2819,7 @@ gen3_emit_composite_spans_primitive_zero_no_offset(struct sna *sna,
 						   const BoxRec *box,
 						   float opacity)
 {
-	float *v = sna->render.vertex_data + sna->render.vertex_used;
+	float *v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 6;
 
 	v[0] = box->x2;
@@ -2781,7 +2834,7 @@ gen3_emit_composite_spans_primitive_constant(struct sna *sna,
 					     const BoxRec *box,
 					     float opacity)
 {
-	float *v = sna->render.vertex_data + sna->render.vertex_used;
+	float *v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	v[0] = op->base.dst.x + box->x2;
@@ -2797,7 +2850,7 @@ gen3_emit_composite_spans_primitive_constant_no_offset(struct sna *sna,
 						       const BoxRec *box,
 						       float opacity)
 {
-	float *v = sna->render.vertex_data + sna->render.vertex_used;
+	float *v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	v[0] = box->x2;
@@ -2813,7 +2866,7 @@ gen3_emit_composite_spans_primitive_identity_source(struct sna *sna,
 						    const BoxRec *box,
 						    float opacity)
 {
-	float *v = sna->render.vertex_data + sna->render.vertex_used;
+	float *v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	v[0] = op->base.dst.x + box->x2;
@@ -2844,7 +2897,7 @@ gen3_emit_composite_spans_primitive_affine_source(struct sna *sna,
 	PictTransform *transform = op->base.src.transform;
 	float x, y, *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	v[0]  = op->base.dst.x + box->x2;
@@ -2883,7 +2936,7 @@ gen3_emit_composite_spans_primitive_identity_gradient(struct sna *sna,
 						      const BoxRec *box,
 						      float opacity)
 {
-	float *v = sna->render.vertex_data + sna->render.vertex_used;
+	float *v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	v[0] = op->base.dst.x + box->x2;
@@ -2912,7 +2965,7 @@ gen3_emit_composite_spans_primitive_affine_gradient(struct sna *sna,
 						    float opacity)
 {
 	PictTransform *transform = op->base.src.transform;
-	float *v = sna->render.vertex_data + sna->render.vertex_used;
+	float *v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	v[0] = op->base.dst.x + box->x2;
@@ -4278,7 +4331,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 static void gen3_render_flush(struct sna *sna)
 {
-	gen3_vertex_finish(sna, TRUE);
+	gen3_vertex_close(sna);
 }
 
 static void
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 972b718..942f8fb 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -358,37 +358,97 @@ static void gen4_vertex_flush(struct sna *sna)
 		gen4_magic_ca_pass(sna, sna->render.op);
 }
 
-static void gen4_vertex_finish(struct sna *sna, Bool last)
+static int gen4_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
-	unsigned int i, delta;
+	unsigned int i;
 
 	gen4_vertex_flush(sna);
 	if (!sna->render.vertex_used)
-		return;
+		return sna->render.vertex_size;
 
 	/* Note: we only need dword alignment (currently) */
 
-	if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
-		DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
-		     sna->render.vertex_used, sna->kgem.nbatch));
-		memcpy(sna->kgem.batch + sna->kgem.nbatch,
+	bo = sna->render.vbo;
+	if (bo) {
+		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+			if (sna->render.vertex_reloc[i]) {
+				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+				     i, sna->render.vertex_reloc[i]));
+
+				sna->kgem.batch[sna->render.vertex_reloc[i]] =
+					kgem_add_reloc(&sna->kgem,
+						       sna->render.vertex_reloc[i],
+						       bo,
+						       I915_GEM_DOMAIN_VERTEX << 16,
+						       0);
+				sna->render.vertex_reloc[i] = 0;
+			}
+		}
+
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+		sna->render_state.gen4.vb_id = 0;
+
+		kgem_bo_destroy(&sna->kgem, bo);
+	}
+
+	sna->render.vertices = NULL;
+	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	if (sna->render.vbo)
+		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
+	if (sna->render.vertices == NULL) {
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		return 0;
+	}
+
+	if (sna->render.vertex_used) {
+		memcpy(sna->render.vertices,
 		       sna->render.vertex_data,
-		       sna->render.vertex_used * 4);
-		delta = sna->kgem.nbatch * 4;
-		bo = NULL;
-		sna->kgem.nbatch += sna->render.vertex_used;
-	} else {
-		bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
-		if (bo && !kgem_bo_write(&sna->kgem, bo,
-					 sna->render.vertex_data,
-					 4*sna->render.vertex_used)) {
-			kgem_bo_destroy(&sna->kgem, bo);
-			return;
+		       sizeof(float)*sna->render.vertex_used);
+	}
+	sna->render.vertex_size = 64 * 1024 - 1;
+	return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen4_vertex_close(struct sna *sna)
+{
+	struct kgem_bo *bo;
+	unsigned int i, delta = 0;
+
+	gen4_vertex_flush(sna);
+	if (!sna->render.vertex_used) {
+		assert(sna->render.vbo == NULL);
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+		return;
+	}
+
+	DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+	bo = sna->render.vbo;
+	if (bo == NULL) {
+		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+			     sna->render.vertex_used, sna->kgem.nbatch));
+			memcpy(sna->kgem.batch + sna->kgem.nbatch,
+			       sna->render.vertex_data,
+			       sna->render.vertex_used * 4);
+			delta = sna->kgem.nbatch * 4;
+			bo = NULL;
+			sna->kgem.nbatch += sna->render.vertex_used;
+		} else {
+			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			if (bo && !kgem_bo_write(&sna->kgem, bo,
+						 sna->render.vertex_data,
+						 4*sna->render.vertex_used)) {
+				kgem_bo_destroy(&sna->kgem, bo);
+				goto reset;
+			}
+			DBG(("%s: new vbo: %d\n", __FUNCTION__,
+			     sna->render.vertex_used));
 		}
-		delta = 0;
-		DBG(("%s: new vbo: %d\n", __FUNCTION__,
-		     sna->render.vertex_used));
 	}
 
 	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -409,11 +469,17 @@ static void gen4_vertex_finish(struct sna *sna, Bool last)
 	if (bo)
 		kgem_bo_destroy(&sna->kgem, bo);
 
+reset:
 	sna->render.vertex_used = 0;
 	sna->render.vertex_index = 0;
 	sna->render_state.gen4.vb_id = 0;
+
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 }
 
+
 static uint32_t gen4_get_blend(int op,
 			       Bool has_component_alpha,
 			       uint32_t dst_format)
@@ -696,7 +762,7 @@ gen4_emit_composite_primitive_solid(struct sna *sna,
 		float f;
 	} dst;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -728,7 +794,7 @@ gen4_emit_composite_primitive_identity_source(struct sna *sna,
 		float f;
 	} dst;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	sx = r->src.x + op->src.offset[0];
@@ -762,7 +828,7 @@ gen4_emit_composite_primitive_affine_source(struct sna *sna,
 	} dst;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -815,7 +881,7 @@ gen4_emit_composite_primitive_identity_source_mask(struct sna *sna,
 	w = r->width;
 	h = r->height;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1057,10 +1123,7 @@ static int gen4_get_rectangles__flush(struct sna *sna)
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	gen4_vertex_finish(sna, FALSE);
-	sna->render.vertex_index = 0;
-
-	return ARRAY_SIZE(sna->render.vertex_data);
+	return gen4_vertex_finish(sna);
 }
 
 inline static int gen4_get_rectangles(struct sna *sna,
@@ -1209,6 +1272,9 @@ static void
 gen4_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen4.floats_per_vertex) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 6*op->floats_per_vertex)
+			gen4_vertex_finish(sna);
+
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen4.floats_per_vertex,
 		     op->floats_per_vertex,
@@ -2843,7 +2909,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 static void
 gen4_render_flush(struct sna *sna)
 {
-	gen4_vertex_finish(sna, TRUE);
+	gen4_vertex_close(sna);
 }
 
 static void gen4_render_reset(struct sna *sna)
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 6347b3c..573478d 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -349,37 +349,104 @@ static void gen5_vertex_flush(struct sna *sna)
 		gen5_magic_ca_pass(sna, sna->render.op);
 }
 
-static void gen5_vertex_finish(struct sna *sna, Bool last)
+static int gen5_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
-	unsigned int i, delta;
+	unsigned int i;
 
 	gen5_vertex_flush(sna);
 	if (!sna->render.vertex_used)
-		return;
+		return sna->render.vertex_size;
 
 	/* Note: we only need dword alignment (currently) */
 
-	if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
-		DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
-		     sna->render.vertex_used, sna->kgem.nbatch));
-		memcpy(sna->kgem.batch + sna->kgem.nbatch,
+	bo = sna->render.vbo;
+	if (bo) {
+		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+			if (sna->render.vertex_reloc[i]) {
+				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+				     i, sna->render.vertex_reloc[i]));
+
+				sna->kgem.batch[sna->render.vertex_reloc[i]] =
+					kgem_add_reloc(&sna->kgem,
+						       sna->render.vertex_reloc[i],
+						       bo,
+						       I915_GEM_DOMAIN_VERTEX << 16,
+						       0);
+				sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+					kgem_add_reloc(&sna->kgem,
+						       sna->render.vertex_reloc[i]+1,
+						       bo,
+						       I915_GEM_DOMAIN_VERTEX << 16,
+						       sna->render.vertex_used * 4 - 1);
+				sna->render.vertex_reloc[i] = 0;
+			}
+		}
+
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+		sna->render_state.gen5.vb_id = 0;
+
+		kgem_bo_destroy(&sna->kgem, bo);
+	}
+
+	sna->render.vertices = NULL;
+	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	if (sna->render.vbo)
+		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
+	if (sna->render.vertices == NULL) {
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		return 0;
+	}
+
+	if (sna->render.vertex_used) {
+		memcpy(sna->render.vertices,
 		       sna->render.vertex_data,
-		       sna->render.vertex_used * 4);
-		delta = sna->kgem.nbatch * 4;
-		bo = NULL;
-		sna->kgem.nbatch += sna->render.vertex_used;
-	} else {
-		bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
-		if (bo && !kgem_bo_write(&sna->kgem, bo,
-					 sna->render.vertex_data,
-					 4*sna->render.vertex_used)) {
-			kgem_bo_destroy(&sna->kgem, bo);
-			return;
+		       sizeof(float)*sna->render.vertex_used);
+	}
+	sna->render.vertex_size = 64 * 1024 - 1;
+	return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen5_vertex_close(struct sna *sna)
+{
+	struct kgem_bo *bo;
+	unsigned int i, delta = 0;
+
+	gen5_vertex_flush(sna);
+	if (!sna->render.vertex_used) {
+		assert(sna->render.vbo == NULL);
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+		return;
+	}
+
+	DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+	bo = sna->render.vbo;
+	if (bo == NULL) {
+
+		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+			     sna->render.vertex_used, sna->kgem.nbatch));
+			memcpy(sna->kgem.batch + sna->kgem.nbatch,
+			       sna->render.vertex_data,
+			       sna->render.vertex_used * 4);
+			delta = sna->kgem.nbatch * 4;
+			bo = NULL;
+			sna->kgem.nbatch += sna->render.vertex_used;
+		} else {
+			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			if (bo && !kgem_bo_write(&sna->kgem, bo,
+						 sna->render.vertex_data,
+						 4*sna->render.vertex_used)) {
+				kgem_bo_destroy(&sna->kgem, bo);
+				goto reset;
+			}
+			DBG(("%s: new vbo: %d\n", __FUNCTION__,
+			     sna->render.vertex_used));
 		}
-		delta = 0;
-		DBG(("%s: new vbo: %d\n", __FUNCTION__,
-		     sna->render.vertex_used));
 	}
 
 	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -406,9 +473,14 @@ static void gen5_vertex_finish(struct sna *sna, Bool last)
 	if (bo)
 		kgem_bo_destroy(&sna->kgem, bo);
 
+reset:
 	sna->render.vertex_used = 0;
 	sna->render.vertex_index = 0;
 	sna->render_state.gen5.vb_id = 0;
+
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 }
 
 static uint32_t gen5_get_blend(int op,
@@ -727,7 +799,7 @@ gen5_emit_composite_primitive_solid(struct sna *sna,
 		float f;
 	} dst;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -759,7 +831,7 @@ gen5_emit_composite_primitive_identity_source(struct sna *sna,
 		float f;
 	} dst;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	sx = r->src.x + op->src.offset[0];
@@ -793,7 +865,7 @@ gen5_emit_composite_primitive_affine_source(struct sna *sna,
 	} dst;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -846,7 +918,7 @@ gen5_emit_composite_primitive_identity_source_mask(struct sna *sna,
 	w = r->width;
 	h = r->height;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1082,10 +1154,7 @@ static int gen5_get_rectangles__flush(struct sna *sna)
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	gen5_vertex_finish(sna, FALSE);
-	sna->render.vertex_index = 0;
-
-	return  ARRAY_SIZE(sna->render.vertex_data);
+	return gen5_vertex_finish(sna);
 }
 
 inline static int gen5_get_rectangles(struct sna *sna,
@@ -1094,9 +1163,9 @@ inline static int gen5_get_rectangles(struct sna *sna,
 {
 	int rem = vertex_space(sna);
 
-	if (rem < 3*op->floats_per_vertex) {
+	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
-		     __FUNCTION__, rem, 3*op->floats_per_vertex));
+		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen5_get_rectangles__flush(sna);
 		if (rem == 0)
 			return 0;
@@ -1105,8 +1174,8 @@ inline static int gen5_get_rectangles(struct sna *sna,
 	if (!gen5_rectangle_begin(sna, op))
 		return 0;
 
-	if (want * op->floats_per_vertex*3 > rem)
-		want = rem / (3*op->floats_per_vertex);
+	if (want * op->floats_per_rect > rem)
+		want = rem / op->floats_per_rect;
 
 	sna->render.vertex_index += 3*want;
 	return want;
@@ -1233,6 +1302,9 @@ static void
 gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+			gen5_vertex_finish(sna);
+
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen5.floats_per_vertex,
 		     op->floats_per_vertex,
@@ -1674,6 +1746,7 @@ gen5_render_video(struct sna *sna,
 	tmp.u.gen5.ve_id = 1;
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
+	tmp.floats_per_rect = 9;
 
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
 		kgem_submit(&sna->kgem);
@@ -2226,6 +2299,7 @@ gen5_render_composite(struct sna *sna,
 
 		tmp->floats_per_vertex = 3 + !tmp->is_affine;
 	}
+	tmp->floats_per_rect = 3*tmp->floats_per_vertex;
 
 	tmp->u.gen5.wm_kernel =
 		gen5_choose_composite_kernel(tmp->op,
@@ -2668,6 +2742,7 @@ gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
+	tmp.floats_per_rect = 9;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
 	tmp.u.gen5.ve_id = 1;
 
@@ -2810,6 +2885,7 @@ gen5_render_copy(struct sna *sna, uint8_t alu,
 
 	op->base.is_affine = true;
 	op->base.floats_per_vertex = 3;
+	op->base.floats_per_rect = 9;
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
 	op->base.u.gen5.ve_id = 1;
 
@@ -2957,6 +3033,7 @@ gen5_render_fill_boxes(struct sna *sna,
 
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
+	tmp.floats_per_rect = 9;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
 	tmp.u.gen5.ve_id = 1;
 
@@ -3144,6 +3221,7 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 
 	op->base.is_affine = TRUE;
 	op->base.floats_per_vertex = 3;
+	op->base.floats_per_rect = 9;
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
 	op->base.u.gen5.ve_id = 1;
 
@@ -3229,6 +3307,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
+	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
 	tmp.need_magic_ca_pass = FALSE;
 
@@ -3269,7 +3348,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 static void
 gen5_render_flush(struct sna *sna)
 {
-	gen5_vertex_finish(sna, TRUE);
+	gen5_vertex_close(sna);
 }
 
 static void
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index cd043c3..1ec6f06 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -910,37 +910,105 @@ static void gen6_vertex_flush(struct sna *sna)
 		gen6_magic_ca_pass(sna, sna->render.op);
 }
 
-static void gen6_vertex_finish(struct sna *sna, Bool last)
+static int gen6_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
-	unsigned int i, delta;
+	unsigned int i;
 
 	gen6_vertex_flush(sna);
 	if (!sna->render.vertex_used)
-		return;
+		return sna->render.vertex_size;
 
 	/* Note: we only need dword alignment (currently) */
 
-	if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
-		DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
-		     sna->render.vertex_used, sna->kgem.nbatch));
-		memcpy(sna->kgem.batch + sna->kgem.nbatch,
+	bo = sna->render.vbo;
+	if (bo) {
+		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+			if (sna->render.vertex_reloc[i]) {
+				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+				     i, sna->render.vertex_reloc[i]));
+
+				sna->kgem.batch[sna->render.vertex_reloc[i]] =
+					kgem_add_reloc(&sna->kgem,
+						       sna->render.vertex_reloc[i],
+						       bo,
+						       I915_GEM_DOMAIN_VERTEX << 16,
+						       0);
+				sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+					kgem_add_reloc(&sna->kgem,
+						       sna->render.vertex_reloc[i]+1,
+						       bo,
+						       I915_GEM_DOMAIN_VERTEX << 16,
+						       0 + sna->render.vertex_used * 4 - 1);
+				sna->render.vertex_reloc[i] = 0;
+			}
+		}
+
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+		sna->render_state.gen6.vb_id = 0;
+
+		kgem_bo_destroy(&sna->kgem, bo);
+	}
+
+	sna->render.vertices = NULL;
+	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	if (sna->render.vbo)
+		sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo);
+	if (sna->render.vertices == NULL) {
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		return 0;
+	}
+
+	kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo);
+	if (sna->render.vertex_used) {
+		memcpy(sna->render.vertices,
 		       sna->render.vertex_data,
-		       sna->render.vertex_used * 4);
-		delta = sna->kgem.nbatch * 4;
-		bo = NULL;
-		sna->kgem.nbatch += sna->render.vertex_used;
-	} else {
-		bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
-		if (bo && !kgem_bo_write(&sna->kgem, bo,
-					 sna->render.vertex_data,
-					 4*sna->render.vertex_used)) {
-			kgem_bo_destroy(&sna->kgem, bo);
-			return;
+		       sizeof(float)*sna->render.vertex_used);
+	}
+	sna->render.vertex_size = 64 * 1024 - 1;
+	return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen6_vertex_close(struct sna *sna)
+{
+	struct kgem_bo *bo;
+	unsigned int i, delta = 0;
+
+	gen6_vertex_flush(sna);
+	if (!sna->render.vertex_used) {
+		assert(sna->render.vbo == NULL);
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+		return;
+	}
+
+	DBG(("%s: used=%d / %d\n", __FUNCTION__,
+	     sna->render.vertex_used, sna->render.vertex_size));
+
+	bo = sna->render.vbo;
+	if (bo == NULL) {
+		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+			     sna->render.vertex_used, sna->kgem.nbatch));
+			memcpy(sna->kgem.batch + sna->kgem.nbatch,
+			       sna->render.vertex_data,
+			       sna->render.vertex_used * 4);
+			delta = sna->kgem.nbatch * 4;
+			bo = NULL;
+			sna->kgem.nbatch += sna->render.vertex_used;
+		} else {
+			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			if (bo && !kgem_bo_write(&sna->kgem, bo,
+						 sna->render.vertex_data,
+						 4*sna->render.vertex_used)) {
+				kgem_bo_destroy(&sna->kgem, bo);
+				goto reset;
+			}
+			DBG(("%s: new vbo: %d\n", __FUNCTION__,
+			     sna->render.vertex_used));
 		}
-		delta = 0;
-		DBG(("%s: new vbo: %d\n", __FUNCTION__,
-		     sna->render.vertex_used));
 	}
 
 	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -967,9 +1035,14 @@ static void gen6_vertex_finish(struct sna *sna, Bool last)
 	if (bo)
 		kgem_bo_destroy(&sna->kgem, bo);
 
+reset:
 	sna->render.vertex_used = 0;
 	sna->render.vertex_index = 0;
 	sna->render_state.gen6.vb_id = 0;
+
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 }
 
 typedef struct gen6_surface_state_padded {
@@ -1134,7 +1207,7 @@ gen6_emit_composite_primitive_solid(struct sna *sna,
 		float f;
 	} dst;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1165,7 +1238,7 @@ gen6_emit_composite_primitive_identity_source(struct sna *sna,
 	} dst;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1194,7 +1267,7 @@ gen6_emit_composite_primitive_affine_source(struct sna *sna,
 	} dst;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1247,7 +1320,7 @@ gen6_emit_composite_primitive_identity_source_mask(struct sna *sna,
 	w = r->width;
 	h = r->height;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1477,10 +1550,7 @@ static int gen6_get_rectangles__flush(struct sna *sna, bool ca)
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	gen6_vertex_finish(sna, FALSE);
-	sna->render.vertex_index = 0;
-
-	return  ARRAY_SIZE(sna->render.vertex_data);
+	return gen6_vertex_finish(sna);
 }
 
 inline static int gen6_get_rectangles(struct sna *sna,
@@ -1595,6 +1665,9 @@ gen6_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	assert (sna->render_state.gen6.vertex_offset == 0);
 	if (op->floats_per_vertex != sna->render_state.gen6.floats_per_vertex) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+			gen6_vertex_finish(sna);
+
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen6.floats_per_vertex,
 		     op->floats_per_vertex,
@@ -2621,7 +2694,7 @@ gen6_emit_composite_spans_identity(struct sna *sna,
 	int16_t tx = op->base.src.offset[0];
 	int16_t ty = op->base.src.offset[1];
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 3*5;
 
 	dst.p.x = box->x2;
@@ -2663,7 +2736,7 @@ gen6_emit_composite_spans_simple(struct sna *sna,
 	int16_t tx = op->base.src.offset[0];
 	int16_t ty = op->base.src.offset[1];
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 3*5;
 
 	dst.p.x = box->x2;
@@ -3061,7 +3134,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		}
 		n -= n_this_time;
 
-		v = sna->render.vertex_data + sna->render.vertex_used;
+		v = sna->render.vertices + sna->render.vertex_used;
 		sna->render.vertex_used += 9 * n_this_time;
 		do {
 
@@ -3746,7 +3819,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 
 static void gen6_render_flush(struct sna *sna)
 {
-	gen6_vertex_finish(sna, TRUE);
+	gen6_vertex_close(sna);
 }
 
 static void
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 7a5ee84..16c389c 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1013,37 +1013,104 @@ static void gen7_vertex_flush(struct sna *sna)
 		gen7_magic_ca_pass(sna, sna->render.op);
 }
 
-static void gen7_vertex_finish(struct sna *sna, Bool last)
+static int gen7_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
-	unsigned int i, delta;
+	unsigned int i;
 
 	gen7_vertex_flush(sna);
 	if (!sna->render.vertex_used)
-		return;
+		return sna->render.vertex_size;
 
 	/* Note: we only need dword alignment (currently) */
 
-	if (last && sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
-		DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
-		     sna->render.vertex_used, sna->kgem.nbatch));
-		memcpy(sna->kgem.batch + sna->kgem.nbatch,
+	bo = sna->render.vbo;
+	if (bo) {
+		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
+			if (sna->render.vertex_reloc[i]) {
+				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+				     i, sna->render.vertex_reloc[i]));
+
+				sna->kgem.batch[sna->render.vertex_reloc[i]] =
+					kgem_add_reloc(&sna->kgem,
+						       sna->render.vertex_reloc[i],
+						       bo,
+						       I915_GEM_DOMAIN_VERTEX << 16,
+						       0);
+				sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+					kgem_add_reloc(&sna->kgem,
+						       sna->render.vertex_reloc[i]+1,
+						       bo,
+						       I915_GEM_DOMAIN_VERTEX << 16,
+						       sna->render.vertex_used * 4 - 1);
+				sna->render.vertex_reloc[i] = 0;
+			}
+		}
+
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+		sna->render_state.gen7.vb_id = 0;
+
+		kgem_bo_destroy(&sna->kgem, bo);
+	}
+
+	sna->render.vertices = NULL;
+	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	if (sna->render.vbo)
+		sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo);
+	if (sna->render.vertices == NULL) {
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		return 0;
+	}
+
+	kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo);
+	if (sna->render.vertex_used) {
+		memcpy(sna->render.vertices,
 		       sna->render.vertex_data,
-		       sna->render.vertex_used * 4);
-		delta = sna->kgem.nbatch * 4;
-		bo = NULL;
-		sna->kgem.nbatch += sna->render.vertex_used;
-	} else {
-		bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
-		if (bo && !kgem_bo_write(&sna->kgem, bo,
-					 sna->render.vertex_data,
-					 4*sna->render.vertex_used)) {
-			kgem_bo_destroy(&sna->kgem, bo);
-			return;
+		       sizeof(float)*sna->render.vertex_used);
+	}
+	sna->render.vertex_size = 64 * 1024 - 1;
+	return sna->render.vertex_size - sna->render.vertex_used;
+}
+
+static void gen7_vertex_close(struct sna *sna)
+{
+	struct kgem_bo *bo;
+	unsigned int i, delta = 0;
+
+	gen7_vertex_flush(sna);
+	if (!sna->render.vertex_used) {
+		assert(sna->render.vbo == NULL);
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+		return;
+	}
+
+	DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
+
+	bo = sna->render.vbo;
+	if (bo == NULL) {
+		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
+			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
+			     sna->render.vertex_used, sna->kgem.nbatch));
+			memcpy(sna->kgem.batch + sna->kgem.nbatch,
+			       sna->render.vertex_data,
+			       sna->render.vertex_used * 4);
+			delta = sna->kgem.nbatch * 4;
+			bo = NULL;
+			sna->kgem.nbatch += sna->render.vertex_used;
+		} else {
+			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			if (bo && !kgem_bo_write(&sna->kgem, bo,
+						 sna->render.vertex_data,
+						 4*sna->render.vertex_used)) {
+				kgem_bo_destroy(&sna->kgem, bo);
+				goto reset;
+			}
+			DBG(("%s: new vbo: %d\n", __FUNCTION__,
+			     sna->render.vertex_used));
 		}
-		delta = 0;
-		DBG(("%s: new vbo: %d\n", __FUNCTION__,
-		     sna->render.vertex_used));
 	}
 
 	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
@@ -1070,9 +1137,14 @@ static void gen7_vertex_finish(struct sna *sna, Bool last)
 	if (bo)
 		kgem_bo_destroy(&sna->kgem, bo);
 
+reset:
 	sna->render.vertex_used = 0;
 	sna->render.vertex_index = 0;
 	sna->render_state.gen7.vb_id = 0;
+
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 }
 
 static void null_create(struct sna_static_stream *stream)
@@ -1231,7 +1303,7 @@ gen7_emit_composite_primitive_solid(struct sna *sna,
 		float f;
 	} dst;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1262,7 +1334,7 @@ gen7_emit_composite_primitive_identity_source(struct sna *sna,
 	} dst;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1291,7 +1363,7 @@ gen7_emit_composite_primitive_affine_source(struct sna *sna,
 	} dst;
 	float *v;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1344,7 +1416,7 @@ gen7_emit_composite_primitive_identity_source_mask(struct sna *sna,
 	w = r->width;
 	h = r->height;
 
-	v = sna->render.vertex_data + sna->render.vertex_used;
+	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 15;
 
 	dst.p.x = r->dst.x + r->width;
@@ -1573,10 +1645,7 @@ static int gen7_get_rectangles__flush(struct sna *sna, bool ca)
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	gen7_vertex_finish(sna, FALSE);
-	sna->render.vertex_index = 0;
-
-	return  ARRAY_SIZE(sna->render.vertex_data);
+	return gen7_vertex_finish(sna);
 }
 
 inline static int gen7_get_rectangles(struct sna *sna,
@@ -1690,6 +1759,9 @@ static void
 gen7_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen7.floats_per_vertex) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
+			gen7_vertex_finish(sna);
+
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen7.floats_per_vertex,
 		     op->floats_per_vertex,
@@ -3022,7 +3094,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		}
 		n -= n_this_time;
 
-		v = sna->render.vertex_data + sna->render.vertex_used;
+		v = sna->render.vertices + sna->render.vertex_used;
 		sna->render.vertex_used += 9 * n_this_time;
 		do {
 
@@ -3713,7 +3785,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 
 static void gen7_render_flush(struct sna *sna)
 {
-	gen7_vertex_finish(sna, TRUE);
+	gen7_vertex_close(sna);
 }
 
 static void
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index abad1ee..979b2b0 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -239,6 +239,9 @@ void no_render_init(struct sna *sna)
 
 	memset (render,0, sizeof (*render));
 
+	render->vertices = render->vertex_data;
+	render->vertex_size = ARRAY_SIZE(render->vertex_data);
+
 	render->composite = no_render_composite;
 
 	render->copy_boxes = no_render_copy_boxes;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 19dfdfb..abb19dc 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -271,9 +271,13 @@ struct sna_render {
 	uint16_t vertex_start;
 	uint16_t vertex_index;
 	uint16_t vertex_used;
+	uint16_t vertex_size;
 	uint16_t vertex_reloc[8];
 
-	float vertex_data[16*1024];
+	struct kgem_bo *vbo;
+	float *vertices;
+
+	float vertex_data[1024];
 	const struct sna_composite_op *op;
 };
 
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index ee55db7..27f4909 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -19,17 +19,17 @@ static inline bool need_redirect(struct sna *sna, PixmapPtr dst)
 
 static inline int vertex_space(struct sna *sna)
 {
-	return ARRAY_SIZE(sna->render.vertex_data) - sna->render.vertex_used;
+	return sna->render.vertex_size - sna->render.vertex_used;
 }
 static inline void vertex_emit(struct sna *sna, float v)
 {
-	assert(sna->render.vertex_used < ARRAY_SIZE(sna->render.vertex_data));
-	sna->render.vertex_data[sna->render.vertex_used++] = v;
+	assert(sna->render.vertex_used < sna->render.vertex_size);
+	sna->render.vertices[sna->render.vertex_used++] = v;
 }
 static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
 {
-	int16_t *v = (int16_t *)&sna->render.vertex_data[sna->render.vertex_used++];
-	assert(sna->render.vertex_used <= ARRAY_SIZE(sna->render.vertex_data));
+	int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++];
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 	v[0] = x;
 	v[1] = y;
 }
commit 24df8ab9742f771cfeb6d30bd8a61a17a9e22ca7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 16:15:21 2012 +0000

    sna: Reverse the chronological sort order of inactive vma entries
    
    The goal is to reuse the most recently bound GTT mapping in the hope
    that it is still mappable at the time of reuse.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
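
The effect of the two hunks below, restated as a pair of hypothetical helpers
(the list primitives and vma fields are the ones the patch touches): newly
inactive mappings now go to the head of their bucket, so reuse picks the most
recently bound GTT mapping, while trimming under vma pressure evicts the
oldest entry from the tail.

    static void vma_cache_insert(struct kgem *kgem, struct kgem_bo *bo, int type)
    {
        /* newest mapping at the head: reuse will pick this one first */
        list_move(&bo->vma, &kgem->vma[type].inactive[bo->bucket]);
        kgem->vma[type].count++;
    }

    static struct kgem_bo *vma_cache_victim(struct kgem *kgem, int type, int bucket)
    {
        struct list *head = &kgem->vma[type].inactive[bucket];

        /* oldest mapping at the tail: evict this one under vma pressure */
        if (list_is_empty(head))
            return NULL;
        return list_last_entry(head, struct kgem_bo, vma);
    }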

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 30cdff6..5503484 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -858,7 +858,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 			bo->map = NULL;
 		}
 		if (bo->map) {
-			list_move_tail(&bo->vma, &kgem->vma[type].inactive[bo->bucket]);
+			list_move(&bo->vma, &kgem->vma[type].inactive[bo->bucket]);
 			kgem->vma[type].count++;
 		}
 	}
@@ -2552,9 +2552,7 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 		     j++) {
 			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
 			if (!list_is_empty(head))
-				bo = list_first_entry(head,
-						      struct kgem_bo,
-						      vma);
+				bo = list_last_entry(head, struct kgem_bo, vma);
 		}
 		if (bo == NULL)
 			break;
commit 2f26bbe3dd55bfe26b6f93fc1f9e9813fc11f7c4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 15:59:51 2012 +0000

    sna: Remove the short-circuiting of all-damage in move_to_cpu
    
    To allow a replacement of the complete pixmap to be performed in place.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0c7ab57..932eef0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -243,6 +243,7 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 {
 	assert(priv->stride);
 	assert(priv->cpu_damage == NULL);
+	assert(list_is_empty(&priv->list));
 
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
@@ -750,11 +751,6 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		return true;
 	}
 
-	if (DAMAGE_IS_ALL(priv->cpu_damage)) {
-		DBG(("%s: all-damaged\n", __FUNCTION__));
-		goto done;
-	}
-
 	DBG(("%s: gpu_bo=%d, gpu_damage=%p\n",
 	     __FUNCTION__,
 	     priv->gpu_bo ? priv->gpu_bo->handle : 0,
@@ -762,10 +758,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 
 	if ((flags & MOVE_READ) == 0) {
 		assert(flags == MOVE_WRITE);
-
-		sna_damage_destroy(&priv->cpu_damage);
 		sna_damage_destroy(&priv->gpu_damage);
-		list_del(&priv->list);
 
 		if (priv->stride && priv->gpu_bo &&
 		    pixmap_inplace(sna, pixmap, priv)) {
@@ -790,6 +783,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			sna_damage_all(&priv->gpu_damage,
 				       pixmap->drawable.width,
 				       pixmap->drawable.height);
+			sna_damage_destroy(&priv->cpu_damage);
+			list_del(&priv->list);
 			if (priv->cpu_bo)
 				sna_pixmap_free_cpu(sna, priv);
 
@@ -803,6 +798,8 @@ skip_inplace_map:
 
 			if (kgem_bo_is_busy(priv->cpu_bo)) {
 				DBG(("%s: discarding busy CPU bo\n", __FUNCTION__));
+				sna_damage_destroy(&priv->cpu_damage);
+				list_del(&priv->list);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
commit c81dba18e6a308d716a11b90da6e53ca7f5f8a02
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 14:21:13 2012 +0000

    sna: Hint whether we prefer to use the GPU for a pixmap
    
    This covers both the condition where the pixmap is too large to be
    allocated on the GPU and the one where it is too small to be worth it.
    It is only a hint set during creation, and may be overridden if required.
    
    This fixes the regression with ocitysmap, where we decided to render glyphs
    into a GPU mask for a destination that does not fit into the aperture.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
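
For illustration only: the creation-time decision that feeds the new priv->gpu
bit. sna_create_pixmap() sets the bit where the hunk below shows
"priv->gpu = true"; the exact size test it applies is not reproduced here, so
pixmap_fits_gpu() and max_gpu_size are placeholders (the lower bound reuses the
threshold from the old too_small() check that this commit replaces).

    static Bool pixmap_fits_gpu(uint32_t width, uint32_t height,
                                uint32_t bits_per_pixel, uint32_t max_gpu_size)
    {
        uint32_t size = width * height * bits_per_pixel;

        if (size <= 8*4096)       /* too small to be worth a GPU bo */
            return false;
        if (size > max_gpu_size)  /* too large for the aperture */
            return false;
        return true;
    }

At composite time too_small() then trusts the hint unless the pixmap has
already acquired GPU damage or a busy CPU bo, as the sna_render_inline.h hunk
shows.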

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 76e3299..6f72698 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -145,6 +145,7 @@ struct sna_pixmap {
 	uint8_t pinned :1;
 	uint8_t mapped :1;
 	uint8_t flush :1;
+	uint8_t gpu :1;
 	uint8_t header :1;
 };
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2d6d1ee..0c7ab57 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -690,6 +690,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		}
 
 		priv->stride = pad;
+		priv->gpu = true;
 	}
 
 	return pixmap;
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index ca8e839..ee55db7 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -90,7 +90,18 @@ is_cpu(DrawablePtr drawable)
 static inline Bool
 too_small(DrawablePtr drawable)
 {
-	return ((uint32_t)drawable->width * drawable->height * drawable->bitsPerPixel <= 8*4096) && !is_gpu(drawable);
+	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+
+	if (priv == NULL)
+		return true;
+
+	if (priv->gpu_damage)
+		return false;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return false;
+
+	return !priv->gpu;
 }
 
 static inline Bool
commit 2bd942d55314426d7f429d7ccc0b62a622a79009
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 14:08:34 2012 +0000

    sna/trapezoids: Quieten the debugging of the gory details of the rasteriser
    
    Hide the noise under another level of debugging so that hopefully the
    reason why it chose a particular path become clear.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 800a278..4f91842 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -44,6 +44,12 @@
 #define DBG(x) ErrorF x
 #endif
 
+#if 0
+#define __DBG(x) ErrorF x
+#else
+#define __DBG(x)
+#endif
+
 #define NO_ACCEL 0
 #define NO_ALIGNED_BOXES 0
 #define NO_UNALIGNED_BOXES 0
@@ -571,8 +577,8 @@ cell_list_add_subspan(struct cell_list *cells,
 	FAST_SAMPLES_X_TO_INT_FRAC(x1, ix1, fx1);
 	FAST_SAMPLES_X_TO_INT_FRAC(x2, ix2, fx2);
 
-	DBG(("%s: x1=%d (%d+%d), x2=%d (%d+%d)\n", __FUNCTION__,
-	     x1, ix1, fx1, x2, ix2, fx2));
+	__DBG(("%s: x1=%d (%d+%d), x2=%d (%d+%d)\n", __FUNCTION__,
+	       x1, ix1, fx1, x2, ix2, fx2));
 
 	cell = cell_list_find(cells, ix1);
 	if (ix1 != ix2) {
@@ -677,15 +683,15 @@ polygon_add_edge(struct polygon *polygon,
 	grid_scaled_y_t ymin = polygon->ymin;
 	grid_scaled_y_t ymax = polygon->ymax;
 
-	DBG(("%s: edge=(%d [%d.%d], %d [%d.%d]), (%d [%d.%d], %d [%d.%d]), top=%d [%d.%d], bottom=%d [%d.%d], dir=%d\n",
-	     __FUNCTION__,
-	     x1, FAST_SAMPLES_INT(x1), FAST_SAMPLES_FRAC(x1),
-	     y1, FAST_SAMPLES_INT(y1), FAST_SAMPLES_FRAC(y1),
-	     x2, FAST_SAMPLES_INT(x2), FAST_SAMPLES_FRAC(x2),
-	     y2, FAST_SAMPLES_INT(y2), FAST_SAMPLES_FRAC(y2),
-	     top, FAST_SAMPLES_INT(top), FAST_SAMPLES_FRAC(top),
-	     bottom, FAST_SAMPLES_INT(bottom), FAST_SAMPLES_FRAC(bottom),
-	     dir));
+	__DBG(("%s: edge=(%d [%d.%d], %d [%d.%d]), (%d [%d.%d], %d [%d.%d]), top=%d [%d.%d], bottom=%d [%d.%d], dir=%d\n",
+	       __FUNCTION__,
+	       x1, FAST_SAMPLES_INT(x1), FAST_SAMPLES_FRAC(x1),
+	       y1, FAST_SAMPLES_INT(y1), FAST_SAMPLES_FRAC(y1),
+	       x2, FAST_SAMPLES_INT(x2), FAST_SAMPLES_FRAC(x2),
+	       y2, FAST_SAMPLES_INT(y2), FAST_SAMPLES_FRAC(y2),
+	       top, FAST_SAMPLES_INT(top), FAST_SAMPLES_FRAC(top),
+	       bottom, FAST_SAMPLES_INT(bottom), FAST_SAMPLES_FRAC(bottom),
+	       dir));
 	assert (dy > 0);
 
 	e->dy = dy;
@@ -732,8 +738,8 @@ polygon_add_line(struct polygon *polygon,
 	if (dy == 0)
 		return;
 
-	DBG(("%s: line=(%d, %d), (%d, %d)\n",
-	     __FUNCTION__, (int)p1->x, (int)p1->y, (int)p2->x, (int)p2->y));
+	__DBG(("%s: line=(%d, %d), (%d, %d)\n",
+	       __FUNCTION__, (int)p1->x, (int)p1->y, (int)p2->x, (int)p2->y));
 
 	e->dir = 1;
 	if (dy < 0) {
@@ -1066,11 +1072,11 @@ tor_fini(struct tor *converter)
 static int
 tor_init(struct tor *converter, const BoxRec *box, int num_edges)
 {
-	DBG(("%s: (%d, %d),(%d, %d) x (%d, %d), num_edges=%d\n",
-	     __FUNCTION__,
-	     box->x1, box->y1, box->x2, box->y2,
-	     FAST_SAMPLES_X, FAST_SAMPLES_Y,
-	     num_edges));
+	__DBG(("%s: (%d, %d),(%d, %d) x (%d, %d), num_edges=%d\n",
+	       __FUNCTION__,
+	       box->x1, box->y1, box->x2, box->y2,
+	       FAST_SAMPLES_X, FAST_SAMPLES_Y,
+	       num_edges));
 
 	converter->xmin = box->x1;
 	converter->ymin = box->y1;
@@ -1120,7 +1126,7 @@ tor_blt_span(struct sna *sna,
 	     const BoxRec *box,
 	     int coverage)
 {
-	DBG(("%s: %d -> %d @ %d\n", __FUNCTION__, box->x1, box->x2, coverage));
+	__DBG(("%s: %d -> %d @ %d\n", __FUNCTION__, box->x1, box->x2, coverage));
 
 	op->box(sna, op, box, AREA_TO_ALPHA(coverage));
 	apply_damage_box(&op->base, box);
@@ -1137,7 +1143,7 @@ tor_blt_span_clipped(struct sna *sna,
 	float opacity;
 
 	opacity = AREA_TO_ALPHA(coverage);
-	DBG(("%s: %d -> %d @ %f\n", __FUNCTION__, box->x1, box->x2, opacity));
+	__DBG(("%s: %d -> %d @ %f\n", __FUNCTION__, box->x1, box->x2, opacity));
 
 	pixman_region_init_rects(&region, box, 1);
 	RegionIntersect(&region, &region, clip);
@@ -1219,9 +1225,9 @@ tor_blt(struct sna *sna,
 
 	/* Skip cells to the left of the clip region. */
 	while (cell->x < xmin) {
-		DBG(("%s: skipping cell (%d, %d, %d)\n",
-		     __FUNCTION__,
-		     cell->x, cell->covered_height, cell->uncovered_area));
+		__DBG(("%s: skipping cell (%d, %d, %d)\n",
+		       __FUNCTION__,
+		       cell->x, cell->covered_height, cell->uncovered_area));
 
 		cover += cell->covered_height;
 		cell = cell->next;
@@ -1239,17 +1245,17 @@ tor_blt(struct sna *sna,
 		if (x >= xmax)
 			break;
 
-		DBG(("%s: cell=(%d, %d, %d), cover=%d, max=%d\n", __FUNCTION__,
-		     cell->x, cell->covered_height, cell->uncovered_area,
-		     cover, xmax));
+		__DBG(("%s: cell=(%d, %d, %d), cover=%d, max=%d\n", __FUNCTION__,
+		       cell->x, cell->covered_height, cell->uncovered_area,
+		       cover, xmax));
 
 		box.x2 = x;
 		if (box.x2 > box.x1 && (unbounded || cover)) {
-			DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
-			     box.x1, box.y1,
-			     box.x2 - box.x1,
-			     box.y2 - box.y1,
-			     cover));
+			__DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
+			       box.x1, box.y1,
+			       box.x2 - box.x1,
+			       box.y2 - box.y1,
+			       cover));
 			span(sna, op, clip, &box, cover);
 		}
 		box.x1 = box.x2;
@@ -1260,11 +1266,11 @@ tor_blt(struct sna *sna,
 			int area = cover - cell->uncovered_area;
 			box.x2 = x + 1;
 			if (unbounded || area) {
-				DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
-				     box.x1, box.y1,
-				     box.x2 - box.x1,
-				     box.y2 - box.y1,
-				     area));
+				__DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
+				       box.x1, box.y1,
+				       box.x2 - box.x1,
+				       box.y2 - box.y1,
+				       area));
 				span(sna, op, clip, &box, area);
 			}
 			box.x1 = box.x2;
@@ -1273,11 +1279,11 @@ tor_blt(struct sna *sna,
 
 	box.x2 = xmax;
 	if (box.x2 > box.x1 && (unbounded || cover)) {
-		DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
-		     box.x1, box.y1,
-		     box.x2 - box.x1,
-		     box.y2 - box.y1,
-		     cover));
+		__DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
+		       box.x1, box.y1,
+		       box.x2 - box.x1,
+		       box.y2 - box.y1,
+		       cover));
 		span(sna, op, clip, &box, cover);
 	}
 }
@@ -1325,7 +1331,7 @@ tor_render(struct sna *sna,
 	struct active_list *active = converter->active;
 	struct edge *buckets[FAST_SAMPLES_Y] = { 0 };
 
-	DBG(("%s: unbounded=%d\n", __FUNCTION__, unbounded));
+	__DBG(("%s: unbounded=%d\n", __FUNCTION__, unbounded));
 
 	/* Render each pixel row. */
 	for (i = 0; i < h; i = j) {
@@ -1341,8 +1347,8 @@ tor_render(struct sna *sna,
 				active->is_vertical = 1;
 				for (; j < h && !polygon->y_buckets[j]; j++)
 					;
-				DBG(("%s: no new edges and no exisiting edges, skipping, %d -> %d\n",
-				     __FUNCTION__, i, j));
+				__DBG(("%s: no new edges and no exisiting edges, skipping, %d -> %d\n",
+				       __FUNCTION__, i, j));
 
 				if (unbounded)
 					tor_blt_empty(sna, op, clip, span, i+ymin, j-i, xmin, xmax);
@@ -1352,12 +1358,12 @@ tor_render(struct sna *sna,
 			do_full_step = can_full_step(active);
 		}
 
-		DBG(("%s: y=%d [%d], do_full_step=%d, new edges=%d, min_height=%d, vertical=%d\n",
-		     __FUNCTION__,
-		     i, i+ymin, do_full_step,
-		     polygon->y_buckets[i] != NULL,
-		     active->min_height,
-		     active->is_vertical));
+		__DBG(("%s: y=%d [%d], do_full_step=%d, new edges=%d, min_height=%d, vertical=%d\n",
+		       __FUNCTION__,
+		       i, i+ymin, do_full_step,
+		       polygon->y_buckets[i] != NULL,
+		       active->min_height,
+		       active->is_vertical));
 		if (do_full_step) {
 			nonzero_row(active, coverages);
 
@@ -1372,8 +1378,8 @@ tor_render(struct sna *sna,
 				if (j != i + 1)
 					step_edges(active, j - (i + 1));
 
-				DBG(("%s: vertical edges, full step (%d, %d)\n",
-				    __FUNCTION__,  i, j));
+				__DBG(("%s: vertical edges, full step (%d, %d)\n",
+				       __FUNCTION__,  i, j));
 			}
 		} else {
 			grid_scaled_y_t suby;
@@ -1488,12 +1494,12 @@ mono_add_line(struct mono *mono,
 	pixman_fixed_t dy;
 	int y, ytop, ybot;
 
-	DBG(("%s: top=%d, bottom=%d, line=(%d, %d), (%d, %d) delta=%dx%d, dir=%d\n",
-	     __FUNCTION__,
-	     (int)top, (int)bottom,
-	     (int)p1->x, (int)p1->y, (int)p2->x, (int)p2->y,
-	     dst_x, dst_y,
-	     dir));
+	__DBG(("%s: top=%d, bottom=%d, line=(%d, %d), (%d, %d) delta=%dx%d, dir=%d\n",
+	       __FUNCTION__,
+	       (int)top, (int)bottom,
+	       (int)p1->x, (int)p1->y, (int)p2->x, (int)p2->y,
+	       dst_x, dst_y,
+	       dir));
 
 	if (top > bottom) {
 		xPointFixed *t;
@@ -1516,7 +1522,7 @@ mono_add_line(struct mono *mono,
 	ybot = MIN(y, mono->clip.extents.y2);
 
 	if (ybot <= ytop) {
-		DBG(("discard clipped line\n"));
+		__DBG(("discard clipped line\n"));
 		return;
 	}
 
@@ -1697,7 +1703,7 @@ mono_span(struct mono *c, int x1, int x2, BoxPtr box)
 	if (x2 <= x1)
 		return;
 
-	DBG(("%s [%d, %d]\n", __FUNCTION__, x1, x2));
+	__DBG(("%s [%d, %d]\n", __FUNCTION__, x1, x2));
 
 	box->x1 = x1;
 	box->x2 = x2;
@@ -1978,8 +1984,8 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		if (bounds.y1 >= bounds.y2 || bounds.x1 >= bounds.x2)
 			return;
 
-		DBG(("%s: bounds (%d, %d), (%d, %d)\n",
-		     __FUNCTION__, bounds.x1, bounds.y1, bounds.x2, bounds.y2));
+		DBG(("%s: bounds (%d, %d), (%d, %d)\n", __FUNCTION__,
+		     bounds.x1, bounds.y1, bounds.x2, bounds.y2));
 
 		if (!sna_compute_composite_extents(&bounds,
 						   src, NULL, dst,
@@ -1990,8 +1996,8 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 						   bounds.y2 - bounds.y1))
 			return;
 
-		DBG(("%s: extents (%d, %d), (%d, %d)\n",
-		     __FUNCTION__, bounds.x1, bounds.y1, bounds.x2, bounds.y2));
+		DBG(("%s: extents (%d, %d), (%d, %d)\n", __FUNCTION__,
+		     bounds.x1, bounds.y1, bounds.x2, bounds.y2));
 
 		width  = bounds.x2 - bounds.x1;
 		height = bounds.y2 - bounds.y1;
commit 5dbcfc2ee3af64846298dbcb20db27c93b3d57f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 13:52:10 2012 +0000

    sna: Be more lenient in not forcing to the GPU if the sources have CPU damage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
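
To make the heuristic change concrete, here is a small standalone sketch (not
code from the commit; the struct is only a hypothetical mirror of the
sna_pixmap damage fields consulted in the hunks below): a source is now only
treated as GPU-resident when it carries GPU damage and no outstanding CPU
damage.

#include <stdbool.h>

/* Hypothetical mirror of the two damage-tracking fields tested in the
 * gen[2-7]_composite_fallback() hunks below.
 */
struct pixmap_damage {
	bool gpu_damage;	/* pending writes live only in the GPU copy */
	bool cpu_damage;	/* pending writes live only in the CPU shadow */
};

/* Old rule: any GPU damage forced the composite onto the GPU.
 * New rule: prefer the GPU only when its copy is authoritative, i.e. there
 * is no CPU damage that would first have to be uploaded.
 */
static bool prefer_gpu(const struct pixmap_damage *priv)
{
	return priv != NULL && priv->gpu_damage && !priv->cpu_damage;
}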

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 1660763..87f9e8c 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1542,7 +1542,7 @@ gen2_composite_fallback(struct sna *sna,
 
 	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
@@ -1550,7 +1550,7 @@ gen2_composite_fallback(struct sna *sna,
 	}
 	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 51469dd..457e694 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2367,7 +2367,7 @@ gen3_composite_fallback(struct sna *sna,
 
 	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
@@ -2375,7 +2375,7 @@ gen3_composite_fallback(struct sna *sna,
 	}
 	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 3146836..972b718 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1974,7 +1974,7 @@ gen4_composite_fallback(struct sna *sna,
 
 	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
@@ -1982,7 +1982,7 @@ gen4_composite_fallback(struct sna *sna,
 	}
 	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 017b7ce..6347b3c 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2016,7 +2016,7 @@ gen5_composite_fallback(struct sna *sna,
 
 	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
@@ -2024,7 +2024,7 @@ gen5_composite_fallback(struct sna *sna,
 	}
 	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 047c055..cd043c3 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2211,7 +2211,7 @@ gen6_composite_fallback(struct sna *sna,
 
 	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
@@ -2219,7 +2219,7 @@ gen6_composite_fallback(struct sna *sna,
 	}
 	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c00548e..7a5ee84 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2305,7 +2305,7 @@ gen7_composite_fallback(struct sna *sna,
 
 	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
@@ -2313,7 +2313,7 @@ gen7_composite_fallback(struct sna *sna,
 	}
 	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage) {
+		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
commit 20ff4a1d73cc10e1f53050b19b8799ccbb5c7d04
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 13:30:13 2012 +0000

    sna: Use top_srcdir to detect .git rather than top_builddir
    
    For srcdir != builddir builds, we need to search the source tree for
    the git id.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
index 6043000..2809617 100644
--- a/src/sna/Makefile.am
+++ b/src/sna/Makefile.am
@@ -105,7 +105,7 @@ libsna_la_SOURCES += \
 endif
 
 if HAVE_DOT_GIT
-git_version.h: $(top_builddir)/.git/HEAD $(shell sed -e '/ref:/!d' -e 's#ref: *#$(top_builddir)/.git/#' < $(top_builddir)/.git/HEAD)
+git_version.h: $(top_srcdir)/.git/HEAD $(shell sed -e '/ref:/!d' -e 's#ref: *#$(top_srcdir)/.git/#' < $(top_srcdir)/.git/HEAD)
 	@echo "Recording git-tree used for compilation: `git describe`"
 	@V=`git describe`; echo "static const char git_version[] = \"$$V\";" > git_version.h
 sna_driver.c: git_version.h
commit a4d5d725996b94e314ae7697c7a597ed2f60e8cd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 10:41:44 2012 +0000

    sna: Experiment with GTT mmapped upload buffers
    
    In a few places, we can stream the source into the GTT and so upload in
    place through the WC mapping. Notably, in many other places we want to
    rasterise into a partial buffer in cacheable memory. So we need to notify
    the backend of the intended usage for the buffer, and when we think it is
    appropriate we can allocate a GTT mapped pointer for zero-copy upload.
    
    The biggest improvement tends to be in the PutComposite style of
    microbenchmark, yet throughput for trapezoid masks seems to suffer (e.g.
    swfdec-giant-steps on i3, and gen2 in general). As expected, the culprit
    of the regression is the aperture pressure causing eviction stalls, which
    the pwrite path sidesteps by doing a cached copy when there is no GTT
    space. This could be alleviated with an is-mappable ioctl predicting when
    use of the buffer would block, allowing us to fall back to pwrite in those
    cases. However, I suspect that this will improve dispatch latency in the
    common idle case, for which I have no good metric.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
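
As a rough, self-contained illustration of the new flag semantics (not code
from the commit; the flag values are exactly those added to kgem.h in a hunk
below): KGEM_BUFFER_INPLACE asks for a write-combining GTT mapping so the
data can be streamed in place, whereas a plain KGEM_BUFFER_WRITE buffer is
rasterised in cacheable memory and flushed with pwrite.

#include <stdint.h>
#include <stdio.h>

/* Flag bits as defined by this commit in kgem.h. */
#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4
#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

/* Sketch of the intended policy: in-place writes want a GTT (WC) mapping for
 * zero-copy upload; plain writes prefer a cacheable staging buffer that is
 * later flushed with pwrite, avoiding aperture pressure.
 */
static const char *upload_path(uint32_t flags)
{
	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE)
		return "GTT mmap (write-combining), upload in place";
	if (flags & KGEM_BUFFER_WRITE)
		return "cacheable staging buffer, flushed with pwrite";
	return "read-back buffer";
}

int main(void)
{
	printf("%s\n", upload_path(KGEM_BUFFER_WRITE_INPLACE | KGEM_BUFFER_LAST));
	printf("%s\n", upload_path(KGEM_BUFFER_WRITE));
	return 0;
}

The distinction matters because, as the message notes, GTT uploads can hit
eviction stalls under aperture pressure, while the pwrite path degrades
gracefully to a cached copy.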

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 117cc5d..30cdff6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -86,6 +86,7 @@ static inline void list_replace(struct list *old,
 #define DBG_NO_TILING 0
 #define DBG_NO_VMAP 0
 #define DBG_NO_MADV 0
+#define DBG_NO_MAP_UPLOAD 0
 #define DBG_NO_RELAXED_FENCING 0
 #define DBG_DUMP 0
 
@@ -111,7 +112,7 @@ struct kgem_partial_bo {
 	void *mem;
 	uint32_t used;
 	uint32_t need_io : 1;
-	uint32_t write : 1;
+	uint32_t write : 2;
 	uint32_t mmapped : 1;
 };
 
@@ -2579,7 +2580,6 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	void *ptr;
 
-	assert(bo->refcnt);
 	assert(!bo->purged);
 	assert(bo->exec == NULL);
 	assert(list_is_empty(&bo->list));
@@ -2641,7 +2641,6 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	struct drm_i915_gem_mmap mmap_arg;
 
 	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__, bo->handle, bo->size));
-	assert(bo->refcnt);
 	assert(!bo->purged);
 	assert(list_is_empty(&bo->list));
 
@@ -2897,12 +2896,14 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				   void **ret)
 {
 	struct kgem_partial_bo *bo;
-	bool write = !!(flags & KGEM_BUFFER_WRITE);
 	unsigned offset, alloc;
 	uint32_t handle;
 
-	DBG(("%s: size=%d, flags=%x [write=%d, last=%d]\n",
-	     __FUNCTION__, size, flags, write, flags & KGEM_BUFFER_LAST));
+	DBG(("%s: size=%d, flags=%x [write?=%d, inplace?=%d, last?=%d]\n",
+	     __FUNCTION__, size, flags,
+	     !!(flags & KGEM_BUFFER_WRITE),
+	     !!(flags & KGEM_BUFFER_INPLACE),
+	     !!(flags & KGEM_BUFFER_LAST)));
 	assert(size);
 
 	list_for_each_entry(bo, &kgem->partial, base.list) {
@@ -2923,9 +2924,10 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 
-		if (bo->write != write) {
-			DBG(("%s: skip write %d buffer, need %d\n",
-			     __FUNCTION__, bo->write, write));
+		if ((bo->write & KGEM_BUFFER_WRITE) != (flags & KGEM_BUFFER_WRITE) ||
+		    (bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
+			DBG(("%s: skip write %x buffer, need %x\n",
+			     __FUNCTION__, bo->write, flags));
 			continue;
 		}
 
@@ -2942,9 +2944,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		break;
 	}
 
-	alloc = (flags & KGEM_BUFFER_LAST) ? 4096 : 32 * 1024;
-	alloc = ALIGN(size, alloc);
+	/* Be a little more generous and hope to hold fewer mmappings */
+	alloc = ALIGN(size, kgem->aperture_mappable >> 10);
+	bo = NULL;
 
+#if !DBG_NO_MAP_UPLOAD
 	if (!DEBUG_NO_LLC && kgem->gen >= 60) {
 		struct kgem_bo *old;
 
@@ -2952,11 +2956,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (bo == NULL)
 			return NULL;
 
-		/* Be a little more generous and hope to hold fewer mmappings */
-		alloc = ALIGN(size, 128*1024);
-
 		old = NULL;
-		if (!write)
+		if ((flags & KGEM_BUFFER_WRITE) == 0)
 			old = search_linear_cache(kgem, alloc, CREATE_CPU_MAP);
 		if (old == NULL)
 			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE | CREATE_CPU_MAP);
@@ -2985,72 +2986,145 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem == NULL) {
-			bo->base.refcnt = 0; /* for valgrind */
-			kgem_bo_free(kgem, &bo->base);
-			return NULL;
-		}
+		if (bo->mem) {
+			if (flags & KGEM_BUFFER_WRITE)
+				kgem_bo_sync__cpu(kgem, &bo->base);
 
-		if (write)
-			kgem_bo_sync__cpu(kgem, &bo->base);
-
-		bo->need_io = false;
-		bo->base.io = true;
-		bo->mmapped = true;
-
-		alloc = bo->base.size;
-	} else if (HAVE_VMAP && kgem->has_vmap) {
-		bo = partial_bo_alloc(alloc);
-		if (bo == NULL)
-			return NULL;
-
-		handle = gem_vmap(kgem->fd, bo->mem, alloc, write);
-		if (handle) {
-			__kgem_bo_init(&bo->base, handle, alloc);
-			bo->base.vmap = true;
 			bo->need_io = false;
+			bo->base.io = true;
+			bo->mmapped = true;
+
+			alloc = bo->base.size;
 		} else {
-			free(bo);
-			return NULL;
+			bo->base.refcnt = 0; /* for valgrind */
+			kgem_bo_free(kgem, &bo->base);
+			bo = NULL;
 		}
-	} else {
+	} else if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE) {
 		struct kgem_bo *old;
 
-		old = NULL;
-		if (!write)
-			old = search_linear_cache(kgem, alloc, 0);
-		if (old == NULL)
-			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
+		/* The issue with using a GTT upload buffer is that we may
+		 * cause eviction-stalls in order to free up some GTT space.
+		 * An is-mappable? ioctl could help us detect when we are
+		 * about to block, or some per-page magic in the kernel.
+		 *
+		 * XXX This is especially noticeable on memory constrained
+		 * devices like gen2 or with relatively slow gpu like i3.
+		 */
+		old = search_linear_cache(kgem, alloc,
+					  CREATE_INACTIVE | CREATE_GTT_MAP);
+#if HAVE_I915_GEM_BUFFER_INFO
 		if (old) {
-			alloc = old->size;
-			bo = partial_bo_alloc(alloc);
+			struct drm_i915_gem_buffer_info info;
+
+			/* An example of such a non-blocking ioctl might work */
+
+			VG_CLEAR(info);
+			info.handle = handle;
+			if (drmIoctl(kgem->fd,
+				     DRM_IOCTL_I915_GEM_BUFFER_INFO,
+				     &fino) == 0) {
+				old->presumed_offset = info.addr;
+				if ((info.flags & I915_GEM_MAPPABLE) == 0) {
+					kgem_bo_move_to_inactive(kgem, old);
+					old = NULL;
+				}
+			}
+		}
+#endif
+		if (old) {
+			DBG(("%s: reusing handle=%d for buffer\n",
+			     __FUNCTION__, old->handle));
+
+			bo = malloc(sizeof(*bo));
 			if (bo == NULL)
 				return NULL;
 
 			memcpy(&bo->base, old, sizeof(*old));
 			if (old->rq)
-				list_replace(&old->request,
-					     &bo->base.request);
+				list_replace(&old->request, &bo->base.request);
 			else
 				list_init(&bo->base.request);
 			list_replace(&old->vma, &bo->base.vma);
 			list_init(&bo->base.list);
 			free(old);
-			bo->base.refcnt = 1;
-		} else {
+
+			bo->mem = kgem_bo_map(kgem, &bo->base);
+			if (bo->mem) {
+				bo->need_io = false;
+				bo->base.io = true;
+				bo->mmapped = true;
+				bo->base.refcnt = 1;
+
+				alloc = bo->base.size;
+			} else {
+				kgem_bo_free(kgem, &bo->base);
+				bo = NULL;
+			}
+		}
+	}
+#endif
+
+	if (bo == NULL) {
+		/* Be more parsimonious with pwrite/pread buffers */
+		if ((flags & KGEM_BUFFER_INPLACE) == 0)
+			alloc = PAGE_ALIGN(size);
+		flags &= ~KGEM_BUFFER_INPLACE;
+
+		if (HAVE_VMAP && kgem->has_vmap) {
 			bo = partial_bo_alloc(alloc);
 			if (bo == NULL)
 				return NULL;
 
-			if (!__kgem_bo_init(&bo->base,
-					    gem_create(kgem->fd, alloc),
-					    alloc)) {
+			handle = gem_vmap(kgem->fd, bo->mem, alloc,
+					  (flags & KGEM_BUFFER_WRITE) == 0);
+			if (handle) {
+				__kgem_bo_init(&bo->base, handle, alloc);
+				bo->base.vmap = true;
+				bo->need_io = false;
+			} else {
 				free(bo);
 				return NULL;
 			}
+		} else {
+			struct kgem_bo *old;
+
+			old = NULL;
+			if ((flags & KGEM_BUFFER_WRITE) == 0)
+				old = search_linear_cache(kgem, alloc, 0);
+			if (old == NULL)
+				old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
+			if (old) {
+				alloc = old->size;
+				bo = partial_bo_alloc(alloc);
+				if (bo == NULL)
+					return NULL;
+
+				memcpy(&bo->base, old, sizeof(*old));
+				if (old->rq)
+					list_replace(&old->request,
+						     &bo->base.request);
+				else
+					list_init(&bo->base.request);
+				list_replace(&old->vma, &bo->base.vma);
+				list_init(&bo->base.list);
+				free(old);
+				bo->base.refcnt = 1;
+			} else {
+				bo = partial_bo_alloc(alloc);
+				if (bo == NULL)
+					return NULL;
+
+				if (!__kgem_bo_init(&bo->base,
+						    gem_create(kgem->fd, alloc),
+						    alloc)) {
+					free(bo);
+					return NULL;
+				}
+			}
+			bo->need_io = flags & KGEM_BUFFER_WRITE;
+			bo->base.io = true;
 		}
-		bo->need_io = write;
-		bo->base.io = true;
 	}
 	bo->base.reusable = false;
 	assert(bo->base.size == alloc);
@@ -3058,7 +3132,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
 
 	bo->used = size;
-	bo->write = write;
+	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
 	offset = 0;
 
 	list_add(&bo->base.list, &kgem->partial);
@@ -3139,7 +3213,7 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 
 	bo = kgem_create_buffer_2d(kgem,
 				   width, height, bpp,
-				   KGEM_BUFFER_WRITE, &dst);
+				   KGEM_BUFFER_WRITE_INPLACE, &dst);
 	if (bo)
 		memcpy_blt(data, dst, bpp,
 			   stride, bo->pitch,
@@ -3167,7 +3241,8 @@ struct kgem_bo *kgem_upload_source_image_halved(struct kgem *kgem,
 
 	bo = kgem_create_buffer_2d(kgem,
 				   width, height, bpp,
-				   KGEM_BUFFER_WRITE, &dst);
+				   KGEM_BUFFER_WRITE_INPLACE,
+				   &dst);
 	if (bo == NULL)
 		return NULL;
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 3186a99..a455c6a 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -414,7 +414,11 @@ static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
 void kgem_sync(struct kgem *kgem);
 
 #define KGEM_BUFFER_WRITE	0x1
-#define KGEM_BUFFER_LAST	0x2
+#define KGEM_BUFFER_INPLACE	0x2
+#define KGEM_BUFFER_LAST	0x4
+
+#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)
+
 struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				   uint32_t size, uint32_t flags,
 				   void **ret);
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 09926ad..76e3299 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -434,7 +434,8 @@ inline static struct sna_pixmap *sna_pixmap_attach(PixmapPtr pixmap)
 }
 
 PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
-				   int width, int height, int depth);
+				   int width, int height, int depth,
+				   unsigned flags);
 
 struct sna_pixmap *sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags);
 struct sna_pixmap *sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9f60605..2d6d1ee 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1640,7 +1640,8 @@ sna_drawable_use_cpu_bo(DrawablePtr drawable,
 
 PixmapPtr
 sna_pixmap_create_upload(ScreenPtr screen,
-			 int width, int height, int depth)
+			 int width, int height, int depth,
+			 unsigned flags)
 {
 	struct sna *sna = to_sna_from_screen(screen);
 	PixmapPtr pixmap;
@@ -1690,7 +1691,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 
 	priv->gpu_bo = kgem_create_buffer_2d(&sna->kgem,
 					     width, height, bpp,
-					     KGEM_BUFFER_WRITE,
+					     flags,
 					     &ptr);
 	if (!priv->gpu_bo) {
 		free(priv);
@@ -2399,7 +2400,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 		upload = kgem_create_buffer(&sna->kgem,
 					    bstride*bh,
-					    KGEM_BUFFER_WRITE,
+					    KGEM_BUFFER_WRITE_INPLACE,
 					    &ptr);
 		if (!upload)
 			break;
@@ -2529,7 +2530,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 			upload = kgem_create_buffer(&sna->kgem,
 						    bstride*bh,
-						    KGEM_BUFFER_WRITE,
+						    KGEM_BUFFER_WRITE_INPLACE,
 						    &ptr);
 			if (!upload)
 				break;
@@ -3018,7 +3019,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			tmp = sna_pixmap_create_upload(src->pScreen,
 						       src->width,
 						       src->height,
-						       src->depth);
+						       src->depth,
+						       KGEM_BUFFER_WRITE_INPLACE);
 			if (tmp == NullPixmap)
 				return;
 
@@ -3909,7 +3911,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 
 			upload = kgem_create_buffer(&sna->kgem,
 						    bstride*bh,
-						    KGEM_BUFFER_WRITE,
+						    KGEM_BUFFER_WRITE_INPLACE,
 						    &ptr);
 			if (!upload)
 				break;
@@ -4029,7 +4031,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 
 		upload = kgem_create_buffer(&sna->kgem,
 					    bstride*bh,
-					    KGEM_BUFFER_WRITE,
+					    KGEM_BUFFER_WRITE_INPLACE,
 					    &ptr);
 		if (!upload)
 			break;
@@ -7594,7 +7596,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 
 				upload = kgem_create_buffer(&sna->kgem,
 							    bstride*bh,
-							    KGEM_BUFFER_WRITE,
+							    KGEM_BUFFER_WRITE_INPLACE,
 							    &ptr);
 				if (!upload)
 					break;
@@ -7733,7 +7735,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 
 					upload = kgem_create_buffer(&sna->kgem,
 								    bstride*bh,
-								    KGEM_BUFFER_WRITE,
+								    KGEM_BUFFER_WRITE_INPLACE,
 								    &ptr);
 					if (!upload)
 						break;
@@ -7873,7 +7875,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 
 						upload = kgem_create_buffer(&sna->kgem,
 									    bstride*bh,
-									    KGEM_BUFFER_WRITE,
+									    KGEM_BUFFER_WRITE_INPLACE,
 									    &ptr);
 						if (!upload)
 							break;
@@ -9386,7 +9388,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 
 		upload = kgem_create_buffer(&sna->kgem,
 					    bstride*bh,
-					    KGEM_BUFFER_WRITE,
+					    KGEM_BUFFER_WRITE_INPLACE,
 					    &ptr);
 		if (!upload)
 			break;
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index f64d2f9..71625b8 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -730,7 +730,8 @@ glyphs_via_mask(struct sna *sna,
 upload:
 		pixmap = sna_pixmap_create_upload(screen,
 						  width, height,
-						  format->depth);
+						  format->depth,
+						  KGEM_BUFFER_WRITE);
 		if (!pixmap)
 			return FALSE;
 
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 7670211..d6b988f 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -398,7 +398,7 @@ fallback:
 					       tmp.drawable.width,
 					       tmp.drawable.height,
 					       tmp.drawable.bitsPerPixel,
-					       KGEM_BUFFER_WRITE,
+					       KGEM_BUFFER_WRITE_INPLACE,
 					       &ptr);
 		if (!src_bo)
 			goto fallback;
@@ -473,7 +473,7 @@ fallback:
 		}
 
 		src_bo = kgem_create_buffer(kgem, offset,
-					    KGEM_BUFFER_WRITE | (nbox ? KGEM_BUFFER_LAST : 0),
+					    KGEM_BUFFER_WRITE_INPLACE | (nbox ? KGEM_BUFFER_LAST : 0),
 					    &ptr);
 		if (!src_bo)
 			break;
@@ -633,7 +633,7 @@ fallback:
 					       tmp.drawable.width,
 					       tmp.drawable.height,
 					       tmp.drawable.bitsPerPixel,
-					       KGEM_BUFFER_WRITE,
+					       KGEM_BUFFER_WRITE_INPLACE,
 					       &ptr);
 		if (!src_bo)
 			goto fallback;
@@ -709,7 +709,7 @@ fallback:
 		}
 
 		src_bo = kgem_create_buffer(kgem, offset,
-					    KGEM_BUFFER_WRITE | (nbox ? KGEM_BUFFER_LAST : 0),
+					    KGEM_BUFFER_WRITE_INPLACE | (nbox ? KGEM_BUFFER_LAST : 0),
 					    &ptr);
 		if (!src_bo)
 			break;
@@ -803,7 +803,7 @@ indirect_replace(struct sna *sna,
 					       pixmap->drawable.width,
 					       pixmap->drawable.height,
 					       pixmap->drawable.bitsPerPixel,
-					       KGEM_BUFFER_WRITE,
+					       KGEM_BUFFER_WRITE_INPLACE,
 					       &ptr);
 		if (!src_bo)
 			return false;
@@ -832,7 +832,7 @@ indirect_replace(struct sna *sna,
 
 		src_bo = kgem_create_buffer(kgem,
 					    pitch * pixmap->drawable.height,
-					    KGEM_BUFFER_WRITE,
+					    KGEM_BUFFER_WRITE_INPLACE,
 					    &ptr);
 		if (!src_bo)
 			return false;
@@ -907,7 +907,8 @@ struct kgem_bo *sna_replace(struct sna *sna,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling));
 
-	if (indirect_replace(sna, pixmap, bo, src, stride))
+	if ((!bo->map || bo->rq) &&
+	    indirect_replace(sna, pixmap, bo, src, stride))
 		return bo;
 
 	if (kgem_bo_is_busy(bo)) {
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 304ff0f..abad1ee 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1185,7 +1185,7 @@ do_fixup:
 
 	channel->bo = kgem_create_buffer_2d(&sna->kgem,
 					    w, h, PIXMAN_FORMAT_BPP(channel->pict_format),
-					    KGEM_BUFFER_WRITE,
+					    KGEM_BUFFER_WRITE_INPLACE,
 					    &ptr);
 	if (!channel->bo) {
 		DBG(("%s: failed to create upload buffer, using clear\n",
@@ -1347,7 +1347,7 @@ sna_render_picture_convert(struct sna *sna,
 
 	channel->bo = kgem_create_buffer_2d(&sna->kgem,
 					    w, h, PIXMAN_FORMAT_BPP(channel->pict_format),
-					    KGEM_BUFFER_WRITE,
+					    KGEM_BUFFER_WRITE_INPLACE,
 					    &ptr);
 	if (!channel->bo) {
 		pixman_image_unref(src);
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index b87116e..800a278 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2003,7 +2003,8 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		DBG(("%s: mask (%dx%d) depth=%d, format=%08x\n",
 		     __FUNCTION__, width, height, depth, format));
 		scratch = sna_pixmap_create_upload(screen,
-						   width, height, depth);
+						   width, height, depth,
+						   KGEM_BUFFER_WRITE);
 		if (!scratch)
 			return;
 
@@ -2438,7 +2439,7 @@ composite_unaligned_boxes_fallback(CARD8 op,
 		scratch = sna_pixmap_create_upload(screen,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1,
-						   8);
+						   8, KGEM_BUFFER_WRITE);
 		if (!scratch)
 			continue;
 
@@ -3018,7 +3019,9 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	DBG(("%s: mask (%dx%d), dx=(%d, %d)\n",
 	     __FUNCTION__, extents.x2, extents.y2, dx, dy));
-	scratch = sna_pixmap_create_upload(screen, extents.x2, extents.y2, 8);
+	scratch = sna_pixmap_create_upload(screen,
+					   extents.x2, extents.y2, 8,
+					   KGEM_BUFFER_WRITE_INPLACE);
 	if (!scratch)
 		return true;
 
@@ -3998,7 +4001,7 @@ trap_mask_converter(PicturePtr picture,
 	scratch = sna_pixmap_create_upload(screen,
 					   extents.x2-extents.x1,
 					   extents.y2-extents.y1,
-					   8);
+					   8, KGEM_BUFFER_WRITE_INPLACE);
 	if (!scratch)
 		return true;
 
@@ -4109,7 +4112,9 @@ trap_upload(PicturePtr picture,
 
 	DBG(("%s: tmp (%dx%d) depth=%d\n",
 	     __FUNCTION__, width, height, depth));
-	scratch = sna_pixmap_create_upload(screen, width, height, depth);
+	scratch = sna_pixmap_create_upload(screen,
+					   width, height, depth,
+					   KGEM_BUFFER_WRITE);
 	if (!scratch)
 		return true;
 
@@ -4510,7 +4515,9 @@ triangles_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	DBG(("%s: mask (%dx%d)\n",
 	     __FUNCTION__, extents.x2, extents.y2));
-	scratch = sna_pixmap_create_upload(screen, extents.x2, extents.y2, 8);
+	scratch = sna_pixmap_create_upload(screen,
+					   extents.x2, extents.y2, 8,
+					   KGEM_BUFFER_WRITE_INPLACE);
 	if (!scratch)
 		return true;
 
@@ -4615,7 +4622,8 @@ triangles_fallback(CARD8 op,
 		DBG(("%s: mask (%dx%d) depth=%d, format=%08x\n",
 		     __FUNCTION__, width, height, depth, format));
 		scratch = sna_pixmap_create_upload(screen,
-						   width, height, depth);
+						   width, height, depth,
+						   KGEM_BUFFER_WRITE);
 		if (!scratch)
 			return;
 
@@ -4857,7 +4865,8 @@ tristrip_fallback(CARD8 op,
 		DBG(("%s: mask (%dx%d) depth=%d, format=%08x\n",
 		     __FUNCTION__, width, height, depth, format));
 		scratch = sna_pixmap_create_upload(screen,
-						   width, height, depth);
+						   width, height, depth,
+						   KGEM_BUFFER_WRITE);
 		if (!scratch)
 			return;
 
@@ -4991,7 +5000,8 @@ trifan_fallback(CARD8 op,
 		DBG(("%s: mask (%dx%d) depth=%d, format=%08x\n",
 		     __FUNCTION__, width, height, depth, format));
 		scratch = sna_pixmap_create_upload(screen,
-						   width, height, depth);
+						   width, height, depth,
+						   KGEM_BUFFER_WRITE);
 		if (!scratch)
 			return;
 
commit 252f3818250ecf4776f20afa8111d7f1f6f29c18
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 13 11:30:32 2012 +0000

    sna: Relinquish the GTT mmap on inactive buffers if moved out of the aperture
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
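
A condensed sketch of the policy (not the commit's code; the struct is a
simplified stand-in for struct kgem_bo): when a buffer retires to the
inactive list, a cached mapping is kept only if it is a CPU map or the
object still fits in the mappable aperture; otherwise it is unmapped on
the spot.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/* Simplified stand-in for struct kgem_bo, for illustration only. */
struct bo {
	void *map;		/* cached mmap, GTT or CPU */
	size_t size;
	bool is_cpu_map;	/* CPU mmap rather than a GTT mmap */
	bool mappable;		/* still fits in the mappable aperture */
};

static void retire_to_inactive(struct bo *bo)
{
	if (bo->map && !bo->is_cpu_map && !bo->mappable) {
		/* A GTT mapping we can no longer use cheaply; drop it now. */
		munmap(bo->map, bo->size);
		bo->map = NULL;
	}
	/* Any surviving mapping is moved onto the inactive vma cache. */
}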

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index be01f67..117cc5d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -851,8 +851,15 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	list_move(&bo->list, &kgem->inactive[bo->bucket]);
 	if (bo->map) {
 		int type = IS_CPU_MAP(bo->map);
-		list_move_tail(&bo->vma, &kgem->vma[type].inactive[bo->bucket]);
-		kgem->vma[type].count++;
+		if (!type && !kgem_bo_is_mappable(kgem, bo)) {
+			list_del(&bo->vma);
+			munmap(CPU_MAP(bo->map), bo->size);
+			bo->map = NULL;
+		}
+		if (bo->map) {
+			list_move_tail(&bo->vma, &kgem->vma[type].inactive[bo->bucket]);
+			kgem->vma[type].count++;
+		}
 	}
 
 	kgem->need_expire = true;

