xf86-video-intel: 11 commits - src/sna/gen2_render.c src/sna/gen3_render.c src/sna/gen4_render.c src/sna/gen5_render.c src/sna/gen6_render.c src/sna/gen7_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_driver.c src/sna/sna.h src/sna/sna_io.c

Chris Wilson ickle at kemper.freedesktop.org
Tue May 8 08:23:56 PDT 2012


 src/sna/gen2_render.c |    4 -
 src/sna/gen3_render.c |   41 +++++++-----
 src/sna/gen4_render.c |   37 ++++++-----
 src/sna/gen5_render.c |   44 ++++++-------
 src/sna/gen6_render.c |   19 ++---
 src/sna/gen7_render.c |   23 ++-----
 src/sna/kgem.c        |  162 +++++++++++++++++++++++++++++++-------------------
 src/sna/kgem.h        |   46 ++++++--------
 src/sna/sna.h         |    4 -
 src/sna/sna_accel.c   |   98 ++++++++++++++++++++----------
 src/sna/sna_driver.c  |    2 
 src/sna/sna_io.c      |   16 ++--
 12 files changed, 283 insertions(+), 213 deletions(-)

New commits:
commit a3d37fb29f8dffb0e370ad95783994aaa7eccfaf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 15:47:14 2012 +0100

    sna: Force remapping for IO transfer
    
    Should fix the regression from fcccc5528 (sna: Improve handling of
    inplace IO for large transfers) whereby the transfer was aborted if
    we needed to remap the buffer for the upload.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49546
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
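
The fix splits the old kgem_bo_map_will_stall() predicate into two
orthogonal questions, as the kgem.h hunk below shows. A minimal
caller-side sketch of the resulting idiom (the helper name here is
hypothetical):

	static bool can_write_inplace(struct kgem *kgem, struct kgem_bo *bo)
	{
		/* Can this bo ever be mapped? Already mapped, or linear
		 * on LLC hardware, or small relative to the mappable
		 * aperture. */
		if (!kgem_bo_can_map(kgem, bo))
			return false;

		/* Would mapping it stall right now? Only if the GPU
		 * still holds an outstanding request against it. */
		return !kgem_bo_is_busy(bo);
	}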

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index c120681..5a4bf75 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -452,32 +452,23 @@ static inline bool kgem_bo_mapped(struct kgem_bo *bo)
 	return IS_CPU_MAP(bo->map) == !bo->tiling;
 }
 
-static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
+static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
-		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
-	return bo->rq;
-}
-
-static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
-{
-	DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n",
-	     __FUNCTION__, bo->handle,
-	     bo->domain, bo->presumed_offset, bo->size));
-
-	if (!kgem_bo_is_mappable(kgem, bo) && kgem_bo_is_busy(bo))
+	if (kgem_bo_mapped(bo))
 		return true;
 
-	if (kgem->wedged)
-		return false;
-
-	if (kgem_bo_is_busy(bo))
+	if (!bo->tiling && kgem->has_llc)
 		return true;
 
-	if (bo->presumed_offset == 0)
-		return !list_is_empty(&kgem->requests);
+	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
+}
+
 
-	return false;
+static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
+{
+	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
+		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	return bo->rq;
 }
 
 static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2c0fd57..d52328d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -981,8 +981,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 				    priv->gpu_bo->exec == NULL)
 					kgem_retire(&sna->kgem);
 
-				if (kgem_bo_map_will_stall(&sna->kgem,
-							   priv->gpu_bo)) {
+				if (kgem_bo_is_busy(priv->gpu_bo)) {
 					if (priv->pinned)
 						goto skip_inplace_map;
 
@@ -1049,7 +1048,7 @@ skip_inplace_map:
 
 	if (flags & MOVE_INPLACE_HINT &&
 	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
+	    !kgem_bo_is_busy(priv->gpu_bo) &&
 	    pixmap_inplace(sna, pixmap, priv) &&
 	    sna_pixmap_move_to_gpu(pixmap, flags)) {
 		assert(flags & MOVE_WRITE);
@@ -1356,7 +1355,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			    priv->gpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
-			if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) {
+			if (!kgem_bo_is_busy(priv->gpu_bo)) {
 				pixmap->devPrivate.ptr =
 					kgem_bo_map(&sna->kgem, priv->gpu_bo);
 				if (pixmap->devPrivate.ptr == NULL)
@@ -1422,7 +1421,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 	if (flags & MOVE_INPLACE_HINT &&
 	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
+	    !kgem_bo_is_busy(priv->gpu_bo) &&
 	    region_inplace(sna, pixmap, region, priv) &&
 	    sna_pixmap_move_area_to_gpu(pixmap, &region->extents, flags)) {
 		assert(flags & MOVE_WRITE);
@@ -2733,7 +2732,7 @@ static bool upload_inplace(struct sna *sna,
 	if (priv->gpu_bo) {
 		assert(priv->gpu_bo->proxy == NULL);
 
-		if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo))
+		if (!kgem_bo_is_busy(priv->gpu_bo))
 			return true;
 
 		if (!priv->pinned &&
@@ -2795,7 +2794,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 		/* And mark as having a valid GTT mapping for future uploads */
 		if (priv->stride &&
-		    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) {
+		    !kgem_bo_is_busy(priv->gpu_bo)) {
 			pixmap->devPrivate.ptr =
 				kgem_bo_map(&sna->kgem, priv->gpu_bo);
 			if (pixmap->devPrivate.ptr) {
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 9b56c99..c39b1f1 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -116,8 +116,7 @@ static bool download_inplace(struct kgem *kgem, struct kgem_bo *bo)
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
-	return !kgem_bo_map_will_stall(kgem, bo) ||
-		bo->tiling == I915_TILING_NONE;
+	return !kgem_bo_is_busy(bo) || bo->tiling == I915_TILING_NONE;
 }
 
 void sna_read_boxes(struct sna *sna,
@@ -480,7 +479,7 @@ static bool write_boxes_inplace(struct kgem *kgem,
 	DBG(("%s x %d, handle=%d, tiling=%d\n",
 	     __FUNCTION__, n, bo->handle, bo->tiling));
 
-	if (!kgem_bo_is_mappable(kgem, bo))
+	if (!kgem_bo_can_map(kgem, bo))
 		return false;
 
 	kgem_bo_submit(kgem, bo);
@@ -525,11 +524,14 @@ static bool upload_inplace(struct kgem *kgem,
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
+	if (!kgem_bo_can_map(kgem, bo))
+		return false;
+
 	/* If we are writing through the GTT, check first if we might be
 	 * able to amalgamate a series of small writes into a single
 	 * operation.
 	 */
-	if (!bo->map || kgem_bo_map_will_stall(kgem, bo)) {
+	if (!kgem_bo_mapped(bo) || kgem_bo_is_busy(bo)) {
 		unsigned int bytes = 0;
 		while (n--) {
 			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
@@ -1146,7 +1148,7 @@ bool sna_replace(struct sna *sna,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling));
 
-	if ((!kgem_bo_mapped(bo) || bo->rq) &&
+	if ((!kgem_bo_mapped(bo) || kgem_bo_is_busy(bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
 		return true;
 
commit 2a9a93e4484e0c616724610f4c8019fcbaa7ad53
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 15:12:51 2012 +0100

    sna: Only avoid ring switching for indirect uploads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 3de164b..9b56c99 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -582,9 +582,7 @@ fallback:
 	}
 
 	/* Try to avoid switching rings... */
-	if (!can_blt ||
-	    kgem->ring == KGEM_RENDER ||
-	    (kgem->has_semaphores && kgem->mode == KGEM_NONE) ||
+	if (!can_blt || kgem->ring == KGEM_RENDER ||
 	    upload_too_large(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
 		PixmapRec tmp;
 
commit 613902b60e0f2ca2a916e68306a1a37bc236d00d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 14:17:46 2012 +0100

    sna: Fix off-by-one in checking available execbuffer slots
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
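
The kgem_check_*() helpers centralise the slot accounting that the
open-coded comparisons below got wrong. Their presumed shape (they are
defined in kgem.h but not shown in this diff):

	static inline bool kgem_check_reloc(struct kgem *kgem, int n)
	{
		return kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem);
	}

	static inline bool kgem_check_exec(struct kgem *kgem, int n)
	{
		return kgem->nexec + n <= KGEM_EXEC_SIZE(kgem);
	}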

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 9717aac..e1b31d7 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -524,7 +524,7 @@ gen2_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nreloc + 3 > KGEM_RELOC_SIZE(&sna->kgem)) {
+	if (!kgem_check_reloc(&sna->kgem, 3)) {
 		DBG(("%s: flushing batch: reloc %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nreloc + 3,
@@ -533,7 +533,7 @@ gen2_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nexec + 3 > KGEM_EXEC_SIZE(&sna->kgem)) {
+	if (!kgem_check_exec(&sna->kgem, 3)) {
 		DBG(("%s: flushing batch: exec %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nexec + 1,
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 39601d9..a0de1ee 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1314,7 +1314,7 @@ gen3_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - MAX_OBJECTS) {
+	if (!kgem_check_reloc(&sna->kgem, MAX_OBJECTS)) {
 		DBG(("%s: flushing batch: reloc %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nreloc,
@@ -1323,7 +1323,7 @@ gen3_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - MAX_OBJECTS - 1) {
+	if (!kgem_check_exec(&sna->kgem, MAX_OBJECTS)) {
 		DBG(("%s: flushing batch: exec %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nexec,
@@ -1792,9 +1792,9 @@ static int gen3_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 105: 5))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_reloc(&sna->kgem, 1))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 86e9ff8..3372e7e 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1157,9 +1157,9 @@ static int gen4_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, 25))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_reloc(&sna->kgem, 1))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 85343d8..b4d9203 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1161,9 +1161,9 @@ static int gen5_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 20 : 6))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_reloc(&sna->kgem, 2))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1613749..f3b7537 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1543,9 +1543,9 @@ static int gen6_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 5))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_reloc(&sna->kgem, 2))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5492c23..362ddff 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1647,9 +1647,9 @@ static int gen7_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 6))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_reloc(&sna->kgem, 2))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 11950e4..c120681 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -330,7 +330,8 @@ static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
 						  int num_surfaces)
 {
 	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
-		kgem_check_reloc(kgem, num_surfaces);
+		kgem_check_reloc(kgem, num_surfaces) &&
+		kgem_check_exec(kgem, num_surfaces);
 }
 
 static inline uint32_t *kgem_get_batch(struct kgem *kgem, int num_dwords)
commit 663e387b35c314c4c2bee8137d6b70d27fa9f729
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 11:45:34 2012 +0100

    sna: Only submit a batch to the scanout if it is not already busy
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
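
__kgem_flush() now reports whether the scanout bo is still busy, so the
flush timer can stop rearming once the bo goes idle. kgem_busy() itself
is not shown in this diff; presumably it wraps the BUSY ioctl along
these lines:

	static bool kgem_busy(struct kgem *kgem, int handle)
	{
		struct drm_i915_gem_busy busy;

		VG_CLEAR(busy);
		busy.handle = handle;
		/* querying busyness also makes the kernel emit any
		 * needed flush and update its flushing lists */
		(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);

		return busy.busy;
	}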

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bf24664..6703e7e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3001,10 +3001,10 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	__kgem_bo_destroy(kgem, bo);
 }
 
-void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
+bool __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
 {
 	/* The kernel will emit a flush *and* update its own flushing lists. */
-	kgem_busy(kgem, bo->handle);
+	return kgem_busy(kgem, bo->handle);
 }
 
 bool kgem_check_bo(struct kgem *kgem, ...)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 69562f4..11950e4 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -258,7 +258,7 @@ static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
 		_kgem_submit(kgem);
 }
 
-void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
+bool __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
 static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
 {
 	kgem_bo_submit(kgem, bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dbf7e30..2c0fd57 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11998,7 +11998,7 @@ static bool sna_accel_do_flush(struct sna *sna)
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
 			sna->timer_expire[FLUSH_TIMER] =
 				sna->time + sna->vblank_interval;
-			return true;
+			return priv->cpu_damage || !__kgem_flush(&sna->kgem, priv->gpu_bo);
 		}
 	} else {
 		if (priv->cpu_damage == NULL && priv->gpu_bo->exec == NULL) {
commit cd7a56b7313233190a4c4a735d4a141e99c9b688
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 08:53:41 2012 +0100

    sna: Throttle independently of scanout updates
    
    As we now throttle to relieve GTT pressure, it is beneficial to
    throttle consistently before blocking.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
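
The new THROTTLE_TIMER reuses the per-timer bitmask idiom already used
by FLUSH_TIMER and EXPIRE_TIMER; in outline (values taken from the
sna_accel.c hunk below):

	sna->timer_active |= 1 << THROTTLE_TIMER;		/* armed */
	sna->timer_ready  |= 1 << THROTTLE_TIMER;		/* fire on next tick */
	sna->timer_expire[THROTTLE_TIMER] = sna->time + 20;	/* ~20ms period */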

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c897658..bf24664 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -216,6 +216,9 @@ static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
 	if (kgem_retire(kgem))
 		return true;
 
+	if (!kgem->need_throttle)
+		return false;
+
 	if ((flags & CREATE_NO_THROTTLE) == 0)
 		kgem_throttle(kgem);
 
@@ -1520,7 +1523,7 @@ static void kgem_commit(struct kgem *kgem)
 		gem_close(kgem->fd, rq->bo->handle);
 	} else {
 		list_add_tail(&rq->list, &kgem->requests);
-		kgem->need_retire = 1;
+		kgem->need_throttle = kgem->need_retire = 1;
 	}
 
 	kgem->next_request = NULL;
@@ -1979,6 +1982,8 @@ void kgem_throttle(struct kgem *kgem)
 			   "When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
 		warned = 1;
 	}
+
+	kgem->need_throttle = 0;
 }
 
 static void kgem_expire_partial(struct kgem *kgem)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 9bb3bc9..69562f4 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -149,6 +149,7 @@ struct kgem {
 	uint32_t need_expire:1;
 	uint32_t need_purge:1;
 	uint32_t need_retire:1;
+	uint32_t need_throttle:1;
 	uint32_t scanout:1;
 	uint32_t flush_now:1;
 	uint32_t busy:1;
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 023c091..790f5ff 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -209,6 +209,7 @@ enum {
 
 enum {
 	FLUSH_TIMER = 0,
+	THROTTLE_TIMER,
 	EXPIRE_TIMER,
 	INACTIVE_TIMER,
 	NUM_TIMERS
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b37ee59..dbf7e30 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12015,6 +12015,31 @@ static bool sna_accel_do_flush(struct sna *sna)
 	return false;
 }
 
+static bool sna_accel_do_throttle(struct sna *sna)
+{
+	if (sna->flags & SNA_NO_THROTTLE)
+		return false;
+
+	if (sna->timer_active & (1<<(THROTTLE_TIMER))) {
+		if (sna->timer_ready & (1<<(THROTTLE_TIMER))) {
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
+			sna->timer_expire[THROTTLE_TIMER] = sna->time + 20;
+			return true;
+		}
+	} else {
+		if (!sna->kgem.need_retire) {
+			DBG(("%s -- no pending activity\n", __FUNCTION__));
+		} else {
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
+			sna->timer_active |= 1 << THROTTLE_TIMER;
+			sna->timer_ready |= 1 << THROTTLE_TIMER;
+			sna->timer_expire[THROTTLE_TIMER] = sna->time + 20;
+		}
+	}
+
+	return false;
+}
+
 static bool sna_accel_do_expire(struct sna *sna)
 {
 	if (sna->timer_active & (1<<(EXPIRE_TIMER))) {
@@ -12089,18 +12114,17 @@ static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
 	return next;
 }
 
-static bool sna_accel_flush(struct sna *sna)
+static void sna_accel_flush(struct sna *sna)
 {
 	struct sna_pixmap *priv = sna_accel_scanout(sna);
-	bool need_throttle = priv->gpu_bo->rq;
-	bool busy = priv->cpu_damage || need_throttle;
+	bool busy = priv->cpu_damage || priv->gpu_bo->rq;
 
-	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d, need_throttle=%d\n",
+	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d\n",
 	     __FUNCTION__, (long)sna->time,
 	     priv->cpu_damage,
 	     priv->gpu_bo->exec != NULL,
 	     sna->kgem.nbatch,
-	     sna->kgem.busy, need_throttle));
+	     sna->kgem.busy));
 
 	if (!sna->kgem.busy && !busy)
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
@@ -12111,8 +12135,17 @@ static bool sna_accel_flush(struct sna *sna)
 
 	kgem_bo_flush(&sna->kgem, priv->gpu_bo);
 	sna->kgem.flush_now = 0;
+}
+
+static void sna_accel_throttle(struct sna *sna)
+{
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
+
+	if (sna->kgem.need_throttle)
+		kgem_throttle(&sna->kgem);
 
-	return need_throttle;
+	if (!sna->kgem.need_retire)
+		sna_accel_disarm_timer(sna, THROTTLE_TIMER);
 }
 
 static void sna_accel_expire(struct sna *sna)
@@ -12362,21 +12395,14 @@ void sna_accel_close(struct sna *sna)
 	kgem_cleanup_cache(&sna->kgem);
 }
 
-static void sna_accel_throttle(struct sna *sna)
-{
-	if (sna->flags & SNA_NO_THROTTLE)
-		return;
-
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
-
-	kgem_throttle(&sna->kgem);
-}
-
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
 	sna->time = GetTimeInMillis();
 
-	if (sna_accel_do_flush(sna) && sna_accel_flush(sna))
+	if (sna_accel_do_flush(sna))
+		sna_accel_flush(sna);
+
+	if (sna_accel_do_throttle(sna))
 		sna_accel_throttle(sna);
 
 	if (sna_accel_do_expire(sna))
commit 2372176f73b7c945f56b7673eab5eccb86366416
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 12:57:35 2012 +0100

    sna: Throttle execution when searching for inactive buffers
    
    If we have some active buffers that we may reuse, as well as old
    outstanding requests, throttling before retiring should prevent the
    CPU from running away from the GPU and hogging the entire GTT and RAM.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
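
__kgem_throttle_retire() is shown in full in the first hunk below. The
kgem_throttle() it falls back to is the blocking half; judging from
kgem_init() elsewhere in kgem.c, it reduces to the THROTTLE ioctl,
roughly:

	/* sketch: blocks until the GPU catches up; -EIO means wedged */
	if (drmCommandNone(kgem->fd, DRM_I915_GEM_THROTTLE) == -EIO)
		kgem->wedged = 1;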

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index b1e2654..c897658 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -205,6 +205,23 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 	return set_tiling.tiling_mode;
 }
 
+static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
+{
+	if (flags & CREATE_NO_RETIRE)
+		return false;
+
+	if (!kgem->need_retire)
+		return false;
+
+	if (kgem_retire(kgem))
+		return true;
+
+	if ((flags & CREATE_NO_THROTTLE) == 0)
+		kgem_throttle(kgem);
+
+	return kgem_retire(kgem);
+}
+
 static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_i915_gem_mmap_gtt mmap_arg;
@@ -219,8 +236,7 @@ retry_gtt:
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
 		ErrorF("%s: failed to achieve GTT offset for handle=%d: %d\n",
 		       __FUNCTION__, bo->handle, errno);
-		kgem_throttle(kgem);
-		kgem_retire(kgem);
+		__kgem_throttle_retire(kgem, 0);
 		if (kgem_expire_cache(kgem))
 			goto retry_gtt;
 
@@ -234,8 +250,7 @@ retry_mmap:
 	if (ptr == MAP_FAILED) {
 		ErrorF("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
 		       __FUNCTION__, bo->handle, bytes(bo), errno);
-		kgem_throttle(kgem);
-		if (kgem_retire(kgem))
+		if (__kgem_throttle_retire(kgem, 0))
 			goto retry_mmap;
 
 		assert(0);
@@ -1810,7 +1825,7 @@ void _kgem_submit(struct kgem *kgem)
 		size = compact_batch_surface(kgem);
 	else
 		size = kgem->nbatch * sizeof(kgem->batch[0]);
-	rq->bo = kgem_create_linear(kgem, size, 0);
+	rq->bo = kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
 	if (rq->bo) {
 		uint32_t handle = rq->bo->handle;
 		int i;
@@ -2150,7 +2165,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			return NULL;
 		}
 
-		if (!kgem->need_retire || !kgem_retire(kgem)) {
+		if (!__kgem_throttle_retire(kgem, 0)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
 			return NULL;
 		}
@@ -2641,7 +2656,8 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				bo->refcnt = 1;
 				return bo;
 			}
-		} while (!list_is_empty(cache) && kgem_retire(kgem));
+		} while (!list_is_empty(cache) &&
+			 __kgem_throttle_retire(kgem, flags));
 	}
 
 	if (flags & CREATE_INACTIVE)
@@ -2858,11 +2874,9 @@ search_inactive:
 		return bo;
 	}
 
-	if (flags & CREATE_INACTIVE && !list_is_empty(&kgem->requests)) {
-		if (kgem_retire(kgem)) {
-			flags &= ~CREATE_INACTIVE;
-			goto search_inactive;
-		}
+	if (flags & CREATE_INACTIVE && __kgem_throttle_retire(kgem, flags)) {
+		flags &= ~CREATE_INACTIVE;
+		goto search_inactive;
 	}
 
 	if (--retry) {
@@ -3375,8 +3389,7 @@ retry:
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
 		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
 		       __FUNCTION__, bo->handle, bytes(bo), errno);
-		kgem_throttle(kgem);
-		if (kgem_retire(kgem))
+		if (__kgem_throttle_retire(kgem, 0))
 			goto retry;
 
 		return NULL;
@@ -3708,7 +3721,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 
 			goto done;
-		} while (kgem_retire(kgem));
+		} while (__kgem_throttle_retire(kgem, 0));
 	}
 
 #if !DBG_NO_MAP_UPLOAD
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 52bc6f2..9bb3bc9 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -225,6 +225,7 @@ enum {
 	CREATE_SCANOUT = 0x10,
 	CREATE_TEMPORARY = 0x20,
 	CREATE_NO_RETIRE = 0x40,
+	CREATE_NO_THROTTLE = 0x80,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
commit 4df228749729dd540b639368400fa20118cdf412
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 10:42:30 2012 +0100

    sna: Rate-limit and shrink bo usage if we hit system resource limits
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
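
The recovery pattern, condensed (flow taken from the __kgem_bo_map__gtt
hunk below): when an mmap or ioctl fails for lack of resources, drain
the GPU and reclaim what has completed, then retry:

	retry:
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
			kgem_throttle(kgem);	/* block until the GPU drains */
			kgem_retire(kgem);	/* reclaim completed requests */
			if (kgem_expire_cache(kgem))
				goto retry;	/* freed something: try again */
			return NULL;		/* genuinely out of resources */
		}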

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 31c1a35..b1e2654 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -205,23 +205,39 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 	return set_tiling.tiling_mode;
 }
 
-static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
+static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_i915_gem_mmap_gtt mmap_arg;
 	void *ptr;
 
-	DBG(("%s(handle=%d, size=%d, prot=%s)\n", __FUNCTION__,
-	     handle, size, prot & PROT_WRITE ? "read/write" : "read-only"));
+	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
+	     bo->handle, bytes(bo)));
 
+retry_gtt:
 	VG_CLEAR(mmap_arg);
-	mmap_arg.handle = handle;
-	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
+	mmap_arg.handle = bo->handle;
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
+		ErrorF("%s: failed to achieve GTT offset for handle=%d: %d\n",
+		       __FUNCTION__, bo->handle, errno);
+		kgem_throttle(kgem);
+		kgem_retire(kgem);
+		if (kgem_expire_cache(kgem))
+			goto retry_gtt;
+
 		assert(0);
 		return NULL;
 	}
 
-	ptr = mmap(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
+retry_mmap:
+	ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
+		   kgem->fd, mmap_arg.offset);
 	if (ptr == MAP_FAILED) {
+		ErrorF("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
+		       __FUNCTION__, bo->handle, bytes(bo), errno);
+		kgem_throttle(kgem);
+		if (kgem_retire(kgem))
+			goto retry_mmap;
+
 		assert(0);
 		ptr = NULL;
 	}
@@ -3255,8 +3271,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 
 		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 
-		ptr = gem_mmap(kgem->fd, bo->handle, bytes(bo),
-			       PROT_READ | PROT_WRITE);
+		ptr = __kgem_bo_map__gtt(kgem, bo);
 		if (ptr == NULL)
 			return NULL;
 
@@ -3310,8 +3325,7 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 
 		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 
-		ptr = gem_mmap(kgem->fd, bo->handle, bytes(bo),
-			       PROT_READ | PROT_WRITE);
+		ptr = __kgem_bo_map__gtt(kgem, bo);
 		if (ptr == NULL)
 			return NULL;
 
@@ -3333,8 +3347,7 @@ void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 		return MAP(bo->map);
 
 	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
-	return bo->map = gem_mmap(kgem->fd, bo->handle, bytes(bo),
-				  PROT_READ | PROT_WRITE);
+	return bo->map = __kgem_bo_map__gtt(kgem, bo);
 }
 
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
@@ -3354,13 +3367,18 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 
 	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));
 
+retry:
 	VG_CLEAR(mmap_arg);
 	mmap_arg.handle = bo->handle;
 	mmap_arg.offset = 0;
 	mmap_arg.size = bytes(bo);
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
-		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain\n",
-		       __FUNCTION__, bo->handle, bytes(bo));
+		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
+		       __FUNCTION__, bo->handle, bytes(bo), errno);
+		kgem_throttle(kgem);
+		if (kgem_retire(kgem))
+			goto retry;
+
 		return NULL;
 	}
 
commit ca4d2296e6e42e837627756790b262cae0fd3b6c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 10:23:19 2012 +0100

    sna: Expand batch buffers
    
    As batch buffers are compacted to fit into the smallest bo, the only
    cost is the larger static array allocation (and presumably cache
    misses).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
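
A worked example of the new compact_batch_surface() in the kgem.c hunk
below, assuming gen7's 16*1024-dword batch holding 700 dwords of
commands and 100 dwords of surface state (numbers invented for
illustration, so surface = 16384 - 100 = 16284):

	n      = ALIGN(16384, 1024)               = 16384
	size   = ALIGN(16384 - 16284 + 700, 1024) = 1024 dwords
	shrink = (16384 - 1024) * 4               = 61440 bytes

The surface-state relocations are slid down by 61440 bytes and the
request bo is allocated at 1024*4 = 4096 bytes rather than the full
batch, which is what keeps the enlarged static array cheap.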

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 01651a5..86e9ff8 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -745,7 +745,7 @@ static bool gen4_check_repeat(PicturePtr picture)
  * Sets up the common fields for a surface state buffer for the given
  * picture in the given surface state buffer.
  */
-static int
+static uint32_t
 gen4_bind_bo(struct sna *sna,
 	     struct kgem_bo *bo,
 	     uint32_t width,
@@ -766,14 +766,11 @@ gen4_bind_bo(struct sna *sna,
 
 	offset = kgem_bo_get_binding(bo, format);
 	if (offset)
-		return offset;
-
-	offset = sna->kgem.surface - sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
+		return offset * sizeof(uint32_t);
 
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
-	ss = memset(sna->kgem.batch + sna->kgem.surface, 0, sizeof(*ss));
+	ss = memset(sna->kgem.batch + offset, 0, sizeof(*ss));
 
 	ss->ss0.surface_type = GEN4_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -781,9 +778,7 @@ gen4_bind_bo(struct sna *sna,
 	ss->ss0.data_return_format = GEN4_SURFACERETURNFORMAT_FLOAT32;
 	ss->ss0.color_blend = 1;
 	ss->ss1.base_addr =
-		kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+		kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 
 	ss->ss2.height = height - 1;
 	ss->ss2.width  = width - 1;
@@ -798,7 +793,7 @@ gen4_bind_bo(struct sna *sna,
 	     ss->ss0.surface_format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 0b63009..85343d8 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -749,7 +749,7 @@ gen5_tiling_bits(uint32_t tiling)
  * Sets up the common fields for a surface state buffer for the given
  * picture in the given surface state buffer.
  */
-static int
+static uint32_t
 gen5_bind_bo(struct sna *sna,
 	     struct kgem_bo *bo,
 	     uint32_t width,
@@ -771,23 +771,18 @@ gen5_bind_bo(struct sna *sna,
 	if (!DBG_NO_SURFACE_CACHE) {
 		offset = kgem_bo_get_binding(bo, format);
 		if (offset)
-			return offset;
+			return offset * sizeof(uint32_t);
 	}
 
-	offset = sna->kgem.surface - sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
-
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
-	ss = sna->kgem.batch + sna->kgem.surface;
+	ss = sna->kgem.batch + offset;
 
 	ss[0] = (GEN5_SURFACE_2D << GEN5_SURFACE_TYPE_SHIFT |
 		 GEN5_SURFACE_BLEND_ENABLED |
 		 format << GEN5_SURFACE_FORMAT_SHIFT);
 
-	ss[1] = kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 
 	ss[2] = ((width - 1)  << GEN5_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN5_SURFACE_HEIGHT_SHIFT);
@@ -803,7 +798,7 @@ gen5_bind_bo(struct sna *sna,
 	     format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 55673bf..1613749 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1198,21 +1198,16 @@ gen6_bind_bo(struct sna *sna,
 		DBG(("[%x]  bo(handle=%d), format=%d, reuse %s binding\n",
 		     offset, bo->handle, format,
 		     domains & 0xffff ? "render" : "sampler"));
-		return offset;
+		return offset * sizeof(uint32_t);
 	}
 
-	offset = sna->kgem.surface - sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
-
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t);
-	ss = sna->kgem.batch + sna->kgem.surface;
+	ss = sna->kgem.batch + offset;
 	ss[0] = (GEN6_SURFACE_2D << GEN6_SURFACE_TYPE_SHIFT |
 		 GEN6_SURFACE_BLEND_ENABLED |
 		 format << GEN6_SURFACE_FORMAT_SHIFT);
-	ss[1] = kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 	ss[2] = ((width - 1)  << GEN6_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN6_SURFACE_HEIGHT_SHIFT);
 	assert(bo->pitch <= (1 << 18));
@@ -1228,7 +1223,7 @@ gen6_bind_bo(struct sna *sna,
 	     format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e128f5c..5492c23 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1293,7 +1293,7 @@ gen7_tiling_bits(uint32_t tiling)
  * Sets up the common fields for a surface state buffer for the given
  * picture in the given surface state buffer.
  */
-static int
+static uint32_t
 gen7_bind_bo(struct sna *sna,
 	     struct kgem_bo *bo,
 	     uint32_t width,
@@ -1303,7 +1303,7 @@ gen7_bind_bo(struct sna *sna,
 {
 	uint32_t *ss;
 	uint32_t domains;
-	uint16_t offset;
+	int offset;
 
 	COMPILE_TIME_ASSERT(sizeof(struct gen7_surface_state) == 32);
 
@@ -1316,20 +1316,15 @@ gen7_bind_bo(struct sna *sna,
 
 	offset = kgem_bo_get_binding(bo, format);
 	if (offset)
-		return offset;
+		return offset * sizeof(uint32_t);
 
-	offset = sna->kgem.surface - sizeof(struct gen7_surface_state) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
-
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen7_surface_state) / sizeof(uint32_t);
-	ss = sna->kgem.batch + sna->kgem.surface;
+	ss = sna->kgem.batch + offset;
 	ss[0] = (GEN7_SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
 		 gen7_tiling_bits(bo->tiling) |
 		 format << GEN7_SURFACE_FORMAT_SHIFT);
-	ss[1] = kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 	ss[2] = ((width - 1)  << GEN7_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN7_SURFACE_HEIGHT_SHIFT);
 	ss[3] = (bo->pitch - 1) << GEN7_SURFACE_PITCH_SHIFT;
@@ -1345,7 +1340,7 @@ gen7_bind_bo(struct sna *sna,
 	     format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
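
Note the unit change running through the gen[4-7]_bind_bo() hunks
above: kgem.surface, and presumably the cached binding, now hold dword
indices into the batch, and only the value handed back to the caller is
scaled to a byte offset (ss_size_in_dwords is shorthand here):

	offset = sna->kgem.surface -= ss_size_in_dwords;	/* dword index */
	ss = sna->kgem.batch + offset;	/* batch is a uint32_t array */
	...
	return offset * sizeof(uint32_t);	/* byte offset for the hw */
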
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0799ea7..31c1a35 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -595,10 +595,12 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->wedged = drmCommandNone(kgem->fd, DRM_I915_GEM_THROTTLE) == -EIO;
 	kgem->wedged |= DBG_NO_HW;
 
-	kgem->max_batch_size = ARRAY_SIZE(kgem->batch);
+	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
 		/* 865g cannot handle a batch spanning multiple pages */
-		kgem->max_batch_size = PAGE_SIZE / sizeof(uint32_t);
+		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
+	if (gen == 70)
+		kgem->batch_size = 16*1024;
 
 	kgem->min_alignment = 4;
 	if (gen < 40)
@@ -1656,16 +1658,16 @@ static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
 	assert(!kgem_busy(kgem, handle));
 
 	/* If there is no surface data, just upload the batch */
-	if (kgem->surface == kgem->max_batch_size)
+	if (kgem->surface == kgem->batch_size)
 		return gem_write(kgem->fd, handle,
 				 0, sizeof(uint32_t)*kgem->nbatch,
 				 kgem->batch);
 
 	/* Are the batch pages conjoint with the surface pages? */
-	if (kgem->surface < kgem->nbatch + PAGE_SIZE/4) {
-		assert(size == sizeof(kgem->batch));
+	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
+		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
 		return gem_write(kgem->fd, handle,
-				 0, sizeof(kgem->batch),
+				 0, kgem->batch_size*sizeof(uint32_t),
 				 kgem->batch);
 	}
 
@@ -1676,11 +1678,11 @@ static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
 	if (ret)
 		return ret;
 
-	assert(kgem->nbatch*sizeof(uint32_t) <=
-	       sizeof(uint32_t)*kgem->surface - (sizeof(kgem->batch)-size));
+	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
+	ret -= sizeof(uint32_t) * kgem->surface;
+	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
 	return __gem_write(kgem->fd, handle,
-			sizeof(uint32_t)*kgem->surface - (sizeof(kgem->batch)-size),
-			sizeof(kgem->batch) - sizeof(uint32_t)*kgem->surface,
+			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
 			kgem->batch + kgem->surface);
 }
 
@@ -1719,7 +1721,7 @@ void kgem_reset(struct kgem *kgem)
 	kgem->aperture = 0;
 	kgem->aperture_fenced = 0;
 	kgem->nbatch = 0;
-	kgem->surface = kgem->max_batch_size;
+	kgem->surface = kgem->batch_size;
 	kgem->mode = KGEM_NONE;
 	kgem->flush = 0;
 	kgem->scanout = 0;
@@ -1734,24 +1736,26 @@ static int compact_batch_surface(struct kgem *kgem)
 	int size, shrink, n;
 
 	/* See if we can pack the contents into one or two pages */
-	size = kgem->max_batch_size - kgem->surface + kgem->nbatch;
-	if (size > 2048)
-		return sizeof(kgem->batch);
-	else if (size > 1024)
-		size = 8192, shrink = 2*4096;
-	else
-		size = 4096, shrink = 3*4096;
-
-	for (n = 0; n < kgem->nreloc; n++) {
-		if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
-		    kgem->reloc[n].target_handle == 0)
-			kgem->reloc[n].delta -= shrink;
-
-		if (kgem->reloc[n].offset >= size)
-			kgem->reloc[n].offset -= shrink;
+	n = ALIGN(kgem->batch_size, 1024);
+	size = n - kgem->surface + kgem->nbatch;
+	size = ALIGN(size, 1024);
+
+	shrink = n - size;
+	if (shrink) {
+		DBG(("shrinking from %d to %d\n", kgem->batch_size, size));
+
+		shrink *= sizeof(uint32_t);
+		for (n = 0; n < kgem->nreloc; n++) {
+			if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
+			    kgem->reloc[n].target_handle == 0)
+				kgem->reloc[n].delta -= shrink;
+
+			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
+				kgem->reloc[n].offset -= shrink;
+		}
 	}
 
-	return size;
+	return size * sizeof(uint32_t);
 }
 
 void _kgem_submit(struct kgem *kgem)
@@ -1769,11 +1773,11 @@ void _kgem_submit(struct kgem *kgem)
 	batch_end = kgem_end_batch(kgem);
 	kgem_sna_flush(kgem);
 
-	DBG(("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
-	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
+	DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
+	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
 	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));
 
-	assert(kgem->nbatch <= kgem->max_batch_size);
+	assert(kgem->nbatch <= kgem->batch_size);
 	assert(kgem->nbatch <= kgem->surface);
 	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
 	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
@@ -1786,7 +1790,7 @@ void _kgem_submit(struct kgem *kgem)
 #endif
 
 	rq = kgem->next_request;
-	if (kgem->surface != kgem->max_batch_size)
+	if (kgem->surface != kgem->batch_size)
 		size = compact_batch_surface(kgem);
 	else
 		size = kgem->nbatch * sizeof(kgem->batch[0]);
@@ -1821,7 +1825,7 @@ void _kgem_submit(struct kgem *kgem)
 			execbuf.buffers_ptr = (uintptr_t)kgem->exec;
 			execbuf.buffer_count = kgem->nexec;
 			execbuf.batch_start_offset = 0;
-			execbuf.batch_len = batch_end*4;
+			execbuf.batch_len = batch_end*sizeof(uint32_t);
 			execbuf.cliprects_ptr = 0;
 			execbuf.num_cliprects = 0;
 			execbuf.DR1 = 0;
@@ -1835,7 +1839,7 @@ void _kgem_submit(struct kgem *kgem)
 					      O_WRONLY | O_CREAT | O_APPEND,
 					      0666);
 				if (fd != -1) {
-					ret = write(fd, kgem->batch, batch_end*4);
+					ret = write(fd, kgem->batch, batch_end*sizeof(uint32_t));
 					fd = close(fd);
 				}
 			}
@@ -1864,7 +1868,7 @@ void _kgem_submit(struct kgem *kgem)
 
 				i = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
 				if (i != -1) {
-					ret = write(i, kgem->batch, batch_end*4);
+					ret = write(i, kgem->batch, batch_end*sizeof(uint32_t));
 					close(i);
 				}
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 186eaa0..52bc6f2 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -142,7 +142,7 @@ struct kgem {
 	uint16_t nreloc;
 	uint16_t nfence;
 	uint16_t wait;
-	uint16_t max_batch_size;
+	uint16_t batch_size;
 	uint16_t min_alignment;
 
 	uint32_t flush:1;
@@ -170,9 +170,9 @@ struct kgem {
 	void (*context_switch)(struct kgem *kgem, int new_mode);
 	void (*retire)(struct kgem *kgem);
 
-	uint32_t batch[4*1024];
+	uint32_t batch[64*1024-8];
 	struct drm_i915_gem_exec_object2 exec[256];
-	struct drm_i915_gem_relocation_entry reloc[612];
+	struct drm_i915_gem_relocation_entry reloc[4096];
 };
 
 #define KGEM_BATCH_RESERVED 1
@@ -180,7 +180,7 @@ struct kgem {
 #define KGEM_EXEC_RESERVED 1
 
 #define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
-#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED)
+#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
 #define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
 #define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
 
commit 9281b80644ce76ad9e0f3f8f812cbae97c10814a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 23:25:22 2012 +0100

    sna/gen[345]: Clear used vertices when discarding unmappable vbo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
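
The bug being fixed: on discarding an unmappable vbo, the gen[345]
reset paths pointed vertices back at the static vertex_data array but
left vertex_used and vertex_index at their old values, so the next
vertex was emitted at a stale offset that could lie beyond the static
buffer. A hypothetical post-reset state before this fix:

	sna->render.vertices    = sna->render.vertex_data;
	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
	sna->render.vertex_used = 3000;	/* stale; invented value */
	/* the next emit writes vertex_data[3000], past the array when
	 * vertex_used > vertex_size -- which the new assert in
	 * gen3_vertex_finish() now catches */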

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index e73c707..39601d9 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1620,7 +1620,11 @@ static int gen3_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 
+	DBG(("%s: used=%d/%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vertex_size,
+	     sna->render.vbo ? sna->render.vbo->handle : 0));
 	assert(sna->render.vertex_used);
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
 	bo = sna->render.vbo;
 	if (bo) {
@@ -1668,8 +1672,9 @@ static void gen3_vertex_close(struct sna *sna)
 
 	assert(sna->render_state.gen3.vertex_offset == 0);
 
-	DBG(("%s: used=%d, vbo active? %d\n",
-	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+	DBG(("%s: used=%d/%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vertex_size,
+	     sna->render.vbo ? sna->render.vbo->handle : 0));
 
 	if (sna->render.vertex_used == 0) {
 		assert(sna->render.vbo == NULL);
@@ -1730,6 +1735,7 @@ static void gen3_vertex_close(struct sna *sna)
 	}
 
 	if (sna->render.vbo == NULL) {
+		DBG(("%s: resetting vbo\n", __FUNCTION__));
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1937,6 +1943,17 @@ gen3_render_composite_done(struct sna *sna,
 }
 
 static void
+discard_vbo(struct sna *sna)
+{
+	kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	sna->render.vertex_used = 0;
+	sna->render.vertex_index = 0;
+}
+
+static void
 gen3_render_reset(struct sna *sna)
 {
 	struct gen3_render_state *state = &sna->render_state.gen3;
@@ -1961,10 +1978,7 @@ gen3_render_reset(struct sna *sna)
 	if (sna->render.vbo &&
 	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
 		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		discard_vbo(sna);
 	}
 }
 
@@ -1976,12 +1990,7 @@ gen3_render_retire(struct kgem *kgem)
 	sna = container_of(kgem, struct sna, kgem);
 	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
-		kgem_bo_destroy(kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
-		sna->render.vertex_used = 0;
-		sna->render.vertex_index = 0;
+		discard_vbo(sna);
 	}
 }
 
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 9cad75e..01651a5 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -3224,6 +3224,17 @@ gen4_render_flush(struct sna *sna)
 	gen4_vertex_close(sna);
 }
 
+static void
+discard_vbo(struct sna *sna)
+{
+	kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	sna->render.vertex_used = 0;
+	sna->render.vertex_index = 0;
+}
+
 static void gen4_render_reset(struct sna *sna)
 {
 	sna->render_state.gen4.needs_invariant = TRUE;
@@ -3240,10 +3251,7 @@ static void gen4_render_reset(struct sna *sna)
 	if (sna->render.vbo &&
 	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
 		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		discard_vbo(sna);
 	}
 }
 
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 54d0b22..0b63009 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3648,6 +3648,17 @@ gen5_render_context_switch(struct kgem *kgem,
 }
 
 static void
+discard_vbo(struct sna *sna)
+{
+	kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	sna->render.vertex_used = 0;
+	sna->render.vertex_index = 0;
+}
+
+static void
 gen5_render_retire(struct kgem *kgem)
 {
 	struct sna *sna;
@@ -3655,12 +3666,7 @@ gen5_render_retire(struct kgem *kgem)
 	sna = container_of(kgem, struct sna, kgem);
 	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
-		kgem_bo_destroy(kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
-		sna->render.vertex_used = 0;
-		sna->render.vertex_index = 0;
+		discard_vbo(sna);
 	}
 }
 
@@ -3679,10 +3685,7 @@ static void gen5_render_reset(struct sna *sna)
 	if (sna->render.vbo &&
 	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
 		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		discard_vbo(sna);
 	}
 }
 
commit a6ee376e93517659391905e6c9018b3bb735135d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 10:24:21 2012 +0100

    sna: Use the correct invocation of kgem_bo_destroy() for sanity-checks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
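
Presumed convention behind the one-line fix below (used throughout
kgem): the underscored _kgem_bo_destroy() is the internal path, while
the public kgem_bo_destroy() wrapper carries the refcount handling and
sanity-checks, something like:

	static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
	{
		assert(bo->refcnt);
		if (--bo->refcnt == 0)
			_kgem_bo_destroy(kgem, bo);
	}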

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6e0c3d0..0799ea7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2891,7 +2891,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 			return bo;
 
 		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
-			_kgem_bo_destroy(kgem, bo);
+			kgem_bo_destroy(kgem, bo);
 			return NULL;
 		}
 
commit c89c7e9a04314e40cee5514a182a8364c4f99374
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 11:54:50 2012 +0100

    sna: Update select timeout when installing a timer in the block handler
    
    The block handler is run after the timers are queried for their
    expiry, so if we install a timer in the block handler, we must set
    the select() timeout ourselves.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
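
The arithmetic at the heart of the new hunk, worked through with an
assumed timeout of 1016ms: the millisecond value returned by
sna_timeout() is folded into the server's select() timeval, and the
guard only ever tightens an existing timeout, never loosens it:

	timeout = 1016;				/* ms until the next timer */
	(*tv)->tv_sec  = timeout / 1000;	/* -> 1 second */
	(*tv)->tv_usec = timeout % 1000 * 1000;	/* -> 16000 microseconds */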

diff --git a/src/sna/sna.h b/src/sna/sna.h
index d7a20b9..023c091 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -230,6 +230,7 @@ struct sna {
 	uint32_t timer_expire[NUM_TIMERS];
 	uint16_t timer_active;
 	uint16_t timer_ready;
+	struct timeval timer_tv;
 
 	int vblank_interval;
 
@@ -561,7 +562,7 @@ static inline uint32_t pixmap_size(PixmapPtr pixmap)
 
 Bool sna_accel_pre_init(struct sna *sna);
 Bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
-void sna_accel_block_handler(struct sna *sna);
+void sna_accel_block_handler(struct sna *sna, struct timeval **tv);
 void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready);
 void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 274facb..b37ee59 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12372,7 +12372,7 @@ static void sna_accel_throttle(struct sna *sna)
 	kgem_throttle(&sna->kgem);
 }
 
-void sna_accel_block_handler(struct sna *sna)
+void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
 	sna->time = GetTimeInMillis();
 
@@ -12392,15 +12392,24 @@ void sna_accel_block_handler(struct sna *sna)
 	}
 
 	if (sna->timer_ready) {
+		int32_t timeout;
+
 		DBG(("%s: evaluating timers, ready=%x\n",
 		     __FUNCTION__, sna->timer_ready));
 		sna->timer_ready = 0;
-		TimerSet(sna->timer, 0,
-			 sna_timeout(sna->timer,
-				     sna->time,
-				     sna),
-			 sna_timeout, sna);
-		assert(sna->timer_ready == 0);
+		timeout = sna_timeout(sna->timer, sna->time, sna);
+		TimerSet(sna->timer, 0, timeout, sna_timeout, sna);
+		if (timeout) {
+			if (*tv == NULL) {
+				*tv = &sna->timer_tv;
+				goto set_tv;
+			}
+			if ((*tv)->tv_sec * 1000 + (*tv)->tv_usec / 1000 > timeout) {
+set_tv:
+				(*tv)->tv_sec = timeout / 1000;
+				(*tv)->tv_usec = timeout % 1000 * 1000;
+			}
+		}
 	}
 }
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 150e973..c213ff4 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -590,7 +590,7 @@ sna_block_handler(int i, pointer data, pointer timeout, pointer read_mask)
 	sna->BlockHandler(i, sna->BlockData, timeout, read_mask);
 
 	if (*tv == NULL || ((*tv)->tv_usec | (*tv)->tv_sec))
-		sna_accel_block_handler(sna);
+		sna_accel_block_handler(sna, tv);
 }
 
 static void

