xf86-video-intel: 12 commits - configure.ac src/intel_uxa.c src/sna/gen2_render.c src/sna/gen3_render.c src/sna/kgem.c src/sna/kgem_debug.c src/sna/kgem_debug_gen3.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna.h src/sna/sna_render.h

Chris Wilson ickle at kemper.freedesktop.org
Mon Oct 17 05:20:06 PDT 2011


 configure.ac              |    4 
 src/intel_uxa.c           |   25 +-
 src/sna/gen2_render.c     |  575 +++++++++++++++++++++++-----------------------
 src/sna/gen3_render.c     |   69 +++--
 src/sna/kgem.c            |   19 -
 src/sna/kgem.h            |   13 -
 src/sna/kgem_debug.c      |    6 
 src/sna/kgem_debug_gen3.c |    1 
 src/sna/sna.h             |    1 
 src/sna/sna_accel.c       |  167 ++++++++++++-
 src/sna/sna_render.h      |    8 
 11 files changed, 525 insertions(+), 363 deletions(-)

New commits:
commit 46f97127c22ea42bc8fdae59d2a133e4b8b6c997
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 16 21:40:15 2011 +0100

    snb,ivb: Workaround unknown blitter death
    
    The first workaround was a performance-killing MI_FLUSH_DW after every
    op. This workaround appears to be a stable compromise instead,
    requiring only a redundant command after every BLT command, with
    little impact on throughput.
    
    Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=27892
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=39524
    Tested-by: Daniel Vetter <daniel.vetter at ffwll.ch>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
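
    In short (condensed from the diff below, using the IS_GEN6,
    BEGIN_BATCH_BLT and XY_SETUP_CLIP_BLT_CMD names already present in
    intel_uxa.c), the fix appends a harmless three-dword XY_SETUP_CLIP_BLT
    with an empty clip rectangle after each batch of BLT work on gen6/gen7:

        if (IS_GEN6(intel) || IS_GEN7(intel)) {
                /* redundant command that appears to avoid the sporadic hang */
                BEGIN_BATCH_BLT(3);
                OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
                OUT_BATCH(0);   /* degenerate clip rectangle */
                OUT_BATCH(0);
                ADVANCE_BATCH();
        }

    The same hook is now installed for both done_solid and done_copy.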

diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 30717d0..9e58c69 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -340,13 +340,6 @@ static void intel_uxa_solid(PixmapPtr pixmap, int x1, int y1, int x2, int y2)
 	}
 }
 
-static void intel_uxa_done_solid(PixmapPtr pixmap)
-{
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
-
-	intel_debug_flush(scrn);
-}
-
 /**
  * TODO:
  *   - support planemask using FULL_BLT_CMD?
@@ -501,9 +494,19 @@ intel_uxa_copy(PixmapPtr dest, int src_x1, int src_y1, int dst_x1,
 	}
 }
 
-static void intel_uxa_done_copy(PixmapPtr dest)
+static void intel_uxa_done(PixmapPtr pixmap)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+
+	if (IS_GEN6(intel) || IS_GEN7(intel)) {
+		/* workaround a random BLT hang */
+		BEGIN_BATCH_BLT(3);
+		OUT_BATCH(XY_SETUP_CLIP_BLT_CMD);
+		OUT_BATCH(0);
+		OUT_BATCH(0);
+		ADVANCE_BATCH();
+	}
 
 	intel_debug_flush(scrn);
 }
@@ -1225,13 +1228,13 @@ Bool intel_uxa_init(ScreenPtr screen)
 	intel->uxa_driver->check_solid = intel_uxa_check_solid;
 	intel->uxa_driver->prepare_solid = intel_uxa_prepare_solid;
 	intel->uxa_driver->solid = intel_uxa_solid;
-	intel->uxa_driver->done_solid = intel_uxa_done_solid;
+	intel->uxa_driver->done_solid = intel_uxa_done;
 
 	/* Copy */
 	intel->uxa_driver->check_copy = intel_uxa_check_copy;
 	intel->uxa_driver->prepare_copy = intel_uxa_prepare_copy;
 	intel->uxa_driver->copy = intel_uxa_copy;
-	intel->uxa_driver->done_copy = intel_uxa_done_copy;
+	intel->uxa_driver->done_copy = intel_uxa_done;
 
 	/* Composite */
 	if (IS_GEN2(intel)) {
commit 71bf291e563ec5224777b4907a5347a3fbfe64c5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Oct 17 12:53:19 2011 +0100

    sna/gen3: Micro-optimise gen3_rectangle_begin()
    
    We only need to emit the vbo description either at the beginning of a
    new op (when the state may have changed) or after finishing a full vbo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
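
    The gain comes from hoisting the vertex_offset check into the caller:
    gen3_rectangle_begin() is only entered when a new rectlist really has
    to be opened, so it no longer needs the ndwords == 0 early-out.
    Condensed from the diff below:

        /* caller (gen3_get_rectangles), after this change: */
        if (sna->render_state.gen3.vertex_offset == 0 &&  /* no rectlist open */
            !gen3_rectangle_begin(sna, op))               /* emit vbo desc + rectlist */
                return 0;                                 /* batch full; flush and retry */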

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 50883a5..0a51e64 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1457,21 +1457,16 @@ static void gen3_vertex_finish(struct sna *sna, Bool last)
 static bool gen3_rectangle_begin(struct sna *sna,
 				 const struct sna_composite_op *op)
 {
-	int ndwords, i1_cmd = 0, i1_len = 0;
 	struct gen3_render_state *state = &sna->render_state.gen3;
+	int ndwords, i1_cmd = 0, i1_len = 0;
 
-	ndwords = 0;
-	if (state->vertex_offset == 0) {
-		ndwords += 2;
-		if (op->need_magic_ca_pass)
-			ndwords += 100;
-	}
+	ndwords = 2;
+	if (op->need_magic_ca_pass)
+		ndwords += 100;
 	if (sna->render.vertex_reloc[0] == 0)
 		i1_len++, i1_cmd |= I1_LOAD_S(0), ndwords++;
 	if (state->floats_per_vertex != op->floats_per_vertex)
 		i1_len++, i1_cmd |= I1_LOAD_S(1), ndwords++;
-	if (ndwords == 0)
-		return true;
 
 	if (!kgem_check_batch(&sna->kgem, ndwords+1))
 		return false;
@@ -1487,16 +1482,14 @@ static bool gen3_rectangle_begin(struct sna *sna,
 		}
 	}
 
-	if (state->vertex_offset == 0) {
-		if (sna->kgem.nbatch == 2 + state->last_vertex_offset) {
-			state->vertex_offset = state->last_vertex_offset;
-		} else {
-			state->vertex_offset = sna->kgem.nbatch;
-			OUT_BATCH(MI_NOOP); /* to be filled later */
-			OUT_BATCH(MI_NOOP);
-			sna->render.vertex_start = sna->render.vertex_index;
-			state->last_vertex_offset = state->vertex_offset;
-		}
+	if (sna->kgem.nbatch == 2 + state->last_vertex_offset) {
+		state->vertex_offset = state->last_vertex_offset;
+	} else {
+		state->vertex_offset = sna->kgem.nbatch;
+		OUT_BATCH(MI_NOOP); /* to be filled later */
+		OUT_BATCH(MI_NOOP);
+		sna->render.vertex_start = sna->render.vertex_index;
+		state->last_vertex_offset = state->vertex_offset;
 	}
 
 	return true;
@@ -1535,7 +1528,8 @@ inline static int gen3_get_rectangles(struct sna *sna,
 			return 0;
 	}
 
-	if (!gen3_rectangle_begin(sna, op)) {
+	if (sna->render_state.gen3.vertex_offset == 0 &&
+	    !gen3_rectangle_begin(sna, op)) {
 		DBG(("%s: flushing batch\n", __FUNCTION__));
 		return 0;
 	}
commit a032feb2531439b9945d7ae1e0c2e3ab95a960e1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Oct 17 12:41:21 2011 +0100

    sna/gen3: Store floats_per_rect alongside floats_per_vertex
    
    Moves a multiply out of the hot path.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
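
    A rectangle is always three vertices, so the per-rectangle float count
    can be computed once when the operation is set up and the hot path
    compares against the cached value; condensed from the diff below:

        tmp->floats_per_rect = 3 * tmp->floats_per_vertex; /* once, at op setup */

        /* ... later, in gen3_get_rectangles() ... */
        if (op->floats_per_rect > rem)                     /* hot path: no multiply */
                rem = gen3_get_rectangles__flush(sna, op->need_magic_ca_pass);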

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 4e24e03..50883a5 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1524,12 +1524,12 @@ inline static int gen3_get_rectangles(struct sna *sna,
 	int rem = vertex_space(sna);
 
 	DBG(("%s: want=%d, rem=%d\n",
-	     __FUNCTION__, 3*want*op->floats_per_vertex, rem));
+	     __FUNCTION__, want*op->floats_per_rect, rem));
 
 	assert(sna->render.vertex_index * op->floats_per_vertex == sna->render.vertex_used);
-	if (op->floats_per_vertex*3 > rem) {
+	if (op->floats_per_rect > rem) {
 		DBG(("flushing vbo for %s: %d < %d\n",
-		     __FUNCTION__, rem, 3*op->floats_per_vertex));
+		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen3_get_rectangles__flush(sna, op->need_magic_ca_pass);
 		if (rem == 0)
 			return 0;
@@ -1540,8 +1540,8 @@ inline static int gen3_get_rectangles(struct sna *sna,
 		return 0;
 	}
 
-	if (want > 1 && want * op->floats_per_vertex*3 > rem)
-		want = rem / (3*op->floats_per_vertex);
+	if (want > 1 && want * op->floats_per_rect > rem)
+		want = rem / op->floats_per_rect;
 	sna->render.vertex_index += 3*want;
 
 	assert(want);
@@ -2383,6 +2383,7 @@ gen3_render_composite(struct sna *sna,
 	      tmp->mask.u.gen3.type != SHADER_CONSTANT) ?
 	     tmp->mask.is_affine ? 2 : 4 : 0,
 	     tmp->floats_per_vertex));
+	tmp->floats_per_rect = 3 * tmp->floats_per_vertex;
 
 	tmp->blt   = gen3_render_composite_blt;
 	tmp->box   = gen3_render_composite_box;
@@ -2824,6 +2825,7 @@ gen3_render_composite_spans(struct sna *sna,
 		tmp->base.floats_per_vertex += tmp->base.src.is_affine ? 2 : 3;
 	tmp->base.floats_per_vertex +=
 		tmp->base.mask.u.gen3.type == SHADER_OPACITY;
+	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
 	tmp->box   = gen3_render_composite_spans_box;
 	tmp->boxes = gen3_render_composite_spans_boxes;
@@ -3387,6 +3389,7 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 	gen3_render_copy_setup_source(&tmp.src, src, src_bo);
 
 	tmp.floats_per_vertex = 4;
+	tmp.floats_per_rect = 12;
 	tmp.mask.u.gen3.type = SHADER_NONE;
 
 	gen3_emit_composite_state(sna, &tmp);
@@ -3515,6 +3518,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 	gen3_render_copy_setup_source(&tmp->base.src, src, src_bo);
 
 	tmp->base.floats_per_vertex = 4;
+	tmp->base.floats_per_rect = 12;
 	tmp->base.mask.u.gen3.type = SHADER_NONE;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
@@ -3639,6 +3643,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	tmp.dst.format = format;
 	tmp.dst.bo = dst_bo;
 	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 
 	tmp.src.u.gen3.type = op == PictOpClear ? SHADER_ZERO : SHADER_CONSTANT;
 	tmp.src.u.gen3.mode = pixel;
@@ -3740,6 +3745,7 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 	tmp->base.dst.format = sna_format_for_depth(dst->drawable.depth);
 	tmp->base.dst.bo = dst_bo;
 	tmp->base.floats_per_vertex = 2;
+	tmp->base.floats_per_rect = 6;
 
 	tmp->base.src.u.gen3.type = SHADER_CONSTANT;
 	tmp->base.src.u.gen3.mode =
@@ -3813,6 +3819,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
 	tmp.dst.bo = bo;
 	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 
 	tmp.src.u.gen3.type = SHADER_CONSTANT;
 	tmp.src.u.gen3.mode =
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index f0a99a7..1cbd0f5 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -72,7 +72,8 @@ struct sna_composite_op {
 	uint32_t need_magic_ca_pass : 1;
 	uint32_t rb_reversed : 1;
 
-	int floats_per_vertex;
+	int16_t floats_per_vertex;
+	int16_t floats_per_rect;
 	fastcall void (*prim_emit)(struct sna *sna,
 				   const struct sna_composite_op *op,
 				   const struct sna_composite_rectangles *r);
commit 2cbe79b85b46796d561105afa980e7a52f4f1889
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Oct 17 11:57:07 2011 +0100

    sna/gen3: Improve reduction of render operator to blt
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 1d4df78..4e24e03 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3541,6 +3541,9 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 	uint8_t alu = GXcopy;
 	uint32_t pixel;
 
+	if (dst_bo->tiling == I915_TILING_Y)
+		return FALSE;
+
 	if (!sna_get_pixel_from_rgba(&pixel,
 				     color->red,
 				     color->green,
@@ -3549,17 +3552,22 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 				     format))
 		return FALSE;
 
+	if (color->alpha >= 0xff00) {
+		if (op == PictOpOver)
+			op = PictOpSrc;
+		else if (op == PictOpOutReverse)
+			op = PictOpClear;
+		else if (op == PictOpAdd &&
+			 (color->red & color->green & color->blue) >= 0xff00)
+			op = PictOpSrc;
+	}
+
 	if (op == PictOpClear) {
 		alu = GXclear;
 		pixel = 0;
 		op = PictOpSrc;
 	}
 
-	if (op == PictOpOver) {
-		if ((pixel & 0xff000000) == 0xff000000)
-			op = PictOpSrc;
-	}
-
 	if (op != PictOpSrc)
 		return FALSE;
 
commit 606e18bd360d904a8aed01aef43644fb4a51d929
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Oct 17 10:50:33 2011 +0100

    configure: Add a check for pixman version
    
    SNA calls directly into pixman and requires a fairly recent release of it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index e269006..0a60bd7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -129,8 +129,10 @@ AM_CONDITIONAL(SNA, test x$SNA != xno)
 AC_MSG_CHECKING([whether to include SNA support])
 
 required_xorg_xserver_version=1.6
+required_pixman_version=0.16
 if test "x$SNA" != "xno"; then
 	required_xorg_xserver_version=1.10
+	required_pixman_version=0.23
 	AC_DEFINE(USE_SNA, 1, [Enable SNA support])
 fi
 AC_MSG_RESULT([$SNA])
@@ -160,7 +162,7 @@ XORG_DRIVER_CHECK_EXT(XF86DRI, xextproto x11)
 XORG_DRIVER_CHECK_EXT(DPMSExtension, xextproto)
 
 # Obtain compiler/linker options for the driver dependencies
-PKG_CHECK_MODULES(XORG, [xorg-server >= $required_xorg_xserver_version xproto fontsproto $REQUIRED_MODULES])
+PKG_CHECK_MODULES(XORG, [xorg-server >= $required_xorg_xserver_version xproto fontsproto pixman-1 >= $required_pixman_version $REQUIRED_MODULES])
 PKG_CHECK_MODULES(DRM, [libdrm >= 2.4.23])
 PKG_CHECK_MODULES(DRI, [xf86driproto], , DRI=no)
 PKG_CHECK_MODULES(DRI2, [dri2proto >= 2.6])
commit 17acaf8e232a8bad9bc2c653a9fceb06f0f65f6b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Oct 17 00:55:51 2011 +0100

    sna: Defer CPU fallback if fb calls into mi
    
    If the fb routine does not access the pixel data directly, but instead
    calls into an mi routine, we can defer the readback and possibly avoid
    it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
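
    For wide (non-zero lineWidth) primitives fb would immediately hand off
    to mi anyway, so the fallback now calls the mi routine first and only
    migrates the pixmap to the CPU for the zero-width paths that fb
    rasterises itself; condensed from the sna_poly_line() hunk below:

        fallback:
                if (gc->lineWidth) {                   /* fb would call into mi anyway */
                        if (gc->lineStyle != LineSolid)
                                miWideDash(drawable, gc, mode, n, pt);
                        else
                                miWideLine(drawable, gc, mode, n, pt);
                        return;                        /* readback deferred, possibly avoided */
                }
                /* zero-width: move the region to the CPU and let fb draw directly */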

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 51d51f3..49bd604 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2365,6 +2365,14 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 	}
 
 fallback:
+	if (gc->lineWidth) {
+		if (gc->lineStyle != LineSolid)
+			miWideDash(drawable, gc, mode, n, pt);
+		else
+			miWideLine(drawable, gc, mode, n, pt);
+		return;
+	}
+
 	DBG(("%s: fallback\n", __FUNCTION__));
 	region_set(&region, &extents);
 	region_maybe_clip(&region, gc->pCompositeClip);
@@ -2670,6 +2678,11 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 	}
 
 fallback:
+	if (gc->lineWidth) {
+		miPolySegment(drawable, gc, n, seg);
+		return;
+	}
+
 	DBG(("%s: fallback\n", __FUNCTION__));
 	region_set(&region, &extents);
 	region_maybe_clip(&region, gc->pCompositeClip);
@@ -2774,6 +2787,11 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 	}
 
 fallback:
+	if (gc->lineWidth) {
+		miPolyArc(drawable, gc, n, arc);
+		return;
+	}
+
 	region_set(&region, &extents);
 	region_maybe_clip(&region, gc->pCompositeClip);
 	if (!RegionNotEmpty(&region))
@@ -2783,6 +2801,7 @@ fallback:
 	sna_drawable_move_region_to_cpu(drawable, &region, true);
 	RegionUninit(&region);
 
+	/* XXX may still fallthrough to miZeroPolyArc */
 	fbPolyArc(drawable, gc, n, arc);
 }
 
commit c348b69f66cbe1cafc88396523da9768e1006889
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 16 18:23:33 2011 +0100

    sna: Prefer to accelerate non-RENDER operations if already using the GPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
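
    The new per-pixmap gpu bit records that the pixmap was last used on the
    GPU (set in sna_pixmap_move_to_gpu(), cleared whenever it migrates back
    to the CPU). _sna_drawable_use_gpu_bo() can then keep non-RENDER
    operations on the GPU by uploading only the CPU-dirty part of the
    target box via the new sna_pixmap_move_area_to_gpu() instead of falling
    back; condensed from the diff below:

        if (!priv->gpu || priv->gpu_damage == NULL)
                return FALSE;                          /* not recently on the GPU */
        if (sna_damage_contains_box(priv->gpu_damage, &extents) == PIXMAN_REGION_OUT)
                return FALSE;                          /* none of the box is on the GPU */
        sna_pixmap_move_area_to_gpu(pixmap, &extents); /* upload just the dirty part */
        return TRUE;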

diff --git a/src/sna/sna.h b/src/sna/sna.h
index e27c333..4ba3f57 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -140,6 +140,7 @@ struct sna_pixmap {
 	uint8_t pinned :1;
 	uint8_t gpu_only :1;
 	uint8_t flush :1;
+	uint8_t gpu :1;
 };
 
 static inline PixmapPtr get_drawable_pixmap(DrawablePtr drawable)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3d0d1cf..51d51f3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -410,6 +410,8 @@ done:
 		if (priv->flush)
 			list_move(&priv->list, &sna->dirty_pixmaps);
 	}
+
+	priv->gpu = false;
 }
 
 static Bool
@@ -569,6 +571,72 @@ done:
 
 	if (dx | dy)
 		RegionTranslate(region, -dx, -dy);
+
+	priv->gpu = false;
+}
+
+static void
+sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
+{
+	struct sna *sna = to_sna_from_drawable(&pixmap->drawable);
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	RegionRec i, r;
+
+	DBG(("%s()\n", __FUNCTION__));
+
+	assert(priv->gpu);
+	assert(priv->gpu_bo);
+
+	sna_damage_reduce(&priv->cpu_damage);
+	DBG(("%s: CPU damage? %d\n", __FUNCTION__, priv->cpu_damage != NULL));
+
+	if (priv->cpu_damage == NULL)
+		goto done;
+
+	region_set(&r, box);
+	if (sna_damage_intersect(priv->cpu_damage, &r, &i)) {
+		BoxPtr box = REGION_RECTS(&i);
+		int n = REGION_NUM_RECTS(&i);
+		struct kgem_bo *src_bo;
+		Bool ok = FALSE;
+
+		src_bo = pixmap_vmap(&sna->kgem, pixmap);
+		if (src_bo)
+			ok = sna->render.copy_boxes(sna, GXcopy,
+						    pixmap, src_bo, 0, 0,
+						    pixmap, priv->gpu_bo, 0, 0,
+						    box, n);
+		if (!ok) {
+			if (n == 1 && !priv->pinned &&
+			    box->x1 <= 0 && box->y1 <= 0 &&
+			    box->x2 >= pixmap->drawable.width &&
+			    box->y2 >= pixmap->drawable.height) {
+				priv->gpu_bo =
+					sna_replace(sna,
+						    priv->gpu_bo,
+						    pixmap->drawable.width,
+						    pixmap->drawable.height,
+						    pixmap->drawable.bitsPerPixel,
+						    pixmap->devPrivate.ptr,
+						    pixmap->devKind);
+			} else {
+				sna_write_boxes(sna,
+						priv->gpu_bo, 0, 0,
+						pixmap->devPrivate.ptr,
+						pixmap->devKind,
+						pixmap->drawable.bitsPerPixel,
+						0, 0,
+						box, n);
+			}
+		}
+
+		sna_damage_subtract(&priv->cpu_damage, &r);
+		RegionUninit(&i);
+	}
+
+done:
+	if (priv->cpu_damage == NULL)
+		list_del(&priv->list);
 }
 
 static inline Bool
@@ -596,8 +664,19 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
 	extents.y1 += dy;
 	extents.y2 += dy;
 
-	return sna_damage_contains_box(priv->cpu_damage,
-				       &extents) == PIXMAN_REGION_OUT;
+	if (sna_damage_contains_box(priv->cpu_damage,
+				    &extents) == PIXMAN_REGION_OUT)
+		return TRUE;
+
+	if (!priv->gpu || priv->gpu_damage == NULL)
+		return FALSE;
+
+	if (sna_damage_contains_box(priv->gpu_damage,
+				    &extents) == PIXMAN_REGION_OUT)
+		return FALSE;
+
+	sna_pixmap_move_area_to_gpu(pixmap, &extents);
+	return TRUE;
 }
 
 static inline Bool
@@ -822,6 +901,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap)
 	sna_damage_reduce(&priv->gpu_damage);
 done:
 	list_del(&priv->list);
+	priv->gpu = true;
 	return priv;
 }
 
commit 6fdc9e3fd3f8defb7ad62de11f8cb069a10e5736
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 16 17:57:55 2011 +0100

    sna: Simplify busy tracking by trusting the bo->gpu flag
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
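
    The invariant is that bo->gpu is set in _kgem_add_bo() when the buffer
    joins a request; the sync paths now simply call kgem_retire() when it
    is set rather than clearing it by hand, so busy-ness reduces to the
    single flag. Condensed from the kgem.h hunk below:

        static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
        {
                assert(bo->gpu || bo->rq == NULL);  /* an outstanding request implies bo->gpu */
                return bo->gpu;
        }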

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 15ddd7a..d27ba78 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -190,9 +190,8 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 		return FALSE;
 
 	bo->needs_flush = false;
-	if (bo->rq)
+	if (bo->gpu)
 		kgem_retire(kgem);
-	bo->gpu = false;
 	return TRUE;
 }
 
@@ -510,6 +509,7 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 {
 	bo->exec = kgem_add_handle(kgem, bo);
 	bo->rq = kgem->next_request;
+	bo->gpu = true;
 	list_move(&bo->request, &kgem->next_request->buffers);
 	kgem->flush |= bo->flush;
 }
@@ -665,7 +665,6 @@ static void kgem_commit(struct kgem *kgem)
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
 		bo->dirty = false;
-		bo->gpu = true;
 		bo->cpu_read = false;
 		bo->cpu_write = false;
 
@@ -841,7 +840,6 @@ void kgem_reset(struct kgem *kgem)
 		bo->src_bound = bo->dst_bound = 0;
 		bo->exec = NULL;
 		bo->dirty = false;
-		bo->gpu = true;
 		bo->cpu_read = false;
 		bo->cpu_write = false;
 
@@ -1713,11 +1711,8 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot)
 		return NULL;
 
 	bo->needs_flush = false;
-	if (prot & PROT_WRITE) {
-		if (bo->rq)
-			kgem_retire(kgem);
-		bo->gpu = false;
-	}
+	if (prot & PROT_WRITE && bo->gpu)
+		kgem_retire(kgem);
 
 	return ptr;
 }
@@ -1820,9 +1815,8 @@ void kgem_bo_sync(struct kgem *kgem, struct kgem_bo *bo, bool for_write)
 
 	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 	bo->needs_flush = false;
-	if (bo->rq)
+	if (bo->gpu)
 		kgem_retire(kgem);
-	bo->gpu = false;
 	bo->cpu_read = true;
 	if (for_write)
 		bo->cpu_write = true;
@@ -2136,9 +2130,8 @@ void kgem_buffer_sync(struct kgem *kgem, struct kgem_bo *_bo)
 		else
 			gem_read(kgem->fd, bo->base.handle, bo+1, bo->used);
 		bo->base.needs_flush = false;
-		if (bo->base.rq)
+		if (bo->base.gpu)
 			kgem_retire(kgem);
-		bo->base.gpu = false;
 		bo->need_io = 0;
 	}
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 49ca3d7..aba8d3f 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -305,15 +305,12 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: exec? %d, gpu? %d, rq? %d\n",
-		 __FUNCTION__, bo->exec != NULL, bo->gpu, bo->rq != NULL));
+	DBG_HDR(("%s: gpu? %d exec? %d, rq? %d\n",
+		 __FUNCTION__, bo->gpu, bo->exec != NULL, bo->rq != NULL));
 
-	if (bo->exec)
-		return true;
-	if (!bo->gpu)
-		return false;
-
-	return bo->rq != NULL;
+	assert(bo->proxy == NULL);
+	assert(bo->gpu || bo->rq == NULL);
+	return bo->gpu;
 }
 
 static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
commit 30dff81cd62b5fc5a28d5175a08c5a3ee09667bd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 16 17:41:34 2011 +0100

    sna/accel: Upload source to GPU for !copy CopyAreas
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 67693b3..3d0d1cf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1156,7 +1156,8 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 }
 
 static bool
-move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv, const BoxRec *box)
+move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
+	    const BoxRec *box, uint8_t alu)
 {
 	int w = box->x2 - box->x1;
 	int h = box->y2 - box->y1;
@@ -1164,6 +1165,9 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv, const BoxRec *box)
 	if (priv->gpu_bo)
 		return TRUE;
 
+	if (alu != GXcopy)
+		return TRUE;
+
 	return ++priv->source_count * w*h >= 2 * pixmap->drawable.width * pixmap->drawable.height;
 }
 
@@ -1259,7 +1263,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		}
 
 		if (src_priv &&
-		    move_to_gpu(src_pixmap, src_priv, &region.extents) &&
+		    move_to_gpu(src_pixmap, src_priv, &region.extents, alu) &&
 		    sna_pixmap_move_to_gpu(src_pixmap)) {
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
commit eefa925e3d3e009b21ecc0b428d93a6c732bfa14
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 16 17:39:06 2011 +0100

    sna/accel: Fall-forward for handling a non-copy CopyArea to a dst gpu bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem_debug.c b/src/sna/kgem_debug.c
index 2cc7b3a..ad0a533 100644
--- a/src/sna/kgem_debug.c
+++ b/src/sna/kgem_debug.c
@@ -255,8 +255,9 @@ decode_2d(struct kgem *kgem, uint32_t offset)
 			break;
 		}
 
-		kgem_debug_print(data, offset, 1, "format %s, pitch %d, "
+		kgem_debug_print(data, offset, 1, "format %s, rop %x, pitch %d, "
 			  "clipping %sabled\n", format,
+			  (data[1] >> 16) & 0xff,
 			  (short)(data[1] & 0xffff),
 			  data[1] & (1 << 30) ? "en" : "dis");
 		kgem_debug_print(data, offset, 2, "(%d,%d)\n",
@@ -303,8 +304,9 @@ decode_2d(struct kgem *kgem, uint32_t offset)
 			break;
 		}
 
-		kgem_debug_print(data, offset, 1, "format %s, dst pitch %d, "
+		kgem_debug_print(data, offset, 1, "format %s, rop %x, dst pitch %d, "
 				 "clipping %sabled\n", format,
+				 (data[1] >> 16) & 0xff,
 				 (short)(data[1] & 0xffff),
 				 data[1] & (1 << 30) ? "en" : "dis");
 		kgem_debug_print(data, offset, 2, "dst (%d,%d)\n",
diff --git a/src/sna/kgem_debug_gen3.c b/src/sna/kgem_debug_gen3.c
index 17ddb6b..d152b60 100644
--- a/src/sna/kgem_debug_gen3.c
+++ b/src/sna/kgem_debug_gen3.c
@@ -1156,7 +1156,6 @@ gen3_decode_3d_1d(struct kgem *kgem, uint32_t offset)
 			if (data[1] & (1 << map)) {
 				int width, height, pitch, dword;
 				struct drm_i915_gem_relocation_entry *reloc;
-				struct kgem_bo *bo = NULL;
 				const char *tiling;
 
 				reloc = kgem_debug_get_reloc_entry(kgem, &data[i] - kgem->batch);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 963ff9b..67693b3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1285,13 +1285,63 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 							    &region);
 				RegionTranslate(&region, -dst_dx, -dst_dy);
 			}
-		} else {
-			if (alu != GXcopy) {
-				DBG(("%s: fallback - not a copy and source is on the CPU\n",
+		} else if (alu != GXcopy) {
+			PixmapPtr tmp;
+			int i;
+
+			assert (src_pixmap->drawable.depth != 1);
+
+			DBG(("%s: creating temporary source upload for non-copy alu [%d]\n",
+			     __FUNCTION__, alu));
+
+			tmp = sna_pixmap_create_upload(src->pScreen,
+						       src->width,
+						       src->height,
+						       src->depth);
+			if (tmp == NullPixmap)
+				return;
+
+			for (i = 0; i < n; i++) {
+				assert(box->x1 + src_dx >= 0);
+				assert(box->y1 + src_dy >= 0);
+				assert(box->x2 + src_dx <= src_pixmap->drawable.width);
+				assert(box->y2 + src_dy <= src_pixmap->drawable.height);
+
+				assert(box->x1 + dx >= 0);
+				assert(box->y1 + dy >= 0);
+				assert(box->x2 + dx <= tmp->drawable.width);
+				assert(box->y2 + dy <= tmp->drawable.height);
+
+				memcpy_blt(src_pixmap->devPrivate.ptr,
+					   tmp->devPrivate.ptr,
+					   src_pixmap->drawable.bitsPerPixel,
+					   src_pixmap->devKind,
+					   tmp->devKind,
+					   box[i].x1 + src_dx,
+					   box[i].y1 + src_dy,
+					   box[i].x1 + dx,
+					   box[i].y1 + dy,
+					   box[i].x2 - box[i].x1,
+					   box[i].y2 - box[i].y1);
+			}
+
+			if (!sna->render.copy_boxes(sna, alu,
+						    tmp, sna_pixmap_get_bo(tmp), dx, dy,
+						    dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy,
+						    box, n)) {
+				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
+				tmp->drawable.pScreen->DestroyPixmap(tmp);
 				goto fallback;
 			}
+			tmp->drawable.pScreen->DestroyPixmap(tmp);
 
+			RegionTranslate(&region, dst_dx, dst_dy);
+			assert_pixmap_contains_box(dst_pixmap,
+						   RegionExtents(&region));
+			sna_damage_add(&dst_priv->gpu_damage, &region);
+			RegionTranslate(&region, -dst_dx, -dst_dy);
+		} else {
 			if (src_priv) {
 				RegionTranslate(&region, src_dx, src_dy);
 				sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
commit 01f41e624b31af88d433a25eaefce557e7ff3d62
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 16 17:38:26 2011 +0100

    sna/gen2: Eliminate redundant diffuse and rectlist emission
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
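
    The diffuse colour becomes part of the cached gen2 render state: the
    _3DSTATE_DFLT_DIFFUSE_CMD pair is only emitted when the pixel actually
    changes, and gen2_render_reset() seeds the cache with an improbable
    value (0x0c0ffee0) so the first use always emits it. Condensed from
    the diff below:

        if (pixel != sna->render_state.gen2.diffuse) {
                BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
                BATCH(pixel);
                sna->render_state.gen2.diffuse = pixel;  /* remember the last emitted colour */
        }

    The rectlist side similarly reuses a PRIM3D_RECTLIST command that
    happens to end the batch instead of starting a new one.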

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index f1f5e72..34c4432 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -652,8 +652,11 @@ static void gen2_emit_composite_state(struct sna *sna,
 			texcoordfmt |= TEXCOORDFMT_3D << (2*tex);
 		gen2_emit_texture(sna, &op->src, tex++);
 	} else {
-		BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
-		BATCH(op->src.u.gen2.pixel);
+		if (op->src.u.gen2.pixel != sna->render_state.gen2.diffuse) {
+			BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+			BATCH(op->src.u.gen2.pixel);
+			sna->render_state.gen2.diffuse = op->src.u.gen2.pixel;
+		}
 	}
 	if (op->mask.bo) {
 		if (op->mask.is_affine)
@@ -911,8 +914,17 @@ inline static int gen2_get_rectangles(struct sna *sna,
 
 	rem -= need;
 	if (state->vertex_offset == 0) {
-		state->vertex_offset = sna->kgem.nbatch;
-		BATCH(PRIM3D_INLINE | PRIM3D_RECTLIST);
+		if ((sna->kgem.batch[sna->kgem.nbatch-1] & ~0xffff) ==
+		    (PRIM3D_INLINE | PRIM3D_RECTLIST)) {
+			uint32_t *b = &sna->kgem.batch[sna->kgem.nbatch-1];
+			sna->render.vertex_index = 1 + (*b & 0xffff);
+			*b = PRIM3D_INLINE | PRIM3D_RECTLIST;
+			state->vertex_offset = sna->kgem.nbatch - 1;
+			assert(!op->need_magic_ca_pass);
+		} else {
+			state->vertex_offset = sna->kgem.nbatch;
+			BATCH(PRIM3D_INLINE | PRIM3D_RECTLIST);
+		}
 	}
 
 	if (want > 1 && want * size > rem)
@@ -1819,8 +1831,11 @@ static void gen2_emit_fill_composite_state(struct sna *sna,
 
 	gen2_emit_fill_pipeline(sna, op);
 
-	BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
-	BATCH(pixel);
+	if (pixel != sna->render_state.gen2.diffuse) {
+		BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+		BATCH(pixel);
+		sna->render_state.gen2.diffuse = pixel;
+	}
 }
 
 static Bool
@@ -1895,7 +1910,7 @@ gen2_render_fill_boxes(struct sna *sna,
 						      dst, dst_bo,
 						      box, n);
 
-	if (!PREFER_3D_FILL_BOXES &&
+	if (!PREFER_3D_FILL_BOXES && sna->kgem.mode != KGEM_RENDER &&
 	    gen2_render_fill_boxes_try_blt(sna, op, format, color,
 					   dst, dst_bo,
 					   box, n))
@@ -1979,8 +1994,11 @@ static void gen2_emit_fill_state(struct sna *sna,
 	gen2_enable_logic_op(sna, op->op);
 	gen2_emit_fill_pipeline(sna, op);
 
-	BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
-	BATCH(op->src.u.gen2.pixel);
+	if (op->src.u.gen2.pixel != sna->render_state.gen2.diffuse) {
+		BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+		BATCH(op->src.u.gen2.pixel);
+		sna->render_state.gen2.diffuse = op->src.u.gen2.pixel;
+	}
 }
 
 static void
@@ -2441,6 +2459,8 @@ gen2_render_reset(struct sna *sna)
 	sna->render_state.gen2.ls1 = 0;
 	sna->render_state.gen2.ls2 = 0;
 	sna->render_state.gen2.vft = 0;
+
+	sna->render_state.gen2.diffuse = 0x0c0ffee0;
 }
 
 static void
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 979ff85..f0a99a7 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -267,6 +267,7 @@ struct gen2_render_state {
 	Bool logic_op_enabled;
 	uint32_t ls1, ls2, vft;
 	uint16_t vertex_offset;
+	uint32_t diffuse;
 };
 
 struct gen3_render_state {
commit d9e3dbffcb7e6cff35e10100d81544936813095a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 16 12:48:23 2011 +0100

    sna/gen2: Fix fill-one-box
    
    Let's have only one special gen2 value for the source channel pixel
    colour, removing the confusion and the misrendering it caused.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index d862235..f1f5e72 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -50,16 +50,17 @@
 #define NO_COPY 0
 #define NO_COPY_BOXES 0
 #define NO_FILL 0
+#define NO_FILL_ONE 0
 #define NO_FILL_BOXES 0
 
-#define PREFER_COPY 0
-#define PREFER_COPY_BOXES 0
-#define PREFER_FILL 0
-#define PREFER_FILL_BOXES 0
+#define PREFER_3D_COPY 0
+#define PREFER_3D_COPY_BOXES 0
+#define PREFER_3D_FILL 0
+#define PREFER_3D_FILL_BOXES 0
 
-#define OUT_BATCH(v) batch_emit(sna, v)
-#define OUT_BATCH_F(v) batch_emit_float(sna, v)
-#define OUT_VERTEX(v) batch_emit_float(sna, v)
+#define BATCH(v) batch_emit(sna, v)
+#define BATCH_F(v) batch_emit_float(sna, v)
+#define VERTEX(v) batch_emit_float(sna, v)
 
 /* TODO: Remaining items for the sufficiently motivated reader
  *
@@ -298,24 +299,24 @@ gen2_emit_texture(struct sna *sna,
 		break;
 	}
 
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 | LOAD_TEXTURE_MAP(unit) | 4);
-	OUT_BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
-				 channel->bo,
-				 I915_GEM_DOMAIN_SAMPLER << 16,
-				 0));
-	OUT_BATCH(((channel->height - 1) << TM0S1_HEIGHT_SHIFT) |
-		  ((channel->width - 1) << TM0S1_WIDTH_SHIFT) |
-		  gen2_get_card_format(sna, channel->pict_format) |
-		  gen2_sampler_tiling_bits(channel->bo->tiling));
-	OUT_BATCH((channel->bo->pitch / 4 - 1) << TM0S2_PITCH_SHIFT | TM0S2_MAP_2D);
-	OUT_BATCH(filter);
-	OUT_BATCH(0);	/* default color */
-
-	OUT_BATCH(_3DSTATE_MAP_COORD_SET_CMD | TEXCOORD_SET(unit) |
-		  ENABLE_TEXCOORD_PARAMS | TEXCOORDS_ARE_NORMAL |
-		  texcoordtype |
-		  ENABLE_ADDR_V_CNTL | TEXCOORD_ADDR_V_MODE(wrap_mode) |
-		  ENABLE_ADDR_U_CNTL | TEXCOORD_ADDR_U_MODE(wrap_mode));
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 | LOAD_TEXTURE_MAP(unit) | 4);
+	BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
+			     channel->bo,
+			     I915_GEM_DOMAIN_SAMPLER << 16,
+			     0));
+	BATCH(((channel->height - 1) << TM0S1_HEIGHT_SHIFT) |
+	      ((channel->width - 1) << TM0S1_WIDTH_SHIFT) |
+	      gen2_get_card_format(sna, channel->pict_format) |
+	      gen2_sampler_tiling_bits(channel->bo->tiling));
+	BATCH((channel->bo->pitch / 4 - 1) << TM0S2_PITCH_SHIFT | TM0S2_MAP_2D);
+	BATCH(filter);
+	BATCH(0);	/* default color */
+
+	BATCH(_3DSTATE_MAP_COORD_SET_CMD | TEXCOORD_SET(unit) |
+	      ENABLE_TEXCOORD_PARAMS | TEXCOORDS_ARE_NORMAL |
+	      texcoordtype |
+	      ENABLE_ADDR_V_CNTL | TEXCOORD_ADDR_V_MODE(wrap_mode) |
+	      ENABLE_ADDR_U_CNTL | TEXCOORD_ADDR_U_MODE(wrap_mode));
 }
 
 static void
@@ -434,81 +435,62 @@ static uint32_t gen2_get_blend_cntl(int op,
 
 static void gen2_emit_invariant(struct sna *sna)
 {
-	OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(0));
-	OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(1));
-	OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(2));
-	OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(3));
-
-	OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD | MAP_UNIT(0) |
-		  DISABLE_TEX_STREAM_BUMP |
-		  ENABLE_TEX_STREAM_COORD_SET | TEX_STREAM_COORD_SET(0) |
-		  ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(0));
-	OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD | MAP_UNIT(1) |
-		  DISABLE_TEX_STREAM_BUMP |
-		  ENABLE_TEX_STREAM_COORD_SET | TEX_STREAM_COORD_SET(1) |
-		  ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(1));
-	OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD | MAP_UNIT(2) |
-		  DISABLE_TEX_STREAM_BUMP |
-		  ENABLE_TEX_STREAM_COORD_SET | TEX_STREAM_COORD_SET(2) |
-		  ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(2));
-	OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD | MAP_UNIT(3) |
-		  DISABLE_TEX_STREAM_BUMP |
-		  ENABLE_TEX_STREAM_COORD_SET | TEX_STREAM_COORD_SET(3) |
-		  ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(3));
-
-	OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
-	OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(0));
-	OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
-	OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(1));
-	OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
-	OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(2));
-	OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
-	OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(3));
-
-	OUT_BATCH(_3DSTATE_MAP_COORD_SETBIND_CMD);
-	OUT_BATCH(TEXBIND_SET3(TEXCOORDSRC_VTXSET_3) |
-		  TEXBIND_SET2(TEXCOORDSRC_VTXSET_2) |
-		  TEXBIND_SET1(TEXCOORDSRC_VTXSET_1) |
-		  TEXBIND_SET0(TEXCOORDSRC_VTXSET_0));
-
-	OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
-
-	OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM);
-	OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);
-
-	OUT_BATCH(_3DSTATE_W_STATE_CMD);
-	OUT_BATCH(MAGIC_W_STATE_DWORD1);
-	OUT_BATCH_F(1.0);
-
-	OUT_BATCH(_3DSTATE_INDPT_ALPHA_BLEND_CMD |
-		  DISABLE_INDPT_ALPHA_BLEND |
-		  ENABLE_ALPHA_BLENDFUNC | ABLENDFUNC_ADD);
-
-	OUT_BATCH(_3DSTATE_CONST_BLEND_COLOR_CMD);
-	OUT_BATCH(0);
-
-	OUT_BATCH(_3DSTATE_MODES_1_CMD |
-		  ENABLE_COLR_BLND_FUNC | BLENDFUNC_ADD |
-		  ENABLE_SRC_BLND_FACTOR | SRC_BLND_FACT(BLENDFACTOR_ONE) |
-		  ENABLE_DST_BLND_FACTOR | DST_BLND_FACT(BLENDFACTOR_ZERO));
-
-	OUT_BATCH(_3DSTATE_ENABLES_1_CMD |
-		  DISABLE_LOGIC_OP |
-		  DISABLE_STENCIL_TEST |
-		  DISABLE_DEPTH_BIAS |
-		  DISABLE_SPEC_ADD |
-		  DISABLE_FOG |
-		  DISABLE_ALPHA_TEST |
-		  DISABLE_DEPTH_TEST |
-		  ENABLE_COLOR_BLEND);
-
-	OUT_BATCH(_3DSTATE_ENABLES_2_CMD |
-		  DISABLE_STENCIL_WRITE |
-		  DISABLE_DITHER |
-		  DISABLE_DEPTH_WRITE |
-		  ENABLE_COLOR_MASK |
-		  ENABLE_COLOR_WRITE |
-		  ENABLE_TEX_CACHE);
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(i));
+		BATCH(_3DSTATE_MAP_TEX_STREAM_CMD | MAP_UNIT(i) |
+		      DISABLE_TEX_STREAM_BUMP |
+		      ENABLE_TEX_STREAM_COORD_SET | TEX_STREAM_COORD_SET(i) |
+		      ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(i));
+		BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
+		BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(i));
+	}
+
+	BATCH(_3DSTATE_MAP_COORD_SETBIND_CMD);
+	BATCH(TEXBIND_SET3(TEXCOORDSRC_VTXSET_3) |
+	      TEXBIND_SET2(TEXCOORDSRC_VTXSET_2) |
+	      TEXBIND_SET1(TEXCOORDSRC_VTXSET_1) |
+	      TEXBIND_SET0(TEXCOORDSRC_VTXSET_0));
+
+	BATCH(_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
+
+	BATCH(_3DSTATE_VERTEX_TRANSFORM);
+	BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);
+
+	BATCH(_3DSTATE_W_STATE_CMD);
+	BATCH(MAGIC_W_STATE_DWORD1);
+	BATCH_F(1.0);
+
+	BATCH(_3DSTATE_INDPT_ALPHA_BLEND_CMD |
+	      DISABLE_INDPT_ALPHA_BLEND |
+	      ENABLE_ALPHA_BLENDFUNC | ABLENDFUNC_ADD);
+
+	BATCH(_3DSTATE_CONST_BLEND_COLOR_CMD);
+	BATCH(0);
+
+	BATCH(_3DSTATE_MODES_1_CMD |
+	      ENABLE_COLR_BLND_FUNC | BLENDFUNC_ADD |
+	      ENABLE_SRC_BLND_FACTOR | SRC_BLND_FACT(BLENDFACTOR_ONE) |
+	      ENABLE_DST_BLND_FACTOR | DST_BLND_FACT(BLENDFACTOR_ZERO));
+
+	BATCH(_3DSTATE_ENABLES_1_CMD |
+	      DISABLE_LOGIC_OP |
+	      DISABLE_STENCIL_TEST |
+	      DISABLE_DEPTH_BIAS |
+	      DISABLE_SPEC_ADD |
+	      DISABLE_FOG |
+	      DISABLE_ALPHA_TEST |
+	      DISABLE_DEPTH_TEST |
+	      ENABLE_COLOR_BLEND);
+
+	BATCH(_3DSTATE_ENABLES_2_CMD |
+	      DISABLE_STENCIL_WRITE |
+	      DISABLE_DITHER |
+	      DISABLE_DEPTH_WRITE |
+	      ENABLE_COLOR_MASK |
+	      ENABLE_COLOR_WRITE |
+	      ENABLE_TEX_CACHE);
 
 	sna->render_state.gen2.need_invariant = FALSE;
 }
@@ -554,25 +536,25 @@ static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
 		return;
 	}
 
-	OUT_BATCH(_3DSTATE_BUF_INFO_CMD);
-	OUT_BATCH(BUF_3D_ID_COLOR_BACK |
-		  gen2_buf_tiling(op->dst.bo->tiling) |
-		  BUF_3D_PITCH(op->dst.bo->pitch));
-	OUT_BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
-				 op->dst.bo,
-				 I915_GEM_DOMAIN_RENDER << 16 |
-				 I915_GEM_DOMAIN_RENDER,
-				 0));
-
-	OUT_BATCH(_3DSTATE_DST_BUF_VARS_CMD);
-	OUT_BATCH(gen2_get_dst_format(op->dst.format));
-
-	OUT_BATCH(_3DSTATE_DRAW_RECT_CMD);
-	OUT_BATCH(0);
-	OUT_BATCH(0);	/* ymin, xmin */
-	OUT_BATCH(DRAW_YMAX(op->dst.height - 1) |
-		  DRAW_XMAX(op->dst.width - 1));
-	OUT_BATCH(0);	/* yorig, xorig */
+	BATCH(_3DSTATE_BUF_INFO_CMD);
+	BATCH(BUF_3D_ID_COLOR_BACK |
+	      gen2_buf_tiling(op->dst.bo->tiling) |
+	      BUF_3D_PITCH(op->dst.bo->pitch));
+	BATCH(kgem_add_reloc(&sna->kgem, sna->kgem.nbatch,
+			     op->dst.bo,
+			     I915_GEM_DOMAIN_RENDER << 16 |
+			     I915_GEM_DOMAIN_RENDER,
+			     0));
+
+	BATCH(_3DSTATE_DST_BUF_VARS_CMD);
+	BATCH(gen2_get_dst_format(op->dst.format));
+
+	BATCH(_3DSTATE_DRAW_RECT_CMD);
+	BATCH(0);
+	BATCH(0);	/* ymin, xmin */
+	BATCH(DRAW_YMAX(op->dst.height - 1) |
+	      DRAW_XMAX(op->dst.width - 1));
+	BATCH(0);	/* yorig, xorig */
 
 	sna->render_state.gen2.target = op->dst.bo->unique_id;
 }
@@ -582,15 +564,15 @@ static void gen2_disable_logic_op(struct sna *sna)
 	if (!sna->render_state.gen2.logic_op_enabled)
 		return;
 
-	OUT_BATCH(_3DSTATE_ENABLES_1_CMD |
-		  DISABLE_LOGIC_OP | ENABLE_COLOR_BLEND);
+	BATCH(_3DSTATE_ENABLES_1_CMD |
+	      DISABLE_LOGIC_OP | ENABLE_COLOR_BLEND);
 
 	sna->render_state.gen2.logic_op_enabled = 0;
 }
 
 static void gen2_enable_logic_op(struct sna *sna, int op)
 {
-	uint8_t logic_op[] = {
+	static const uint8_t logic_op[] = {
 		LOGICOP_CLEAR,		/* GXclear */
 		LOGICOP_AND,		/* GXand */
 		LOGICOP_AND_RVRSE, 	/* GXandReverse */
@@ -609,14 +591,15 @@ static void gen2_enable_logic_op(struct sna *sna, int op)
 		LOGICOP_SET		/* GXset */
 	};
 
-	if (!sna->render_state.gen2.logic_op_enabled) {
-		OUT_BATCH(_3DSTATE_ENABLES_1_CMD |
-			  ENABLE_LOGIC_OP | DISABLE_COLOR_BLEND);
-		sna->render_state.gen2.logic_op_enabled = 1;
-	}
+	if (sna->render_state.gen2.logic_op_enabled != op+1) {
+		if (!sna->render_state.gen2.logic_op_enabled)
+			BATCH(_3DSTATE_ENABLES_1_CMD |
+			      ENABLE_LOGIC_OP | DISABLE_COLOR_BLEND);
 
-	OUT_BATCH(_3DSTATE_MODES_4_CMD |
-		  ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC(logic_op[op]));
+		BATCH(_3DSTATE_MODES_4_CMD |
+		      ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC(logic_op[op]));
+		sna->render_state.gen2.logic_op_enabled = op+1;
+	}
 }
 
 static void gen2_emit_composite_state(struct sna *sna,
@@ -630,15 +613,15 @@ static void gen2_emit_composite_state(struct sna *sna,
 	gen2_emit_target(sna, op);
 
 	unwind = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
-		  I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
-	OUT_BATCH((!op->src.is_solid + (op->mask.bo != NULL)) << 12);
-	OUT_BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
-	OUT_BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
-		  gen2_get_blend_cntl(op->op,
-				      op->has_component_alpha,
-				      op->dst.format) |
-		  S8_ENABLE_COLOR_BUFFER_WRITE);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
+	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
+	BATCH((!op->src.is_solid + (op->mask.bo != NULL)) << 12);
+	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
+	BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
+	      gen2_get_blend_cntl(op->op,
+				  op->has_component_alpha,
+				  op->dst.format) |
+	      S8_ENABLE_COLOR_BUFFER_WRITE);
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls1,
 		    sna->kgem.batch + unwind,
 		    4 * sizeof(uint32_t)) == 0)
@@ -650,10 +633,10 @@ static void gen2_emit_composite_state(struct sna *sna,
 
 	gen2_get_blend_factors(op, op->op, &cblend, &ablend);
 	unwind = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
-		  LOAD_TEXTURE_BLEND_STAGE(0) | 1);
-	OUT_BATCH(cblend);
-	OUT_BATCH(ablend);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
+	      LOAD_TEXTURE_BLEND_STAGE(0) | 1);
+	BATCH(cblend);
+	BATCH(ablend);
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls2 + 1,
 		    sna->kgem.batch + unwind + 1,
 		    2 * sizeof(uint32_t)) == 0)
@@ -669,8 +652,8 @@ static void gen2_emit_composite_state(struct sna *sna,
 			texcoordfmt |= TEXCOORDFMT_3D << (2*tex);
 		gen2_emit_texture(sna, &op->src, tex++);
 	} else {
-		OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
-		OUT_BATCH(op->src.u.gen2.pixel);
+		BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+		BATCH(op->src.u.gen2.pixel);
 	}
 	if (op->mask.bo) {
 		if (op->mask.is_affine)
@@ -682,7 +665,7 @@ static void gen2_emit_composite_state(struct sna *sna,
 
 	v = _3DSTATE_VERTEX_FORMAT_2_CMD | texcoordfmt;
 	if (sna->render_state.gen2.vft != v) {
-		OUT_BATCH(v);
+		BATCH(v);
 		sna->render_state.gen2.vft = v;
 	}
 }
@@ -690,8 +673,8 @@ static void gen2_emit_composite_state(struct sna *sna,
 static inline void
 gen2_emit_composite_dstcoord(struct sna *sna, int dstX, int dstY)
 {
-	OUT_VERTEX(dstX);
-	OUT_VERTEX(dstY);
+	VERTEX(dstX);
+	VERTEX(dstY);
 }
 
 static void
@@ -708,15 +691,15 @@ gen2_emit_composite_texcoord(struct sna *sna,
 		sna_get_transformed_coordinates(x, y,
 						channel->transform,
 						&s, &t);
-		OUT_VERTEX(s * channel->scale[0]);
-		OUT_VERTEX(t * channel->scale[1]);
+		VERTEX(s * channel->scale[0]);
+		VERTEX(t * channel->scale[1]);
 	} else {
 		sna_get_transformed_coordinates_3d(x, y,
 						   channel->transform,
 						   &s, &t, &w);
-		OUT_VERTEX(s * channel->scale[0]);
-		OUT_VERTEX(t * channel->scale[1]);
-		OUT_VERTEX(w);
+		VERTEX(s * channel->scale[0]);
+		VERTEX(t * channel->scale[1]);
+		VERTEX(w);
 	}
 }
 
@@ -817,22 +800,22 @@ gen2_emit_composite_primitive_affine(struct sna *sna,
 					 &sx, &sy);
 
 	gen2_emit_composite_dstcoord(sna, dst_x + r->width, dst_y + r->height);
-	OUT_VERTEX(sx * op->src.scale[0]);
-	OUT_VERTEX(sy * op->src.scale[1]);
+	VERTEX(sx * op->src.scale[0]);
+	VERTEX(sy * op->src.scale[1]);
 
 	_sna_get_transformed_coordinates(src_x, src_y + r->height,
 					 transform,
 					 &sx, &sy);
 	gen2_emit_composite_dstcoord(sna, dst_x, dst_y + r->height);
-	OUT_VERTEX(sx * op->src.scale[0]);
-	OUT_VERTEX(sy * op->src.scale[1]);
+	VERTEX(sx * op->src.scale[0]);
+	VERTEX(sy * op->src.scale[1]);
 
 	_sna_get_transformed_coordinates(src_x, src_y,
 					 transform,
 					 &sx, &sy);
 	gen2_emit_composite_dstcoord(sna, dst_x, dst_y);
-	OUT_VERTEX(sx * op->src.scale[0]);
-	OUT_VERTEX(sy * op->src.scale[1]);
+	VERTEX(sx * op->src.scale[0]);
+	VERTEX(sy * op->src.scale[1]);
 }
 
 fastcall static void
@@ -868,17 +851,17 @@ static void gen2_magic_ca_pass(struct sna *sna,
 	if (!op->need_magic_ca_pass)
 		return;
 
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(8) | 0);
-	OUT_BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
-		  gen2_get_blend_cntl(PictOpAdd, TRUE, op->dst.format) |
-		  S8_ENABLE_COLOR_BUFFER_WRITE);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(8) | 0);
+	BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
+	      gen2_get_blend_cntl(PictOpAdd, TRUE, op->dst.format) |
+	      S8_ENABLE_COLOR_BUFFER_WRITE);
 	sna->render_state.gen2.ls1 = 0;
 
 	gen2_get_blend_factors(op, PictOpAdd, &cblend, &ablend);
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
-		  LOAD_TEXTURE_BLEND_STAGE(0) | 1);
-	OUT_BATCH(cblend);
-	OUT_BATCH(ablend);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
+	      LOAD_TEXTURE_BLEND_STAGE(0) | 1);
+	BATCH(cblend);
+	BATCH(ablend);
 	sna->render_state.gen2.ls2 = 0;
 
 	memcpy(sna->kgem.batch + sna->kgem.nbatch,
@@ -929,7 +912,7 @@ inline static int gen2_get_rectangles(struct sna *sna,
 	rem -= need;
 	if (state->vertex_offset == 0) {
 		state->vertex_offset = sna->kgem.nbatch;
-		OUT_BATCH(PRIM3D_INLINE | PRIM3D_RECTLIST);
+		BATCH(PRIM3D_INLINE | PRIM3D_RECTLIST);
 	}
 
 	if (want > 1 && want * size > rem)
@@ -1385,9 +1368,9 @@ gen2_render_composite(struct sna *sna,
 		if (tmp->src.bo == tmp->dst.bo || tmp->mask.bo == tmp->dst.bo) {
 			kgem_emit_flush(&sna->kgem);
 		} else {
-			OUT_BATCH(_3DSTATE_MODES_5_CMD |
-				  PIPELINE_FLUSH_RENDER_CACHE |
-				  PIPELINE_FLUSH_TEXTURE_CACHE);
+			BATCH(_3DSTATE_MODES_5_CMD |
+			      PIPELINE_FLUSH_RENDER_CACHE |
+			      PIPELINE_FLUSH_TEXTURE_CACHE);
 			kgem_clear_dirty(&sna->kgem);
 		}
 	}
@@ -1508,7 +1491,7 @@ gen2_emit_composite_spans_vertex(struct sna *sna,
 				 float opacity)
 {
 	gen2_emit_composite_dstcoord(sna, x + op->base.dst.x, y + op->base.dst.y);
-	OUT_BATCH((uint8_t)(opacity * 255) << 24);
+	BATCH((uint8_t)(opacity * 255) << 24);
 	gen2_emit_composite_texcoord(sna, &op->base.src, x, y);
 }
 
@@ -1560,10 +1543,10 @@ gen2_emit_spans_pipeline(struct sna *sna,
 	}
 
 	unwind = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
-		  LOAD_TEXTURE_BLEND_STAGE(0) | 1);
-	OUT_BATCH(cblend);
-	OUT_BATCH(ablend);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
+	      LOAD_TEXTURE_BLEND_STAGE(0) | 1);
+	BATCH(cblend);
+	BATCH(ablend);
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls2 + 1,
 		    sna->kgem.batch + unwind + 1,
 		    2 * sizeof(uint32_t)) == 0)
@@ -1581,13 +1564,13 @@ static void gen2_emit_composite_spans_state(struct sna *sna,
 	gen2_emit_target(sna, &op->base);
 
 	unwind = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
-		  I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
-	OUT_BATCH(!op->base.src.is_solid << 12);
-	OUT_BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY | S3_DIFFUSE_PRESENT);
-	OUT_BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
-		  gen2_get_blend_cntl(op->base.op, FALSE, op->base.dst.format) |
-		  S8_ENABLE_COLOR_BUFFER_WRITE);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
+	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
+	BATCH(!op->base.src.is_solid << 12);
+	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY | S3_DIFFUSE_PRESENT);
+	BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
+	      gen2_get_blend_cntl(op->base.op, FALSE, op->base.dst.format) |
+	      S8_ENABLE_COLOR_BUFFER_WRITE);
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls1,
 		    sna->kgem.batch + unwind,
 		    4 * sizeof(uint32_t)) == 0)
@@ -1599,13 +1582,13 @@ static void gen2_emit_composite_spans_state(struct sna *sna,
 	gen2_emit_spans_pipeline(sna, op);
 
 	if (op->base.src.is_solid) {
-		OUT_BATCH(_3DSTATE_DFLT_SPECULAR_CMD);
-		OUT_BATCH(op->base.src.u.gen2.pixel);
+		BATCH(_3DSTATE_DFLT_SPECULAR_CMD);
+		BATCH(op->base.src.u.gen2.pixel);
 	} else {
 		uint32_t v =_3DSTATE_VERTEX_FORMAT_2_CMD |
 			(op->base.src.is_affine ? TEXCOORDFMT_2D : TEXCOORDFMT_3D);
 		if (sna->render_state.gen2.vft != v) {
-			OUT_BATCH(v);
+			BATCH(v);
 			sna->render_state.gen2.vft = v;
 		}
 		gen2_emit_texture(sna, &op->base.src, 0);
@@ -1756,9 +1739,9 @@ gen2_render_composite_spans(struct sna *sna,
 			if (tmp->base.src.bo == tmp->base.dst.bo) {
 				kgem_emit_flush(&sna->kgem);
 			} else {
-				OUT_BATCH(_3DSTATE_MODES_5_CMD |
-					  PIPELINE_FLUSH_RENDER_CACHE |
-					  PIPELINE_FLUSH_TEXTURE_CACHE);
+				BATCH(_3DSTATE_MODES_5_CMD |
+				      PIPELINE_FLUSH_RENDER_CACHE |
+				      PIPELINE_FLUSH_TEXTURE_CACHE);
 				kgem_clear_dirty(&sna->kgem);
 			}
 		}
@@ -1788,19 +1771,19 @@ gen2_emit_fill_pipeline(struct sna *sna, const struct sna_composite_op *op)
 	uint32_t blend, unwind;
 
 	unwind = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
-		  LOAD_TEXTURE_BLEND_STAGE(0) | 1);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
+	      LOAD_TEXTURE_BLEND_STAGE(0) | 1);
 
 	blend = TB0C_LAST_STAGE | TB0C_RESULT_SCALE_1X | TB0C_OP_ARG1 |
 		TB0C_ARG1_SEL_DIFFUSE |
 		TB0C_OUTPUT_WRITE_CURRENT;
 	if (op->dst.format == PICT_a8)
 		blend |= TB0C_ARG1_REPLICATE_ALPHA;
-	OUT_BATCH(blend);
+	BATCH(blend);
 
-	OUT_BATCH(TB0A_RESULT_SCALE_1X | TB0A_OP_ARG1 |
-		  TB0A_ARG1_SEL_DIFFUSE |
-		  TB0A_OUTPUT_WRITE_CURRENT);
+	BATCH(TB0A_RESULT_SCALE_1X | TB0A_OP_ARG1 |
+	      TB0A_ARG1_SEL_DIFFUSE |
+	      TB0A_OUTPUT_WRITE_CURRENT);
 
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls2 + 1,
 		    sna->kgem.batch + unwind + 1,
@@ -1820,13 +1803,13 @@ static void gen2_emit_fill_composite_state(struct sna *sna,
 	gen2_emit_target(sna, op);
 
 	ls1 = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
-		  I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
-	OUT_BATCH(0);
-	OUT_BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
-	OUT_BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
-		  gen2_get_blend_cntl(op->op, FALSE, op->dst.format) |
-		  S8_ENABLE_COLOR_BUFFER_WRITE);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
+	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
+	BATCH(0);
+	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
+	BATCH(S8_ENABLE_COLOR_BLEND | S8_BLENDFUNC_ADD |
+	      gen2_get_blend_cntl(op->op, FALSE, op->dst.format) |
+	      S8_ENABLE_COLOR_BUFFER_WRITE);
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls1,
 		    sna->kgem.batch + ls1,
 		    4 * sizeof(uint32_t)) == 0)
@@ -1836,8 +1819,8 @@ static void gen2_emit_fill_composite_state(struct sna *sna,
 
 	gen2_emit_fill_pipeline(sna, op);
 
-	OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
-	OUT_BATCH(pixel);
+	BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+	BATCH(pixel);
 }
 
 static Bool
@@ -1912,7 +1895,7 @@ gen2_render_fill_boxes(struct sna *sna,
 						      dst, dst_bo,
 						      box, n);
 
-	if (!PREFER_FILL_BOXES &&
+	if (!PREFER_3D_FILL_BOXES &&
 	    gen2_render_fill_boxes_try_blt(sna, op, format, color,
 					   dst, dst_bo,
 					   box, n))
@@ -1957,12 +1940,12 @@ gen2_render_fill_boxes(struct sna *sna,
 		do {
 			DBG(("	(%d, %d), (%d, %d): %x\n",
 			     box->x1, box->y1, box->x2, box->y2, pixel));
-			OUT_VERTEX(box->x2);
-			OUT_VERTEX(box->y2);
-			OUT_VERTEX(box->x1);
-			OUT_VERTEX(box->y2);
-			OUT_VERTEX(box->x1);
-			OUT_VERTEX(box->y1);
+			VERTEX(box->x2);
+			VERTEX(box->y2);
+			VERTEX(box->x1);
+			VERTEX(box->y2);
+			VERTEX(box->x1);
+			VERTEX(box->y1);
 			box++;
 		} while (--n_this_time);
 	} while (n);
@@ -1981,11 +1964,11 @@ static void gen2_emit_fill_state(struct sna *sna,
 	gen2_emit_target(sna, op);
 
 	ls1 = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
-		  I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
-	OUT_BATCH(0);
-	OUT_BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
-	OUT_BATCH(S8_ENABLE_COLOR_BUFFER_WRITE);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
+	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
+	BATCH(0);
+	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
+	BATCH(S8_ENABLE_COLOR_BUFFER_WRITE);
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls1,
 		    sna->kgem.batch + ls1,
 		    4 * sizeof(uint32_t)) == 0)
@@ -1996,8 +1979,8 @@ static void gen2_emit_fill_state(struct sna *sna,
 	gen2_enable_logic_op(sna, op->op);
 	gen2_emit_fill_pipeline(sna, op);
 
-	OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
-	OUT_BATCH(op->u.gen2.pixel);
+	BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+	BATCH(op->src.u.gen2.pixel);
 }
 
 static void
@@ -2010,12 +1993,12 @@ gen2_render_fill_blt(struct sna *sna,
 		gen2_get_rectangles(sna, &op->base, 1);
 	}
 
-	OUT_VERTEX(x+w);
-	OUT_VERTEX(y+h);
-	OUT_VERTEX(x);
-	OUT_VERTEX(y+h);
-	OUT_VERTEX(x);
-	OUT_VERTEX(y);
+	VERTEX(x+w);
+	VERTEX(y+h);
+	VERTEX(x);
+	VERTEX(y+h);
+	VERTEX(x);
+	VERTEX(y);
 }
 
 static void
@@ -2039,7 +2022,7 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 #endif
 
 	/* Prefer to use the BLT if already engaged */
-	if (!PREFER_FILL && sna->kgem.mode != KGEM_RENDER &&
+	if (!PREFER_3D_FILL && sna->kgem.mode != KGEM_RENDER &&
 	    sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
@@ -2063,11 +2046,16 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 	tmp->base.dst.bo = dst_bo;
 	tmp->base.floats_per_vertex = 2;
 
-	tmp->base.u.gen2.pixel =
+	tmp->base.src.u.gen2.pixel =
 		sna_rgba_for_color(color, dst->drawable.depth);
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		return sna_blt_fill(sna, alu,
+				    dst_bo, dst->drawable.bitsPerPixel,
+				    color,
+				    tmp);
+	}
 
 	tmp->blt  = gen2_render_fill_blt;
 	tmp->done = gen2_render_fill_done;
@@ -2103,55 +2091,52 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 {
 	struct sna_composite_op tmp;
 
-#if NO_FILL_BOXES
+#if NO_FILL_ONE
 	return gen2_render_fill_one_try_blt(sna, dst, bo, color,
 					    x1, y1, x2, y2, alu);
 #endif
 
 	/* Prefer to use the BLT if already engaged */
-	if (sna->kgem.mode != KGEM_RENDER &&
+	if (!PREFER_3D_FILL && sna->kgem.mode != KGEM_RENDER &&
 	    gen2_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
 		return TRUE;
 
 	/* Must use the BLT if we can't RENDER... */
-	if (!(alu == GXcopy || alu == GXclear) ||
-	    dst->drawable.width > 2048 || dst->drawable.height > 2048 ||
+	if (dst->drawable.width > 2048 || dst->drawable.height > 2048 ||
 	    bo->pitch > 8192)
 		return gen2_render_fill_one_try_blt(sna, dst, bo, color,
 						    x1, y1, x2, y2, alu);
 
-	if (alu == GXclear)
-		color = 0;
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+		kgem_submit(&sna->kgem);
+		if (gen2_render_fill_one_try_blt(sna, dst, bo, color,
+						 x1, y1, x2, y2, alu))
+			return TRUE;
+	}
 
-	memset(&tmp, 0, sizeof(tmp));
-	tmp.op = color == 0 ? PictOpClear : PictOpSrc;
+	tmp.op = alu;
 	tmp.dst.pixmap = dst;
 	tmp.dst.width = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
 	tmp.dst.bo = bo;
 	tmp.floats_per_vertex = 2;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.src.u.gen2.pixel =
 		sna_rgba_for_color(color, dst->drawable.depth);
 
-	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
-		kgem_submit(&sna->kgem);
-		if (gen2_render_fill_one_try_blt(sna, dst, bo, color,
-						 x1, y1, x2, y2, alu))
-			return TRUE;
-	}
-
 	gen2_emit_fill_state(sna, &tmp);
 	gen2_get_rectangles(sna, &tmp, 1);
-	DBG(("	(%d, %d), (%d, %d): %x\n", x1, y1, x2, y2, color));
-	OUT_VERTEX(x2);
-	OUT_VERTEX(y2);
-	OUT_VERTEX(x1);
-	OUT_VERTEX(y2);
-	OUT_VERTEX(x1);
-	OUT_VERTEX(y1);
+	DBG(("%s: (%d, %d), (%d, %d): %x\n", __FUNCTION__,
+	     x1, y1, x2, y2, tmp.src.u.gen2.pixel));
+	VERTEX(x2);
+	VERTEX(y2);
+	VERTEX(x1);
+	VERTEX(y2);
+	VERTEX(x1);
+	VERTEX(y1);
 	gen2_vertex_flush(sna);
 
 	return TRUE;
@@ -2181,8 +2166,8 @@ gen2_emit_copy_pipeline(struct sna *sna, const struct sna_composite_op *op)
 	uint32_t blend, unwind;
 
 	unwind = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
-		  LOAD_TEXTURE_BLEND_STAGE(0) | 1);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
+	      LOAD_TEXTURE_BLEND_STAGE(0) | 1);
 
 	blend = TB0C_LAST_STAGE | TB0C_RESULT_SCALE_1X | TB0C_OP_ARG1 |
 		TB0C_OUTPUT_WRITE_CURRENT;
@@ -2192,7 +2177,7 @@ gen2_emit_copy_pipeline(struct sna *sna, const struct sna_composite_op *op)
 		blend |= TB0C_ARG1_SEL_TEXEL0;
 	else
 		blend |= TB0C_ARG1_SEL_ONE | TB0C_ARG1_INVERT;	/* 0.0 */
-	OUT_BATCH(blend);
+	BATCH(blend);
 
 	blend = TB0A_RESULT_SCALE_1X | TB0A_OP_ARG1 |
 		TB0A_OUTPUT_WRITE_CURRENT;
@@ -2200,7 +2185,7 @@ gen2_emit_copy_pipeline(struct sna *sna, const struct sna_composite_op *op)
 		blend |= TB0A_ARG1_SEL_ONE;
 	else
 		blend |= TB0A_ARG1_SEL_TEXEL0;
-	OUT_BATCH(blend);
+	BATCH(blend);
 
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls2 + 1,
 		    sna->kgem.batch + unwind + 1,
@@ -2218,11 +2203,11 @@ static void gen2_emit_copy_state(struct sna *sna, const struct sna_composite_op
 	gen2_emit_target(sna, op);
 
 	ls1 = sna->kgem.nbatch;
-	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
-		  I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
-	OUT_BATCH(1<<12);
-	OUT_BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
-	OUT_BATCH(S8_ENABLE_COLOR_BUFFER_WRITE);
+	BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
+	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
+	BATCH(1<<12);
+	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
+	BATCH(S8_ENABLE_COLOR_BUFFER_WRITE);
 	if (memcmp (sna->kgem.batch + sna->render_state.gen2.ls1,
 		    sna->kgem.batch + ls1,
 		    4 * sizeof(uint32_t)) == 0)
@@ -2235,7 +2220,7 @@ static void gen2_emit_copy_state(struct sna *sna, const struct sna_composite_op
 
 	v = _3DSTATE_VERTEX_FORMAT_2_CMD | TEXCOORDFMT_2D;
 	if (sna->render_state.gen2.vft != v) {
-		OUT_BATCH(v);
+		BATCH(v);
 		sna->render_state.gen2.vft = v;
 	}
 
@@ -2264,7 +2249,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (%d, %d)->(%d, %d) x %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n));
 
-	if (!PREFER_COPY_BOXES &&
+	if (!PREFER_3D_COPY_BOXES &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -2324,20 +2309,20 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 			     box->x1 + src_dx, box->y1 + src_dy,
 			     box->x1 + dst_dx, box->y1 + dst_dy,
 			     box->x2 - box->x1, box->y2 - box->y1));
-			OUT_VERTEX(box->x2 + dst_dx);
-			OUT_VERTEX(box->y2 + dst_dy);
-			OUT_VERTEX((box->x2 + src_dx) * tmp.src.scale[0]);
-			OUT_VERTEX((box->y2 + src_dy) * tmp.src.scale[1]);
+			VERTEX(box->x2 + dst_dx);
+			VERTEX(box->y2 + dst_dy);
+			VERTEX((box->x2 + src_dx) * tmp.src.scale[0]);
+			VERTEX((box->y2 + src_dy) * tmp.src.scale[1]);
 
-			OUT_VERTEX(box->x1 + dst_dx);
-			OUT_VERTEX(box->y2 + dst_dy);
-			OUT_VERTEX((box->x1 + src_dx) * tmp.src.scale[0]);
-			OUT_VERTEX((box->y2 + src_dy) * tmp.src.scale[1]);
+			VERTEX(box->x1 + dst_dx);
+			VERTEX(box->y2 + dst_dy);
+			VERTEX((box->x1 + src_dx) * tmp.src.scale[0]);
+			VERTEX((box->y2 + src_dy) * tmp.src.scale[1]);
 
-			OUT_VERTEX(box->x1 + dst_dx);
-			OUT_VERTEX(box->y1 + dst_dy);
-			OUT_VERTEX((box->x1 + src_dx) * tmp.src.scale[0]);
-			OUT_VERTEX((box->y1 + src_dy) * tmp.src.scale[1]);
+			VERTEX(box->x1 + dst_dx);
+			VERTEX(box->y1 + dst_dy);
+			VERTEX((box->x1 + src_dx) * tmp.src.scale[0]);
+			VERTEX((box->y1 + src_dy) * tmp.src.scale[1]);
 
 			box++;
 		} while (--n_this_time);
@@ -2360,20 +2345,20 @@ gen2_render_copy_blt(struct sna *sna,
 		gen2_get_rectangles(sna, &op->base, 1);
 	}
 
-	OUT_VERTEX(dx+w);
-	OUT_VERTEX(dy+h);
-	OUT_VERTEX((sx+w)*op->base.src.scale[0]);
-	OUT_VERTEX((sy+h)*op->base.src.scale[1]);
+	VERTEX(dx+w);
+	VERTEX(dy+h);
+	VERTEX((sx+w)*op->base.src.scale[0]);
+	VERTEX((sy+h)*op->base.src.scale[1]);
 
-	OUT_VERTEX(dx);
-	OUT_VERTEX(dy+h);
-	OUT_VERTEX(sx*op->base.src.scale[0]);
-	OUT_VERTEX((sy+h)*op->base.src.scale[1]);
+	VERTEX(dx);
+	VERTEX(dy+h);
+	VERTEX(sx*op->base.src.scale[0]);
+	VERTEX((sy+h)*op->base.src.scale[1]);
 
-	OUT_VERTEX(dx);
-	OUT_VERTEX(dy);
-	OUT_VERTEX(sx*op->base.src.scale[0]);
-	OUT_VERTEX(sy*op->base.src.scale[1]);
+	VERTEX(dx);
+	VERTEX(dy);
+	VERTEX(sx*op->base.src.scale[0]);
+	VERTEX(sy*op->base.src.scale[1]);
 }
 
 static void
@@ -2400,7 +2385,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 #endif
 
 	/* Prefer to use the BLT */
-	if (!PREFER_COPY && sna->kgem.mode != KGEM_RENDER &&
+	if (!PREFER_3D_COPY && sna->kgem.mode != KGEM_RENDER &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 223cae6..979ff85 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -98,10 +98,6 @@ struct sna_composite_op {
 		} blt;
 
 		struct {
-			uint32_t pixel;
-		} gen2;
-
-		struct {
 			float constants[8];
 			uint32_t num_constants;
 		} gen3;

