xf86-video-intel: 5 commits - src/sna/gen6_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_composite.c src/sna/sna_glyphs.c src/sna/sna.h src/sna/sna_render_inline.h uxa/uxa-priv.h uxa/uxa-render.c uxa/uxa-unaccel.c

Chris Wilson ickle at kemper.freedesktop.org
Fri Nov 4 06:30:23 PDT 2011


 src/sna/gen6_render.c       |  303 ++++++++++++++++++++++++++++++++++++++++++++
 src/sna/kgem.c              |   38 ++++-
 src/sna/kgem.h              |    3 
 src/sna/sna.h               |    7 -
 src/sna/sna_accel.c         |   21 ++-
 src/sna/sna_blt.c           |    9 -
 src/sna/sna_composite.c     |   14 +-
 src/sna/sna_glyphs.c        |    7 -
 src/sna/sna_render_inline.h |    6 
 uxa/uxa-priv.h              |    4 
 uxa/uxa-render.c            |   16 +-
 uxa/uxa-unaccel.c           |   50 +++++--
 12 files changed, 435 insertions(+), 43 deletions(-)

New commits:
commit a26c5d44ccaa7e9ff931948032b073f0a550343a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 4 12:36:40 2011 +0000

    uxa: Ensure that alphaMaps are mapped into the CPU for fallbacks
    
    Reported-by: Hans-Peter Budek <peter.budek at gmx.de>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

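The fb* fallbacks are backed by pixman, and when a picture carries an
alpha map, compositing takes the picture's alpha from that separate
image; a fallback that only prepared picture->pDrawable would therefore
read the alpha map's pixels without ever migrating them. A standalone
sketch of the alpha-map semantics in plain pixman (illustrative only,
not code from the patch; assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <pixman.h>

int main(void)
{
	/* premultiplied 50% red; alpha byte unused for x8r8g8b8 */
	uint32_t src_bits   = 0x00800000;
	/* a8 alpha map, first byte on little-endian: ~50% opacity */
	uint32_t alpha_bits = 0x00000080;
	/* opaque blue destination */
	uint32_t dst_bits   = 0xff0000ff;

	pixman_image_t *src = pixman_image_create_bits(PIXMAN_x8r8g8b8,
						       1, 1, &src_bits, 4);
	pixman_image_t *alpha = pixman_image_create_bits(PIXMAN_a8,
							 1, 1, &alpha_bits, 4);
	pixman_image_t *dst = pixman_image_create_bits(PIXMAN_a8r8g8b8,
						       1, 1, &dst_bits, 4);

	/* the source's alpha channel now comes from the alpha-map image */
	pixman_image_set_alpha_map(src, alpha, 0, 0);

	pixman_image_composite32(PIXMAN_OP_OVER, src, NULL, dst,
				 0, 0, 0, 0, 0, 0, 1, 1);

	/* ~0xff800080: the 0x80 alpha was read from the alpha map */
	printf("dst = %08x\n", dst_bits);

	pixman_image_unref(src);
	pixman_image_unref(alpha);
	pixman_image_unref(dst);
	return 0;
}
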
diff --git a/uxa/uxa-priv.h b/uxa/uxa-priv.h
index ac206af..6b9a9c1 100644
--- a/uxa/uxa-priv.h
+++ b/uxa/uxa-priv.h
@@ -325,9 +325,11 @@ uxa_check_composite(CARD8 op,
 
 /* uxa.c */
 Bool uxa_prepare_access(DrawablePtr pDrawable, uxa_access_t access);
-
 void uxa_finish_access(DrawablePtr pDrawable);
 
+Bool uxa_picture_prepare_access(PicturePtr picture, int mode);
+void uxa_picture_finish_access(PicturePtr picture);
+
 void
 uxa_get_drawable_deltas(DrawablePtr pDrawable, PixmapPtr pPixmap,
 			int *xp, int *yp);
diff --git a/uxa/uxa-render.c b/uxa/uxa-render.c
index b26be80..34257c6 100644
--- a/uxa/uxa-render.c
+++ b/uxa/uxa-render.c
@@ -568,10 +568,10 @@ uxa_picture_from_pixman_image(ScreenPtr screen,
 		}
 		ValidatePicture(src);
 
-		if (uxa_prepare_access(picture->pDrawable, UXA_ACCESS_RW)) {
+		if (uxa_picture_prepare_access(picture, UXA_ACCESS_RW)) {
 			fbComposite(PictOpSrc, src, NULL, picture,
 				    0, 0, 0, 0, 0, 0, width, height);
-			uxa_finish_access(picture->pDrawable);
+			uxa_picture_finish_access(picture);
 		}
 
 		FreePicture(src, 0);
@@ -699,10 +699,10 @@ uxa_acquire_pattern(ScreenPtr pScreen,
 	if (!pDst)
 		return 0;
 
-	if (uxa_prepare_access(pDst->pDrawable, UXA_ACCESS_RW)) {
+	if (uxa_picture_prepare_access(pDst, UXA_ACCESS_RW)) {
 		fbComposite(PictOpSrc, pSrc, NULL, pDst,
 			    x, y, 0, 0, 0, 0, width, height);
-		uxa_finish_access(pDst->pDrawable);
+		uxa_picture_finish_access(pDst);
 		return pDst;
 	} else {
 		FreePicture(pDst, 0);
@@ -756,14 +756,14 @@ uxa_render_picture(ScreenPtr screen,
 	if (!picture)
 		return 0;
 
-	if (uxa_prepare_access(picture->pDrawable, UXA_ACCESS_RW)) {
-		if (uxa_prepare_access(src->pDrawable, UXA_ACCESS_RO)) {
+	if (uxa_picture_prepare_access(picture, UXA_ACCESS_RW)) {
+		if (uxa_picture_prepare_access(src, UXA_ACCESS_RO)) {
 			ret = 1;
 			fbComposite(PictOpSrc, src, NULL, picture,
 				    x, y, 0, 0, 0, 0, width, height);
-			uxa_finish_access(src->pDrawable);
+			uxa_picture_finish_access(src);
 		}
-		uxa_finish_access(picture->pDrawable);
+		uxa_picture_finish_access(picture);
 	}
 
 	if (!ret) {
diff --git a/uxa/uxa-unaccel.c b/uxa/uxa-unaccel.c
index 15be821..1d4b2c0 100644
--- a/uxa/uxa-unaccel.c
+++ b/uxa/uxa-unaccel.c
@@ -67,6 +67,34 @@ void uxa_finish_access_gc(GCPtr pGC)
 		uxa_finish_access(&pGC->stipple->drawable);
 }
 
+Bool uxa_picture_prepare_access(PicturePtr picture, int mode)
+{
+	if (picture->pDrawable == NULL)
+		return TRUE;
+
+	if (!uxa_prepare_access(picture->pDrawable, mode))
+		return FALSE;
+
+	if (picture->alphaMap &&
+	    !uxa_prepare_access(picture->alphaMap->pDrawable, mode)) {
+		uxa_finish_access(picture->pDrawable);
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
+void uxa_picture_finish_access(PicturePtr picture)
+{
+	if (picture->pDrawable == NULL)
+		return;
+
+	uxa_finish_access(picture->pDrawable);
+	if (picture->alphaMap)
+		uxa_finish_access(picture->alphaMap->pDrawable);
+}
+
+
 char uxa_drawable_location(DrawablePtr pDrawable)
 {
 	return uxa_drawable_is_offscreen(pDrawable) ? 's' : 'm';
@@ -362,24 +390,20 @@ uxa_check_composite(CARD8 op,
 
 	UXA_FALLBACK(("from picts %p/%p to pict %p\n", pSrc, pMask, pDst));
 
-	if (uxa_prepare_access(pDst->pDrawable, UXA_ACCESS_RW)) {
-		if (pSrc->pDrawable == NULL ||
-		    uxa_prepare_access(pSrc->pDrawable, UXA_ACCESS_RO)) {
-			if (!pMask || pMask->pDrawable == NULL ||
-			    uxa_prepare_access(pMask->pDrawable, UXA_ACCESS_RO))
-			{
+	if (uxa_picture_prepare_access(pDst, UXA_ACCESS_RW)) {
+		if (uxa_picture_prepare_access(pSrc, UXA_ACCESS_RO)) {
+			if (!pMask || uxa_picture_prepare_access(pMask, UXA_ACCESS_RO)) {
 				fbComposite(op, pSrc, pMask, pDst,
 					    xSrc, ySrc,
 					    xMask, yMask,
 					    xDst, yDst,
 					    width, height);
-				if (pMask && pMask->pDrawable != NULL)
-					uxa_finish_access(pMask->pDrawable);
+				if (pMask)
+					uxa_picture_finish_access(pMask);
 			}
-			if (pSrc->pDrawable != NULL)
-				uxa_finish_access(pSrc->pDrawable);
+			uxa_picture_finish_access(pSrc);
 		}
-		uxa_finish_access(pDst->pDrawable);
+		uxa_picture_finish_access(pDst);
 	}
 }
 
@@ -391,9 +415,9 @@ uxa_check_add_traps(PicturePtr pPicture,
 
 	UXA_FALLBACK(("to pict %p (%c)\n", pPicture,
 		      uxa_drawable_location(pPicture->pDrawable)));
-	if (uxa_prepare_access(pPicture->pDrawable, UXA_ACCESS_RW)) {
+	if (uxa_picture_prepare_access(pPicture, UXA_ACCESS_RW)) {
 		fbAddTraps(pPicture, x_off, y_off, ntrap, traps);
-		uxa_finish_access(pPicture->pDrawable);
+		uxa_picture_finish_access(pPicture);
 	}
 }
 
commit 34758895cdd93bd7671a78464e79b3891bca113d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 4 11:57:09 2011 +0000

    sna: Ensure operations on a ShmPixmap are synchronous with clients
    
    If we are rendering to or from a ShmPixmap, we need to be sure that the
    operation is complete prior to sending an XSync response to the client,
    in order to preserve mixed-rendering coherency.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

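The wait itself is the GEM set-domain ioctl: moving a buffer to the GTT
domain blocks until all rendering to it has been retired. A minimal
sketch of that mechanism (assuming libdrm and the i915 UAPI headers;
the helper name is illustrative, the driver's version is kgem_sync()
in the patch below):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch only: setting the GTT domain stalls in the kernel until every
 * batch still rendering to 'handle' has been retired. */
static void wait_for_rendering(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain set_domain;

	memset(&set_domain, 0, sizeof(set_domain));
	set_domain.handle = handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;

	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
}

In the patch, pixmap_vmap() marks the CPU bo of a scratch-header (SHM)
pixmap as sync, _kgem_add_bo() accumulates that into kgem->sync, and
sna_accel_flush_callback() then calls kgem_sync() so the reply is not
sent to the client until the rendering has landed.
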
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e6df36f..310f8b5 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -509,8 +509,12 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 	bo->exec = kgem_add_handle(kgem, bo);
 	bo->rq = kgem->next_request;
 	bo->gpu = true;
+
 	list_move(&bo->request, &kgem->next_request->buffers);
+
+	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
+	kgem->sync |= bo->sync;
 }
 
 static uint32_t kgem_end_batch(struct kgem *kgem)
@@ -766,7 +770,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 		}
 
 		/* transfer the handle to a minimum bo */
-		if (bo->base.refcnt == 1 && !bo->base.sync) {
+		if (bo->base.refcnt == 1 && !bo->base.vmap) {
 			struct kgem_bo *base = malloc(sizeof(*base));
 			if (base) {
 				memcpy(base, &bo->base, sizeof (*base));
@@ -1845,13 +1849,35 @@ void kgem_bo_sync(struct kgem *kgem, struct kgem_bo *bo, bool for_write)
 
 	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 	bo->needs_flush = false;
-	if (bo->gpu)
+	if (bo->gpu) {
+		kgem->sync = false;
 		kgem_retire(kgem);
+	}
 	bo->cpu_read = true;
 	if (for_write)
 		bo->cpu_write = true;
 }
 
+void kgem_sync(struct kgem *kgem)
+{
+	if (!list_is_empty(&kgem->requests)) {
+		struct drm_i915_gem_set_domain set_domain;
+		struct kgem_request *rq;
+
+		rq = list_first_entry(&kgem->requests,
+				      struct kgem_request,
+				      list);
+		set_domain.handle = rq->bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+
+		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+		kgem_retire(kgem);
+	}
+
+	kgem->sync = false;
+}
+
 void kgem_clear_dirty(struct kgem *kgem)
 {
 	struct kgem_request *rq = kgem->next_request;
@@ -2023,7 +2049,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		bo->need_io = true;
 	} else {
 		__kgem_bo_init(&bo->base, handle, alloc);
-		bo->base.sync = true;
+		bo->base.vmap = true;
 		bo->need_io = 0;
 	}
 	bo->base.reusable = false;
@@ -2161,10 +2187,10 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 
 	bo = (struct kgem_partial_bo *)_bo;
 
-	DBG(("%s(offset=%d, length=%d, sync=%d)\n", __FUNCTION__,
-	     offset, length, bo->base.sync));
+	DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
+	     offset, length, bo->base.vmap));
 
-	if (!bo->base.sync) {
+	if (!bo->base.vmap) {
 		gem_read(kgem->fd, bo->base.handle,
 			 (char *)(bo+1)+offset, length);
 		bo->base.needs_flush = false;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0453ac5..70a5810 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -72,6 +72,7 @@ struct kgem_bo {
 	uint32_t needs_flush : 1;
 	uint32_t cpu_read : 1;
 	uint32_t cpu_write : 1;
+	uint32_t vmap : 1;
 	uint32_t flush : 1;
 	uint32_t sync : 1;
 	uint32_t deleted : 1;
@@ -110,6 +111,7 @@ struct kgem {
 	uint16_t nfence;
 
 	uint32_t flush:1;
+	uint32_t sync:1;
 	uint32_t need_expire:1;
 	uint32_t need_purge:1;
 	uint32_t need_retire:1;
@@ -334,6 +336,7 @@ static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
 }
 
 void kgem_bo_sync(struct kgem *kgem, struct kgem_bo *bo, bool for_write);
+void kgem_sync(struct kgem *kgem);
 
 #define KGEM_BUFFER_WRITE	0x1
 #define KGEM_BUFFER_LAST	0x2
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 1097d74..7a7db45 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -520,7 +520,7 @@ static inline struct kgem_bo *pixmap_vmap(struct kgem *kgem, PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv;
 
-	if (kgem->wedged)
+	if (unlikely(kgem->wedged))
 		return NULL;
 
 	priv = sna_pixmap_attach(pixmap);
@@ -532,8 +532,11 @@ static inline struct kgem_bo *pixmap_vmap(struct kgem *kgem, PixmapPtr pixmap)
 					       pixmap->devPrivate.ptr,
 					       pixmap_size(pixmap),
 					       0);
-		if (priv->cpu_bo)
+		if (priv->cpu_bo) {
 			priv->cpu_bo->pitch = pixmap->devKind;
+			if (pixmap->usage_hint == CREATE_PIXMAP_USAGE_SCRATCH_HEADER)
+				priv->cpu_bo->sync = true;
+		}
 	}
 
 	return priv->cpu_bo;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9e1f75f..48d3d6a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -188,8 +188,7 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 
 	if (priv->cpu_bo) {
-		if (pixmap->usage_hint != CREATE_PIXMAP_USAGE_SCRATCH_HEADER &&
-		    kgem_bo_is_busy(priv->cpu_bo)) {
+		if (kgem_bo_is_busy(priv->cpu_bo)) {
 			list_add_tail(&priv->list, &sna->deferred_free);
 			return false;
 		}
@@ -7965,7 +7964,8 @@ sna_accel_flush_callback(CallbackListPtr *list,
 {
 	struct sna *sna = user_data;
 
-	if (sna->kgem.flush == 0 && list_is_empty(&sna->dirty_pixmaps))
+	if ((sna->kgem.sync|sna->kgem.flush) == 0 &&
+	    list_is_empty(&sna->dirty_pixmaps))
 		return;
 
 	DBG(("%s\n", __FUNCTION__));
@@ -7979,6 +7979,21 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	}
 
 	kgem_submit(&sna->kgem);
+
+	if (sna->kgem.sync) {
+		kgem_sync(&sna->kgem);
+
+		while (!list_is_empty(&sna->deferred_free)) {
+			struct sna_pixmap *priv =
+				list_first_entry(&sna->deferred_free,
+						 struct sna_pixmap,
+						 list);
+			list_del(&priv->list);
+			kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
+			fbDestroyPixmap(priv->pixmap);
+			free(priv);
+		}
+	}
 }
 
 static void sna_deferred_free(struct sna *sna)
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 5420712..9ea3efb 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1120,13 +1120,8 @@ prepare_blt_put(struct sna *sna,
 	if (priv) {
 		if (!priv->gpu_only) {
 			src_bo = priv->cpu_bo;
-			if (!src_bo) {
-				src_bo = kgem_create_map(&sna->kgem,
-							 src->devPrivate.ptr,
-							 pixmap_size(src),
-							 1);
-				priv->cpu_bo = src_bo;
-			}
+			if (!src_bo)
+				src_bo = pixmap_vmap(&sna->kgem, src);
 		}
 	} else {
 		src_bo = kgem_create_map(&sna->kgem,
commit 5525691eb024f2a04b486652c24e0e34176fdd5e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 4 10:49:48 2011 +0000

    sna/gen6: Poor man's spans layered on top of the existing composite
    
    Performance of this lazy interface looks inconclusive:
    
    Speedups
    ========
     xlib         swfdec-giant-steps  1063.56 -> 710.68:     1.50x speedup
     xlib          firefox-asteroids  3612.55 -> 3012.58:    1.20x speedup
     xlib       firefox-canvas-alpha  15837.62 -> 13442.98:  1.18x speedup
     xlib                  ocitysmap  1106.35 -> 970.66:     1.14x speedup
     xlib             firefox-canvas  33140.27 -> 30616.08:  1.08x speedup
     xlib                    poppler  629.97 -> 585.95:      1.08x speedup
     xlib          firefox-talos-gfx  2754.37 -> 2562.00:    1.08x speedup
    Slowdowns
    =========
     xlib                       gvim  1363.16 -> 1439.64:    1.06x slowdown
     xlib              midori-zoomed  758.48 -> 904.37:      1.19x slowdown
     xlib           firefox-fishbowl  22068.29 -> 26547.84:  1.20x slowdown
     xlib       firefox-planet-gnome  2995.96 -> 4231.44:    1.41x slowdown
    
    It remains off and a curiosity for the time being.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

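The trick is to reuse the ordinary composite path: each span box is
emitted as a composite rectangle whose mask coordinate is the span's
opacity, sampled from a 256x1 alpha-gradient texture with a nearest
filter and pad repeat, so the existing composite kernels apply the
coverage. A standalone sketch of that sampling (illustrative only; it
assumes the gradient is a simple 0..255 alpha ramp):

#include <stdint.h>
#include <stdio.h>

/* Texel i of the gradient holds alpha = i, so sampling at normalised
 * coordinate 'opacity' returns ~opacity*255 -- the mask value the
 * composite kernel multiplies into the source, i.e. per-span coverage. */
static uint8_t sample_alpha_gradient(const uint8_t *texels, int width,
				     float opacity)
{
	int i = (int)(opacity * (width - 1) + 0.5f);	/* nearest */

	if (i < 0)		/* RepeatPad clamps out-of-range coords */
		i = 0;
	if (i >= width)
		i = width - 1;

	return texels[i];
}

int main(void)
{
	uint8_t gradient[256];
	int i;

	for (i = 0; i < 256; i++)
		gradient[i] = i;

	printf("opacity 0.25 -> mask %d\n",
	       sample_alpha_gradient(gradient, 256, 0.25f));
	printf("opacity 1.00 -> mask %d\n",
	       sample_alpha_gradient(gradient, 256, 1.00f));
	return 0;
}
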
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 02b051f..99655b8 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -50,6 +50,7 @@
 #endif
 
 #define NO_COMPOSITE 0
+#define NO_COMPOSITE_SPANS 1
 #define NO_COPY 0
 #define NO_COPY_BOXES 0
 #define NO_FILL 0
@@ -2244,6 +2245,305 @@ cleanup_dst:
 	return FALSE;
 }
 
+/* A poor man's span interface. But better than nothing? */
+#if !NO_COMPOSITE_SPANS
+static Bool
+gen6_composite_alpha_gradient_init(struct sna *sna,
+				   struct sna_composite_channel *channel)
+{
+	DBG(("%s\n", __FUNCTION__));
+
+	channel->filter = PictFilterNearest;
+	channel->repeat = RepeatPad;
+	channel->is_affine = TRUE;
+	channel->is_solid  = FALSE;
+	channel->transform = NULL;
+	channel->width  = 256;
+	channel->height = 1;
+	channel->card_format = GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
+
+	channel->bo = sna_render_get_alpha_gradient(sna);
+
+	channel->scale[0]  = channel->scale[1]  = 1;
+	channel->offset[0] = channel->offset[1] = 0;
+	return channel->bo != NULL;
+}
+
+inline static void
+gen6_emit_composite_texcoord(struct sna *sna,
+			     const struct sna_composite_channel *channel,
+			     int16_t x, int16_t y)
+{
+	float t[3];
+
+	if (channel->is_affine) {
+		sna_get_transformed_coordinates(x + channel->offset[0],
+						y + channel->offset[1],
+						channel->transform,
+						&t[0], &t[1]);
+		OUT_VERTEX_F(t[0] * channel->scale[0]);
+		OUT_VERTEX_F(t[1] * channel->scale[1]);
+	} else {
+		t[0] = t[1] = 0; t[2] = 1;
+		sna_get_transformed_coordinates_3d(x + channel->offset[0],
+						   y + channel->offset[1],
+						   channel->transform,
+						   &t[0], &t[1], &t[2]);
+		OUT_VERTEX_F(t[0] * channel->scale[0]);
+		OUT_VERTEX_F(t[1] * channel->scale[1]);
+		OUT_VERTEX_F(t[2]);
+	}
+}
+
+inline static void
+gen6_emit_composite_texcoord_affine(struct sna *sna,
+				    const struct sna_composite_channel *channel,
+				    int16_t x, int16_t y)
+{
+	float t[2];
+
+	sna_get_transformed_coordinates(x + channel->offset[0],
+					y + channel->offset[1],
+					channel->transform,
+					&t[0], &t[1]);
+	OUT_VERTEX_F(t[0] * channel->scale[0]);
+	OUT_VERTEX_F(t[1] * channel->scale[1]);
+}
+
+inline static void
+gen6_emit_composite_spans_vertex(struct sna *sna,
+				 const struct sna_composite_spans_op *op,
+				 int16_t x, int16_t y)
+{
+	OUT_VERTEX(x, y);
+	gen6_emit_composite_texcoord(sna, &op->base.src, x, y);
+}
+
+fastcall static void
+gen6_emit_composite_spans_primitive(struct sna *sna,
+				    const struct sna_composite_spans_op *op,
+				    const BoxRec *box,
+				    float opacity)
+{
+	gen6_emit_composite_spans_vertex(sna, op, box->x2, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+	if (!op->base.is_affine)
+		OUT_VERTEX_F(1);
+
+	gen6_emit_composite_spans_vertex(sna, op, box->x1, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+	if (!op->base.is_affine)
+		OUT_VERTEX_F(1);
+
+	gen6_emit_composite_spans_vertex(sna, op, box->x1, box->y1);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(0);
+	if (!op->base.is_affine)
+		OUT_VERTEX_F(1);
+}
+
+fastcall static void
+gen6_emit_composite_spans_solid(struct sna *sna,
+				const struct sna_composite_spans_op *op,
+				const BoxRec *box,
+				float opacity)
+{
+	OUT_VERTEX(box->x2, box->y2);
+	OUT_VERTEX_F(1); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y2);
+	OUT_VERTEX_F(0); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y1);
+	OUT_VERTEX_F(0); OUT_VERTEX_F(0);
+	OUT_VERTEX_F(opacity); OUT_VERTEX_F(0);
+}
+
+fastcall static void
+gen6_emit_composite_spans_affine(struct sna *sna,
+				 const struct sna_composite_spans_op *op,
+				 const BoxRec *box,
+				 float opacity)
+{
+	OUT_VERTEX(box->x2, box->y2);
+	gen6_emit_composite_texcoord_affine(sna, &op->base.src,
+					    box->x2, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y2);
+	gen6_emit_composite_texcoord_affine(sna, &op->base.src,
+					    box->x1, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y1);
+	gen6_emit_composite_texcoord_affine(sna, &op->base.src,
+					    box->x1, box->y1);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(0);
+}
+
+fastcall static void
+gen6_render_composite_spans_box(struct sna *sna,
+				const struct sna_composite_spans_op *op,
+				const BoxRec *box, float opacity)
+{
+	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
+	     __FUNCTION__,
+	     op->base.src.offset[0], op->base.src.offset[1],
+	     opacity,
+	     op->base.dst.x, op->base.dst.y,
+	     box->x1, box->y1,
+	     box->x2 - box->x1,
+	     box->y2 - box->y1));
+
+	if (gen6_get_rectangles(sna, &op->base, 1) == 0) {
+		gen6_emit_composite_state(sna, &op->base);
+		gen6_get_rectangles(sna, &op->base, 1);
+	}
+
+	op->prim_emit(sna, op, box, opacity);
+}
+
+static void
+gen6_render_composite_spans_boxes(struct sna *sna,
+				  const struct sna_composite_spans_op *op,
+				  const BoxRec *box, int nbox,
+				  float opacity)
+{
+	DBG(("%s: nbox=%d, src=+(%d, %d), opacity=%f, dst=+(%d, %d)\n",
+	     __FUNCTION__, nbox,
+	     op->base.src.offset[0], op->base.src.offset[1],
+	     opacity,
+	     op->base.dst.x, op->base.dst.y));
+
+	do {
+		int nbox_this_time;
+
+		nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
+		if (nbox_this_time == 0) {
+			gen6_emit_composite_state(sna, &op->base);
+			nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
+		}
+		nbox -= nbox_this_time;
+
+		do {
+			DBG(("  %s: (%d, %d) x (%d, %d)\n", __FUNCTION__,
+			     box->x1, box->y1,
+			     box->x2 - box->x1,
+			     box->y2 - box->y1));
+
+			op->prim_emit(sna, op, box++, opacity);
+		} while (--nbox_this_time);
+	} while (nbox);
+}
+
+fastcall static void
+gen6_render_composite_spans_done(struct sna *sna,
+				 const struct sna_composite_spans_op *op)
+{
+	gen6_vertex_flush(sna);
+	_kgem_set_mode(&sna->kgem, KGEM_RENDER);
+
+	DBG(("%s()\n", __FUNCTION__));
+
+	sna_render_composite_redirect_done(sna, &op->base);
+	if (op->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+}
+
+static Bool
+gen6_render_composite_spans(struct sna *sna,
+			    uint8_t op,
+			    PicturePtr src,
+			    PicturePtr dst,
+			    int16_t src_x,  int16_t src_y,
+			    int16_t dst_x,  int16_t dst_y,
+			    int16_t width,  int16_t height,
+			    struct sna_composite_spans_op *tmp)
+{
+	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
+	     width, height, sna->kgem.ring));
+
+	if (op >= ARRAY_SIZE(gen6_blend_op))
+		return FALSE;
+
+	if (need_tiling(sna, width, height))
+		return FALSE;
+
+	tmp->base.op = op;
+	if (!gen6_composite_set_target(&tmp->base, dst))
+		return FALSE;
+
+	if (tmp->base.dst.width > 8192 || tmp->base.dst.height > 8192) {
+		if (!sna_render_composite_redirect(sna, &tmp->base,
+						   dst_x, dst_y, width, height))
+			return FALSE;
+	}
+
+	switch (gen6_composite_picture(sna, src, &tmp->base.src,
+				       src_x, src_y,
+				       width, height,
+				       dst_x, dst_y)) {
+	case -1:
+		goto cleanup_dst;
+	case 0:
+		gen6_composite_solid_init(sna, &tmp->base.src, 0);
+	case 1:
+		gen6_composite_channel_convert(&tmp->base.src);
+		break;
+	}
+
+	tmp->base.is_affine = tmp->base.src.is_affine;
+	tmp->base.has_component_alpha = FALSE;
+	tmp->base.need_magic_ca_pass = FALSE;
+
+	gen6_composite_alpha_gradient_init(sna, &tmp->base.mask);
+
+	tmp->prim_emit = gen6_emit_composite_spans_primitive;
+	if (tmp->base.src.is_solid)
+		tmp->prim_emit = gen6_emit_composite_spans_solid;
+	else if (tmp->base.is_affine)
+		tmp->prim_emit = gen6_emit_composite_spans_affine;
+	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
+	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
+
+	tmp->base.u.gen6.wm_kernel =
+		gen6_choose_composite_kernel(tmp->base.op,
+					     TRUE, FALSE,
+					     tmp->base.is_affine);
+	tmp->base.u.gen6.nr_surfaces = 3;
+	tmp->base.u.gen6.nr_inputs = 2;
+	tmp->base.u.gen6.ve_id = 1 << 1 | tmp->base.is_affine;
+
+	tmp->box   = gen6_render_composite_spans_box;
+	tmp->boxes = gen6_render_composite_spans_boxes;
+	tmp->done  = gen6_render_composite_spans_done;
+
+	if (!kgem_check_bo(&sna->kgem,
+			   tmp->base.dst.bo, tmp->base.src.bo,
+			   NULL))
+		kgem_submit(&sna->kgem);
+
+	if (kgem_bo_is_dirty(tmp->base.src.bo))
+		kgem_emit_flush(&sna->kgem);
+
+	gen6_emit_composite_state(sna, &tmp->base);
+	gen6_align_vertex(sna, &tmp->base);
+	return TRUE;
+
+cleanup_dst:
+	if (tmp->base.redirect.real_bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
+	return FALSE;
+}
+#endif
+
 static void
 gen6_emit_copy_state(struct sna *sna,
 		     const struct sna_composite_op *op)
@@ -3052,6 +3352,9 @@ Bool gen6_render_init(struct sna *sna)
 	sna->kgem.context_switch = gen6_render_context_switch;
 
 	sna->render.composite = gen6_render_composite;
+#if !NO_COMPOSITE_SPANS
+	sna->render.composite_spans = gen6_render_composite_spans;
+#endif
 	sna->render.video = gen6_render_video;
 
 	sna->render.copy_boxes = gen6_render_copy_boxes;
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index fe6d5b8..f05063d 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -116,4 +116,10 @@ static inline Bool sna_blt_compare_depth(DrawablePtr src, DrawablePtr dst)
 	return FALSE;
 }
 
+static inline struct kgem_bo *
+sna_render_get_alpha_gradient(struct sna *sna)
+{
+	return kgem_bo_reference(sna->render.alpha_cache.cache_bo);
+}
+
 #endif /* SNA_RENDER_INLINE_H */
commit 5dba7028cd5024282b601c41ddfc9081eaa1d189
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 4 11:24:03 2011 +0000

    sna: Make sure the alpha map is accessible by the CPU on fallbacks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 7c599be..725a3cf 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -493,10 +493,18 @@ fallback:
 	     width, height));
 
 	dst_move_area_to_cpu(dst, op, &region.extents);
-	if (src->pDrawable)
+	if (dst->alphaMap)
+		sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, true);
+	if (src->pDrawable) {
 		sna_drawable_move_to_cpu(src->pDrawable, false);
-	if (mask && mask->pDrawable)
+		if (src->alphaMap)
+			sna_drawable_move_to_cpu(src->alphaMap->pDrawable, false);
+	}
+	if (mask && mask->pDrawable) {
 		sna_drawable_move_to_cpu(mask->pDrawable, false);
+		if (mask->alphaMap)
+			sna_drawable_move_to_cpu(mask->alphaMap->pDrawable, false);
+	}
 
 	DBG(("%s: fallback -- fbCompposite\n", __FUNCTION__));
 	fbComposite(op, src, mask, dst,
@@ -733,6 +741,8 @@ sna_composite_rectangles(CARD8		 op,
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
 	sna_drawable_move_region_to_cpu(&pixmap->drawable, &region, true);
+	if (dst->alphaMap)
+		sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, true);
 
 	if (op == PictOpSrc || op == PictOpClear) {
 		PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 7c4b73f..2a16299 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -941,8 +941,13 @@ glyphs_fallback(CARD8 op,
 		return;
 
 	sna_drawable_move_region_to_cpu(dst->pDrawable, &region, true);
-	if (src->pDrawable)
+	if (dst->alphaMap)
+		sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, true);
+	if (src->pDrawable) {
 		sna_drawable_move_to_cpu(src->pDrawable, false);
+		if (src->alphaMap)
+			sna_drawable_move_to_cpu(src->alphaMap->pDrawable, false);
+	}
 	RegionTranslate(&region, -dst->pDrawable->x, -dst->pDrawable->y);
 
 	dst_image = image_from_pict(dst, TRUE, &x, &y);
commit 70c9e70f35af21de2b5b56d72f2a9e7fe9ed49d6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 4 10:19:21 2011 +0000

    sna: Don't reuse partial vmapped bo
    
    A fun use after free.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index abf7e82..e6df36f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -766,7 +766,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 		}
 
 		/* transfer the handle to a minimum bo */
-		if (bo->base.refcnt == 1) {
+		if (bo->base.refcnt == 1 && !bo->base.sync) {
 			struct kgem_bo *base = malloc(sizeof(*base));
 			if (base) {
 				memcpy(base, &bo->base, sizeof (*base));

