xf86-video-intel: 11 commits - src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_composite.c src/sna/sna_glyphs.c src/sna/sna.h src/sna/sna_io.c src/sna/sna_render.c src/sna/sna_trapezoids.c

Chris Wilson ickle at kemper.freedesktop.org
Thu Jan 5 05:12:02 PST 2012


 src/sna/kgem.c           |    4 +-
 src/sna/sna.h            |   16 +++++++-
 src/sna/sna_accel.c      |   91 +++++++++++++++++++++++++++++------------------
 src/sna/sna_composite.c  |   10 ++---
 src/sna/sna_glyphs.c     |    7 +++
 src/sna/sna_io.c         |   61 +++++++++++++++++++++++++++++++
 src/sna/sna_render.c     |   11 ++++-
 src/sna/sna_trapezoids.c |   20 +++++++---
 8 files changed, 169 insertions(+), 51 deletions(-)

New commits:
commit f5d02d90f3349f33341954fc55824d12a7829e6c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 12:56:21 2012 +0000

    sna: Avoid switching rings for uploading data
    
    On systems that incur painful overhead for ring switches, it is usually
    better to create a large buffer and perform a sparse copy on the same
    ring than to create a compact buffer and use the BLT ring.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
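
The hunk below computes the bounding extents of all the boxes to be written,
allocates a single staging buffer of that size with kgem_create_buffer, fills
it box by box with memcpy_blt, and hands the whole thing to copy_boxes so
everything stays on the render ring (the staging pitch is just the extents
width in bytes, rounded up by ALIGN(tmp.devKind, 4)). A minimal standalone
sketch of the extents step; "struct box" is a stand-in for the X server's
BoxRec:

    /* Hedged sketch: union of nbox boxes into one bounding rectangle, as
     * used below to size the staging buffer.  "struct box" mirrors the
     * x1/y1/x2/y2 layout of BoxRec but is purely illustrative. */
    struct box { int x1, y1, x2, y2; };

    static struct box bounding_extents(const struct box *box, int nbox)
    {
        struct box extents = box[0];
        int n;

        for (n = 1; n < nbox; n++) {
            if (box[n].x1 < extents.x1) extents.x1 = box[n].x1;
            if (box[n].y1 < extents.y1) extents.y1 = box[n].y1;
            if (box[n].x2 > extents.x2) extents.x2 = box[n].x2;
            if (box[n].y2 > extents.y2) extents.y2 = box[n].y2;
        }
        return extents;
    }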

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 6cdc3fa..494a3bf 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -320,6 +320,7 @@ void sna_write_boxes(struct sna *sna,
 
 	if (DEBUG_NO_IO || kgem->wedged || dst_bo->tiling == I915_TILING_Y ||
 	    !kgem_bo_map_will_stall(kgem, dst_bo)) {
+fallback:
 		write_boxes_inplace(kgem,
 				    src, stride, bpp, src_dx, src_dy,
 				    dst_bo, dst_dx, dst_dy,
@@ -327,6 +328,66 @@ void sna_write_boxes(struct sna *sna,
 		return;
 	}
 
+	/* Try to avoid switching rings... */
+	if (kgem->ring == KGEM_RENDER) {
+		PixmapRec tmp;
+		BoxRec extents;
+
+		extents = box[0];
+		for (n = 1; n < nbox; n++) {
+			if (box[n].x1 < extents.x1)
+				extents.x1 = box[n].x1;
+			if (box[n].x2 > extents.x2)
+				extents.x2 = box[n].x2;
+
+			if (box[n].y1 < extents.y1)
+				extents.y1 = box[n].y1;
+			if (box[n].y2 > extents.y2)
+				extents.y2 = box[n].y2;
+		}
+
+		tmp.drawable.width = extents.x2 - extents.x1;
+		tmp.drawable.height = extents.y2 - extents.y1;
+		tmp.drawable.depth = bpp;
+		tmp.drawable.bitsPerPixel = bpp;
+		tmp.devPrivate.ptr = NULL;
+
+		tmp.devKind = tmp.drawable.width * bpp / 8;
+		tmp.devKind = ALIGN(tmp.devKind, 4);
+
+		src_bo = kgem_create_buffer(kgem,
+					    tmp.drawable.height * tmp.devKind,
+					    KGEM_BUFFER_WRITE,
+					    &ptr);
+		if (!src_bo)
+			goto fallback;
+
+		src_bo->pitch = tmp.devKind;
+
+		for (n = 0; n < nbox; n++) {
+			memcpy_blt(src, ptr, bpp,
+				   stride, tmp.devKind,
+				   box[n].x1 + src_dx,
+				   box[n].y1 + src_dy,
+				   box[n].x1 - extents.x1,
+				   box[n].y1 - extents.y1,
+				   box[n].x2 - box[n].x1,
+				   box[n].y2 - box[n].y1);
+		}
+
+		n = sna->render.copy_boxes(sna, GXcopy,
+					   &tmp, src_bo, -extents.x1, -extents.y1,
+					   &tmp, dst_bo, dst_dx, dst_dy,
+					   box, nbox);
+
+		kgem_bo_destroy(&sna->kgem, src_bo);
+
+		if (!n)
+			goto fallback;
+
+		return;
+	}
+
 	cmd = XY_SRC_COPY_BLT_CMD;
 	if (bpp == 32)
 		cmd |= BLT_WRITE_ALPHA | BLT_WRITE_RGB;
commit a42111ff94911d5fad34e6dbac8aa4a11b2d8032
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 12:12:22 2012 +0000

    sna/trapezoids: Try creating the trapezoid mask in place if not using spans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
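
Two details in the hunks below are worth calling out: the in-place path now
bails out for PictOpIn/PictOpAdd when the destination is on the GPU (unless we
are already in the fallback pass), and the CPU-migration flags depend on the
op, since PictOpSrc overwrites the destination while the other ops blend with
it. A sketch of that flag choice, with illustrative constants (the real MOVE_*
values live in sna.h):

    /* Hedged sketch: PictOpSrc overwrites every destination pixel it
     * touches, so the destination only needs to be writable on the CPU;
     * In/Add read-modify-write and so must also be readable.  The enum
     * values here are illustrative, not the driver's definitions. */
    enum { MOVE_READ = 1 << 0, MOVE_WRITE = 1 << 1 };
    enum pict_op { OP_SRC, OP_IN, OP_ADD };   /* stand-ins for PictOp* */

    static unsigned cpu_move_flags(enum pict_op op)
    {
        switch (op) {
        case OP_SRC:
            return MOVE_WRITE;                 /* pure overwrite */
        default:
            return MOVE_WRITE | MOVE_READ;     /* read-modify-write */
        }
    }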

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 2a48e3d..42cebb9 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3213,7 +3213,8 @@ tor_blt_add_clipped_mono(struct sna *sna,
 static bool
 trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		       PictFormatPtr maskFormat, INT16 src_x, INT16 src_y,
-		       int ntrap, xTrapezoid *traps)
+		       int ntrap, xTrapezoid *traps,
+		       bool fallback)
 {
 	struct tor tor;
 	struct inplace inplace;
@@ -3246,9 +3247,11 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	switch (op) {
-	case PictOpSrc:
 	case PictOpIn:
 	case PictOpAdd:
+		if (!fallback && is_gpu(dst->pDrawable))
+			return false;
+	case PictOpSrc:
 		break;
 	default:
 		DBG(("%s: fallback -- can not perform op [%d] in place\n",
@@ -3265,7 +3268,8 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		do {
 			/* XXX unwind errors? */
 			if (!trapezoid_span_inplace(op, src, dst, NULL,
-						    src_x, src_y, 1, traps++))
+						    src_x, src_y, 1, traps++,
+						    fallback))
 				return false;
 		} while (--ntrap);
 		return true;
@@ -3355,7 +3359,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	region.data = NULL;
 	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
-					     MOVE_WRITE))
+					     op == PictOpSrc ? MOVE_WRITE : MOVE_WRITE | MOVE_READ))
 		return true;
 
 	pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -3622,13 +3626,19 @@ sna_composite_trapezoids(CARD8 op,
 				     xSrc, ySrc, ntrap, traps))
 		return;
 
+	if (trapezoid_span_inplace(op, src, dst, maskFormat,
+				   xSrc, ySrc, ntrap, traps,
+				   false))
+		return;
+
 	if (trapezoid_mask_converter(op, src, dst, maskFormat,
 				     xSrc, ySrc, ntrap, traps))
 		return;
 
 fallback:
 	if (trapezoid_span_inplace(op, src, dst, maskFormat,
-				   xSrc, ySrc, ntrap, traps))
+				   xSrc, ySrc, ntrap, traps,
+				   true))
 		return;
 
 	if (trapezoid_span_fallback(op, src, dst, maskFormat,
commit 4c6ccbd46a1681c73382b79a0246f7d8cb097606
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 11:09:01 2012 +0000

    sna: Throttle, throttle, throttle.
    
    This reverts 281425551bdab7eb38ae167a3205b14ae3599c49 as it was causing
    insufferable lag in firefox.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e34ecf0..9ecf3c6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9499,7 +9499,7 @@ static bool sna_accel_flush(struct sna *sna)
 	sna->kgem.busy = !nothing_to_do;
 	kgem_bo_flush(&sna->kgem, priv->gpu_bo);
 	sna->kgem.flush_now = 0;
-	return need_throttle && !sna->kgem.busy;
+	return need_throttle;
 }
 
 static void sna_accel_expire(struct sna *sna)
commit dbb495c3aeef34eab2aabfdfaafb81facdea4100
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 10:34:08 2012 +0000

    sna: Attach to the pixmap for source counting when deferring uploads
    
    If we decide to defer the upload for this instance of the source pixmap,
    mark it as such. Then, if it is used again, we will upload it to a GPU bo
    and hopefully reuse those pixels.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
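
Previously sna_pixmap() returned NULL for a never-attached source pixmap, so
the reuse counter restarted on every operation; attaching a private keeps
source_count alive across uses and lets the pixmap eventually be promoted to a
GPU bo. A sketch of the counting heuristic the hunks rely on (the SOURCE_BIAS
value and struct layout are illustrative):

    /* Hedged sketch: count how often a pixmap is used as a composite
     * source and only upload it to a GPU bo once it has been reused more
     * than SOURCE_BIAS times.  Without a persistent private the counter
     * restarts at zero, which is what this commit fixes. */
    #define SOURCE_BIAS 4   /* illustrative threshold */

    struct source_priv { int source_count; };

    static int should_upload(struct source_priv *priv)
    {
        if (priv == NULL)
            return 0;   /* nothing to count against */
        return priv->source_count++ > SOURCE_BIAS;
    }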

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index f633daa..8355d40 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -323,7 +323,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 				       pixmap->drawable.width,
 				       pixmap->drawable.height,
 				       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE) {
-			priv = sna_pixmap(pixmap);
+			priv = sna_pixmap_attach(pixmap);
 			upload = priv && priv->source_count++ > SOURCE_BIAS;
 		}
 
@@ -340,7 +340,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 		return FALSE;
 
 	count = SOURCE_BIAS;
-	priv = sna_pixmap(pixmap);
+	priv = sna_pixmap_attach(pixmap);
 	if (priv)
 		count = priv->source_count++;
 
commit 292097590ba0f81b7505aedb2a4fbd18021641a5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 10:31:15 2012 +0000

    sna: Try doing a cheap is-contained check before reducing damage
    
    If the pixmap is entirely within the current CPU damage, we can forgo
    reducing either the GPU or CPU damage when checking whether we need to
    upload dirty pixels for a source texture.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
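
The hunk below adds two cheap early-outs before the damage is reduced:
DAMAGE_ALL trivially contains any box, and sna_damage_contains_box__no_reduce
answers without flattening the damage list. That helper's body is not part of
this patch; a conservative sketch of the idea, assuming it must never claim
containment it cannot prove without reducing, might look like:

    /* Hedged sketch: decide containment only from what is already known;
     * return 0 ("unknown") whenever reduction would be needed, so the
     * caller falls through to the full check.  Types are stand-ins, not
     * the driver's struct sna_damage. */
    struct dbox { int x1, y1, x2, y2; };
    struct damage_sketch {
        int all;            /* everything is damaged (DAMAGE_ALL) */
        int exact;          /* extents describe the region exactly */
        struct dbox extents;
    };

    static int contains_box_no_reduce(const struct damage_sketch *d,
                                      const struct dbox *b)
    {
        if (d->all)
            return 1;
        if (!d->exact)
            return 0;   /* would need to reduce to be sure */
        return b->x1 >= d->extents.x1 && b->x2 <= d->extents.x2 &&
               b->y1 >= d->extents.y1 && b->y2 <= d->extents.y2;
    }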

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index faa2730..f633daa 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -374,6 +374,12 @@ _texture_is_cpu(PixmapPtr pixmap, const BoxRec *box)
 	if (!priv->cpu_damage)
 		return FALSE;
 
+	if (priv->cpu_damage->mode == DAMAGE_ALL)
+		return TRUE;
+
+	if (sna_damage_contains_box__no_reduce(priv->cpu_damage, box))
+		return TRUE;
+
 	if (sna_damage_contains_box(priv->gpu_damage, box) != PIXMAN_REGION_OUT)
 		return FALSE;
 
commit 5ddae8bf21067e7d2a031a6b9020c02ab977cfed
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 10:27:37 2012 +0000

    sna: Check the composite-rectangles will be on the GPU before forcibly attaching
    
    If we think that the operation is better performed on the CPU, avoid the
    overhead of manipulating our privates.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 5732047..b1bee98 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -735,6 +735,11 @@ sna_composite_rectangles(CARD8		 op,
 
 	boxes = pixman_region_rectangles(&region, &num_boxes);
 
+	if (too_small(dst->pDrawable)) {
+		DBG(("%s: fallback, dst is too small\n", __FUNCTION__));
+		goto fallback;
+	}
+
 	/* If we going to be overwriting any CPU damage with a subsequent
 	 * operation, then we may as well delete it without moving it
 	 * first to the GPU.
@@ -745,11 +750,6 @@ sna_composite_rectangles(CARD8		 op,
 			sna_damage_subtract(&priv->cpu_damage, &region);
 	}
 
-	if (too_small(dst->pDrawable)) {
-		DBG(("%s: fallback, dst is too small\n", __FUNCTION__));
-		goto fallback;
-	}
-
 	priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL) {
 		DBG(("%s: fallback due to no GPU bo\n", __FUNCTION__));
commit 6ffd3605640bacaf0d744075622463aacf78d797
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 10:25:12 2012 +0000

    sna: Deferred attachment to a pixmap needs to mark the pixmap as entirely dirty
    
    As demonstrated by oversized glyphs and the ensuing chain of catastrophe,
    when attaching our private to a pixmap after its creation we need to mark
    the entire CPU pixmap as dirty, since we never tracked exactly which bits
    were dirtied.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d08faa4..24a47aa 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2158,7 +2158,7 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 	struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
 
 	if (list_is_empty(&io->base.list))
-		    return;
+		return;
 
 	if (bo->size == io->used) {
 		assert(io->base.exec == NULL);
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 1e6a803..a1737c1 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -422,7 +422,17 @@ static inline Bool pixmap_is_scanout(PixmapPtr pixmap)
 	return pixmap == screen->GetScreenPixmap(screen);
 }
 
-struct sna_pixmap *sna_pixmap_attach(PixmapPtr pixmap);
+struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap);
+inline static struct sna_pixmap *sna_pixmap_attach(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv;
+
+	priv = sna_pixmap(pixmap);
+	if (priv)
+		return priv;
+
+	return _sna_pixmap_attach(pixmap);
+}
 
 PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
 				   int width, int height, int depth);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8b450b6..e34ecf0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -410,8 +410,8 @@ _sna_pixmap_reset(PixmapPtr pixmap)
 	return _sna_pixmap_init(priv, pixmap);
 }
 
-static struct sna_pixmap *_sna_pixmap_attach(struct sna *sna,
-					     PixmapPtr pixmap)
+static struct sna_pixmap *__sna_pixmap_attach(struct sna *sna,
+					      PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv;
 
@@ -423,16 +423,18 @@ static struct sna_pixmap *_sna_pixmap_attach(struct sna *sna,
 	return _sna_pixmap_init(priv, pixmap);
 }
 
-struct sna_pixmap *sna_pixmap_attach(PixmapPtr pixmap)
+struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
 {
-	struct sna *sna;
+	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv;
 
-	priv = sna_pixmap(pixmap);
-	if (priv)
-		return priv;
+	DBG(("%s: serial=%ld, %dx%d, usage=%d\n",
+	     __FUNCTION__,
+	     pixmap->drawable.serialNumber,
+	     pixmap->drawable.width,
+	     pixmap->drawable.height,
+	     pixmap->usage_hint));
 
-	sna = to_sna_from_pixmap(pixmap);
 	switch (pixmap->usage_hint) {
 	case CREATE_PIXMAP_USAGE_GLYPH_PICTURE:
 #if FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER
@@ -454,7 +456,17 @@ struct sna_pixmap *sna_pixmap_attach(PixmapPtr pixmap)
 		break;
 	}
 
-	return _sna_pixmap_attach(sna, pixmap);
+	priv = __sna_pixmap_attach(sna, pixmap);
+	if (priv == NULL)
+		return NULL;
+
+	DBG(("%s: created priv and marking all cpu damaged\n", __FUNCTION__));
+
+	sna_damage_all(&priv->cpu_damage,
+		       pixmap->drawable.width,
+		       pixmap->drawable.height);
+
+	return priv;
 }
 
 static inline PixmapPtr
@@ -468,6 +480,13 @@ create_pixmap(struct sna *sna, ScreenPtr screen,
 	if (pixmap == NullPixmap)
 		return NullPixmap;
 
+	DBG(("%s: serial=%ld, usage=%d, %dx%d\n",
+	     __FUNCTION__,
+	     pixmap->drawable.serialNumber,
+	     pixmap->usage_hint,
+	     pixmap->drawable.width,
+	     pixmap->drawable.height));
+
 	assert(sna_private_index.offset == 0);
 	dixSetPrivate(&pixmap->devPrivates, &sna_private_index, sna);
 	return pixmap;
@@ -518,6 +537,13 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 		pixmap->drawable.bitsPerPixel = bpp;
 		pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER;
 
+		DBG(("%s: serial=%ld, usage=%d, %dx%d\n",
+		     __FUNCTION__,
+		     pixmap->drawable.serialNumber,
+		     pixmap->usage_hint,
+		     pixmap->drawable.width,
+		     pixmap->drawable.height));
+
 		priv = _sna_pixmap_reset(pixmap);
 	} else {
 		pixmap = create_pixmap(sna, screen, 0, 0, depth,
@@ -530,7 +556,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 		pixmap->drawable.depth = depth;
 		pixmap->drawable.bitsPerPixel = bpp;
 
-		priv = _sna_pixmap_attach(sna, pixmap);
+		priv = __sna_pixmap_attach(sna, pixmap);
 		if (!priv) {
 			fbDestroyPixmap(pixmap);
 			return NullPixmap;
@@ -610,7 +636,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		if (pixmap == NullPixmap)
 			return NullPixmap;
 
-		sna_pixmap_attach(pixmap);
+		__sna_pixmap_attach(sna, pixmap);
 	} else {
 		struct sna_pixmap *priv;
 
@@ -623,7 +649,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		pixmap->devKind = pad;
 		pixmap->devPrivate.ptr = NULL;
 
-		priv = _sna_pixmap_attach(sna, pixmap);
+		priv = __sna_pixmap_attach(sna, pixmap);
 		if (priv == NULL) {
 			free(pixmap);
 			return create_pixmap(sna, screen,
@@ -1363,6 +1389,11 @@ sna_pixmap_create_upload(ScreenPtr screen,
 		pixmap->usage_hint = CREATE_PIXMAP_USAGE_SCRATCH;
 		pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER;
 		pixmap->refcnt = 1;
+
+		DBG(("%s: serial=%ld, usage=%d\n",
+		     __FUNCTION__,
+		     pixmap->drawable.serialNumber,
+		     pixmap->usage_hint));
 	} else {
 		pixmap = create_pixmap(sna, screen, 0, 0, depth,
 				       CREATE_PIXMAP_USAGE_SCRATCH);
@@ -1410,19 +1441,11 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	DBG(("%s(pixmap=%p)\n", __FUNCTION__, pixmap));
 
-	priv = sna_pixmap(pixmap);
-	if (priv == NULL) {
-		priv = sna_pixmap_attach(pixmap);
-		if (priv == NULL)
-			return NULL;
-
-		DBG(("%s: created priv and marking all cpu damaged\n",
-		     __FUNCTION__));
-		sna_damage_all(&priv->cpu_damage,
-				 pixmap->drawable.width,
-				 pixmap->drawable.height);
-	}
+	priv = sna_pixmap_attach(pixmap);
+	if (priv == NULL)
+		return NULL;
 
+	/* Unlike move-to-gpu, we ignore wedged and always create the GPU bo */
 	if (priv->gpu_bo == NULL) {
 		struct sna *sna = to_sna_from_pixmap(pixmap);
 		unsigned flags;
@@ -1459,7 +1482,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	BoxPtr box;
 	int n;
 
-	DBG(("%s()\n", __FUNCTION__));
+	DBG(("%s(pixmap=%ld, usage=%d)\n",
+	     __FUNCTION__, pixmap->drawable.serialNumber, pixmap->usage_hint));
 
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL)
@@ -1516,9 +1540,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 						    box, n);
 		if (!ok) {
 			if (n == 1 && !priv->pinned &&
-			    box->x1 <= 0 && box->y1 <= 0 &&
-			    box->x2 >= pixmap->drawable.width &&
-			    box->y2 >= pixmap->drawable.height) {
+			    (box->x2 - box->x1) >= pixmap->drawable.width &&
+			    (box->y2 - box->y1) >= pixmap->drawable.height) {
 				priv->gpu_bo =
 					sna_replace(sna,
 						    pixmap,
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index aa9c160..faa2730 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -505,7 +505,6 @@ sna_render_pixmap_bo(struct sna *sna,
 		 */
 		if (pixmap->usage_hint != CREATE_PIXMAP_USAGE_SCRATCH_HEADER &&
 		    w * pixmap->drawable.bitsPerPixel * h > 8*4096) {
-			priv = sna_pixmap_attach(pixmap);
 			bo = pixmap_vmap(&sna->kgem, pixmap);
 			if (bo)
 				bo = kgem_bo_reference(bo);
commit a09ebe0b6cc66d08e52b57851d9b8a6a1f71df2b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 10:23:25 2012 +0000

    sna: Immediately upload oversized glyphs
    
    We suspect that glyphs, even large ones, will be reused, so the deferred
    upload is counterproductive. Upload them immediately and mark them as
    special creatures for later debugging.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 110eae9..1e6a803 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -705,6 +705,7 @@ memcpy_blt(const void *src, void *dst, int bpp,
 
 #define SNA_CREATE_FB 0x10
 #define SNA_CREATE_SCRATCH 0x11
+#define SNA_CREATE_GLYPH 0x12
 
 inline static bool is_power_of_two(unsigned x)
 {
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 95836fe..209b199 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -307,7 +307,12 @@ glyph_cache(ScreenPtr screen,
 
 	if (glyph->info.width > GLYPH_MAX_SIZE ||
 	    glyph->info.height > GLYPH_MAX_SIZE) {
-		((PixmapPtr)glyph_picture->pDrawable)->usage_hint = 0;
+		PixmapPtr pixmap = (PixmapPtr)glyph_picture->pDrawable;
+		assert(glyph_picture->pDrawable->type == DRAWABLE_PIXMAP);
+		if (pixmap->drawable.depth >= 8) {
+			pixmap->usage_hint = SNA_CREATE_GLYPH;
+			sna_pixmap_force_to_gpu(pixmap, MOVE_READ);
+		}
 		return FALSE;
 	}
 
commit 797b27365dc09a3f84349a33cffbfdc67cba0baa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 10:30:47 2012 +0000

    sna: Don't perform a deferred attachment for vmapping if not supported
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index b4d109c..110eae9 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -572,6 +572,9 @@ static inline struct kgem_bo *pixmap_vmap(struct kgem *kgem, PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv;
 
+	if (!kgem->has_vmap)
+		return NULL;
+
 	if (unlikely(kgem->wedged))
 		return NULL;
 
commit 23e3959e1f603c3ba0bd4f01128290241cf93edf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 04:00:32 2012 +0000

    sna: Use the created cpu-bo for upload and download
    
    As we now explicitly create a CPU bo when wanted, we no longer need to
    spontaneously create vmaps simply for uploading to the GPU bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
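
The hunks below stop conjuring vmap bos on the fly for transfers: the copy now
uses the pixmap's explicitly created CPU bo, and when no such bo exists the
copy_boxes attempt is skipped so the existing memcpy fallback runs. A sketch of
that selection (types are opaque stand-ins, not driver API):

    /* Hedged sketch: use the pixmap's pre-created CPU bo for the blit if
     * it exists; otherwise report failure so the caller takes its normal
     * CPU-copy fallback.  No vmap bo is created here. */
    struct bo;   /* opaque */

    static int copy_with_cpu_bo(struct bo *cpu_bo, struct bo *gpu_bo,
                                int (*copy_boxes)(struct bo *src, struct bo *dst))
    {
        if (cpu_bo == NULL)
            return 0;   /* caller falls back to a CPU copy */
        return copy_boxes(cpu_bo, gpu_bo);
    }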

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 68c10a5..8b450b6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -760,7 +760,7 @@ skip_inplace_map:
 
 			dst_bo = NULL;
 			if (sna->kgem.gen >= 30)
-				dst_bo = pixmap_vmap(&sna->kgem, pixmap);
+				dst_bo = priv->cpu_bo;
 			if (dst_bo)
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1056,7 +1056,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 				dst_bo = NULL;
 				if (sna->kgem.gen >= 30)
-					dst_bo = pixmap_vmap(&sna->kgem, pixmap);
+					dst_bo = priv->cpu_bo;
 				if (dst_bo)
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
@@ -1148,7 +1148,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box)
 		Bool ok = FALSE;
 
 		box = REGION_RECTS(&i);
-		src_bo = pixmap_vmap(&sna->kgem, pixmap);
+		src_bo = priv->cpu_bo;
 		if (src_bo)
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, src_bo, 0, 0,
@@ -1508,7 +1508,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		struct kgem_bo *src_bo;
 		Bool ok = FALSE;
 
-		src_bo = pixmap_vmap(&sna->kgem, pixmap);
+		src_bo = priv->cpu_bo;
 		if (src_bo)
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, src_bo, 0, 0,
@@ -9511,7 +9511,7 @@ sna_pixmap_free_gpu(struct sna *sna, struct sna_pixmap *priv)
 
 			dst_bo = NULL;
 			if (sna->kgem.gen >= 30)
-				dst_bo = pixmap_vmap(&sna->kgem, pixmap);
+				dst_bo = priv->cpu_bo;
 			if (dst_bo)
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
commit 4530b87e445aea30ccbe751699963118ad5672d4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 5 00:01:27 2012 +0000

    sna: Fix assertion as we may want to create an active bo with CPU mappings
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 51b56eb..d08faa4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1552,7 +1552,7 @@ search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags)
 
 	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 		int for_cpu = !!(flags & CREATE_CPU_MAP);
-		assert(use_active == false);
+		assert(for_cpu || use_active == false);
 		list_for_each_entry(bo, &kgem->vma_inactive, vma) {
 			if (IS_CPU_MAP(bo->map) != for_cpu)
 				continue;

