xf86-video-intel: 4 commits - configure.ac src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_dri.c src/sna/sna.h src/sna/sna_render.c src/sna/sna_trapezoids.c

Chris Wilson ickle at kemper.freedesktop.org
Thu Jun 7 16:27:18 PDT 2012


 configure.ac             |   11 ++++-
 src/sna/kgem.c           |    5 ++
 src/sna/sna.h            |    2 
 src/sna/sna_accel.c      |   37 +++++++++++-------
 src/sna/sna_dri.c        |    6 +-
 src/sna/sna_render.c     |   56 +++++++++++----------------
 src/sna/sna_trapezoids.c |   95 ++++++++++++++---------------------------------
 7 files changed, 95 insertions(+), 117 deletions(-)

New commits:
commit a62ad4e80722db187766c83a16fa84ec236cc5eb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 23:37:59 2012 +0100

    sna: Skip flushing the active queue if there is not a suitable bo pending
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3078f49..d2ca995 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2136,6 +2136,11 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			return NULL;
 		}
 
+		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
+			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
+			return NULL;
+		}
+
 		if (!__kgem_throttle_retire(kgem, 0)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
 			return NULL;
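
The shape of that early-out, as a self-contained sketch (list_is_empty(), active() and __kgem_throttle_retire() are the driver's own helpers; the stand-ins below only illustrate the control flow, not the real bucketed cache):

	#include <stddef.h>

	struct list { struct list *next, *prev; };

	static int list_is_empty(const struct list *head)
	{
		return head->next == head;
	}

	/* Sketch: a retire pass is comparatively expensive, so if the
	 * bucket that could satisfy this request is empty there is
	 * nothing worth flushing and we bail out immediately. */
	static void *search_active(struct list *bucket,
				   int (*throttle_retire)(void))
	{
		if (list_is_empty(bucket))
			return NULL;	/* active cache bucket empty */
		if (!throttle_retire())
			return NULL;	/* nothing retired */
		return bucket->next;	/* a candidate bo is now available */
	}
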
commit 318982566bbc7145847bd03601087150eef7a8d8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 21:56:04 2012 +0100

    sna/dri: Disable experimental code by default
    
    Since these require non-upstream patches to other components, we don't
    want them enabled by default, randomly breaking builds.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index a025521..cb740a7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -197,7 +197,7 @@ AC_MSG_RESULT($accel)
 
 AC_ARG_ENABLE(vmap,
 	      AS_HELP_STRING([--enable-vmap],
-			     [Enable use of vmap [default=no]]),
+			     [Enable use of vmap (experimental) [default=no]]),
 	      [VMAP="$enableval"],
 	      [VMAP=no])
 AM_CONDITIONAL(USE_VMAP, test x$VMAP = xyes)
@@ -205,6 +205,15 @@ if test "x$VMAP" = xyes; then
 	AC_DEFINE(USE_VMAP,1,[Assume VMAP support])
 fi
 
+AC_ARG_ENABLE(async-swap,
+	      AS_HELP_STRING([--enable-async-swap],
+			     [Enable use of asynchronous swaps (experimental) [default=no]]),
+	      [ASYNC_SWAP="$enableval"],
+	      [ASYNC_SWAP=no])
+AM_CONDITIONAL(USE_ASYNC_SWAP, test x$ASYNC_SWAP = xyes)
+if test "x$ASYNC_SWAP" = xyes; then
+	AC_DEFINE(USE_ASYNC_SWAP,1,[Assume asynchronous swap support])
+fi
 
 AC_ARG_ENABLE(debug,
 	      AS_HELP_STRING([--enable-debug],
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index da49e12..b9f9b85 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1232,7 +1232,7 @@ static void sna_dri_flip_event(struct sna *sna,
 			sna_dri_frame_event_info_free(flip);
 		break;
 
-#if DRI2INFOREC_VERSION >= 7
+#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
 	case DRI2_ASYNC_FLIP:
 		DBG(("%s: async swap flip completed on pipe %d, pending? %d, new? %d\n",
 		     __FUNCTION__, flip->pipe,
@@ -1700,7 +1700,7 @@ blit_fallback:
 	return TRUE;
 }
 
-#if DRI2INFOREC_VERSION >= 7
+#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
 static void
 sna_dri_exchange_attachment(DRI2BufferPtr front, DRI2BufferPtr back)
 {
@@ -2070,7 +2070,7 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	info.ReuseBufferNotify = NULL;
 #endif
 
-#if DRI2INFOREC_VERSION >= 7
+#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
 	info.version = 7;
 	info.AsyncSwap = sna_dri_async_swap;
 #endif
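
With both options off by default, the experimental paths must now be requested explicitly at configure time, e.g. (assuming a standard autotools build):

	./configure --enable-vmap --enable-async-swap

which defines USE_VMAP and USE_ASYNC_SWAP in config.h for the #if guards above.
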
commit ebf84b8e572b5cb1a509000d412dfa5be3d1aca3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 20:15:53 2012 +0100

    sna/trapezoids: Micro-optimise cell allocation
    
    The pool is a fixed size so we can delete the generic code to handle
    variable sizes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 780a0fa..a3bdb16 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -163,8 +163,6 @@ struct quorem {
 
 struct _pool_chunk {
 	size_t size;
-	size_t capacity;
-
 	struct _pool_chunk *prev_chunk;
 	/* Actual data starts here.	 Well aligned for pointers. */
 };
@@ -177,9 +175,6 @@ struct pool {
 	struct _pool_chunk *current;
 	struct _pool_chunk *first_free;
 
-	/* The default capacity of a chunk. */
-	size_t default_capacity;
-
 	/* Header for the sentinel chunk.  Directly following the pool
 	 * struct should be some space for embedded elements from which
 	 * the sentinel chunk allocates. */
@@ -292,7 +287,7 @@ struct cell_list {
 	 * allocated from this pool.  */
 	struct {
 		struct pool base[1];
-		struct cell embedded[32];
+		struct cell embedded[256];
 	} cell_pool;
 };
 
@@ -351,42 +346,34 @@ floored_muldivrem(int x, int a, int b)
 	return qr;
 }
 
-static void
-_pool_chunk_init(
-    struct _pool_chunk *p,
-    struct _pool_chunk *prev_chunk,
-    size_t capacity)
+static inline void
+_pool_chunk_init(struct _pool_chunk *p,
+		 struct _pool_chunk *prev_chunk)
 {
 	p->prev_chunk = prev_chunk;
-	p->size = 0;
-	p->capacity = capacity;
+	p->size = sizeof(*p);
 }
 
 static struct _pool_chunk *
-_pool_chunk_create(struct _pool_chunk *prev_chunk, size_t size)
+_pool_chunk_create(struct _pool_chunk *prev_chunk)
 {
+	size_t size = 256*sizeof(struct cell);
 	struct _pool_chunk *p;
-	size_t size_with_head = size + sizeof(struct _pool_chunk);
-
-	if (size_with_head < size)
-		return NULL;
 
-	p = malloc(size_with_head);
-	if (p)
-		_pool_chunk_init(p, prev_chunk, size);
+	p = malloc(size + sizeof(struct _pool_chunk));
+	if (unlikely (p == NULL))
+		abort();
 
+	_pool_chunk_init(p, prev_chunk);
 	return p;
 }
 
 static void
-pool_init(struct pool *pool,
-	  size_t default_capacity,
-	  size_t embedded_capacity)
+pool_init(struct pool *pool)
 {
 	pool->current = pool->sentinel;
 	pool->first_free = NULL;
-	pool->default_capacity = default_capacity;
-	_pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
+	_pool_chunk_init(pool->sentinel, NULL);
 }
 
 static void
@@ -403,57 +390,39 @@ pool_fini(struct pool *pool)
 		p = pool->first_free;
 		pool->first_free = NULL;
 	} while (NULL != p);
-	pool_init(pool, 0, 0);
 }
 
-/* Satisfy an allocation by first allocating a new large enough chunk
- * and adding it to the head of the pool's chunk list. This function
- * is called as a fallback if pool_alloc() couldn't do a quick
- * allocation from the current chunk in the pool. */
 static void *
-_pool_alloc_from_new_chunk(struct pool *pool, size_t size)
+_pool_alloc_from_new_chunk(struct pool *pool)
 {
 	struct _pool_chunk *chunk;
 	void *obj;
-	size_t capacity;
-
-	/* If the allocation is smaller than the default chunk size then
-	 * try getting a chunk off the free list.  Force alloc of a new
-	 * chunk for large requests. */
-	capacity = size;
-	chunk = NULL;
-	if (size < pool->default_capacity) {
-		capacity = pool->default_capacity;
-		chunk = pool->first_free;
-		if (chunk) {
-			pool->first_free = chunk->prev_chunk;
-			_pool_chunk_init(chunk, pool->current, chunk->capacity);
-		}
-	}
 
-	if (NULL == chunk) {
-		chunk = _pool_chunk_create (pool->current, capacity);
-		if (unlikely (NULL == chunk))
-			return NULL;
+	chunk = pool->first_free;
+	if (chunk) {
+		pool->first_free = chunk->prev_chunk;
+		_pool_chunk_init(chunk, pool->current);
+	} else {
+		chunk = _pool_chunk_create(pool->current);
 	}
 	pool->current = chunk;
 
-	obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
-	chunk->size += size;
+	obj = (unsigned char*)chunk + chunk->size;
+	chunk->size += sizeof(struct cell);
 	return obj;
 }
 
 inline static void *
-pool_alloc(struct pool *pool, size_t size)
+pool_alloc(struct pool *pool)
 {
 	struct _pool_chunk *chunk = pool->current;
 
-	if (size <= chunk->capacity - chunk->size) {
-		void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
-		chunk->size += size;
+	if (chunk->size < 256*sizeof(struct cell)+sizeof(*chunk)) {
+		void *obj = (unsigned char*)chunk + chunk->size;
+		chunk->size += sizeof(struct cell);
 		return obj;
 	} else
-		return _pool_alloc_from_new_chunk(pool, size);
+		return _pool_alloc_from_new_chunk(pool);
 }
 
 static void
@@ -471,7 +440,7 @@ pool_reset(struct pool *pool)
 
 	/* Reset the sentinel as the current chunk. */
 	pool->current = pool->sentinel;
-	pool->sentinel->size = 0;
+	pool->sentinel->size = sizeof(*chunk);
 }
 
 /* Rewinds the cell list's cursor to the beginning.  After rewinding
@@ -485,9 +454,7 @@ cell_list_rewind(struct cell_list *cells)
 static void
 cell_list_init(struct cell_list *cells)
 {
-	pool_init(cells->cell_pool.base,
-		  256*sizeof(struct cell),
-		  sizeof(cells->cell_pool.embedded));
+	pool_init(cells->cell_pool.base);
 	cells->tail.next = NULL;
 	cells->tail.x = INT_MAX;
 	cells->head.x = INT_MIN;
@@ -516,9 +483,7 @@ cell_list_alloc(struct cell_list *cells,
 {
 	struct cell *cell;
 
-	cell = pool_alloc(cells->cell_pool.base, sizeof (struct cell));
-	if (unlikely(NULL == cell))
-		abort();
+	cell = pool_alloc(cells->cell_pool.base);
 
 	cell->next = tail->next;
 	tail->next = cell;
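
Condensed to its essentials, the resulting allocator looks roughly like this (a sketch with simplified names; the hunks above are authoritative, and struct cell here is a stand-in payload):

	#include <stdlib.h>

	struct cell { struct cell *next; int x; };	/* stand-in payload */

	struct chunk {
		size_t size;		/* bytes used, header included */
		struct chunk *prev;
		/* exactly 256 cells follow the header */
	};

	#define CHUNK_BYTES (sizeof(struct chunk) + 256*sizeof(struct cell))

	static struct cell *cell_alloc(struct chunk **current)
	{
		struct chunk *c = *current;
		struct cell *obj;

		if (c->size == CHUNK_BYTES) {	/* chunk full: push a fresh one */
			struct chunk *n = malloc(CHUNK_BYTES);
			if (n == NULL)
				abort();	/* same policy as the commit */
			n->size = sizeof(*n);
			n->prev = c;
			*current = c = n;
		}

		obj = (struct cell *)((char *)c + c->size);
		c->size += sizeof(struct cell);
		return obj;
	}

Because every chunk has the same capacity, the bump-pointer test compares against a compile-time constant, and the per-chunk capacity field, the overflow check and the NULL handling in every caller all disappear.
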
commit 0a25fc68c5cd82cad4b99b0f2357f430c8783c3f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 19:13:09 2012 +0100

    sna: Actually create Y-tiled source pixmaps
    
    An inconsistency highlighted by 7c51cabaecac revealed that we had a
    mismatch between the check in move_to_gpu() and how we created the
    pixmap. This mismatch resulted in us creating and uploading tiled
    pixmaps for single-shot textures, and the increased aperture pressure
    was causing a regression in firefox-fishbowl on pnv, for example.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index f8ec796..6aa54d1 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -270,7 +270,6 @@ struct sna {
 		struct gen7_render_state gen7;
 	} render_state;
 	uint32_t have_render;
-	uint32_t default_tiling;
 
 	Bool directRenderingOpen;
 	char *deviceName;
@@ -418,6 +417,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 #define MOVE_READ 0x2
 #define MOVE_INPLACE_HINT 0x4
 #define MOVE_ASYNC_HINT 0x8
+#define MOVE_SOURCE_HINT 0x10
 bool must_check _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4a7d55e..0840056 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -59,6 +59,8 @@
 #define FORCE_FALLBACK 0
 #define FORCE_FLUSH 0
 
+#define DEFAULT_TILING I915_TILING_X
+
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
@@ -442,7 +444,8 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
 	return true;
 }
 
-static inline uint32_t default_tiling(PixmapPtr pixmap)
+static inline uint32_t default_tiling(PixmapPtr pixmap,
+				      uint32_t tiling)
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -471,20 +474,21 @@ static inline uint32_t default_tiling(PixmapPtr pixmap)
 		return I915_TILING_Y;
 	}
 
-	return sna->default_tiling;
+	return tiling;
 }
 
-constant static uint32_t sna_pixmap_choose_tiling(PixmapPtr pixmap)
+constant static uint32_t sna_pixmap_choose_tiling(PixmapPtr pixmap,
+						  uint32_t tiling)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	uint32_t tiling, bit;
+	uint32_t bit;
 
 	/* Use tiling by default, but disable per user request */
 	if (pixmap->usage_hint == SNA_CREATE_FB) {
 		tiling = -I915_TILING_X;
 		bit = SNA_TILING_FB;
 	} else {
-		tiling = default_tiling(pixmap);
+		tiling = default_tiling(pixmap, tiling);
 		bit = SNA_TILING_2D;
 	}
 	if ((sna->tiling & (1 << bit)) == 0)
@@ -924,7 +928,7 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 			       pixmap->drawable.width,
 			       pixmap->drawable.height,
 			       pixmap->drawable.bitsPerPixel,
-			       sna_pixmap_choose_tiling(pixmap),
+			       sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING),
 			       CREATE_GTT_MAP | CREATE_INACTIVE);
 
 	return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo);
@@ -1396,7 +1400,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		}
 
 		if (priv->gpu_bo == NULL && priv->stride &&
-		    sna_pixmap_choose_tiling(pixmap) != I915_TILING_NONE &&
+		    sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING) != I915_TILING_NONE &&
 		    region_inplace(sna, pixmap, region, priv) &&
 		    sna_pixmap_create_mappable_gpu(pixmap)) {
 			pixmap->devPrivate.ptr =
@@ -1833,7 +1837,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	}
 
 	if (priv->gpu_bo == NULL) {
-		unsigned create;
+		unsigned create, tiling;
 
 		create = 0;
 		if (priv->cpu_damage)
@@ -1841,11 +1845,14 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 		if (pixmap->usage_hint == SNA_CREATE_FB)
 			create |= CREATE_EXACT | CREATE_SCANOUT;
 
+		tiling = (flags & MOVE_SOURCE_HINT) ? I915_TILING_Y : DEFAULT_TILING;
+
 		priv->gpu_bo = kgem_create_2d(&sna->kgem,
 					      pixmap->drawable.width,
 					      pixmap->drawable.height,
 					      pixmap->drawable.bitsPerPixel,
-					      sna_pixmap_choose_tiling(pixmap),
+					      sna_pixmap_choose_tiling(pixmap,
+								       tiling),
 					      create);
 		if (priv->gpu_bo == NULL)
 			return false;
@@ -2307,7 +2314,8 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 					      pixmap->drawable.width,
 					      pixmap->drawable.height,
 					      pixmap->drawable.bitsPerPixel,
-					      sna_pixmap_choose_tiling(pixmap),
+					      sna_pixmap_choose_tiling(pixmap,
+								       DEFAULT_TILING),
 					      mode);
 		if (priv->gpu_bo == NULL)
 			return NULL;
@@ -2401,7 +2409,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 					       pixmap->drawable.width,
 					       pixmap->drawable.height,
 					       pixmap->drawable.bitsPerPixel,
-					       sna_pixmap_choose_tiling(pixmap),
+					       sna_pixmap_choose_tiling(pixmap,
+									DEFAULT_TILING),
 					       (priv->cpu_damage && priv->cpu_bo == NULL) ? CREATE_GTT_MAP | CREATE_INACTIVE : 0);
 		}
 		if (priv->gpu_bo == NULL) {
@@ -3387,7 +3396,7 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 		return FALSE;
 
 	if (priv->cpu_bo) {
-		if (sna_pixmap_choose_tiling(pixmap) == I915_TILING_NONE)
+		if (sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING) == I915_TILING_NONE)
 			return FALSE;
 
 		return (priv->source_count++-SOURCE_BIAS) * w*h >=
@@ -3636,7 +3645,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (dst_priv->gpu_bo == NULL &&
 	    ((dst_priv->cpu_damage == NULL && copy_use_gpu_bo(sna, dst_priv, &region)) ||
 	     (src_priv && (src_priv->gpu_bo != NULL || (src_priv->cpu_bo && kgem_bo_is_busy(src_priv->cpu_bo)))))) {
-		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap);
+		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap,
+							   DEFAULT_TILING);
 
 		DBG(("%s: create dst GPU bo for upload\n", __FUNCTION__));
 
@@ -12362,7 +12372,6 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 
 	backend = "no";
 	sna->have_render = false;
-	sna->default_tiling = I915_TILING_X;
 	no_render_init(sna);
 
 #if !DEBUG_NO_RENDER
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index e28823f..8d61f40 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -388,49 +388,38 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 	}
 
 	if (DBG_FORCE_UPLOAD < 0)
-		migrate = true;
+		return sna_pixmap_force_to_gpu(pixmap,
+					       MOVE_SOURCE_HINT | MOVE_READ);
 
 	w = box->x2 - box->x1;
 	h = box->y2 - box->y1;
 	if (w == pixmap->drawable.width && h == pixmap->drawable.height) {
-		migrate = true;
-		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
-		    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-				       I915_TILING_Y,
-				       pixmap->drawable.width,
-				       pixmap->drawable.height,
-				       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
-			migrate = priv->source_count++ > SOURCE_BIAS;
+		migrate = priv->source_count++ > SOURCE_BIAS;
 
 		DBG(("%s: migrating whole pixmap (%dx%d) for source (%d,%d),(%d,%d), count %d? %d\n",
 		     __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height,
 		     box->x1, box->y1, box->x2, box->y2, priv->source_count,
 		     migrate));
-	} else {
-		/* ignore tiny fractions */
-		if (64*w*h > pixmap->drawable.width * pixmap->drawable.height) {
-			count = priv->source_count++;
-			if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
-			    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-					       I915_TILING_Y,
-					       pixmap->drawable.width,
-					       pixmap->drawable.height,
-					       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
-				count -= SOURCE_BIAS;
-
-			DBG(("%s: migrate box (%d, %d), (%d, %d)? source count=%d, fraction=%d/%d [%d]\n",
-			     __FUNCTION__,
-			     box->x1, box->y1, box->x2, box->y2,
-			     count, w*h,
-			     pixmap->drawable.width * pixmap->drawable.height,
-			     pixmap->drawable.width * pixmap->drawable.height / (w*h)));
-
-			migrate =  count*w*h > pixmap->drawable.width * pixmap->drawable.height;
-		}
+	} else if (kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
+				      I915_TILING_Y, w, h,
+				      pixmap->drawable.bitsPerPixel) != I915_TILING_NONE) {
+		count = priv->source_count++;
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
+			count -= SOURCE_BIAS;
+
+		DBG(("%s: migrate box (%d, %d), (%d, %d)? source count=%d, fraction=%d/%d [%d]\n",
+		     __FUNCTION__,
+		     box->x1, box->y1, box->x2, box->y2,
+		     count, w*h,
+		     pixmap->drawable.width * pixmap->drawable.height,
+		     pixmap->drawable.width * pixmap->drawable.height / (w*h)));
+
+		migrate = count*w*h > pixmap->drawable.width * pixmap->drawable.height;
 	}
 
-	if (migrate && !sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+	if (migrate && !sna_pixmap_force_to_gpu(pixmap,
+						MOVE_SOURCE_HINT | MOVE_READ))
 		return NULL;
 
 	return priv->gpu_bo;
@@ -680,7 +669,7 @@ static int sna_render_picture_downsample(struct sna *sna,
 	DBG(("%s: creating temporary GPU bo %dx%d\n",
 	     __FUNCTION__, width, height));
 
-	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_SOURCE_HINT | MOVE_READ))
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, ow, oh,
 						dst_x, dst_y);
@@ -943,7 +932,8 @@ sna_render_picture_partial(struct sna *sna,
 
 		bo = sna_pixmap(pixmap)->cpu_bo;
 	} else {
-		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+		if (!sna_pixmap_force_to_gpu(pixmap,
+					     MOVE_SOURCE_HINT | MOVE_READ))
 			return 0;
 
 		bo = sna_pixmap(pixmap)->gpu_bo;
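
The net effect is that a pixmap migrated purely for sampling now asks for Y-tiling, while render targets keep the X-tiled default, matching the check in move_to_gpu(). As an illustration only (stand-in constants; the real values come from the i915 headers and the sna.h hunk above):

	#define MOVE_SOURCE_HINT 0x10	/* from the sna.h hunk above */

	enum tiling { TILING_NONE, TILING_X, TILING_Y };	/* stand-ins for I915_TILING_* */

	static enum tiling tiling_for_move(unsigned flags)
	{
		/* Single-shot textures are only ever sampled, so prefer
		 * Y-tiling; everything else keeps DEFAULT_TILING (X). */
		return (flags & MOVE_SOURCE_HINT) ? TILING_Y : TILING_X;
	}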

