xf86-video-intel: 3 commits - src/sna/blt.c src/sna/kgem.h src/sna/sna_display.c src/sna/sna_dri2.c src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Sat May 7 14:58:57 UTC 2016


 src/sna/blt.c         |  157 ++++++++++++++++++++++++++++++++++++++++++++++++--
 src/sna/kgem.h        |   19 ++----
 src/sna/sna_display.c |  115 +++++-------------------------------
 src/sna/sna_dri2.c    |    1 
 src/sna/sna_render.c  |   73 ++++++++++++++++++-----
 5 files changed, 238 insertions(+), 127 deletions(-)

New commits:
commit 88733a7874f7c9b45da5d612802947a9de12893a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat May 7 15:36:53 2016 +0100

    sna/dri2: Force consideration of the DRI2CopyRegion source as unclean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri2.c b/src/sna/sna_dri2.c
index bb7070e..d3fe18b 100644
--- a/src/sna/sna_dri2.c
+++ b/src/sna/sna_dri2.c
@@ -1246,6 +1246,7 @@ __sna_dri2_copy_region(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 	src_bo = src_priv->bo;
 	assert(src_bo->refcnt);
+	kgem_bo_unclean(&sna->kgem, src_bo);
 	if (is_front(src->attachment)) {
 		struct sna_pixmap *priv;
 
commit 08865b0af288e0460c38c2e3ca20a7f9d0311f27
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat May 7 15:24:28 2016 +0100

    sna: Add a special case for fast DRI2CopyRegion and NoAccel
    
    Enable copying onto a scanout buffer using a WC mmap - so long as it is
    X-tiled with no swizzling.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
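
    For reference, a minimal sketch (illustrative only, not part of this
    commit) of how a byte address is computed inside an Intel X-tiled,
    swizzle-free surface, using the same 4KiB tiles of 512 bytes x 8 rows
    that the memcpy_between_tiled_x helpers below rely on:

        #include <stdint.h>

        /* Byte offset of byte column x_bytes in row y of an X-tiled
         * surface with no bit-6 swizzling.  stride is the pitch in bytes
         * and is assumed to be a multiple of the 512-byte tile width. */
        uint32_t x_tiled_offset(uint32_t x_bytes, uint32_t y, uint32_t stride)
        {
            const uint32_t tile_width  = 512;   /* bytes per tile row */
            const uint32_t tile_height = 8;     /* rows per tile */
            const uint32_t tile_size   = 4096;  /* tile_width * tile_height */

            uint32_t tiles_per_row = stride / tile_width;
            uint32_t tile_row = y / tile_height;       /* row of tiles */
            uint32_t tile_col = x_bytes / tile_width;  /* tile within the row */

            return (tile_row * tiles_per_row + tile_col) * tile_size +
                   (y % tile_height) * tile_width +    /* row inside the tile */
                   (x_bytes % tile_width);             /* byte inside that row */
        }

    Because source and destination share this geometry and the copy asserts
    (dst_x & tile_mask) == (src_x & tile_mask), each row of the copied box
    splits into at most one leading partial span, a run of whole 512-byte
    tile rows, and a trailing partial span - exactly the loop structure of
    the new helpers in blt.c.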

diff --git a/src/sna/blt.c b/src/sna/blt.c
index eced971..ab7bd22 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -349,6 +349,71 @@ memcpy_from_tiled_x__swizzle_0(const void *src, void *dst, int bpp,
 	}
 }
 
+static fast_memcpy void
+memcpy_between_tiled_x__swizzle_0(const void *src, void *dst, int bpp,
+				  int32_t src_stride, int32_t dst_stride,
+				  int16_t src_x, int16_t src_y,
+				  int16_t dst_x, int16_t dst_y,
+				  uint16_t width, uint16_t height)
+{
+	const unsigned tile_width = 512;
+	const unsigned tile_height = 8;
+	const unsigned tile_size = 4096;
+
+	const unsigned cpp = bpp / 8;
+	const unsigned tile_pixels = tile_width / cpp;
+	const unsigned tile_shift = ffs(tile_pixels) - 1;
+	const unsigned tile_mask = tile_pixels - 1;
+
+	DBG(("%s(bpp=%d): src=(%d, %d), dst=(%d, %d), size=%dx%d, pitch=%d/%d\n",
+	     __FUNCTION__, bpp, src_x, src_y, dst_x, dst_y, width, height, src_stride, dst_stride));
+	assert(src != dst);
+	assert((dst_x & tile_mask) == (src_x & tile_mask));
+
+	while (height--) {
+		unsigned w = width * cpp;
+		uint8_t *dst_row = dst;
+		const uint8_t *src_row = src;
+
+		dst_row += dst_y / tile_height * dst_stride * tile_height;
+		dst_row += (dst_y & (tile_height-1)) * tile_width;
+		if (dst_x)
+			dst_row += (dst_x >> tile_shift) * tile_size;
+		dst_y++;
+
+		src_row += src_y / tile_height * src_stride * tile_height;
+		src_row += (src_y & (tile_height-1)) * tile_width;
+		if (src_x)
+			src_row += (src_x >> tile_shift) * tile_size;
+		src_y++;
+
+		if (dst_x & tile_mask) {
+			const unsigned x = (dst_x & tile_mask) * cpp;
+			const unsigned len = min(tile_width - x, w);
+
+			memcpy(assume_misaligned(dst_row + x, tile_width, x),
+			       assume_misaligned(src_row + x, tile_width, x),
+			       len);
+
+			dst_row += tile_size;
+			src_row += tile_size;
+			w -= len;
+		}
+
+		while (w >= tile_width) {
+			memcpy(assume_aligned(dst_row, tile_width),
+			       assume_aligned(src_row, tile_width),
+			       tile_width);
+			dst_row += tile_size;
+			src_row += tile_size;
+			w -= tile_width;
+		}
+		memcpy(assume_aligned(dst_row, tile_width),
+		       assume_aligned(src_row, tile_width),
+		       w);
+	}
+}
+
 #if defined(sse2) && defined(__x86_64__)
 
 sse2 static force_inline void
@@ -461,7 +526,7 @@ sse2 static void to_memcpy(uint8_t *dst, const uint8_t *src, unsigned len)
 	while (len >= 64) {
 		to_sse64(dst, src);
 		dst += 64;
-		src = (const uint8_t *)src + 64;
+		src += 64;
 		len -= 64;
 	}
 	if (len == 0)
@@ -470,22 +535,22 @@ sse2 static void to_memcpy(uint8_t *dst, const uint8_t *src, unsigned len)
 	if (len & 32) {
 		to_sse32(dst, src);
 		dst += 32;
-		src = (const uint8_t *)src + 32;
+		src += 32;
 	}
 	if (len & 16) {
 		to_sse16(dst, src);
 		dst += 16;
-		src = (const uint8_t *)src + 16;
+		src += 16;
 	}
 	if (len & 8) {
 		*(uint64_t *)dst = *(uint64_t *)src;
 		dst += 8;
-		src = (const uint8_t *)src + 8;
+		src += 8;
 	}
 	if (len & 4) {
 		*(uint32_t *)dst = *(uint32_t *)src;
 		dst += 4;
-		src = (const uint8_t *)src + 4;
+		src += 4;
 	}
 	memcpy(dst, src, len & 3);
 }
@@ -820,6 +885,86 @@ memcpy_from_tiled_x__swizzle_0__sse2(const void *src, void *dst, int bpp,
 	}
 }
 
+sse2 static fast_memcpy void
+memcpy_between_tiled_x__swizzle_0__sse2(const void *src, void *dst, int bpp,
+					int32_t src_stride, int32_t dst_stride,
+					int16_t src_x, int16_t src_y,
+					int16_t dst_x, int16_t dst_y,
+					uint16_t width, uint16_t height)
+{
+	const unsigned tile_width = 512;
+	const unsigned tile_height = 8;
+	const unsigned tile_size = 4096;
+
+	const unsigned cpp = bpp / 8;
+	const unsigned tile_pixels = tile_width / cpp;
+	const unsigned tile_shift = ffs(tile_pixels) - 1;
+	const unsigned tile_mask = tile_pixels - 1;
+
+	unsigned ox, lx;
+
+	DBG(("%s(bpp=%d): src=(%d, %d), dst=(%d, %d), size=%dx%d, pitch=%d/%d\n",
+	     __FUNCTION__, bpp, src_x, src_y, dst_x, dst_y, width, height, src_stride, dst_stride));
+	assert(src != dst);
+
+	width *= cpp;
+	dst_stride *= tile_height;
+	src_stride *= tile_height;
+
+	assert((dst_x & tile_mask) == (src_x & tile_mask));
+	if (dst_x & tile_mask) {
+		ox = (dst_x & tile_mask) * cpp;
+		lx = min(tile_width - ox, width);
+		assert(lx != 0);
+	} else
+		lx = 0;
+
+	if (dst_x)
+		dst = (uint8_t *)dst + (dst_x >> tile_shift) * tile_size;
+	if (src_x)
+		src = (const uint8_t *)src + (src_x >> tile_shift) * tile_size;
+
+	while (height--) {
+		const uint8_t *src_row;
+		uint8_t *dst_row;
+		unsigned w = width;
+
+		dst_row = dst;
+		dst_row += dst_y / tile_height * dst_stride;
+		dst_row += (dst_y & (tile_height-1)) * tile_width;
+		dst_y++;
+
+		src_row = src;
+		src_row += src_y / tile_height * src_stride;
+		src_row += (src_y & (tile_height-1)) * tile_width;
+		src_y++;
+
+		if (lx) {
+			to_memcpy(dst_row + ox, src_row + ox, lx);
+			dst_row += tile_size;
+			src_row += tile_size;
+			w -= lx;
+		}
+		while (w >= tile_width) {
+			assert(((uintptr_t)dst_row & (tile_width - 1)) == 0);
+			assert(((uintptr_t)src_row & (tile_width - 1)) == 0);
+			to_sse128xN(assume_aligned(dst_row, tile_width),
+				    assume_aligned(src_row, tile_width),
+				    tile_width);
+			dst_row += tile_size;
+			src_row += tile_size;
+			w -= tile_width;
+		}
+		if (w) {
+			assert(((uintptr_t)dst_row & (tile_width - 1)) == 0);
+			assert(((uintptr_t)src_row & (tile_width - 1)) == 0);
+			to_memcpy(assume_aligned(dst_row, tile_width),
+				  assume_aligned(src_row, tile_width),
+				  w);
+		}
+	}
+}
+
 #endif
 
 #define memcpy_to_tiled_x(swizzle) \
@@ -1100,11 +1245,13 @@ void choose_memcpy_tiled_x(struct kgem *kgem, int swizzling, unsigned cpu)
 		if (cpu & SSE2) {
 			kgem->memcpy_to_tiled_x = memcpy_to_tiled_x__swizzle_0__sse2;
 			kgem->memcpy_from_tiled_x = memcpy_from_tiled_x__swizzle_0__sse2;
+			kgem->memcpy_between_tiled_x = memcpy_between_tiled_x__swizzle_0__sse2;
 		} else
 #endif
 	       	{
 			kgem->memcpy_to_tiled_x = memcpy_to_tiled_x__swizzle_0;
 			kgem->memcpy_from_tiled_x = memcpy_from_tiled_x__swizzle_0;
+			kgem->memcpy_between_tiled_x = memcpy_between_tiled_x__swizzle_0;
 		}
 		break;
 	case I915_BIT_6_SWIZZLE_9:
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index cd07756..ded8f78 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -113,6 +113,12 @@ enum {
 	NUM_MAP_TYPES,
 };
 
+typedef void (*memcpy_box_func)(const void *src, void *dst, int bpp,
+				int32_t src_stride, int32_t dst_stride,
+				int16_t src_x, int16_t src_y,
+				int16_t dst_x, int16_t dst_y,
+				uint16_t width, uint16_t height);
+
 struct kgem {
 	unsigned wedged;
 	int fd;
@@ -212,16 +218,9 @@ struct kgem {
 	void (*retire)(struct kgem *kgem);
 	void (*expire)(struct kgem *kgem);
 
-	void (*memcpy_to_tiled_x)(const void *src, void *dst, int bpp,
-				  int32_t src_stride, int32_t dst_stride,
-				  int16_t src_x, int16_t src_y,
-				  int16_t dst_x, int16_t dst_y,
-				  uint16_t width, uint16_t height);
-	void (*memcpy_from_tiled_x)(const void *src, void *dst, int bpp,
-				    int32_t src_stride, int32_t dst_stride,
-				    int16_t src_x, int16_t src_y,
-				    int16_t dst_x, int16_t dst_y,
-				    uint16_t width, uint16_t height);
+	memcpy_box_func memcpy_to_tiled_x;
+	memcpy_box_func memcpy_from_tiled_x;
+	memcpy_box_func memcpy_between_tiled_x;
 
 	struct kgem_bo *batch_bo;
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 5a8df06..f8281e9 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -2298,16 +2298,22 @@ static bool can_copy_cpu(struct sna *sna,
 			 struct kgem_bo *src,
 			 struct kgem_bo *dst)
 {
-	if (src->tiling != dst->tiling)
-		return false;
+	DBG(("%s: tiling=%d:%d, pitch=%d:%d, can_map=%d:%d[%d]\n",
+	     __FUNCTION__,
+	     src->tiling, dst->tiling,
+	     src->pitch, dst->pitch,
+	     kgem_bo_can_map__cpu(&sna->kgem, src, false),
+	     kgem_bo_can_map__cpu(&sna->kgem, dst, true),
+	     sna->kgem.has_wc_mmap));
 
-	if (src->pitch != dst->pitch)
+	if (src->tiling != dst->tiling)
 		return false;
 
 	if (!kgem_bo_can_map__cpu(&sna->kgem, src, false))
 		return false;
 
-	if (!kgem_bo_can_map__cpu(&sna->kgem, dst, true))
+	if (!kgem_bo_can_map__cpu(&sna->kgem, dst, true) &&
+	    !sna->kgem.has_wc_mmap)
 		return false;
 
 	DBG(("%s -- yes, src handle=%d, dst handle=%d\n", __FUNCTION__, src->handle, dst->handle));
@@ -2320,8 +2326,8 @@ memcpy_copy_boxes(struct sna *sna, uint8_t op,
 		  const DrawableRec *dst_draw, struct kgem_bo *dst_bo, int16_t dx, int16_t dy,
 		  const BoxRec *box, int n, unsigned flags)
 {
+	memcpy_box_func detile = NULL;
 	void *dst, *src;
-	bool clipped;
 
 	if (op != GXcopy)
 		return false;
@@ -2329,25 +2335,53 @@ memcpy_copy_boxes(struct sna *sna, uint8_t op,
 	if (src_draw->depth != dst_draw->depth)
 		return false;
 
-	clipped = (n > 1 ||
-		   box->x1 + dx > 0 ||
-		   box->y1 + dy > 0 ||
-		   box->x2 + dx < dst_draw->width ||
-		   box->y2 + dy < dst_draw->height);
-
 	dst = src = NULL;
-	if (!clipped && can_copy_cpu(sna, src_bo, dst_bo)) {
-		dst = kgem_bo_map__cpu(&sna->kgem, dst_bo);
+	if (can_copy_cpu(sna, src_bo, dst_bo)) {
+		if (src_bo->pitch != dst_bo->pitch ||
+		    dx != sx || dy != sy || n > 1 ||
+		    box->x1 + dx > 0 ||
+		    box->y1 + dy > 0 ||
+		    box->x2 + dx < dst_draw->width ||
+		    box->y2 + dy < dst_draw->height) {
+			if (dx != sx) /* not implemented in memcpy yet */
+				goto use_gtt;
+
+			switch (dst_bo->tiling) {
+			default:
+			case I915_TILING_Y:
+				goto use_gtt;
+
+			case I915_TILING_X:
+				detile = sna->kgem.memcpy_between_tiled_x;
+				if (detile == NULL)
+					goto use_gtt;
+				break;
+
+			case I915_TILING_NONE:
+				break;
+			}
+		}
+
+		if (kgem_bo_can_map__cpu(&sna->kgem, dst_bo, true))
+			dst = kgem_bo_map__cpu(&sna->kgem, dst_bo);
+		else
+			dst = kgem_bo_map__wc(&sna->kgem, dst_bo);
 		src = kgem_bo_map__cpu(&sna->kgem, src_bo);
 	}
 
 	if (dst == NULL || src == NULL) {
+use_gtt:
 		dst = kgem_bo_map__gtt(&sna->kgem, dst_bo);
 		src = kgem_bo_map__gtt(&sna->kgem, src_bo);
 		if (dst == NULL || src == NULL)
 			return false;
+
+		detile = NULL;
 	} else {
-		kgem_bo_sync__cpu_full(&sna->kgem, dst_bo, true);
+		if (dst == dst_bo->map__wc)
+			kgem_bo_sync__gtt(&sna->kgem, dst_bo);
+		else
+			kgem_bo_sync__cpu_full(&sna->kgem, dst_bo, true);
 		kgem_bo_sync__cpu_full(&sna->kgem, src_bo, false);
 	}
 
@@ -2355,7 +2389,16 @@ memcpy_copy_boxes(struct sna *sna, uint8_t op,
 	     __FUNCTION__, sx, sy, dx, dy, n));
 
 	if (sigtrap_get() == 0) {
-		do {
+		if (detile) {
+			do {
+				detile(src, dst, dst_draw->bitsPerPixel,
+				       src_bo->pitch, dst_bo->pitch,
+				       box->x1 + sx, box->y1 + sy,
+				       box->x1 + dx, box->y1 + dy,
+				       box->x2 - box->x1, box->y2 - box->y1);
+				box++;
+			} while (--n);
+		} else do {
 			memcpy_blt(src, dst, dst_draw->bitsPerPixel,
 				   src_bo->pitch, dst_bo->pitch,
 				   box->x1 + sx, box->y1 + sy,
commit b89f203b0d65b607bc906b9a1ac184ebef7b41df
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat May 7 15:20:32 2016 +0100

    sna: Do not force ping-pong migration for TearFree + NoAccel
    
    If acceleration is disabled, but we are using TearFree, then ideally we
    want to flip the shadow buffer onto the scanout. If the shadow buffer is
    already on the GPU, e.g. having been swapped in by a compositor, then we
    do not want to move it to the CPU domain only to copy it back to a new
    buffer that is then flipped for a TearFree update.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
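
    To make the intent concrete, a rough sketch of the redisplay decision
    this change moves towards (the identifiers below are hypothetical
    stand-ins, not the driver's API): with TearFree the per-CRTC shadow is
    rendered wherever it already lives and flipped onto the scanout, so the
    frontbuffer no longer has to be dragged through the CPU domain first.

        #include <stdbool.h>
        #include <stdio.h>

        /* Illustrative only: hypothetical decision flow for a screen
         * update; none of these identifiers exist in sna. */
        enum update_path { FLIP_SHADOW, CPU_FALLBACK, GPU_COPY };

        enum update_path choose_update_path(bool tear_free, bool accel)
        {
            if (tear_free)
                /* Render the damage into the per-CRTC shadow buffer (CPU
                 * or GPU, wherever it already is) and page-flip it onto
                 * the scanout; no ping-pong migration of the frontbuffer. */
                return FLIP_SHADOW;

            if (!accel)
                /* Without TearFree, NoAccel pulls the frontbuffer into
                 * the CPU domain and writes the scanout directly. */
                return CPU_FALLBACK;

            return GPU_COPY;
        }

        int main(void)
        {
            printf("NoAccel + TearFree -> path %d\n",
                   choose_update_path(true, false));
            return 0;
        }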

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 759659d..2c6059d 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -210,8 +210,6 @@ struct sna_crtc {
 
 	struct pict_f_transform cursor_to_fb, fb_to_cursor;
 
-	RegionRec client_damage; /* XXX overlap with shadow damage? */
-
 	uint16_t shadow_bo_width, shadow_bo_height;
 
 	uint32_t rotation;
@@ -1665,12 +1663,13 @@ static bool wait_for_shadow(struct sna *sna,
 		     sna->mode.shadow_region.extents.y1,
 		     sna->mode.shadow_region.extents.x2,
 		     sna->mode.shadow_region.extents.y2));
-		ret = sna->render.copy_boxes(sna, GXcopy,
-					     &pixmap->drawable, priv->gpu_bo, 0, 0,
-					     &pixmap->drawable, bo, 0, 0,
-					     region_rects(&sna->mode.shadow_region),
-					     region_num_rects(&sna->mode.shadow_region),
-					     0);
+		if (!sna->render.copy_boxes(sna, GXcopy,
+					    &pixmap->drawable, priv->gpu_bo, 0, 0,
+					    &pixmap->drawable, bo, 0, 0,
+					    region_rects(&sna->mode.shadow_region),
+					    region_num_rects(&sna->mode.shadow_region),
+					    0))
+			ERR(("%s: copy failed\n", __FUNCTION__));
 	}
 
 	if (priv->cow)
@@ -2552,7 +2551,7 @@ out_shadow:
 				return NULL;
 			}
 
-			if (sna->mode.shadow == NULL && !wedged(sna)) {
+			if (sna->mode.shadow == NULL) {
 				struct kgem_bo *shadow;
 
 				DBG(("%s: creating TearFree shadow bo\n", __FUNCTION__));
@@ -2743,7 +2742,6 @@ sna_crtc_damage(xf86CrtcPtr crtc)
 	     __FUNCTION__, sna_crtc_id(crtc),
 	     region.extents.x1, region.extents.y1,
 	     region.extents.x2, region.extents.y2));
-	to_sna_crtc(crtc)->client_damage = region;
 
 	assert(sna->mode.shadow_damage && sna->mode.shadow_active);
 	damage = DamageRegion(sna->mode.shadow_damage);
@@ -8516,6 +8514,9 @@ static bool move_crtc_to_gpu(struct sna *sna)
 	xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(sna->scrn);
 	int i;
 
+	if (sna->flags & SNA_TEAR_FREE)
+		return true;
+
 	for (i = 0; i < sna->mode.num_real_crtc; i++) {
 		struct sna_crtc *crtc = to_sna_crtc(config->crtc[i]);
 
@@ -8530,6 +8531,9 @@ static bool move_crtc_to_gpu(struct sna *sna)
 		if (crtc->client_bo)
 			continue;
 
+		if (crtc->shadow_bo)
+			continue;
+
 		DBG(("%s: CRTC %d [pipe=%d] requires frontbuffer\n",
 		     __FUNCTION__, __sna_crtc_id(crtc), __sna_crtc_pipe(crtc)));
 		return sna_pixmap_move_to_gpu(sna->front,
@@ -8592,7 +8596,7 @@ void sna_mode_redisplay(struct sna *sna)
 			return;
 	}
 
-	if (wedged(sna) || !move_crtc_to_gpu(sna)) {
+	if (!move_crtc_to_gpu(sna)) {
 		DBG(("%s: forcing scanout update using the CPU\n", __FUNCTION__));
 		if (!sna_pixmap_move_to_cpu(sna->front, MOVE_READ))
 			return;
@@ -8613,97 +8617,14 @@ void sna_mode_redisplay(struct sna *sna)
 			damage.data = NULL;
 			RegionIntersect(&damage, &damage, region);
 			if (!box_empty(&damage.extents)) {
-				struct kgem_bo *bo = NULL;
-
 				DBG(("%s: fallback intersects pipe=%d [(%d, %d), (%d, %d)]\n",
 				     __FUNCTION__, __sna_crtc_pipe(sna_crtc),
 				     damage.extents.x1, damage.extents.y1,
 				     damage.extents.x2, damage.extents.y2));
 
-				if (sna->flags & SNA_TEAR_FREE) {
-					RegionRec new_damage;
-
-					RegionNull(&new_damage);
-					RegionCopy(&new_damage, &damage);
-
-					bo = sna_crtc->cache_bo;
-					if (bo == NULL) {
-						damage.extents = crtc->bounds;
-						damage.data = NULL;
-						bo = kgem_create_2d(&sna->kgem,
-								crtc->mode.HDisplay,
-								crtc->mode.VDisplay,
-								crtc->scrn->bitsPerPixel,
-								sna_crtc->bo->tiling,
-								CREATE_SCANOUT);
-					} else
-						RegionUnion(&damage, &damage, &sna_crtc->client_damage);
-
-					DBG(("%s: TearFree fallback, shadow handle=%d, crtc handle=%d\n", __FUNCTION__, bo->handle, sna_crtc->bo->handle));
-
-					sna_crtc->client_damage = new_damage;
-				}
-
-				if (bo == NULL)
-					bo = sna_crtc->bo;
-				sna_crtc_redisplay__fallback(crtc, &damage, bo);
-
-				if (bo != sna_crtc->bo) {
-					struct drm_mode_crtc_page_flip arg;
-
-					arg.crtc_id = __sna_crtc_id(sna_crtc);
-					arg.fb_id = get_fb(sna, bo,
-							   crtc->mode.HDisplay,
-							   crtc->mode.VDisplay);
-
-					arg.user_data = (uintptr_t)sna_crtc;
-					arg.flags = DRM_MODE_PAGE_FLIP_EVENT;
-					arg.reserved = 0;
-
-					if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_PAGE_FLIP, &arg)) {
-						if (sna_crtc_flip(sna, sna_crtc, bo, 0, 0)) {
-							DBG(("%s: removing handle=%d [active_scanout=%d] from scanout, installing handle=%d [active_scanout=%d]\n",
-							     __FUNCTION__, sna_crtc->bo->handle, sna_crtc->bo->active_scanout,
-							     bo->handle, bo->active_scanout));
-							assert(sna_crtc->bo->active_scanout);
-							assert(sna_crtc->bo->refcnt >= sna_crtc->bo->active_scanout);
-							sna_crtc->bo->active_scanout--;
-							kgem_bo_destroy(&sna->kgem, sna_crtc->bo);
-
-							sna_crtc->bo = bo;
-							sna_crtc->bo->active_scanout++;
-							sna_crtc->cache_bo = NULL;
-						} else {
-							DBG(("%s: flip [fb=%d] on crtc %d [%d, pipe=%d] failed - %d\n",
-							     __FUNCTION__, arg.fb_id, i, __sna_crtc_id(sna_crtc), __sna_crtc_pipe(sna_crtc), errno));
-							xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
-								   "Page flipping failed, disabling TearFree\n");
-							sna->flags &= ~SNA_TEAR_FREE;
-
-							damage.extents = crtc->bounds;
-							damage.data = NULL;
-							sna_crtc_redisplay__fallback(crtc, &damage, sna_crtc->bo);
-
-							kgem_bo_destroy(&sna->kgem, bo);
-							sna_crtc->cache_bo = NULL;
-						}
-					} else {
-						sna->mode.flip_active++;
-
-						assert(sna_crtc->flip_bo == NULL);
-						sna_crtc->flip_handler = shadow_flip_handler;
-						sna_crtc->flip_data = sna;
-						sna_crtc->flip_bo = bo;
-						sna_crtc->flip_bo->active_scanout++;
-						sna_crtc->flip_serial = sna_crtc->mode_serial;
-						sna_crtc->flip_pending = true;
-
-						sna_crtc->cache_bo = kgem_bo_reference(sna_crtc->bo);
-
-						DBG(("%s: recording flip on CRTC:%d handle=%d, active_scanout=%d, serial=%d\n",
-						     __FUNCTION__, __sna_crtc_id(sna_crtc), sna_crtc->flip_bo->handle, sna_crtc->flip_bo->active_scanout, sna_crtc->flip_serial));
-					}
-				}
+				sna_crtc_redisplay__fallback(crtc,
+							     &damage,
+							     sna_crtc->bo);
 			}
 			RegionUninit(&damage);
 

