xf86-video-intel: 5 commits - src/sna/sna_display.c src/sna/sna.h src/sna/sna_present.c src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Thu Apr 5 17:19:11 UTC 2018


 src/sna/sna.h         |    1 
 src/sna/sna_display.c |   90 +++++++++++++++++---------------------------------
 src/sna/sna_present.c |    6 ---
 src/sna/sna_render.c  |    3 +
 4 files changed, 34 insertions(+), 66 deletions(-)

New commits:
commit d9bf46e45cff7122765d096c2b6dc1c3c88d1c69
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 4 15:20:01 2018 +0100

    sna: Discard the per-crtc TearFree back buffer on resize
    
    When we mix TearFree and per-crtc pixmaps (e.g. for RandR transformations),
    we stash the old buffer on the CRTC for double buffering. However, this
    buffer needs to be reallocated when we change output resolutions, as the
    CRTC size may change.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a611b46d..cf5a96f5 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2586,6 +2586,11 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 	}
 	sna_crtc->rotation = RR_Rotate_0;
 
+	if (sna_crtc->cache_bo) {
+		kgem_bo_destroy(&sna->kgem, sna_crtc->cache_bo);
+		sna_crtc->cache_bo = NULL;
+	}
+
 	if (use_shadow(sna, crtc)) {
 		PixmapPtr front;
 		unsigned long tiled_limit;
commit 4953aa13b93cb753f3ee8c6acf984e123cbf32ae
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 4 13:17:34 2018 +0100

    sna: Always sync before using mmap pointers in memcpy_copy_boxes
    
    kgem_bo_map__(cpu|gtt) leaves the sync up to the caller, in particular
    so that obtaining the pointer and controlling the cache domains are
    not conflated and can be separated. However, it does mean that the
    caller can not assume that obtaining the pointer updates the cache
    domains, as it does not. memcpy_copy_boxes fell into this trap.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index bba7c942..1787ffae 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -2370,6 +2370,9 @@ use_gtt:
 		if (dst == NULL || src == NULL)
 			return false;
 
+		kgem_bo_sync__gtt(&sna->kgem, dst_bo);
+		kgem_bo_sync__gtt(&sna->kgem, src_bo);
+
 		detile = NULL;
 	} else {
 		if (dst == dst_bo->map__wc)
commit 12db28ab3d509dfada4f0514858b311f48ed8cc4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 19:18:55 2018 +0100

    sna: Reorder vblank/flip event handling to avoid TearFree recursion
    
    TearFree wants to grab the most recently used scanout for rendering the
    next frame into. If the flip event was still pending, we would then
    query the drm event buffer for any pending completions, but this would
    proceed to execute all the other events before the flip events as well.
    Since they were out of sequence, we pushed them into a buffer to
    execute afterwards, however we forgot the side effects of the flip
    handlers, for example see commit af36a4ab78cc ("sna: Defer submission
    of the next shadow frame until halfway through") and that there may have
    been events read from drm into a local buffer inside sna_mode_wakeup()
    that haven't been processed yet.
    
    Eliminate the need for calling sna_mode_wakeup() by ensuring that all
    flip events have been completed first before handling the vblank
    callbacks and potential drawing, ensuring the correct ordering.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=105720
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 3ccc74c8..6fe1f0d2 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -310,7 +310,6 @@ struct sna {
 		unsigned flip_active;
 		unsigned hidden;
 		bool shadow_enabled;
-		bool shadow_wait;
 		bool dirty;
 
 		struct drm_event_vblank *shadow_events;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 71500734..a611b46d 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1703,8 +1703,8 @@ static bool wait_for_shadow(struct sna *sna,
 	int flip_active;
 	bool ret = true;
 
-	DBG(("%s: enabled? %d waiting? %d, flags=%x, flips=%d, pixmap=%ld [front?=%d], handle=%d, shadow=%d\n",
-	     __FUNCTION__, sna->mode.shadow_enabled, sna->mode.shadow_wait,
+	DBG(("%s: enabled? %d flags=%x, flips=%d, pixmap=%ld [front?=%d], handle=%d, shadow=%d\n",
+	     __FUNCTION__, sna->mode.shadow_enabled,
 	     flags, sna->mode.flip_active,
 	     pixmap->drawable.serialNumber, pixmap == sna->front,
 	     priv->gpu_bo->handle, sna->mode.shadow->handle));
@@ -1716,7 +1716,6 @@ static bool wait_for_shadow(struct sna *sna,
 		goto done;
 
 	assert(sna->mode.shadow_damage);
-	assert(!sna->mode.shadow_wait);
 
 	if ((flags & MOVE_WRITE) == 0) {
 		if ((flags & __MOVE_SCANOUT) == 0) {
@@ -1758,7 +1757,6 @@ static bool wait_for_shadow(struct sna *sna,
 	}
 
 	assert(sna->mode.shadow_active);
-	sna->mode.shadow_wait = true;
 
 	flip_active = sna->mode.flip_active;
 	if (flip_active) {
@@ -1768,21 +1766,6 @@ static bool wait_for_shadow(struct sna *sna,
 		DBG(("%s: %d flips still pending, shadow flip_active=%d\n",
 		     __FUNCTION__, sna->mode.flip_active, flip_active));
 	}
-	if (flip_active) {
-		/* raw cmd to avoid setting wedged in the middle of an op */
-		drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_THROTTLE, 0);
-		sna->kgem.need_throttle = false;
-
-		while (flip_active && sna_mode_wakeup(sna)) {
-			struct sna_crtc *crtc;
-
-			flip_active = sna->mode.flip_active;
-			list_for_each_entry(crtc, &sna->mode.shadow_crtc, shadow_link)
-				flip_active -= crtc->flip_pending;
-		}
-		DBG(("%s: after waiting %d flips outstanding, flip_active=%d\n",
-		     __FUNCTION__, sna->mode.flip_active, flip_active));
-	}
 
 	bo = sna->mode.shadow;
 	if (flip_active) {
@@ -1805,8 +1788,6 @@ static bool wait_for_shadow(struct sna *sna,
 		sna->mode.shadow_region.extents.y2 = pixmap->drawable.height;
 		sna->mode.shadow_region.data = NULL;
 	}
-	assert(sna->mode.shadow_wait);
-	sna->mode.shadow_wait = false;
 
 	if (bo->refcnt > 1) {
 		bo = kgem_create_2d(&sna->kgem,
@@ -1914,9 +1895,6 @@ done:
 	priv->move_to_gpu_data = NULL;
 	priv->move_to_gpu = NULL;
 
-	assert(!sna->mode.shadow_wait);
-	flush_events(sna);
-
 	assert(sna->mode.shadow->active_scanout);
 	return ret;
 }
@@ -9447,6 +9425,7 @@ fixup_flip:
 
 int sna_mode_wakeup(struct sna *sna)
 {
+	bool defer_vblanks = sna->mode.flip_active && sna->mode.shadow_enabled;
 	char buffer[1024];
 	int len, i;
 	int ret = 0;
@@ -9457,14 +9436,14 @@ again:
 	 * event from drm.
 	 */
 	if (!event_pending(sna->kgem.fd))
-		return ret;
+		goto done;
 
 	/* The DRM read semantics guarantees that we always get only
 	 * complete events.
 	 */
 	len = read(sna->kgem.fd, buffer, sizeof (buffer));
 	if (len < (int)sizeof(struct drm_event))
-		return ret;
+		goto done;
 
 	/* Note that we cannot rely on the passed in struct sna matching
 	 * the struct sna used for the vblank event (in case it was submitted
@@ -9479,7 +9458,7 @@ again:
 		struct drm_event *e = (struct drm_event *)&buffer[i];
 		switch (e->type) {
 		case DRM_EVENT_VBLANK:
-			if (sna->mode.shadow_wait)
+			if (defer_vblanks)
 				defer_event(sna, e);
 			else if (((uintptr_t)((struct drm_event_vblank *)e)->user_data) & 2)
 				sna_present_vblank_handler((struct drm_event_vblank *)e);
@@ -9550,4 +9529,8 @@ again:
 	}
 
 	goto again;
+
+done:
+	flush_events(sna);
+	return ret;
 }
diff --git a/src/sna/sna_present.c b/src/sna/sna_present.c
index a2ce808f..523261cd 100644
--- a/src/sna/sna_present.c
+++ b/src/sna/sna_present.c
@@ -460,12 +460,6 @@ sna_present_vblank_handler(struct drm_event_vblank *event)
 
 	msc = sna_crtc_record_event(info->crtc, event);
 
-	if (info->sna->mode.shadow_wait) {
-		DBG(("%s: recursed from TearFree\n", __FUNCTION__));
-		if (TimerSet(NULL, 0, 1, sna_fake_vblank_handler, info))
-			return;
-	}
-
 	vblank_complete(info, ust64(event->tv_sec, event->tv_usec), msc);
 }
 
commit ca6a57d5ea0e89fa8a6a55869cd91af5144fc367
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 19:05:45 2018 +0100

    sna: Skip shadow redisplay if flips still pending
    
    We shouldn't even be attempting to redisplay if there are flips pending,
    so exit early and expect to be called again after the pending flips
    complete. Exiting early avoids having to call sna_mode_wakeup() in what
    used to be a potentially recursive manner (see commit af36a4ab78cc
    "sna: Defer submission of the next shadow frame until halfway through").
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 07c7ad61..71500734 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -9045,6 +9045,12 @@ void sna_mode_redisplay(struct sna *sna)
 	if (sna->mode.dirty)
 		return;
 
+	if (sna->mode.flip_active) {
+		DBG(("%s: %d outstanding flips\n",
+		     __FUNCTION__, sna->mode.flip_active));
+		return;
+	}
+
 	region = DamageRegion(sna->mode.shadow_damage);
 	if (RegionNil(region))
 		return;
@@ -9054,23 +9060,6 @@ void sna_mode_redisplay(struct sna *sna)
 	     region->extents.x1, region->extents.y1,
 	     region->extents.x2, region->extents.y2));
 
-	if (sna->mode.flip_active) {
-		DBG(("%s: checking for %d outstanding flip completions\n",
-		     __FUNCTION__, sna->mode.flip_active));
-
-		sna->mode.dirty = true;
-		while (sna->mode.flip_active && sna_mode_wakeup(sna))
-			;
-		sna->mode.dirty = false;
-
-		DBG(("%s: now %d outstanding flip completions (enabled? %d)\n",
-		     __FUNCTION__,
-		     sna->mode.flip_active,
-		     sna->mode.shadow_enabled));
-		if (sna->mode.flip_active || !sna->mode.shadow_enabled)
-			return;
-	}
-
 	if (!move_crtc_to_gpu(sna)) {
 		DBG(("%s: forcing scanout update using the CPU\n", __FUNCTION__));
 		if (!sna_pixmap_move_to_cpu(sna->front, MOVE_READ))
commit 0a8a852940d69bfa306da6e5744f616b8f8d8ee5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 19:03:01 2018 +0100

    sna: Report the move_to_gpu failed if the allocation failed
    
    Do not try and workaround the failure by forcing the wait-for-flip as we
    may be inside a vblank handler already. Just report the move failed and
    expect the caller to skip the draw, fairly standard practice for
    allocation failure handling (stale output rather than crash).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index becc6061..07c7ad61 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1792,23 +1792,18 @@ static bool wait_for_shadow(struct sna *sna,
 				    pixmap->drawable.bitsPerPixel,
 				    priv->gpu_bo->tiling,
 				    CREATE_EXACT | CREATE_SCANOUT);
-		if (bo != NULL) {
-			DBG(("%s: replacing still-attached GPU bo handle=%d, flips=%d\n",
-			     __FUNCTION__, priv->gpu_bo->tiling, sna->mode.flip_active));
+		if (bo == NULL)
+			return false;
 
-			RegionUninit(&sna->mode.shadow_region);
-			sna->mode.shadow_region.extents.x1 = 0;
-			sna->mode.shadow_region.extents.y1 = 0;
-			sna->mode.shadow_region.extents.x2 = pixmap->drawable.width;
-			sna->mode.shadow_region.extents.y2 = pixmap->drawable.height;
-			sna->mode.shadow_region.data = NULL;
-		} else {
-			while (sna->mode.flip_active &&
-			       sna_mode_wait_for_event(sna))
-				sna_mode_wakeup(sna);
+		DBG(("%s: replacing still-attached GPU bo handle=%d, flips=%d\n",
+		     __FUNCTION__, priv->gpu_bo->tiling, sna->mode.flip_active));
 
-			bo = sna->mode.shadow;
-		}
+		RegionUninit(&sna->mode.shadow_region);
+		sna->mode.shadow_region.extents.x1 = 0;
+		sna->mode.shadow_region.extents.y1 = 0;
+		sna->mode.shadow_region.extents.x2 = pixmap->drawable.width;
+		sna->mode.shadow_region.extents.y2 = pixmap->drawable.height;
+		sna->mode.shadow_region.data = NULL;
 	}
 	assert(sna->mode.shadow_wait);
 	sna->mode.shadow_wait = false;


More information about the xorg-commit mailing list