xf86-video-intel: 5 commits - src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_gradient.c src/sna/sna_render.c src/sna/sna_trapezoids.c

Chris Wilson ickle at kemper.freedesktop.org
Fri May 18 13:09:58 PDT 2012


 src/sna/kgem.c           |   65 ++++++++++++++++++++++++++---------------------
 src/sna/kgem.h           |    3 +-
 src/sna/sna_accel.c      |    9 ++----
 src/sna/sna_gradient.c   |   11 +++++--
 src/sna/sna_render.c     |    6 ++--
 src/sna/sna_trapezoids.c |   13 +++++----
 6 files changed, 58 insertions(+), 49 deletions(-)

New commits:
commit f91dcc44dcc15850f82666b1bcdd27182400e7dc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 18 20:09:41 2012 +0100

    sna: Give the proxy a unique name
    
    So that, if we cache the current destination bo (as gen3 does, for
    example), a new proxy (or even just a new batchbuffer) will indeed
    cause the destination buffer to be updated.
    
    Reported-and-tested-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48636
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
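
A minimal standalone sketch of the caching pattern this fix is aimed at
(the types and names below are illustrative, not the driver's): a render
backend remembers the unique_id of the destination it last emitted state
for and skips re-emission on a match. Before this commit a proxy was
created without being assigned its own unique_id (see the diff below),
so such a cache could mistake a fresh proxy for the already-bound
destination.

    #include <stdint.h>
    #include <stdbool.h>

    struct fake_bo { uint32_t unique_id; };

    static uint32_t last_dst_id;    /* id of the destination last bound */

    /* Return true if destination state must be (re-)emitted. */
    static bool dst_needs_emit(const struct fake_bo *dst)
    {
            if (dst->unique_id == last_dst_id)
                    return false;             /* cache hit: skip emission */
            last_dst_id = dst->unique_id;     /* miss: record and emit */
            return true;
    }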

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index df69b90..470dd24 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3499,7 +3499,8 @@ void kgem_clear_dirty(struct kgem *kgem)
 	}
 }
 
-struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
+struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
+				  struct kgem_bo *target,
 				  int offset, int length)
 {
 	struct kgem_bo *bo;
@@ -3512,6 +3513,7 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	if (bo == NULL)
 		return NULL;
 
+	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->reusable = false;
 	bo->size.bytes = length;
 
@@ -3903,7 +3905,7 @@ done:
 	bo->used = ALIGN(bo->used, 64);
 	assert(bo->mem);
 	*ret = (char *)bo->mem + offset;
-	return kgem_create_proxy(&bo->base, offset, size);
+	return kgem_create_proxy(kgem, &bo->base, offset, size);
 }
 
 bool kgem_buffer_is_inplace(struct kgem_bo *_bo)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0c26630..0a95da7 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -195,7 +195,8 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
 
 struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
-struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
+struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
+				  struct kgem_bo *target,
 				  int offset, int length);
 
 struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index 32d26c8..a52cfb5 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -258,7 +258,8 @@ sna_render_finish_solid(struct sna *sna, bool force)
 	DBG(("sna_render_finish_solid reset\n"));
 
 	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color), 0);
-	cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
+	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
+					 0, sizeof(uint32_t));
 	cache->bo[0]->pitch = 4;
 	if (force)
 		cache->size = 1;
@@ -308,7 +309,7 @@ sna_render_get_solid(struct sna *sna, uint32_t color)
 	DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
 
 create:
-	cache->bo[i] = kgem_create_proxy(cache->cache_bo,
+	cache->bo[i] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
 					 i*sizeof(uint32_t), sizeof(uint32_t));
 	cache->bo[i]->pitch = 4;
 
@@ -331,7 +332,8 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 
 	for (i = 0; i < 256; i++) {
 		color[i] = i << 24;
-		cache->bo[i] = kgem_create_proxy(cache->cache_bo,
+		cache->bo[i] = kgem_create_proxy(&sna->kgem,
+						 cache->cache_bo,
 						 sizeof(uint32_t)*i,
 						 sizeof(uint32_t));
 		cache->bo[i]->pitch = 4;
@@ -356,7 +358,8 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	 * zeroth slot simplifies some of the checks.
 	 */
 	cache->color[0] = 0xffffffff;
-	cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
+	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
+					 0, sizeof(uint32_t));
 	cache->bo[0]->pitch = 4;
 	cache->dirty = 1;
 	cache->size = 1;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 880e173..7feaa24 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -870,7 +870,7 @@ sna_render_pixmap_partial(struct sna *sna,
 	}
 
 	/* How many tiles across are we? */
-	channel->bo = kgem_create_proxy(bo,
+	channel->bo = kgem_create_proxy(&sna->kgem, bo,
 					box.y1 * bo->pitch + offset,
 					h * bo->pitch);
 	if (channel->bo == NULL)
@@ -989,7 +989,7 @@ sna_render_picture_partial(struct sna *sna,
 		return 0;
 
 	/* How many tiles across are we? */
-	channel->bo = kgem_create_proxy(bo,
+	channel->bo = kgem_create_proxy(&sna->kgem, bo,
 					box.y1 * bo->pitch + offset,
 					h * bo->pitch);
 	if (channel->bo == NULL)
@@ -1821,7 +1821,7 @@ sna_render_composite_redirect(struct sna *sna,
 			}
 
 			/* How many tiles across are we? */
-			op->dst.bo = kgem_create_proxy(op->dst.bo,
+			op->dst.bo = kgem_create_proxy(&sna->kgem, op->dst.bo,
 						       box.y1 * op->dst.bo->pitch + offset,
 						       h * op->dst.bo->pitch);
 			if (!op->dst.bo) {
commit ee073d613bba38f90951405d5ecddfcf3ac5e043
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 18 15:19:00 2012 +0100

    sna/traps: Fix processing of full-steps
    
    A missing factor of Y-height caused the computation of coverage for the
    spans to be completely wrong. This affects the vertical segments of
    rounded rectangles, for instance.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
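
Roughly, uncovered_area is accumulated in area units (sub-x times
sub-rows), while the pre-fix code added only the width term for the
full-step path. A compressed, self-contained illustration with made-up
names (SAMPLES_Y and add_full_height_entry are not the driver's):

    #define SAMPLES_Y 4     /* vertical subsamples per pixel row (illustrative) */

    struct cell {
            int uncovered_area;     /* accumulated as area: sub-x * sub-rows */
            int covered_height;     /* accumulated in sub-row units */
    };

    /* Full-step case: an edge enters the cell at sub-x offset fx and
     * covers the entire sub-row height.  The uncovered strip is fx wide
     * and SAMPLES_Y tall, so the Y factor belongs in the area term. */
    static void add_full_height_entry(struct cell *c, int fx)
    {
            c->uncovered_area += 2 * fx * SAMPLES_Y;   /* area, not just width */
            c->covered_height += SAMPLES_Y;
    }

Dropping the SAMPLES_Y factor, as the old code did, undercounts the
coverage of vertical edges by exactly that factor.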

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 357c4c4..c0565fa 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -607,14 +607,14 @@ cell_list_add_span(struct cell_list *cells,
 
 	cell = cell_list_find(cells, ix1);
 	if (ix1 != ix2) {
-		cell->uncovered_area += 2*fx1;
+		cell->uncovered_area += 2*fx1*FAST_SAMPLES_Y;
 		cell->covered_height += FAST_SAMPLES_Y;
 
 		cell = cell_list_find(cells, ix2);
-		cell->uncovered_area -= 2*fx2;
+		cell->uncovered_area -= 2*fx2*FAST_SAMPLES_Y;
 		cell->covered_height -= FAST_SAMPLES_Y;
 	} else
-		cell->uncovered_area += 2*(fx1-fx2);
+		cell->uncovered_area += 2*(fx1-fx2)*FAST_SAMPLES_Y;
 }
 
 static void
@@ -1025,6 +1025,7 @@ nonzero_subrow(struct active_list *active, struct cell_list *coverages)
 		} else {
 			edge->prev->next = next;
 			next->prev = edge->prev;
+			active->min_height = -1;
 		}
 
 		edge = next;
@@ -1718,7 +1719,7 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 	int stride = scratch->devKind;
 	int width = scratch->drawable.width;
 
-	__DBG(("%s: mono=%d, buf=%d\n", __FUNCTION__, mono, buf));
+	__DBG(("%s: mono=%d, buf?=%d\n", __FUNCTION__, mono, buf != NULL));
 	assert(!mono);
 	assert(converter->ymin == 0);
 	assert(converter->xmin == 0);
@@ -1750,9 +1751,9 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 			do_full_step = can_full_step(active);
 		}
 
-		__DBG(("%s: y=%d [%d], do_full_step=%d, new edges=%d, min_height=%d, vertical=%d\n",
+		__DBG(("%s: y=%d, do_full_step=%d, new edges=%d, min_height=%d, vertical=%d\n",
 		       __FUNCTION__,
-		       i, i+ymin, do_full_step,
+		       i, do_full_step,
 		       polygon->y_buckets[i] != NULL,
 		       active->min_height,
 		       active->is_vertical));
commit 8ba800c63906fb29d34f40b9437092a665bffb14
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 22:46:18 2012 +0100

    sna: Don't consider uploading inplace if the dst bo is unmappable
    
    Handle (and take advantage of) the fallback at the high level rather
    than masquerading it as an inplace write.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
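
A grossly simplified sketch of the decision being tightened here, with
stand-in types and helpers (gpu_bo, can_map and mappable_limit are
illustrative, not the driver's API): if the destination bo cannot be
mapped at all, report "not inplace" up front so the caller takes the
normal non-inplace upload path rather than pretending the write
happened inplace.

    #include <stdbool.h>
    #include <stddef.h>

    struct gpu_bo { size_t size; bool busy; };

    static bool can_map(const struct gpu_bo *bo, size_t mappable_limit)
    {
            return bo->size <= mappable_limit;   /* e.g. fits the mappable aperture */
    }

    /* May a pixel upload write directly into the GPU bo? */
    static bool upload_inplace_ok(const struct gpu_bo *bo, size_t mappable_limit)
    {
            if (!can_map(bo, mappable_limit))
                    return false;                /* fall back at the high level */
            return !bo->busy;                    /* idle and mappable: write inplace */
    }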

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1b671b0..8b1ab65 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2726,6 +2726,9 @@ static bool upload_inplace(struct sna *sna,
 	if (priv->gpu_bo) {
 		assert(priv->gpu_bo->proxy == NULL);
 
+		if (!kgem_bo_can_map(&sna->kgem, priv->gpu_bo))
+			return false;
+
 		if (!kgem_bo_is_busy(priv->gpu_bo))
 			return true;
 
commit dad24721a13ce3a357e8ddae3c2dea61045f6fc2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 21:44:22 2012 +0100

    Revert "sna: Always try to operate inplace if we an LLC gpu bo"
    
    This reverts commit 10b4a9bb5f46ab9d9c8b165084ce4174b54a8d39 as it
    causes a regression for pixel data uploads to active buffers.

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8915724..1b671b0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -904,9 +904,6 @@ static inline bool pixmap_inplace(struct sna *sna,
 	if (priv->mapped)
 		return true;
 
-	if (sna->kgem.has_llc && pixmap != sna->front)
-		return !priv->cpu_bo;
-
 	return (pixmap->devKind * pixmap->drawable.height >> 12) >
 		sna->kgem.half_cpu_cache_pages;
 }
@@ -1266,9 +1263,6 @@ static inline bool region_inplace(struct sna *sna,
 		return false;
 	}
 
-	if (sna->kgem.has_llc && pixmap != sna->front)
-		return !priv->cpu_bo;
-
 	DBG(("%s: (%dx%d), inplace? %d\n",
 	     __FUNCTION__,
 	     region->extents.x2 - region->extents.x1,
commit 681c6e72412fff96b203a09be6ac8d393f3489a5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 19:43:02 2012 +0100

    sna: Track flushing status of live bo
    
    Currently we move a bo with an outstanding kernel flush onto the
    flushing list only once it is no longer in use. This leaves us
    potentially stalling on a flush if we then try to write to the object,
    believing it to be retired and idle.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
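
A rough model of the retire step after this change (the fields and
helper below are made up; the real code also manages request lists,
reference counts and the inactive cache): a bo the kernel still reports
as busy stays tracked on the flushing list even while it is still
referenced, so a later writer re-checks its status instead of treating
it as retired and idle and then stalling.

    #include <stdbool.h>

    struct fake_bo {
            bool on_flushing_list;  /* kernel flush still outstanding, keep tracking */
            bool idle;              /* believed safe to write without stalling */
    };

    static void retire_one(struct fake_bo *bo, bool kernel_reports_busy)
    {
            if (kernel_reports_busy) {
                    /* Keep the bo on the flushing list even if it is
                     * still referenced, so later writes re-check. */
                    bo->on_flushing_list = true;
                    bo->idle = false;
            } else {
                    bo->on_flushing_list = false;
                    bo->idle = true;
            }
    }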

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 759860c..df69b90 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -303,9 +303,9 @@ void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 		assert(list_is_empty(&bo->vma));
 		bo->rq = NULL;
 		list_del(&bo->request);
-		bo->needs_flush = bo->flush;
 	}
 
+	bo->needs_flush = false;
 	bo->domain = DOMAIN_NONE;
 }
 
@@ -1280,7 +1280,6 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 	bool retired = false;
 
 	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
-		assert(bo->refcnt == 0);
 		assert(bo->rq == &_kgem_static_request);
 		assert(bo->exec == NULL);
 
@@ -1289,16 +1288,19 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 
 		DBG(("%s: moving %d from flush to inactive\n",
 		     __FUNCTION__, bo->handle));
-		if (bo->reusable && kgem_bo_set_purgeable(kgem, bo)) {
-			bo->needs_flush = false;
-			bo->domain = DOMAIN_NONE;
-			bo->rq = NULL;
-			list_del(&bo->request);
-			kgem_bo_move_to_inactive(kgem, bo);
-		} else
-			kgem_bo_free(kgem, bo);
+		bo->needs_flush = false;
+		bo->domain = DOMAIN_NONE;
+		bo->rq = NULL;
+		list_del(&bo->request);
 
-		retired = true;
+		if (!bo->refcnt) {
+			assert(bo->reusable);
+			if (kgem_bo_set_purgeable(kgem, bo)) {
+				kgem_bo_move_to_inactive(kgem, bo);
+				retired = true;
+			} else
+				kgem_bo_free(kgem, bo);
+		}
 	}
 
 	return retired;
@@ -1331,12 +1333,18 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			assert(bo->domain == DOMAIN_GPU);
 
 			list_del(&bo->request);
-			bo->rq = NULL;
 
 			if (bo->needs_flush)
 				bo->needs_flush = kgem_busy(kgem, bo->handle);
-			if (!bo->needs_flush)
+			if (bo->needs_flush) {
+				DBG(("%s: moving %d to flushing\n",
+				     __FUNCTION__, bo->handle));
+				list_add(&bo->request, &kgem->flushing);
+				bo->rq = &_kgem_static_request;
+			} else {
 				bo->domain = DOMAIN_NONE;
+				bo->rq = NULL;
+			}
 
 			if (bo->refcnt)
 				continue;
@@ -1348,20 +1356,17 @@ static bool kgem_retire__requests(struct kgem *kgem)
 				continue;
 			}
 
-			if (bo->needs_flush) {
-				DBG(("%s: moving %d to flushing\n",
-				     __FUNCTION__, bo->handle));
-				list_add(&bo->request, &kgem->flushing);
-				bo->rq = &_kgem_static_request;
-			} else if (kgem_bo_set_purgeable(kgem, bo)) {
-				DBG(("%s: moving %d to inactive\n",
-				     __FUNCTION__, bo->handle));
-				kgem_bo_move_to_inactive(kgem, bo);
-				retired = true;
-			} else {
-				DBG(("%s: closing %d\n",
-				     __FUNCTION__, bo->handle));
-				kgem_bo_free(kgem, bo);
+			if (!bo->needs_flush) {
+				if (kgem_bo_set_purgeable(kgem, bo)) {
+					DBG(("%s: moving %d to inactive\n",
+					     __FUNCTION__, bo->handle));
+					kgem_bo_move_to_inactive(kgem, bo);
+					retired = true;
+				} else {
+					DBG(("%s: closing %d\n",
+					     __FUNCTION__, bo->handle));
+					kgem_bo_free(kgem, bo);
+				}
 			}
 		}
 

