xf86-video-intel: 4 commits - src/sna/gen4_vertex.c src/sna/gen6_render.c src/sna/gen7_render.c src/sna/sna_accel.c src/sna/sna_damage.h

Chris Wilson ickle at kemper.freedesktop.org
Mon Sep 9 03:21:35 PDT 2013


 src/sna/gen4_vertex.c |   60 +++++++++++++++++++++++++++++---------
 src/sna/gen6_render.c |   77 +++++++++++++++++++++++++++++++++++++++++++++++---
 src/sna/gen7_render.c |   37 +++++++++++++++++++-----
 src/sna/sna_accel.c   |    4 +-
 src/sna/sna_damage.h  |   13 ++++++++
 5 files changed, 164 insertions(+), 27 deletions(-)

New commits:
commit 6d5df67b3e4ed5d5b9f1aabf01b5657d4c9eeac7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Sep 9 11:19:19 2013 +0100

    sna: Remember to offset the box before asserting damage
    
    When using sna_copy_boxes__inplace(), the region is in destination
    space, so the boxes must be offset before comparing against the
    source damage. The assertion forgot to do so, and failed as soon as
    it met a little complexity.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8a7a249..9ddc633 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5109,8 +5109,8 @@ sna_copy_boxes__inplace(struct sna *sna, RegionPtr region, int alu,
 		return false;
 	}
 
-	assert(sna_damage_contains_box(src_priv->gpu_damage, &region->extents) == PIXMAN_REGION_IN);
-	assert(sna_damage_contains_box(src_priv->cpu_damage, &region->extents) == PIXMAN_REGION_OUT);
+	assert(sna_damage_contains_box__offset(src_priv->gpu_damage, &region->extents, dx, dy) == PIXMAN_REGION_IN);
+	assert(sna_damage_contains_box__offset(src_priv->cpu_damage, &region->extents, dx, dy) == PIXMAN_REGION_OUT);
 
 	src = kgem_bo_map__cpu(&sna->kgem, src_priv->gpu_bo);
 	if (src == NULL) {
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index 03a54a3..187d312 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -215,6 +215,19 @@ static inline int sna_damage_contains_box(struct sna_damage *damage,
 
 	return _sna_damage_contains_box(damage, box);
 }
+static inline int sna_damage_contains_box__offset(struct sna_damage *damage,
+						  const BoxRec *box, int dx, int dy)
+{
+	BoxRec b;
+
+	if (DAMAGE_IS_ALL(damage))
+		return PIXMAN_REGION_IN;
+
+	b = *box;
+	b.x1 += dx; b.x2 += dx;
+	b.y1 += dy; b.y2 += dy;
+	return _sna_damage_contains_box(damage, &b);
+}
 bool _sna_damage_contains_box__no_reduce(const struct sna_damage *damage,
 					const BoxRec *box);
 static inline bool
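
To make the two coordinate spaces concrete, here is a minimal standalone
sketch (plain C with a stand-in box type, not driver code) of the
translation the new helper applies before the damage lookup:

#include <assert.h>

/* Stand-in for the pixman/X box type used by the driver. */
typedef struct { int x1, y1, x2, y2; } BoxRec;

/* Mirrors what sna_damage_contains_box__offset() does to its box:
 * translate a destination-space box by (dx, dy) into the source's
 * coordinate space before querying the source damage. */
static BoxRec box_offset(BoxRec b, int dx, int dy)
{
	b.x1 += dx; b.x2 += dx;
	b.y1 += dy; b.y2 += dy;
	return b;
}

int main(void)
{
	BoxRec dst = { 10, 10, 74, 74 };      /* copy extents, dst space */
	int dx = -5, dy = 20;                 /* hypothetical src - dst offset */
	BoxRec src = box_offset(dst, dx, dy); /* same pixels, src space */

	assert(src.x1 == 5 && src.y1 == 30 && src.x2 == 69 && src.y2 == 94);
	return 0;
}
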
commit 634748486f33658f09c8a7f4508840fad6df85f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 8 23:02:29 2013 +0100

    sna/gen4+: Flush batch if idle on filling vbo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_vertex.c b/src/sna/gen4_vertex.c
index dab11e8..adeed3c 100644
--- a/src/sna/gen4_vertex.c
+++ b/src/sna/gen4_vertex.c
@@ -67,6 +67,8 @@ int gen4_vertex_finish(struct sna *sna)
 
 	/* Note: we only need dword alignment (currently) */
 
+	hint = CREATE_GTT_MAP;
+
 	bo = sna->render.vbo;
 	if (bo) {
 		for (i = 0; i < sna->render.nvertex_reloc; i++) {
@@ -88,11 +90,15 @@ int gen4_vertex_finish(struct sna *sna)
 		sna->render.vb_id = 0;
 
 		kgem_bo_destroy(&sna->kgem, bo);
+		hint |= CREATE_CACHED | CREATE_NO_THROTTLE;
+	} else {
+		if (kgem_is_idle(&sna->kgem)) {
+			sna->render.vertices = sna->render.vertex_data;
+			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+			return 0;
+		}
 	}
 
-	hint = CREATE_GTT_MAP;
-	if (bo)
-		hint |= CREATE_CACHED | CREATE_NO_THROTTLE;
 
 	size = 256*1024;
 	assert(!sna->render.active);
@@ -186,18 +192,44 @@ void gen4_vertex_close(struct sna *sna)
 			bo = NULL;
 			sna->kgem.nbatch += sna->render.vertex_used;
 		} else {
-			bo = kgem_create_linear(&sna->kgem,
-						4*sna->render.vertex_used,
-						CREATE_NO_THROTTLE);
-			if (bo && !kgem_bo_write(&sna->kgem, bo,
-						 sna->render.vertex_data,
-						 4*sna->render.vertex_used)) {
-				kgem_bo_destroy(&sna->kgem, bo);
-				bo = NULL;
+			sna->render.vbo = kgem_create_linear(&sna->kgem,
+							     256*1024, CREATE_GTT_MAP | CREATE_NO_RETIRE | CREATE_CACHED);
+			if (sna->render.vbo)
+				sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
+			if (sna->render.vertices != NULL) {
+				int size;
+
+				assert(sizeof(float)*sna->render.vertex_used <=
+				       __kgem_bo_size(sna->render.vbo));
+				memcpy(sna->render.vertices,
+				       sna->render.vertex_data,
+				       sizeof(float)*sna->render.vertex_used);
+
+				size = __kgem_bo_size(sna->render.vbo)/4;
+				if (size >= UINT16_MAX)
+					size = UINT16_MAX - 1;
+
+				sna->render.vertex_size = size;
+
+				bo = sna->render.vbo;
+			} else {
+				if (sna->render.vbo) {
+					kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+					sna->render.vbo = NULL;
+				}
+				bo = kgem_create_linear(&sna->kgem,
+							4*sna->render.vertex_used,
+							CREATE_NO_THROTTLE);
+				if (bo && !kgem_bo_write(&sna->kgem, bo,
+							 sna->render.vertex_data,
+							 4*sna->render.vertex_used)) {
+					kgem_bo_destroy(&sna->kgem, bo);
+					bo = NULL;
+				}
+				DBG(("%s: new vbo: %d\n", __FUNCTION__,
+				     sna->render.vertex_used));
+				free_bo = bo;
 			}
-			DBG(("%s: new vbo: %d\n", __FUNCTION__,
-			     sna->render.vertex_used));
-			free_bo = bo;
 		}
 	}
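
Condensed, the reworked decision in gen4_vertex_finish() reads roughly as
below (a toy paraphrase of the diff with invented names, not driver code);
the idle case corresponds to the early return 0, which presumably leaves
the caller to flush the batch, as the commit title suggests:

#include <stdbool.h>
#include <stdio.h>

enum vbo_action {
	FLUSH_AND_REUSE_STATIC,	/* no vbo, GPU idle: return 0 to the caller */
	NEW_CACHED_VBO,		/* recycling: CREATE_CACHED | CREATE_NO_THROTTLE */
	NEW_GTT_VBO		/* busy, first fill: plain CREATE_GTT_MAP */
};

static enum vbo_action pick_vbo(bool have_old_vbo, bool gpu_idle)
{
	if (have_old_vbo)
		return NEW_CACHED_VBO;
	if (gpu_idle)
		return FLUSH_AND_REUSE_STATIC;
	return NEW_GTT_VBO;
}

int main(void)
{
	printf("recycle=%d idle=%d busy=%d\n",
	       pick_vbo(true, false),
	       pick_vbo(false, true),
	       pick_vbo(false, false));
	return 0;
}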
 
commit 77d74ec777d511df6f5aa484f3b7752f2c0b96ad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 8 22:34:09 2013 +0100

    sna/gen6: Prefer the RCS on large GT systems
    
    For SNB, the difference between the RCS and BCS is more marginal, but
    it is slightly in favour of using rendercopy on GT2.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 52e0171..70821f7 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -77,6 +77,7 @@ struct gt_info {
 		int max_vs_entries;
 		int max_gs_entries;
 	} urb;
+	int gt;
 };
 
 static const struct gt_info gt1_info = {
@@ -85,6 +86,7 @@ static const struct gt_info gt1_info = {
 	.max_gs_threads = 21,
 	.max_wm_threads = 40,
 	.urb = { 32, 256, 256 },
+	.gt = 1,
 };
 
 static const struct gt_info gt2_info = {
@@ -93,6 +95,7 @@ static const struct gt_info gt2_info = {
 	.max_gs_threads = 60,
 	.max_wm_threads = 80,
 	.urb = { 64, 256, 256 },
+	.gt = 2,
 };
 
 static const uint32_t ps_kernel_packed[][4] = {
@@ -1889,6 +1892,24 @@ inline static bool can_switch_to_blt(struct sna *sna,
 	return kgem_ring_is_idle(&sna->kgem, KGEM_BLT);
 }
 
+inline static bool can_switch_to_render(struct sna *sna,
+					struct kgem_bo *bo)
+{
+	if (sna->kgem.ring == KGEM_RENDER)
+		return true;
+
+	if (NO_RING_SWITCH)
+		return false;
+
+	if (!sna->kgem.has_semaphores)
+		return false;
+
+	if (bo && !RQ_IS_BLT(bo->rq) && !bo->scanout)
+		return true;
+
+	return !kgem_ring_is_idle(&sna->kgem, KGEM_RENDER);
+}
+
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 {
 	if (kgem_bo_is_render(bo))
@@ -1908,16 +1929,42 @@ static int prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
 	return bo->tiling == I915_TILING_NONE || bo->scanout;
 }
 
+inline static bool force_blt_ring(struct sna *sna)
+{
+	if (sna->flags & SNA_POWERSAVE)
+		return true;
+
+	if (sna->kgem.mode == KGEM_RENDER)
+		return false;
+
+	if (sna->render_state.gen6.info->gt < 2)
+		return true;
+
+	return false;
+}
+
 inline static bool prefer_blt_ring(struct sna *sna,
 				   struct kgem_bo *bo,
 				   unsigned flags)
 {
-	if (sna->flags & SNA_POWERSAVE)
-		return true;
+	assert(!force_blt_ring(sna));
+	assert(!kgem_bo_is_render(bo));
 
 	return can_switch_to_blt(sna, bo, flags);
 }
 
+inline static bool prefer_render_ring(struct sna *sna,
+				      struct kgem_bo *bo)
+{
+	if (sna->flags & SNA_POWERSAVE)
+		return false;
+
+	if (sna->render_state.gen6.info->gt < 2)
+		return false;
+
+	return can_switch_to_render(sna, bo);
+}
+
 static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
@@ -2171,10 +2218,16 @@ prefer_blt_composite(struct sna *sna, struct sna_composite_op *tmp)
 	    untiled_tlb_miss(tmp->src.bo))
 		return true;
 
+	if (force_blt_ring(sna))
+		return true;
+
 	if (kgem_bo_is_render(tmp->dst.bo) ||
 	    kgem_bo_is_render(tmp->src.bo))
 		return false;
 
+	if (prefer_render_ring(sna, tmp->dst.bo))
+		return false;
+
 	if (!prefer_blt_ring(sna, tmp->dst.bo, 0))
 		return false;
 
@@ -2636,10 +2689,16 @@ static inline bool prefer_blt_copy(struct sna *sna,
 	    untiled_tlb_miss(dst_bo))
 		return true;
 
+	if (force_blt_ring(sna))
+		return true;
+
 	if (kgem_bo_is_render(dst_bo) ||
 	    kgem_bo_is_render(src_bo))
 		return false;
 
+	if (prefer_render_ring(sna, dst_bo))
+		return false;
+
 	if (!prefer_blt_ring(sna, dst_bo, flags))
 		return false;
 
@@ -3051,11 +3110,17 @@ static inline bool prefer_blt_fill(struct sna *sna,
 	if (PREFER_RENDER)
 		return PREFER_RENDER < 0;
 
+	if (untiled_tlb_miss(bo))
+		return true;
+
+	if (force_blt_ring(sna))
+		return true;
+
 	if (kgem_bo_is_render(bo))
 		return false;
 
-	if (untiled_tlb_miss(bo))
-		return true;
+	if (prefer_render_ring(sna, bo))
+		return false;
 
 	if (!prefer_blt_ring(sna, bo, 0))
 		return false;
@@ -3159,6 +3224,7 @@ gen6_render_fill_boxes(struct sna *sna,
 	assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
 	assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
 
+	kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
@@ -3338,6 +3404,7 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	assert(GEN6_SAMPLER(op->base.u.gen6.flags) == FILL_SAMPLER);
 	assert(GEN6_VERTEX(op->base.u.gen6.flags) == FILL_VERTEX);
 
+	kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
@@ -3418,6 +3485,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
 	assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
 
+	kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
@@ -3504,6 +3572,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
 	assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
 
+	kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
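
The gist of the new ring policy, distilled into a standalone sketch
(booleans and the gt parameter stand in for the kgem and render state;
these are not the driver's actual signatures): powersave and GT1 force
the BLT, and only GT2 ever asks to switch to the render ring:

#include <stdbool.h>
#include <stdio.h>

static bool force_blt_ring(bool powersave, bool mode_is_render, int gt)
{
	if (powersave)
		return true;	/* SNA_POWERSAVE: the BCS draws less power */
	if (mode_is_render)
		return false;	/* batch already on the RCS, stay there */
	return gt < 2;		/* GT1: the BLT wins outright */
}

static bool prefer_render_ring(bool powersave, int gt, bool can_switch)
{
	if (powersave)
		return false;
	if (gt < 2)
		return false;
	return can_switch;	/* GT2: slightly favour rendercopy */
}

int main(void)
{
	printf("gt1 forces blt: %d\n", force_blt_ring(false, false, 1));
	printf("gt2 prefers rcs: %d\n", prefer_render_ring(false, 2, true));
	return 0;
}
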
commit 287727ee223d56705e5792fe0a6c9a99077559e6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 8 17:59:10 2013 +0100

    sna/gen7: Prefer the BLT for gt1 systems
    
    On gt1, the BCS is faster than the RCS for all equivalent operations,
    unlike gt2+ where the RCS is faster (but at greater power draw).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c0d22df..53d3688 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2163,6 +2163,9 @@ inline static bool can_switch_to_render(struct sna *sna,
 
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 {
+	if (kgem_bo_is_render(bo))
+		return false;
+
 	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
 }
 
@@ -2177,16 +2180,27 @@ static int prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
 	return bo->tiling == I915_TILING_NONE || (bo->scanout && !sna->kgem.has_wt);
 }
 
-inline static bool prefer_blt_ring(struct sna *sna,
-				   struct kgem_bo *bo,
-				   unsigned flags)
+inline static bool force_blt_ring(struct sna *sna)
 {
 	if (sna->flags & SNA_POWERSAVE)
 		return true;
 
-	if (kgem_bo_is_render(bo))
+	if (sna->kgem.mode == KGEM_RENDER)
 		return false;
 
+	if (sna->render_state.gen7.info->gt < 2)
+		return true;
+
+	return false;
+}
+
+inline static bool prefer_blt_ring(struct sna *sna,
+				   struct kgem_bo *bo,
+				   unsigned flags)
+{
+	assert(!force_blt_ring(sna));
+	assert(!kgem_bo_is_render(bo));
+
 	return can_switch_to_blt(sna, bo, flags);
 }
 
@@ -2455,6 +2469,9 @@ prefer_blt_composite(struct sna *sna, struct sna_composite_op *tmp)
 	    untiled_tlb_miss(tmp->src.bo))
 		return true;
 
+	if (force_blt_ring(sna))
+		return true;
+
 	if (kgem_bo_is_render(tmp->dst.bo) ||
 	    kgem_bo_is_render(tmp->src.bo))
 		return false;
@@ -2899,6 +2916,9 @@ static inline bool prefer_blt_copy(struct sna *sna,
 	    untiled_tlb_miss(dst_bo))
 		return true;
 
+	if (force_blt_ring(sna))
+		return true;
+
 	if (kgem_bo_is_render(dst_bo) ||
 	    kgem_bo_is_render(src_bo))
 		return false;
@@ -3305,12 +3325,15 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 static inline bool prefer_blt_fill(struct sna *sna,
 				   struct kgem_bo *bo)
 {
-	if (kgem_bo_is_render(bo))
-		return false;
-
 	if (untiled_tlb_miss(bo))
 		return true;
 
+	if (force_blt_ring(sna))
+		return true;
+
+	if (kgem_bo_is_render(bo))
+		return false;
+
 	if (prefer_render_ring(sna, bo))
 		return false;
 


More information about the xorg-commit mailing list