xf86-video-intel: 4 commits - src/sna/gen4_render.c src/sna/gen5_render.c src/sna/sna_glyphs.c

Chris Wilson ickle at kemper.freedesktop.org
Fri Nov 23 07:52:10 PST 2012


 src/sna/gen4_render.c |   37 +++++++++++++++----------------------
 src/sna/gen5_render.c |    2 +-
 src/sna/sna_glyphs.c  |    3 ++-
 3 files changed, 18 insertions(+), 24 deletions(-)

New commits:
commit 4023b2044757a9a67d564be0c8adf4885973a6e3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 23 15:42:18 2012 +0000

    sna/gen4: Force composite(WHITE, glyph) for building the glyphstring mask
    
    For reasons that are not apparent, if we don't composite with
    source/mask for the glyph strings, there appears to be some cache
    corruption. About as bizarre as the rest of the gen4 idiosyncrasies.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 9a6ad4b..51ef2b0 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1090,7 +1090,8 @@ next_image:
 					     __FUNCTION__,
 					     (int)this_atlas->format,
 					     (int)(format->depth << 24 | format->format)));
-					if (this_atlas->format == (format->depth << 24 | format->format)) {
+					if (this_atlas->format == (format->depth << 24 | format->format) &&
+					    !(sna->kgem.gen >= 40 && sna->kgem.gen < 50)) { /* XXX cache corruption? how? */
 						ok = sna->render.composite(sna, PictOpAdd,
 									   this_atlas, NULL, mask,
 									   0, 0, 0, 0, 0, 0,
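
For reference, a minimal sketch (not the driver code; the helper name and the
trimmed arguments are hypothetical) of the decision the new condition encodes:
take the direct PictOpAdd path only when the atlas and mask formats match and
the chip is not gen4, which is the range 40 <= gen < 50 in SNA's numbering.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper, for illustration only: returns true when the
 * glyph atlas may be added straight onto the mask picture. */
static bool can_add_atlas_directly(uint32_t atlas_format,
				   uint32_t mask_depth,
				   uint32_t mask_format,
				   unsigned int gen)
{
	if (atlas_format != (mask_depth << 24 | mask_format))
		return false;

	/* gen4 shows apparent cache corruption on this path, so force
	 * the composite(WHITE, glyph) fallback there instead */
	if (gen >= 40 && gen < 50)
		return false;

	return true;
}
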
commit f74b62755c6e41097c23cc506984859e556a3415
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 23 14:59:42 2012 +0000

    sna/gen4: Set composite op before testing for a BLT compatible op
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c3a96da..1627048 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2332,6 +2332,7 @@ gen4_render_composite(struct sna *sna,
 					   dst_x, dst_y, width, height))
 		return false;
 
+	tmp->op = op;
 	switch (gen4_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
@@ -2355,7 +2356,6 @@ gen4_render_composite(struct sna *sna,
 		break;
 	}
 
-	tmp->op = op;
 	tmp->is_affine = tmp->src.is_affine;
 	tmp->has_component_alpha = false;
 	tmp->need_magic_ca_pass = false;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 0e3aec6..b49b25e 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2317,6 +2317,7 @@ gen5_render_composite(struct sna *sna,
 	}
 
 	DBG(("%s: preparing source\n", __FUNCTION__));
+	tmp->op = op;
 	switch (gen5_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
@@ -2340,7 +2341,6 @@ gen5_render_composite(struct sna *sna,
 		break;
 	}
 
-	tmp->op = op;
 	tmp->is_affine = tmp->src.is_affine;
 	tmp->has_component_alpha = false;
 	tmp->need_magic_ca_pass = false;
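
Both hunks make the same ordering fix: the composite op is recorded in the
state block before the picture-preparation helper runs, since that helper can
test it for a BLT-compatible op. A minimal sketch of the pattern, with
hypothetical, trimmed-down names standing in for the driver structures:

#include <stdbool.h>

struct composite_state {
	int op;		/* must hold the caller's op before helpers read it */
};

/* Stand-in for a helper such as gen4_composite_picture(), which may
 * branch on tmp->op while preparing the source channel. */
static bool prepare_source(struct composite_state *tmp)
{
	return tmp->op != 0;
}

static bool render_composite(struct composite_state *tmp, int op)
{
	tmp->op = op;			/* set first ...                  */
	if (!prepare_source(tmp))	/* ... then let helpers inspect it */
		return false;
	return true;
}
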
commit 4c922eb52cadb867a0a15929e5a214c84a5992f3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 23 14:19:59 2012 +0000

    sna/gen4: Pass the mask channel explicitly rather than through a dummy mask
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 9f8f4ba..c3a96da 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -867,6 +867,7 @@ gen4_emit_composite_primitive(struct sna *sna,
 	bool is_affine = op->is_affine;
 	const float *src_sf = op->src.scale;
 	const float *mask_sf = op->mask.scale;
+	bool has_mask = op->u.gen4.ve_id & 2;
 
 	if (is_affine) {
 		sna_get_transformed_coordinates(r->src.x + op->src.offset[0],
@@ -907,7 +908,7 @@ gen4_emit_composite_primitive(struct sna *sna,
 						   &src_w[2]);
 	}
 
-	if (op->mask.bo) {
+	if (has_mask) {
 		if (is_affine) {
 			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0],
 							r->mask.y + op->mask.offset[1],
@@ -953,7 +954,7 @@ gen4_emit_composite_primitive(struct sna *sna,
 	OUT_VERTEX_F(src_y[2] * src_sf[1]);
 	if (!is_affine)
 		OUT_VERTEX_F(src_w[2]);
-	if (op->mask.bo) {
+	if (has_mask) {
 		OUT_VERTEX_F(mask_x[2] * mask_sf[0]);
 		OUT_VERTEX_F(mask_y[2] * mask_sf[1]);
 		if (!is_affine)
@@ -965,7 +966,7 @@ gen4_emit_composite_primitive(struct sna *sna,
 	OUT_VERTEX_F(src_y[1] * src_sf[1]);
 	if (!is_affine)
 		OUT_VERTEX_F(src_w[1]);
-	if (op->mask.bo) {
+	if (has_mask) {
 		OUT_VERTEX_F(mask_x[1] * mask_sf[0]);
 		OUT_VERTEX_F(mask_y[1] * mask_sf[1]);
 		if (!is_affine)
@@ -977,7 +978,7 @@ gen4_emit_composite_primitive(struct sna *sna,
 	OUT_VERTEX_F(src_y[0] * src_sf[1]);
 	if (!is_affine)
 		OUT_VERTEX_F(src_w[0]);
-	if (op->mask.bo) {
+	if (has_mask) {
 		OUT_VERTEX_F(mask_x[0] * mask_sf[0]);
 		OUT_VERTEX_F(mask_y[0] * mask_sf[1]);
 		if (!is_affine)
@@ -1259,7 +1260,7 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 	uint16_t sp, bp;
 
 	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
-	     __FUNCTION__, op->mask.bo != NULL,
+	     __FUNCTION__, op->u.gen4.ve_id & 2,
 	     op->src.filter, op->src.repeat,
 	     op->mask.filter, op->mask.repeat,
 	     kernel, blend, op->has_component_alpha, (int)op->dst.format));
@@ -1279,7 +1280,7 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 	OUT_BATCH(sna->render_state.gen4.vs);
 	OUT_BATCH(GEN4_GS_DISABLE); /* passthrough */
 	OUT_BATCH(GEN4_CLIP_DISABLE); /* passthrough */
-	OUT_BATCH(sna->render_state.gen4.sf[op->mask.bo != NULL]);
+	OUT_BATCH(sna->render_state.gen4.sf[!!(op->u.gen4.ve_id & 2)]);
 	OUT_BATCH(sna->render_state.gen4.wm + sp);
 	OUT_BATCH(sna->render_state.gen4.cc + bp);
 
@@ -1429,7 +1430,8 @@ gen4_bind_surfaces(struct sna *sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
 			     false);
-	if (op->mask.bo)
+	if (op->mask.bo) {
+		assert(op->u.gen4.ve_id & 2);
 		binding_table[2] =
 			gen4_bind_bo(sna,
 				     op->mask.bo,
@@ -1437,6 +1439,7 @@ gen4_bind_surfaces(struct sna *sna,
 				     op->mask.height,
 				     op->mask.card_format,
 				     false);
+	}
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table &&
@@ -2407,10 +2410,6 @@ gen4_render_composite(struct sna *sna,
 			tmp->prim_emit = gen4_emit_composite_primitive_identity_source_mask;
 
 	} else {
-		/* Use a dummy mask to w/a the flushing issues */
-		if (!gen4_composite_solid_init(sna, &tmp->mask, 0))
-			goto cleanup_src;
-
 		if (tmp->src.is_solid)
 			tmp->prim_emit = gen4_emit_composite_primitive_solid;
 		else if (tmp->src.transform == NULL)
@@ -2620,7 +2619,6 @@ gen4_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -2714,8 +2712,7 @@ gen4_render_composite_spans(struct sna *sna,
 		break;
 	}
 
-	if (!gen4_composite_solid_init(sna, &tmp->base.mask, 0))
-		goto cleanup_src;
+	tmp->base.mask.bo = NULL;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.has_component_alpha = false;
@@ -3192,7 +3189,6 @@ gen4_render_fill_boxes(struct sna *sna,
 	tmp.dst.bo = dst_bo;
 
 	gen4_composite_solid_init(sna, &tmp.src, pixel);
-	gen4_composite_solid_init(sna, &tmp.mask, 0);
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 5;
@@ -3218,7 +3214,6 @@ gen4_render_fill_boxes(struct sna *sna,
 
 	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	kgem_bo_destroy(&sna->kgem, tmp.mask.bo);
 	return true;
 }
 
@@ -3258,7 +3253,6 @@ gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
 	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
-	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 }
 
 static bool
@@ -3299,7 +3293,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	gen4_composite_solid_init(sna, &op->base.src,
 				  sna_rgba_for_color(color,
 						     dst->drawable.depth));
-	gen4_composite_solid_init(sna, &op->base.mask, 0);
+	op->base.mask.bo = NULL;
 
 	op->base.is_affine = true;
 	op->base.floats_per_vertex = 5;
@@ -3375,7 +3369,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen4_composite_solid_init(sna, &tmp.src,
 				  sna_rgba_for_color(color,
 						     dst->drawable.depth));
-	gen4_composite_solid_init(sna, &tmp.mask, 0);
+	tmp.mask.bo = NULL;
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 5;
@@ -3398,7 +3392,6 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	kgem_bo_destroy(&sna->kgem, tmp.mask.bo);
 
 	return true;
 }
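
The recurring test in this commit is the same throughout: whether the vertex
stream carries a mask coordinate pair is now read from bit 1 of the gen4
vertex-element id rather than from the presence of a mask bo, so the dummy
solid mask (and its bo lifetime management) can be dropped. A sketch of the
test, with the surrounding state trimmed to illustrative fields:

#include <stdbool.h>

/* Illustrative subset of the op state; only the field the test uses. */
struct gen4_op_state {
	struct {
		unsigned int ve_id;	/* bit 1 set => mask channel present */
	} gen4;
};

static inline bool op_emits_mask_coords(const struct gen4_op_state *op)
{
	return (op->gen4.ve_id & 2) != 0;
}
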
commit 2e68efa8ec66b4c89e9816bfa15067b398da5e3e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Nov 23 14:04:51 2012 +0000

    sna/gen4: Reduce the flush before performing the CA pass
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index e8ad253..9f8f4ba 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -237,7 +237,7 @@ static void gen4_magic_ca_pass(struct sna *sna,
 	gen4_emit_pipelined_pointers(sna, op, PictOpAdd,
 				     gen4_choose_composite_kernel(PictOpAdd,
 								  true, true, op->is_affine));
-	OUT_BATCH(MI_FLUSH);
+	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);
 
 	OUT_BATCH(GEN4_3DPRIMITIVE |
 		  GEN4_3DPRIMITIVE_VERTEX_SEQUENTIAL |

