xf86-video-intel: 5 commits - src/sna/gen4_render.c src/sna/sna_composite.c src/sna/sna_glyphs.c src/sna/sna_gradient.c src/sna/sna_render.c src/sna/sna_render.h

Chris Wilson ickle at kemper.freedesktop.org
Fri Jun 10 00:27:06 PDT 2011


 src/sna/gen4_render.c   |  115 +++++++++++++++++++++++++++++++++++-----------
 src/sna/sna_composite.c |   54 ++++++++++-----------
 src/sna/sna_glyphs.c    |  118 ++++++++++++++++++++++++++++++------------------
 src/sna/sna_gradient.c  |   37 ++++++++++-----
 src/sna/sna_render.c    |    4 +
 src/sna/sna_render.h    |    4 -
 6 files changed, 220 insertions(+), 112 deletions(-)

New commits:
commit 0594724dc4e9e459240b8b290ddf42ed711be22b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 10 08:18:07 2011 +0100

    sna/gen4: Restore normal state after CA pass and FLUSH_EVERY_VERTEX
    
    By working around the broken shaders, we emitted the CA rectangle in the
    middle of a sequence of glyphs and left the state set up for CA. So we
    need to reset the pipeline state at the start of every composite blt.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 0c31019..582e398 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -58,6 +58,12 @@
 #define PREFER_BLT 1
 #define FLUSH_EVERY_VERTEX 1
 
+#define NO_COMPOSITE 0
+#define NO_COPY 0
+#define NO_COPY_BOXES 0
+#define NO_FILL 0
+#define NO_FILL_BOXES 0
+
 #if FLUSH_EVERY_VERTEX
 #define FLUSH() do { \
 	gen4_vertex_flush(sna); \
@@ -276,7 +282,7 @@ static const struct formatinfo {
 #define SAMPLER_OFFSET(sf, se, mf, me, k) \
 	((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * KERNEL_COUNT + (k)) * 64)
 
-static bool
+static void
 gen4_emit_pipelined_pointers(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     int blend, int kernel);
@@ -313,14 +319,15 @@ static void gen4_magic_ca_pass(struct sna *sna,
 		return;
 
 	DBG(("%s: CA fixup\n", __FUNCTION__));
+	assert(op->mask.bo != NULL);
+	assert(op->has_component_alpha);
 
 	if (FLUSH_EVERY_VERTEX)
 		OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);
 
-	gen4_emit_pipelined_pointers
-		(sna, op, PictOpAdd,
-		 gen4_choose_composite_kernel(PictOpAdd,
-					      TRUE, TRUE, op->is_affine));
+	gen4_emit_pipelined_pointers(sna, op, PictOpAdd,
+				     gen4_choose_composite_kernel(PictOpAdd,
+								  TRUE, TRUE, op->is_affine));
 
 	OUT_BATCH(GEN4_3DPRIMITIVE |
 		  GEN4_3DPRIMITIVE_VERTEX_SEQUENTIAL |
@@ -447,9 +454,10 @@ static uint32_t gen4_get_blend(int op,
 static uint32_t gen4_get_dest_format(PictFormat format)
 {
 	switch (format) {
+	default:
+		assert(0);
 	case PICT_a8r8g8b8:
 	case PICT_x8r8g8b8:
-	default:
 		return GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;
 	case PICT_a8b8g8r8:
 	case PICT_x8b8g8r8:
@@ -1068,7 +1076,7 @@ static int gen4_get_rectangles__flush(struct sna *sna)
 	gen4_vertex_finish(sna, FALSE);
 	sna->render.vertex_index = 0;
 
-	return  ARRAY_SIZE(sna->render.vertex_data);
+	return ARRAY_SIZE(sna->render.vertex_data);
 }
 
 inline static int gen4_get_rectangles(struct sna *sna,
@@ -1088,7 +1096,7 @@ inline static int gen4_get_rectangles(struct sna *sna,
 	if (!gen4_rectangle_begin(sna, op))
 		return 0;
 
-	if (want * op->floats_per_vertex*3 > rem)
+	if (want > 1 && want * op->floats_per_vertex*3 > rem)
 		want = rem / (3*op->floats_per_vertex);
 
 	sna->render.vertex_index += 3*want;
@@ -1099,19 +1107,15 @@ static uint32_t *gen4_composite_get_binding_table(struct sna *sna,
 						  const struct sna_composite_op *op,
 						  uint16_t *offset)
 {
-	uint32_t *table;
-
 	sna->kgem.surface -=
 		sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
-	/* Clear all surplus entries to zero in case of prefetch */
-	table = memset(sna->kgem.batch + sna->kgem.surface,
-		       0, sizeof(struct gen4_surface_state_padded));
-
-	*offset = sna->kgem.surface;
 
 	DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface));
 
-	return table;
+	/* Clear all surplus entries to zero in case of prefetch */
+	*offset = sna->kgem.surface;
+	return memset(sna->kgem.batch + sna->kgem.surface,
+		      0, sizeof(struct gen4_surface_state_padded));
 }
 
 static void
@@ -1131,6 +1135,9 @@ gen4_emit_urb(struct sna *sna)
 	int urb_sf_start, urb_sf_size;
 	int urb_cs_start, urb_cs_size;
 
+	if (!sna->render_state.gen4.needs_urb)
+		return;
+
 	urb_vs_start = 0;
 	urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
 	urb_gs_start = urb_vs_start + urb_vs_size;
@@ -1158,6 +1165,8 @@ gen4_emit_urb(struct sna *sna)
 	/* Constant buffer state */
 	OUT_BATCH(GEN4_CS_URB_STATE | 0);
 	OUT_BATCH((URB_CS_ENTRY_SIZE - 1) << 4 | URB_CS_ENTRIES << 0);
+
+	sna->render_state.gen4.needs_urb = false;
 }
 
 static void
@@ -1246,13 +1255,19 @@ gen4_emit_binding_table(struct sna *sna, uint16_t offset)
 	OUT_BATCH(offset*4);
 }
 
-static bool
+static void
 gen4_emit_pipelined_pointers(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     int blend, int kernel)
 {
 	uint16_t offset = sna->kgem.nbatch, last;
 
+	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
+	     __FUNCTION__, op->mask.bo != NULL,
+	     op->src.filter, op->src.repeat,
+	     op->mask.filter, op->mask.repeat,
+	     kernel, blend, op->has_component_alpha, op->dst.format));
+
 	OUT_BATCH(GEN4_3DSTATE_PIPELINED_POINTERS | 5);
 	OUT_BATCH(sna->render_state.gen4.vs);
 	OUT_BATCH(GEN4_GS_DISABLE); /* passthrough */
@@ -1267,16 +1282,13 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 
 	last = sna->render_state.gen4.last_pipelined_pointers;
 	if (last &&
-	    sna->kgem.batch[offset + 1] == sna->kgem.batch[last + 1] &&
-	    sna->kgem.batch[offset + 3] == sna->kgem.batch[last + 3] &&
 	    sna->kgem.batch[offset + 4] == sna->kgem.batch[last + 4] &&
 	    sna->kgem.batch[offset + 5] == sna->kgem.batch[last + 5] &&
 	    sna->kgem.batch[offset + 6] == sna->kgem.batch[last + 6]) {
 		sna->kgem.nbatch = offset;
-		return false;
 	} else {
 		sna->render_state.gen4.last_pipelined_pointers = offset;
-		return true;
+		gen4_emit_urb(sna);
 	}
 }
 
@@ -1377,8 +1389,7 @@ gen4_emit_state(struct sna *sna,
 	       	uint16_t wm_binding_table)
 {
 	gen4_emit_binding_table(sna, wm_binding_table);
-	if (gen4_emit_pipelined_pointers(sna, op, op->op, op->u.gen4.wm_kernel))
-		gen4_emit_urb(sna);
+	gen4_emit_pipelined_pointers(sna, op, op->op, op->u.gen4.wm_kernel);
 	gen4_emit_vertex_elements(sna, op);
 	gen4_emit_drawing_rectangle(sna, op);
 }
@@ -1413,6 +1424,14 @@ gen4_bind_surfaces(struct sna *sna,
 				     op->mask.card_format,
 				     FALSE);
 
+	if (sna->kgem.surface == offset &&
+	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table &&
+	    (op->mask.bo == NULL ||
+	     sna->kgem.batch[sna->render_state.gen4.surface_table+2] == binding_table[2])) {
+		sna->kgem.surface += sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
+		offset = sna->render_state.gen4.surface_table;
+	}
+
 	gen4_emit_state(sna, op, offset);
 }
 
@@ -1428,6 +1447,11 @@ gen4_render_composite_blt(struct sna *sna,
 	     r->dst.x, r->dst.y, op->dst.x, op->dst.y,
 	     r->width, r->height));
 
+	if (FLUSH_EVERY_VERTEX && op->need_magic_ca_pass)
+		/* We have to reset the state after every FLUSH */
+		gen4_emit_pipelined_pointers(sna, op,
+					     op->op, op->u.gen4.wm_kernel);
+
 	if (!gen4_get_rectangles(sna, op, 1)) {
 		gen4_bind_surfaces(sna, op);
 		gen4_get_rectangles(sna, op, 1);
@@ -1828,17 +1852,14 @@ picture_is_cpu(PicturePtr picture)
 	return is_cpu(picture->pDrawable);
 }
 
-#if PREFER_BLT
 static inline bool prefer_blt(struct sna *sna)
 {
+#if PREFER_BLT
 	return true;
-}
 #else
-static inline bool prefer_blt(struct sna *sna)
-{
 	return sna->kgem.mode != KGEM_RENDER;
-}
 #endif
+}
 
 static Bool
 try_blt(struct sna *sna,
@@ -1876,6 +1897,17 @@ gen4_render_composite(struct sna *sna,
 	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
 	     width, height, sna->kgem.mode));
 
+#if NO_COMPOSITE
+	if (mask)
+		return FALSE;
+
+	return sna_blt_composite(sna, op,
+				 src, dst,
+				 src_x, src_y,
+				 dst_x, dst_y,
+				 width, height, tmp);
+#endif
+
 	if (mask == NULL &&
 	    try_blt(sna, dst, src, width, height) &&
 	    sna_blt_composite(sna, op,
@@ -2107,6 +2139,14 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 {
 	struct sna_composite_op tmp;
 
+#if NO_COPY_BOXES
+	return sna_blt_copy_boxes(sna, alu,
+				  src_bo, src_dx, src_dy,
+				  dst_bo, dst_dx, dst_dy,
+				  dst->drawable.bitsPerPixel,
+				  box, n);
+#endif
+
 	if (prefer_blt(sna) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -2198,6 +2238,13 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 struct sna_copy_op *op)
 {
+#if NO_COPY
+	return sna_blt_copy(sna, alu,
+			    src_bo, dst_bo,
+			    dst->drawable.bitsPerPixel,
+			    op);
+#endif
+
 	if (prefer_blt(sna) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
@@ -2357,6 +2404,10 @@ gen4_render_fill_boxes(struct sna *sna,
 			return FALSE;
 	}
 
+#if NO_FILL_BOXES
+	return FALSE;
+#endif
+
 	if (!sna_get_pixel_from_rgba(&pixel,
 				     color->red,
 				     color->green,
@@ -2425,6 +2476,13 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 		 uint32_t color,
 		 struct sna_fill_op *op)
 {
+#if NO_FILL
+	return sna_blt_fill(sna, alu,
+			    dst_bo, dst->drawable.bitsPerPixel,
+			    color,
+			    op);
+#endif
+
 	if (prefer_blt(sna) &&
 	    sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
@@ -2482,6 +2540,7 @@ gen4_render_flush(struct sna *sna)
 static void gen4_render_reset(struct sna *sna)
 {
 	sna->render_state.gen4.needs_invariant = TRUE;
+	sna->render_state.gen4.needs_urb = TRUE;
 	sna->render_state.gen4.vb_id = 0;
 	sna->render_state.gen4.ve_id = -1;
 	sna->render_state.gen4.last_primitive = -1;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 3d56a40..55ca549 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -282,6 +282,7 @@ struct gen4_render_state {
 	uint16_t last_pipelined_pointers;
 
 	Bool needs_invariant;
+	Bool needs_urb;
 };
 
 struct gen5_render_state {
commit 2dff681efe1b89ffe59e5d33979c6e059b72a786
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 10 08:23:19 2011 +0100

    sna/blt: Remove a couple of unused members from its op-state
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 43fbff7..c89be51 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -300,6 +300,10 @@ static struct kgem_bo *upload(struct sna *sna,
 
 	DBG(("%s: origin=(%d, %d), box=(%d, %d), (%d, %d), pixmap=%dx%d\n",
 	     __FUNCTION__, x, y, box->x1, box->y1, box->x2, box->y2, pixmap->drawable.width, pixmap->drawable.height));
+	assert(box->x1 >= 0);
+	assert(box->y1 >= 0);
+	assert(box->x2 <= pixmap->drawable.width);
+	assert(box->y2 <= pixmap->drawable.height);
 
 	bo = kgem_upload_source_image(&sna->kgem,
 				      pixmap->devPrivate.ptr,
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index bd075aa..3d56a40 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -82,12 +82,11 @@ struct sna_composite_op {
 			uint32_t inplace :1;
 			uint32_t overwrites:1;
 
-			int hdr;
 			uint32_t cmd;
 			uint32_t br13;
 			uint32_t pitch[2];
 			uint32_t pixel;
-			struct kgem_bo *bo[3];
+			struct kgem_bo *bo[2];
 		} blt;
 
 		struct {
commit fbbd1c9dde3b2fafd8aaebb88daf3c546d86e5a4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 10 08:22:00 2011 +0100

    sna/glyphs: Fix glyphs to dst fallback path

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index bb4b9cd..224154e 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -77,6 +77,11 @@
 #define NDEBUG 1
 #endif
 
+#define FALLBACK 0
+#define NO_GLYPHS_TO_DST 0
+#define NO_GLYPHS_VIA_MASK 0
+#define NO_GLYPHS_SLOW 0
+
 #define CACHE_PICTURE_SIZE 1024
 #define GLYPH_MIN_SIZE 8
 #define GLYPH_MAX_SIZE 64
@@ -402,6 +407,9 @@ glyphs_to_dst(struct sna *sna,
 	int nrect;
 	int16_t x, y;
 
+	if (NO_GLYPHS_TO_DST)
+		return FALSE;
+
 	memset(&tmp, 0, sizeof(tmp));
 
 	DBG(("%s(op=%d, src=(%d, %d), nlist=%d,  dst=(%d, %d)+(%d, %d))\n",
@@ -513,18 +521,21 @@ next_glyph:
 }
 
 static Bool
-glyphs_to_dst_slow(struct sna *sna,
-		   CARD8 op,
-		   PicturePtr src,
-		   PicturePtr dst,
-		   INT16 src_x, INT16 src_y,
-		   int nlist, GlyphListPtr list, GlyphPtr *glyphs)
+glyphs_slow(struct sna *sna,
+	    CARD8 op,
+	    PicturePtr src,
+	    PicturePtr dst,
+	    INT16 src_x, INT16 src_y,
+	    int nlist, GlyphListPtr list, GlyphPtr *glyphs)
 {
 	struct sna_composite_op tmp;
 	ScreenPtr screen = dst->pDrawable->pScreen;
 	const int priv_offset = sna_glyph_key.offset;
 	int index = screen->myNum;
-	int x, y, n;
+	int16_t x, y;
+
+	if (NO_GLYPHS_SLOW)
+		return FALSE;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -538,9 +549,9 @@ glyphs_to_dst_slow(struct sna *sna,
 	src_y -= list->yOff + y;
 
 	while (nlist--) {
+		int n = list->len;
 		x += list->xOff;
 		y += list->yOff;
-		n = list->len;
 		while (n--) {
 			GlyphPtr glyph = *glyphs++;
 			struct sna_glyph priv;
@@ -560,6 +571,16 @@ glyphs_to_dst_slow(struct sna *sna,
 					priv = *GET_PRIVATE(glyph);
 			}
 
+			DBG(("%s: glyph=(%d, %d)x(%d, %d), src=(%d, %d), mask=(%d, %d)\n",
+			     __FUNCTION__,
+			     x - glyph->info.x,
+			     y - glyph->info.y,
+			     glyph->info.width,
+			     glyph->info.height,
+			     src_x + x - glyph->info.x,
+			     src_y + y - glyph->info.y,
+			     priv.coordinate.x, priv.coordinate.y));
+
 			if (!sna->render.composite(sna,
 						   op, src, priv.atlas, dst,
 						   src_x + x - glyph->info.x,
@@ -576,14 +597,12 @@ glyphs_to_dst_slow(struct sna *sna,
 			nrect = REGION_NUM_RECTS(dst->pCompositeClip);
 			do {
 				struct sna_composite_rectangles r;
-				int16_t dx, dy;
 				int16_t x2, y2;
 
 				r.dst.x = x - glyph->info.x;
 				r.dst.y = y - glyph->info.y;
 				x2 = r.dst.x + glyph->info.width;
 				y2 = r.dst.y + glyph->info.height;
-				dx = dy = 0;
 
 				DBG(("%s: glyph=(%d, %d), (%d, %d), clip=(%d, %d), (%d, %d)\n",
 				     __FUNCTION__,
@@ -594,24 +613,21 @@ glyphs_to_dst_slow(struct sna *sna,
 					break;
 
 				if (r.dst.x < rects->x1)
-					dx = rects->x1 - r.dst.x, r.dst.x = rects->x1;
+					r.dst.x = rects->x1;
 				if (x2 > rects->x2)
 					x2 = rects->x2;
+
 				if (r.dst.y < rects->y1)
-					dy = rects->y1 - r.dst.y, r.dst.y = rects->y1;
+					r.dst.y = rects->y1;
 				if (y2 > rects->y2)
 					y2 = rects->y2;
 
 				if (r.dst.x < x2 && r.dst.y < y2) {
 					DBG(("%s: blt=(%d, %d), (%d, %d)\n",
 					     __FUNCTION__, r.dst.x, r.dst.y, x2, y2));
-
-					r.src.x = r.dst.x + src_x;
-					r.src.y = r.dst.y + src_y;
-					r.mask.x = dx + priv.coordinate.x;
-					r.mask.y = dy + priv.coordinate.y;
 					r.width  = x2 - r.dst.x;
 					r.height = y2 - r.dst.y;
+					r.src = r.mask = r .dst;
 					tmp.blt(sna, &tmp, &r);
 					apply_damage(&tmp, &r);
 				}
@@ -661,9 +677,12 @@ glyphs_via_mask(struct sna *sna,
 	PixmapPtr pixmap;
 	PicturePtr glyph_atlas, mask;
 	int16_t x, y, width, height;
-	int n, error;
+	int error;
 	BoxRec box;
 
+	if (NO_GLYPHS_VIA_MASK)
+		return FALSE;
+
 	DBG(("%s(op=%d, src=(%d, %d), nlist=%d,  dst=(%d, %d)+(%d, %d))\n",
 	     __FUNCTION__, op, src_x, src_y, nlist,
 	     list->xOff, list->yOff, dst->pDrawable->x, dst->pDrawable->y));
@@ -687,8 +706,6 @@ glyphs_via_mask(struct sna *sna,
 	DBG(("%s: extents=((%d, %d), (%d, %d))\n", __FUNCTION__,
 	     box.x1, box.y1, box.x2, box.y2));
 
-	memset(&tmp, 0, sizeof(tmp));
-
 	width  = box.x2 - box.x1;
 	height = box.y2 - box.y1;
 	box.x1 -= dst->pDrawable->x;
@@ -728,11 +745,12 @@ glyphs_via_mask(struct sna *sna,
 		return FALSE;
 	}
 
+	memset(&tmp, 0, sizeof(tmp));
 	glyph_atlas = NULL;
 	do {
+		int n = list->len;
 		x += list->xOff;
 		y += list->yOff;
-		n = list->len;
 		while (n--) {
 			GlyphPtr glyph = *glyphs++;
 			struct sna_glyph *priv;
@@ -935,17 +953,18 @@ glyphs_fallback(CARD8 op,
 	if (!RegionNotEmpty(&region))
 		return;
 
-	sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
-					true);
+	sna_drawable_move_region_to_cpu(dst->pDrawable, &region, true);
 	if (src->pDrawable)
 		sna_drawable_move_to_cpu(src->pDrawable, false);
 
 	dst_image = image_from_pict(dst, TRUE, &x, &y);
 	DBG(("%s: dst offset (%d, %d)\n", __FUNCTION__, x, y));
-	box.x1 += x;
-	box.x2 += x;
-	box.y1 += y;
-	box.y2 += y;
+	if (x | y) {
+		region.extents.x1 += x;
+		region.extents.x2 += x;
+		region.extents.y1 += y;
+		region.extents.y2 += y;
+	}
 
 	src_image = image_from_pict(src, FALSE, &dx, &dy);
 	DBG(("%s: src offset (%d, %d)\n", __FUNCTION__, dx, dy));
@@ -953,15 +972,21 @@ glyphs_fallback(CARD8 op,
 	src_y += dy - list->yOff - y;
 
 	if (mask_format) {
+		DBG(("%s: create mask %dx%dca? %d\n",
+		     __FUNCTION__,
+		     region.extents.x2 - region.extents.x1,
+		     region.extents.y2 - region.extents.y1,
+		     NeedsComponent(mask_format->format)));
 		mask_image =
 			pixman_image_create_bits(mask_format->depth << 24 | mask_format->format,
-						 box.x2 - box.x1, box.y2 - box.y1,
+						 region.extents.x2 - region.extents.x1,
+						 region.extents.y2 - region.extents.y1,
 						 NULL, 0);
 		if (NeedsComponent(mask_format->format))
 			pixman_image_set_component_alpha(mask_image, TRUE);
 
-		x -= box.x1;
-		y -= box.y1;
+		x -= region.extents.x1;
+		y -= region.extents.y1;
 	} else
 		mask_image = dst_image;
 
@@ -1038,17 +1063,18 @@ next_glyph:
 	if (mask_format) {
 		DBG(("%s: glyph mask composite src=(%d,%d) dst=(%d, %d)x(%d, %d)\n",
 		     __FUNCTION__,
-		     src_x + box.x1,
-		     src_y + box.y1,
-		     box.x1, box.y1,
-		     box.x2-box.x1, box.y2-box.y1));
+		     src_x + region.extents.x1,
+		     src_y + region.extents.y1,
+		     region.extents.x1, region.extents.y1,
+		     region.extents.x2 - region.extents.x1,
+		     region.extents.y2 - region.extents.y1));
 		pixman_image_composite(op, src_image, mask_image, dst_image,
-				       src_x + box.x1,
-				       src_y + box.y1,
+				       src_x + region.extents.x1,
+				       src_y + region.extents.y1,
 				       0, 0,
-				       box.x1, box.y1,
-				       box.x2 - box.x1,
-				       box.y2 - box.y1);
+				       region.extents.x1, region.extents.y1,
+				       region.extents.x2 - region.extents.x1,
+				       region.extents.y2 - region.extents.y1);
 		pixman_image_unref(mask_image);
 	}
 
@@ -1074,6 +1100,9 @@ sna_glyphs(CARD8 op,
 	if (REGION_NUM_RECTS(dst->pCompositeClip) == 0)
 		return;
 
+	if (FALLBACK)
+		goto fallback;
+
 	if (sna->kgem.wedged || !sna->have_render) {
 		DBG(("%s: no render (wedged=%d)\n",
 		     __FUNCTION__, sna->kgem.wedged));
@@ -1081,7 +1110,8 @@ sna_glyphs(CARD8 op,
 	}
 
 	if (too_small(sna, dst->pDrawable) && !picture_is_gpu(src)) {
-		DBG(("%s: fallback -- too small\n", __FUNCTION__));
+		DBG(("%s: fallback -- too small (%dx%d)\n",
+		     __FUNCTION__, dst->pDrawable->width, dst->pDrawable->height));
 		goto fallback;
 	}
 
@@ -1110,10 +1140,10 @@ sna_glyphs(CARD8 op,
 				    nlist, list, glyphs))
 			return;
 	} else {
-		if (glyphs_to_dst_slow(sna, op,
-				       src, dst,
-				       src_x, src_y,
-				       nlist, list, glyphs))
+		if (glyphs_slow(sna, op,
+				src, dst,
+				src_x, src_y,
+				nlist, list, glyphs))
 			return;
 	}
 
commit 3f46c34d534e389f541b5c7987b78981c556d868
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 10 08:20:12 2011 +0100

    sna/composite: Tweak setup for the minimal composite extents
    
    Clip the operation extents before passing to the backend to setup the
    operation, so that we only need to upload the minimum amount of data
    necessary, or avoid tiling etc.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 1640309..761016a 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -45,19 +45,14 @@
 
 static void dst_move_area_to_cpu(PicturePtr picture,
 				 uint8_t op,
-				 int x, int y,
-				 int width, int height)
+				 BoxPtr box)
 {
 	RegionRec area;
-	BoxRec box;
 
-	DBG(("%s: (%d, %d), (%d %d)\n", __FUNCTION__, x, y, width, height));
+	DBG(("%s: (%d, %d), (%d %d)\n", __FUNCTION__,
+	     box->x1, box->y1, box->x2, box->y2));
 
-	box.x1 = x;
-	box.y1 = y;
-	box.x2 = x + width;
-	box.y2 = y + height;
-	RegionInit(&area, &box, 1);
+	RegionInit(&area, box, 1);
 	if (picture->pCompositeClip)
 		RegionIntersect(&area, &area, picture->pCompositeClip);
 	sna_drawable_move_region_to_cpu(picture->pDrawable, &area, true);
@@ -101,11 +96,11 @@ clip_to_dst(pixman_region16_t *region,
 	} else if (!pixman_region_not_empty(clip)) {
 		return FALSE;
 	} else {
-		if (dx || dy)
+		if (dx | dy)
 			pixman_region_translate(region, -dx, -dy);
 		if (!pixman_region_intersect(region, region, clip))
 			return FALSE;
-		if (dx || dy)
+		if (dx | dy)
 			pixman_region_translate(region, dx, dy);
 	}
 	return pixman_region_not_empty(region);
@@ -405,6 +400,7 @@ sna_composite(CARD8 op,
 	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
 	struct sna_composite_op tmp;
 	RegionRec region;
+	int dx, dy;
 
 	DBG(("%s(%d src=(%d, %d), mask=(%d, %d), dst=(%d, %d)+(%d, %d), size=(%d, %d)\n",
 	     __FUNCTION__, op,
@@ -413,6 +409,14 @@ sna_composite(CARD8 op,
 	     dst_x, dst_y, dst->pDrawable->x, dst->pDrawable->y,
 	     width, height));
 
+	if (!sna_compute_composite_region(&region,
+					  src, mask, dst,
+					  src_x,  src_y,
+					  mask_x, mask_y,
+					  dst_x, dst_y,
+					  width,  height))
+		return;
+
 	if (sna->kgem.wedged) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -429,16 +433,12 @@ sna_composite(CARD8 op,
 		goto fallback;
 	}
 
-	if (!sna_compute_composite_region(&region,
-					  src, mask, dst,
-					  src_x,  src_y,
-					  mask_x, mask_y,
-					  dst_x, dst_y,
-					  width,  height))
-		return;
+	dx = region.extents.x1 - (dst_x + dst->pDrawable->x);
+	dy = region.extents.y1 - (dst_y + dst->pDrawable->y);
 
-	DBG(("%s: composite region extents: (%d, %d), (%d, %d) + (%d, %d)\n",
+	DBG(("%s: composite region extents:+(%d, %d) -> (%d, %d), (%d, %d) + (%d, %d)\n",
 	     __FUNCTION__,
+	     dx, dy,
 	     region.extents.x1, region.extents.y1,
 	     region.extents.x2, region.extents.y2,
 	     get_drawable_dx(dst->pDrawable),
@@ -447,11 +447,12 @@ sna_composite(CARD8 op,
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite(sna,
 				   op, src, mask, dst,
-				   src_x,  src_y,
-				   mask_x, mask_y,
-				   dst_x + dst->pDrawable->x,
-				   dst_y + dst->pDrawable->y,
-				   width,  height,
+				   src_x + dx,  src_y + dy,
+				   mask_x + dx, mask_y + dy,
+				   region.extents.x1,
+				   region.extents.y1,
+				   region.extents.x2 - region.extents.x1,
+				   region.extents.y2 - region.extents.y1,
 				   &tmp)) {
 		DBG(("%s: fallback due unhandled composite op\n", __FUNCTION__));
 		goto fallback;
@@ -473,10 +474,7 @@ fallback:
 	     dst->pDrawable->x, dst->pDrawable->y,
 	     width, height));
 
-	dst_move_area_to_cpu(dst, op,
-			     dst_x + dst->pDrawable->x,
-			     dst_y + dst->pDrawable->y,
-			     width, height);
+	dst_move_area_to_cpu(dst, op, &region.extents);
 	if (src->pDrawable)
 		sna_drawable_move_to_cpu(src->pDrawable, false);
 	if (mask && mask->pDrawable)
commit a34caf1e3ded03dca53bbea3432dcd45101460fd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 9 20:45:18 2011 +0100

    sna/gradient: Check solid busy status not current domain
    
    We can write into the bo if it has no outstanding requests, whereas we
    were checking whether it was last used on the GPU instead.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index 5cfc81a..5fb74eb 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -218,6 +218,7 @@ sna_render_flush_solid(struct sna *sna)
 	kgem_bo_write(&sna->kgem, cache->cache_bo,
 		      cache->color, cache->size*sizeof(uint32_t));
 	cache->dirty = 0;
+	cache->last = 0;
 }
 
 static void
@@ -227,16 +228,21 @@ sna_render_finish_solid(struct sna *sna, bool force)
 	int i;
 
 	DBG(("sna_render_finish_solid(force=%d, busy=%d, dirty=%d)\n",
-	     force, cache->cache_bo->gpu, cache->dirty));
+	     force, cache->cache_bo->rq != NULL, cache->dirty));
 
-	if (!force && !cache->cache_bo->gpu)
+	if (!force && !cache->cache_bo->rq)
 		return;
 
 	if (cache->dirty)
 		sna_render_flush_solid(sna);
 
-	for (i = 0; i < cache->size; i++)
+	for (i = 0; i < cache->size; i++) {
+		if (cache->bo[i] == NULL)
+			continue;
+
 		kgem_bo_destroy(&sna->kgem, cache->bo[i]);
+		cache->bo[i] = NULL;
+	}
 	kgem_bo_destroy(&sna->kgem, cache->cache_bo);
 
 	DBG(("sna_render_finish_solid reset\n"));
@@ -244,7 +250,8 @@ sna_render_finish_solid(struct sna *sna, bool force)
 	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
 	cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
 	cache->bo[0]->pitch = 4;
-	cache->size = 1;
+	if (force)
+		cache->size = 1;
 }
 
 struct kgem_bo *
@@ -266,9 +273,15 @@ sna_render_get_solid(struct sna *sna, uint32_t color)
 
 	for (i = 1; i < cache->size; i++) {
 		if (cache->color[i] == color) {
-			DBG(("sna_render_get_solid(%d) = %x (old)\n",
-			     i, color));
-			goto done;
+			if (cache->bo[i] == NULL) {
+				DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
+				     i, color));
+				goto create;
+			} else {
+				DBG(("sna_render_get_solid(%d) = %x (old)\n",
+				     i, color));
+				goto done;
+			}
 		}
 	}
 
@@ -276,11 +289,13 @@ sna_render_get_solid(struct sna *sna, uint32_t color)
 
 	i = cache->size++;
 	cache->color[i] = color;
+	DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
+
+create:
 	cache->bo[i] = kgem_create_proxy(cache->cache_bo,
 					 i*sizeof(uint32_t), sizeof(uint32_t));
 	cache->bo[i]->pitch = 4;
 	cache->dirty = 1;
-	DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
 
 done:
 	cache->last = i;
@@ -314,8 +329,10 @@ void sna_gradients_close(struct sna *sna)
 
 	if (sna->render.solid_cache.cache_bo)
 		kgem_bo_destroy(&sna->kgem, sna->render.solid_cache.cache_bo);
-	for (i = 0; i < sna->render.solid_cache.size; i++)
-		kgem_bo_destroy(&sna->kgem, sna->render.solid_cache.bo[i]);
+	for (i = 0; i < sna->render.solid_cache.size; i++) {
+		if (sna->render.solid_cache.bo[i])
+			kgem_bo_destroy(&sna->kgem, sna->render.solid_cache.bo[i]);
+	}
 	sna->render.solid_cache.cache_bo = 0;
 	sna->render.solid_cache.size = 0;
 	sna->render.solid_cache.dirty = 0;


More information about the xorg-commit mailing list