xf86-video-intel: 4 commits - src/sna/kgem.c src/sna/sna_io.c

Chris Wilson ickle at kemper.freedesktop.org
Tue Jan 17 01:36:23 PST 2012


 src/sna/kgem.c   |   12 ++++++---
 src/sna/sna_io.c |   70 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 76 insertions(+), 6 deletions(-)

New commits:
commit d14341cb22e37f52070cd92b707fec5e08038e96
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 17 00:31:43 2012 +0000

    sna: Add a render ring detiling read path
    
    For SNB, in case you really, really want to use GPU detiling and not
    incur the ring switch. Tweaking when to just mmap the target seems to
    gain the most anyway...
    
    The ulterior motive is that this provides fallback paths for avoiding
    the use of TILING_Y with GTT mmaps, which is broken on 855gm.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 6b08532..4174b0e 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -89,8 +89,9 @@ void sna_read_boxes(struct sna *sna,
 {
 	struct kgem *kgem = &sna->kgem;
 	struct kgem_bo *dst_bo;
-	int tmp_nbox;
+	BoxRec extents;
 	const BoxRec *tmp_box;
+	int tmp_nbox;
 	char *src;
 	void *ptr;
 	int src_pitch, cpp, offset;
@@ -123,7 +124,8 @@ void sna_read_boxes(struct sna *sna,
 
 	if (DEBUG_NO_IO || kgem->wedged ||
 	    !kgem_bo_map_will_stall(kgem, src_bo) ||
-	    src_bo->tiling != I915_TILING_X) {
+	    src_bo->tiling == I915_TILING_NONE) {
+fallback:
 		read_boxes_inplace(kgem,
 				   src_bo, src_dx, src_dy,
 				   dst, dst_dx, dst_dy,
@@ -131,6 +133,70 @@ void sna_read_boxes(struct sna *sna,
 		return;
 	}
 
+	/* Is it worth detiling? */
+	extents = box[0];
+	for (n = 1; n < nbox; n++) {
+		if (box[n].x1 < extents.x1)
+			extents.x1 = box[n].x1;
+		if (box[n].x2 > extents.x2)
+			extents.x2 = box[n].x2;
+
+		if (box[n].y1 < extents.y1)
+			extents.y1 = box[n].y1;
+		if (box[n].y2 > extents.y2)
+			extents.y2 = box[n].y2;
+	}
+	if ((extents.y2 - extents.y1) * src_bo->pitch < 4096)
+		goto fallback;
+
+	/* Try to avoid switching rings... */
+	if (src_bo->tiling == I915_TILING_Y || kgem->ring == KGEM_RENDER) {
+		PixmapRec tmp;
+
+		tmp.drawable.width  = extents.x2 - extents.x1;
+		tmp.drawable.height = extents.y2 - extents.y1;
+		tmp.drawable.depth  = dst->drawable.depth;
+		tmp.drawable.bitsPerPixel = dst->drawable.bitsPerPixel;
+		tmp.devPrivate.ptr = NULL;
+
+		assert(tmp.drawable.width);
+		assert(tmp.drawable.height);
+
+		dst_bo = kgem_create_buffer_2d(kgem,
+					       tmp.drawable.width,
+					       tmp.drawable.height,
+					       tmp.drawable.bitsPerPixel,
+					       KGEM_BUFFER_LAST,
+					       &ptr);
+		if (!dst_bo)
+			goto fallback;
+
+		if (!sna->render.copy_boxes(sna, GXcopy,
+					    dst, src_bo, src_dx, src_dy,
+					    &tmp, dst_bo, -extents.x1, -extents.y1,
+					    box, nbox)) {
+			kgem_bo_destroy(&sna->kgem, dst_bo);
+			goto fallback;
+		}
+
+		kgem_bo_submit(&sna->kgem, dst_bo);
+		kgem_buffer_read_sync(kgem, dst_bo);
+
+		for (n = 0; n < nbox; n++) {
+			memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
+				   dst_bo->pitch, dst->devKind,
+				   box[n].x1 - extents.x1,
+				   box[n].y1 - extents.y1,
+				   box[n].x1 + dst_dx,
+				   box[n].y1 + dst_dy,
+				   box[n].x2 - box[n].x1,
+				   box[n].y2 - box[n].y1);
+		}
+
+		kgem_bo_destroy(&sna->kgem, dst_bo);
+		return;
+	}
+
 	/* count the total number of bytes to be read and allocate a bo */
 	cpp = dst->drawable.bitsPerPixel / 8;
 	offset = 0;
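
The decision added above boils down to a bounding-box test: the damage
boxes are unioned into a single extents rectangle, and if less than
roughly a page of the tiled source would be covered, the in-place mmap
fallback is taken rather than a GPU detiling copy. A minimal standalone
sketch of that heuristic (using a simplified stand-in for the X server's
BoxRec, so the names and types here are illustrative only):

  #include <stdint.h>
  #include <stdbool.h>

  struct box { int16_t x1, y1, x2, y2; };  /* stand-in for BoxRec */

  /* Union all damage boxes into one extents rectangle and apply the
   * same heuristic as the new read path: only detile via the GPU when
   * more than about a page of the tiled source is covered. */
  static bool worth_detiling(const struct box *box, int nbox, uint32_t pitch)
  {
          struct box extents = box[0];
          int n;

          for (n = 1; n < nbox; n++) {
                  if (box[n].x1 < extents.x1) extents.x1 = box[n].x1;
                  if (box[n].x2 > extents.x2) extents.x2 = box[n].x2;
                  if (box[n].y1 < extents.y1) extents.y1 = box[n].y1;
                  if (box[n].y2 > extents.y2) extents.y2 = box[n].y2;
          }

          return (uint32_t)(extents.y2 - extents.y1) * pitch >= 4096;
  }
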
commit 3620f9ca459f7bbbb01786c95b5583023b6d870c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 17 00:23:04 2012 +0000

    sna: Cap pwrite buffer alignment to 64k
    
    We only want to create huge pwrite buffers when populating the inactive
    cache for mmapped uploads. In the absence of using mmap for upload, be
    more conservative with the alignment value so as not to simply waste
    valuable aperture and memory.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 48f8182..970e462 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2966,10 +2966,10 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	}
 
 	/* Be a little more generous and hope to hold fewer mmappings */
-	alloc = ALIGN(size, kgem->partial_buffer_size);
 	bo = NULL;
 
 #if !DBG_NO_MAP_UPLOAD
+	alloc = ALIGN(size, kgem->partial_buffer_size);
 	if (kgem->has_cpu_bo) {
 		struct kgem_bo *old;
 
@@ -3084,6 +3084,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 	}
+#else
+	alloc = ALIGN(size, 64*1024);
 #endif
 
 	if (bo == NULL) {
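
The effect of capping the alignment is easiest to see with the usual
round-up-to-a-power-of-two idiom (assumed here to match kgem's ALIGN
macro); the 1 MiB partial buffer size below is only an illustrative
value, not the driver's actual setting:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Common power-of-two round-up idiom (assumed equivalent to kgem's). */
  #define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
          uint32_t size = 150 * 1024;  /* a 150 KiB pwrite upload */

          /* Without mmap uploads, cap the slack at a 64 KiB boundary... */
          uint32_t conservative = ALIGN(size, 64 * 1024);
          printf("64k-aligned: %u KiB\n", (unsigned)(conservative / 1024));  /* 192 */

          /* ...instead of rounding up to a large partial buffer size,
           * which wastes aperture and memory for small uploads. */
          uint32_t generous = ALIGN(size, 1024 * 1024);
          printf("1M-aligned:  %u KiB\n", (unsigned)(generous / 1024));      /* 1024 */

          assert(conservative <= generous);
          return 0;
  }
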
commit b9f59b1099eeba3fb8b25693d4f37dd13267d8a3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 17 00:20:51 2012 +0000

    sna: correct adjustment of a stolen 2d read buffer
    
    If we steal a write buffer to create a pixmap for readback, we need to
    be careful: we will have set the used amount to 0 and would then
    incorrectly decrease it by the last row. Fortunately, we do not yet have
    any code that attempts to create a 2d buffer for reading.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f79f660..48f8182 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1289,7 +1289,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 			}
 		}
 
-		if (bo->need_io) {
+		if (bo->used && bo->need_io) {
 			DBG(("%s: handle=%d, uploading %d/%d\n",
 			     __FUNCTION__, bo->base.handle, bo->used, bo->base.size));
 			assert(!kgem_busy(kgem, bo->base.handle));
@@ -3176,6 +3176,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 	struct kgem_bo *bo;
 	int stride;
 
+	assert(width > 0 && height > 0);
 	stride = ALIGN(width, 2) * bpp >> 3;
 	stride = ALIGN(stride, kgem->min_alignment);
 
@@ -3193,7 +3194,8 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		 * the last pair of rows is valid, remove the padding so
 		 * that it can be allocated to other pixmaps.
 		 */
-		io->used -= stride;
+		if (io->used)
+			io->used -= stride;
 		bo->size -= stride;
 		bubble_sort_partial(kgem, io);
 	}
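
The reason the new guard is needed: once a write buffer has been stolen
for readback, its used count has already been reset to 0, and (assuming
the counter is unsigned, as sketched below with hypothetical stand-in
types) subtracting the final row's stride would wrap around instead of
going negative:

  #include <stdint.h>

  /* Hypothetical, simplified stand-in for a kgem partial buffer. */
  struct partial_buf {
          uint32_t used;   /* bytes consumed so far; 0 after being stolen */
          uint32_t size;   /* bytes available in the buffer */
  };

  /* Trim the padding row of a 2d buffer, but only if something has
   * actually been written: decrementing a zero, unsigned counter by
   * the stride would wrap to a huge bogus value. */
  static void trim_last_row(struct partial_buf *io, uint32_t stride)
  {
          if (io->used)
                  io->used -= stride;
  }
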
commit 6fc4cdafeba4517b94202ff292f05fdd90e52b2e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 16 21:34:43 2012 +0000

    sna: Correct assertion for a partial read buffer
    
    The batch may legitimately be submitted prior to the attachment of the
    read buffer, if, for example, we need to switch rings. Therefore update
    the assertion to only check that the bo remains in existence via either
    a reference from the exec or from the user.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 9169dd7..f79f660 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1224,7 +1224,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 
 	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
 		if (!bo->write) {
-			assert(bo->base.exec);
+			assert(bo->base.exec || bo->base.refcnt > 1);
 			goto decouple;
 		}
 
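
In other words, the relaxed assertion only requires that something still
keeps the read buffer alive: either the pending execbuf references it, or
a user holds a reference beyond the partial list's own. A rough sketch
with hypothetical stand-in types:

  #include <stdbool.h>
  #include <stddef.h>

  struct exec_entry;

  /* Hypothetical stand-in for the fields the assertion inspects. */
  struct bo {
          struct exec_entry *exec;  /* non-NULL while on the batch's exec list */
          int refcnt;               /* the partial list itself holds one ref */
  };

  static bool read_buffer_still_referenced(const struct bo *bo)
  {
          return bo->exec != NULL || bo->refcnt > 1;
  }
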

