xf86-video-intel: 8 commits - src/sna/gen5_render.c src/sna/kgem.c src/sna/kgem_debug_gen3.c src/sna/kgem_debug_gen4.c src/sna/kgem_debug_gen5.c src/sna/kgem_debug_gen6.c src/sna/kgem_debug_gen7.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_dri.c src/sna/sna_io.c src/sna/sna_render_inline.h src/sna/sna_video.c

Chris Wilson ickle at kemper.freedesktop.org
Sat Dec 10 17:04:04 PST 2011


 src/sna/gen5_render.c       |    6 --
 src/sna/kgem.c              |  118 ++++++++++++++++++++++++++++++++++----------
 src/sna/kgem.h              |    6 ++
 src/sna/kgem_debug_gen3.c   |    4 -
 src/sna/kgem_debug_gen4.c   |    8 +-
 src/sna/kgem_debug_gen5.c   |    8 +-
 src/sna/kgem_debug_gen6.c   |   10 +--
 src/sna/kgem_debug_gen7.c   |   10 +--
 src/sna/sna_accel.c         |   36 ++++++++++---
 src/sna/sna_dri.c           |    3 +
 src/sna/sna_io.c            |    5 -
 src/sna/sna_render_inline.h |    2 
 src/sna/sna_video.c         |    1 
 13 files changed, 153 insertions(+), 64 deletions(-)

New commits:
commit 051a18063df075536cb1ac0dc4dfc3c1306ab74e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 22:45:25 2011 +0000

    sna: Implement a VMA cache
    
    A VMA cache appears unavoidable thanks to compiz and an excruciatingly
    slow GTT pagefault, though it does look like it will be ineffectual
    during everyday usage. Compiz (and presumably other compositing
    managers) appears to be undoing all the pagefault minimisation as
    demonstrated on gen5 with large XPutImage. It also appears the CPU to
    memory bandwidth ratio plays a crucial role in determining whether
    going straight to GTT or through the CPU cache is a win - so there is no
    trivial heuristic.
    
    x11perf -putimage10 -putimage500 on i5-2467m:
    Before:
      bare:   1,150,000   2,410
      compiz:  438,000   2,670
    After:
      bare:   1,190,000   2,730
      compiz:  437,000   2,690
    UXA:
      bare:    658,000   2,670
      compiz:  389,000   2,520
    
    On i3-330m:
    Before:
      bare:    537,000   1,080
      compiz:  263,000     398
    After:
      bare:    606,000   1,360
      compiz:  203,000     985
    UXA:
      bare:    294,000   1,070
      compiz:  197,000     821
    
    On pnv:
    Before:
      bare:    179,000   213
      compiz:  106,000   123
    After:
      bare:    181,000   246
      compiz:  103,000   197
    UXA:
      bare:    114,000   312
      compiz:   75,700   191
    
    Reported-by: Michael Larabel <Michael at phoronix.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
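
The core of the diff below is a small LRU of GTT mappings: a mapping is created
on first use, moved to the tail of the cache list on every use, and the oldest
mappings are torn down once the cache is full, keeping the driver well clear of
the kernel's per-process limit of roughly 64k vma. A self-contained sketch of
that policy, with hypothetical names and an anonymous mmap standing in for the
real GTT mapping done through DRM_IOCTL_I915_GEM_MMAP_GTT, might look like:

/*
 * Illustrative sketch only: a capped LRU cache of mappings in the spirit of
 * the kgem changes below.  Names (vma_bo, vma_cache, vma_map, ...) are
 * hypothetical and the anonymous mmap is a stand-in for mapping a GEM handle.
 */
#include <stddef.h>
#include <sys/mman.h>

#define MAX_VMA_CACHE 128

struct vma_bo {
	struct vma_bo *prev, *next;	/* LRU list linkage */
	void *map;			/* cached mapping, NULL when unmapped */
	size_t size;
	int handle;
};

struct vma_cache {
	struct vma_bo head;		/* sentinel: head.next oldest, head.prev newest */
	int count;
};

static void vma_cache_init(struct vma_cache *c)
{
	c->head.prev = c->head.next = &c->head;
	c->count = 0;
}

static void vma_unlink(struct vma_bo *bo)
{
	bo->prev->next = bo->next;
	bo->next->prev = bo->prev;
}

static void vma_link_tail(struct vma_cache *c, struct vma_bo *bo)
{
	bo->prev = c->head.prev;
	bo->next = &c->head;
	c->head.prev->next = bo;
	c->head.prev = bo;
}

/* Drop the least-recently-used mapping to stay under the vma cap. */
static void vma_evict_one(struct vma_cache *c)
{
	struct vma_bo *old = c->head.next;

	munmap(old->map, old->size);
	old->map = NULL;
	vma_unlink(old);
	c->count--;
}

/* Return a (possibly cached) mapping for bo, refreshing its LRU position. */
static void *vma_map(struct vma_cache *c, struct vma_bo *bo)
{
	if (bo->map == NULL) {
		while (c->count >= MAX_VMA_CACHE)
			vma_evict_one(c);

		/* stand-in for mapping bo->handle through the GTT */
		bo->map = mmap(NULL, bo->size, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return NULL;
		}
		c->count++;
	} else {
		vma_unlink(bo);	/* already cached: just refresh its position */
	}

	vma_link_tail(c, bo);	/* most-recently-used lives at the tail */
	return bo->map;
}

Tail insertion on every lookup is what keeps hot mappings - a compositor's
repeatedly flushed pixmaps, say - resident while one-shot uploads age out from
the head of the list.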

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 68a1831..3609a6f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -45,6 +45,12 @@ static inline void list_move(struct list *list, struct list *head)
 	list_add(list, head);
 }
 
+static inline void list_move_tail(struct list *list, struct list *head)
+{
+	__list_del(list->prev, list->next);
+	list_add_tail(list, head);
+}
+
 static inline void list_replace(struct list *old,
 				struct list *new)
 {
@@ -75,6 +81,7 @@ static inline void list_replace(struct list *old,
 #endif
 
 #define PAGE_SIZE 4096
+#define MAX_VMA_CACHE 128
 
 struct kgem_partial_bo {
 	struct kgem_bo base;
@@ -125,7 +132,6 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
 {
 	struct drm_i915_gem_mmap_gtt mmap_arg;
-	struct drm_i915_gem_set_domain set_domain;
 	void *ptr;
 
 	DBG(("%s(handle=%d, size=%d, prot=%s)\n", __FUNCTION__,
@@ -144,12 +150,6 @@ static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
 		ptr = NULL;
 	}
 
-	VG_CLEAR(set_domain);
-	set_domain.handle = handle;
-	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-	set_domain.write_domain = prot & PROT_WRITE ? I915_GEM_DOMAIN_GTT : 0;
-	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-
 	return ptr;
 }
 
@@ -274,6 +274,7 @@ static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
 	bo->cpu_write = true;
 	list_init(&bo->request);
 	list_init(&bo->list);
+	list_init(&bo->vma);
 
 	return bo;
 }
@@ -352,6 +353,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->partial);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
+	list_init(&kgem->vma_cache);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
 	for (i = 0; i < ARRAY_SIZE(kgem->active); i++)
@@ -594,6 +596,12 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 		b = next;
 	}
 
+	if (bo->map) {
+		munmap(bo->map, bo->size);
+		list_del(&bo->vma);
+		kgem->vma_count--;
+	}
+
 	list_del(&bo->list);
 	list_del(&bo->request);
 	gem_close(kgem->fd, bo->handle);
@@ -620,6 +628,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 			base->reusable = true;
 			list_init(&base->list);
 			list_replace(&bo->request, &base->request);
+			list_replace(&bo->vma, &base->vma);
 			free(bo);
 			bo = base;
 		}
@@ -1814,19 +1823,76 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot)
 {
 	void *ptr;
 
-	ptr = gem_mmap(kgem->fd, bo->handle, bo->size, prot);
-	if (ptr == NULL)
-		return NULL;
+	ptr = bo->map;
+	if (ptr == NULL) {
+		/* vma are limited on a per-process basis to around 64k.
+		 * This includes all malloc arenas as well as other file
+		 * mappings. In order to be fair and not hog the cache,
+		 * and more importantly not to exhaust that limit and to
+		 * start failing mappings, we keep our own number of open
+		 * vma to within a conservative value.
+		 */
+		while (kgem->vma_count > MAX_VMA_CACHE) {
+			struct kgem_bo *old;
+
+			old = list_first_entry(&kgem->vma_cache,
+					       struct kgem_bo,
+					       vma);
+			DBG(("%s: discarding vma cache for %d\n",
+			     __FUNCTION__, old->handle));
+			munmap(old->map, old->size);
+			old->map = NULL;
+			list_del(&old->vma);
+			kgem->vma_count--;
+		}
+
+		ptr = gem_mmap(kgem->fd, bo->handle, bo->size,
+			       PROT_READ | PROT_WRITE);
+		if (ptr == NULL)
+			return NULL;
+
+		/* Cache this mapping to avoid the overhead of an
+		 * excruciatingly slow GTT pagefault. This is more an
+		 * issue with compositing managers which need to frequently
+		 * flush CPU damage to their GPU bo.
+		 */
+		bo->map = ptr;
+		kgem->vma_count++;
+
+		DBG(("%s: caching vma for %d\n",
+		     __FUNCTION__, bo->handle));
+	}
+
+	if (bo->needs_flush | bo->gpu) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = prot & PROT_WRITE ? I915_GEM_DOMAIN_GTT : 0;
+		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 
-	if (prot & PROT_WRITE) {
 		bo->needs_flush = false;
 		if (bo->gpu)
 			kgem_retire(kgem);
 	}
 
+	list_move_tail(&bo->vma, &kgem->vma_cache);
+
 	return ptr;
 }
 
+void kgem_bo_unmap(struct kgem *kgem, struct kgem_bo *bo)
+{
+	assert(bo->map);
+
+	munmap(bo->map, bo->size);
+	bo->map = NULL;
+
+	list_del(&bo->vma);
+	kgem->vma_count--;
+}
+
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_gem_flink flink;
@@ -2151,6 +2217,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 					     &bo->base.request);
 			else
 				list_init(&bo->base.request);
+			list_replace(&old->vma,
+				     &bo->base.vma);
 			free(old);
 			bo->base.refcnt = 1;
 		} else {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index e9e7cdc..0d85f64 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -47,7 +47,9 @@ struct kgem_bo {
 
 	struct list list;
 	struct list request;
+	struct list vma;
 
+	void *map;
 	struct kgem_request *rq;
 	struct drm_i915_gem_exec_object2 *exec;
 
@@ -103,6 +105,7 @@ struct kgem {
 	struct list flushing, active[16], inactive[16];
 	struct list partial;
 	struct list requests;
+	struct list vma_cache;
 	struct kgem_request *next_request;
 
 	uint16_t nbatch;
@@ -110,6 +113,7 @@ struct kgem {
 	uint16_t nexec;
 	uint16_t nreloc;
 	uint16_t nfence;
+	uint16_t vma_count;
 
 	uint32_t flush:1;
 	uint32_t sync:1;
@@ -314,6 +318,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 			uint32_t delta);
 
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot);
+void kgem_bo_unmap(struct kgem *kgem, struct kgem_bo *bo);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
 Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
diff --git a/src/sna/kgem_debug_gen3.c b/src/sna/kgem_debug_gen3.c
index d152b60..0238b73 100644
--- a/src/sna/kgem_debug_gen3.c
+++ b/src/sna/kgem_debug_gen3.c
@@ -102,7 +102,7 @@ static void gen3_update_vertex_buffer_addr(struct kgem *kgem,
 	ptr = (char *)base + kgem->reloc[i].delta;
 
 	if (state.vb.current)
-		munmap(state.vb.base, state.vb.current->size);
+		kgem_bo_unmap(kgem, state.vb.current);
 
 	state.vb.current = bo;
 	state.vb.base = base;
@@ -1613,7 +1613,7 @@ int kgem_gen3_decode_3d(struct kgem *kgem, uint32_t offset)
 void kgem_gen3_finish_state(struct kgem *kgem)
 {
 	if (state.vb.current)
-		munmap(state.vb.base, state.vb.current->size);
+		kgem_bo_unmap(kgem, state.vb.current);
 
 	memset(&state, 0, sizeof(state));
 }
diff --git a/src/sna/kgem_debug_gen4.c b/src/sna/kgem_debug_gen4.c
index d736cbd..0f91d29 100644
--- a/src/sna/kgem_debug_gen4.c
+++ b/src/sna/kgem_debug_gen4.c
@@ -90,7 +90,7 @@ static void gen4_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 
 	i = data[0] >> 27;
 	if (state.vb[i].current)
-		munmap(state.vb[i].base, state.vb[i].current->size);
+		kgem_bo_unmap(kgem, state.vb[i].current);
 
 	state.vb[i].current = bo;
 	state.vb[i].base = base;
@@ -420,7 +420,7 @@ static void
 put_reloc(struct kgem *kgem, struct reloc *r)
 {
 	if (r->bo != NULL)
-		munmap(r->base, r->bo->size);
+		kgem_bo_unmap(kgem, r->bo);
 }
 #endif
 
@@ -697,7 +697,7 @@ static void finish_vertex_buffers(struct kgem *kgem)
 
 	for (i = 0; i < ARRAY_SIZE(state.vb); i++)
 		if (state.vb[i].current)
-			munmap(state.vb[i].base, state.vb[i].current->size);
+			kgem_bo_unmap(kgem, state.vb[i].current);
 }
 
 void kgem_gen4_finish_state(struct kgem *kgem)
@@ -705,7 +705,7 @@ void kgem_gen4_finish_state(struct kgem *kgem)
 	finish_vertex_buffers(kgem);
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.current);
 
 	memset(&state, 0, sizeof(state));
 }
diff --git a/src/sna/kgem_debug_gen5.c b/src/sna/kgem_debug_gen5.c
index 78ba443..c4f5df1 100644
--- a/src/sna/kgem_debug_gen5.c
+++ b/src/sna/kgem_debug_gen5.c
@@ -85,7 +85,7 @@ static void gen5_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 
 	i = data[0] >> 27;
 	if (state.vb[i].current)
-		munmap(state.vb[i].base, state.vb[i].current->size);
+		kgem_bo_unmap(kgem, state.vb[i].current);
 
 	state.vb[i].handle = reloc->target_handle;
 	state.vb[i].current = bo;
@@ -394,7 +394,7 @@ static void
 put_reloc(struct kgem *kgem, struct reloc *r)
 {
 	if (r->bo != NULL)
-		munmap(r->base, r->bo->size);
+		kgem_bo_unmap(kgem, r->bo);
 }
 #endif
 
@@ -673,7 +673,7 @@ static void finish_vertex_buffers(struct kgem *kgem)
 
 	for (i = 0; i < ARRAY_SIZE(state.vb); i++)
 		if (state.vb[i].current)
-			munmap(state.vb[i].base, state.vb[i].current->size);
+			kgem_bo_unmap(kgem, state.vb[i].current);
 }
 
 void kgem_gen5_finish_state(struct kgem *kgem)
@@ -681,7 +681,7 @@ void kgem_gen5_finish_state(struct kgem *kgem)
 	finish_vertex_buffers(kgem);
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.current);
 
 	memset(&state, 0, sizeof(state));
 }
diff --git a/src/sna/kgem_debug_gen6.c b/src/sna/kgem_debug_gen6.c
index d441b53..5bcd85d 100644
--- a/src/sna/kgem_debug_gen6.c
+++ b/src/sna/kgem_debug_gen6.c
@@ -89,7 +89,7 @@ static void gen6_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 
 	i = data[0] >> 26;
 	if (state.vb[i].current)
-		munmap(state.vb[i].base, state.vb[i].current->size);
+		kgem_bo_unmap(kgem, state.vb[i].current);
 
 	state.vb[i].current = bo;
 	state.vb[i].base = base;
@@ -130,7 +130,7 @@ static void gen6_update_dynamic_buffer(struct kgem *kgem, const uint32_t offset)
 	}
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.current);
 
 	state.dynamic_state.current = bo;
 	state.dynamic_state.base = base;
@@ -306,7 +306,7 @@ static void finish_vertex_buffers(struct kgem *kgem)
 
 	for (i = 0; i < ARRAY_SIZE(state.vb); i++)
 		if (state.vb[i].current)
-			munmap(state.vb[i].base, state.vb[i].current->size);
+			kgem_bo_unmap(kgem, state.vb[i].current);
 }
 
 static void finish_state(struct kgem *kgem)
@@ -314,7 +314,7 @@ static void finish_state(struct kgem *kgem)
 	finish_vertex_buffers(kgem);
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.current);
 
 	memset(&state, 0, sizeof(state));
 }
@@ -482,7 +482,7 @@ static void
 put_reloc(struct kgem *kgem, struct reloc *r)
 {
 	if (r->bo != NULL)
-		munmap(r->base, r->bo->size);
+		kgem_bo_unmap(kgem, r->bo);
 }
 
 static const char *
diff --git a/src/sna/kgem_debug_gen7.c b/src/sna/kgem_debug_gen7.c
index f6a4975..a33a918 100644
--- a/src/sna/kgem_debug_gen7.c
+++ b/src/sna/kgem_debug_gen7.c
@@ -89,7 +89,7 @@ static void gen7_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 
 	i = data[0] >> 26;
 	if (state.vb[i].current)
-		munmap(state.vb[i].base, state.vb[i].current->size);
+		kgem_bo_unmap(kgem, state.vb[i].current);
 
 	state.vb[i].current = bo;
 	state.vb[i].base = base;
@@ -130,7 +130,7 @@ static void gen7_update_dynamic_buffer(struct kgem *kgem, const uint32_t offset)
 	}
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.current);
 
 	state.dynamic_state.current = bo;
 	state.dynamic_state.base = base;
@@ -306,7 +306,7 @@ static void finish_vertex_buffers(struct kgem *kgem)
 
 	for (i = 0; i < ARRAY_SIZE(state.vb); i++)
 		if (state.vb[i].current)
-			munmap(state.vb[i].base, state.vb[i].current->size);
+			kgem_bo_unmap(kgem, state.vb[i].current);
 }
 
 static void finish_state(struct kgem *kgem)
@@ -314,7 +314,7 @@ static void finish_state(struct kgem *kgem)
 	finish_vertex_buffers(kgem);
 
 	if (state.dynamic_state.current)
-		munmap(state.dynamic_state.base, state.dynamic_state.current->size);
+		kgem_bo_unmap(kgem, state.dynamic_state.current);
 
 	memset(&state, 0, sizeof(state));
 }
@@ -482,7 +482,7 @@ static void
 put_reloc(struct kgem *kgem, struct reloc *r)
 {
 	if (r->bo != NULL)
-		munmap(r->base, r->bo->size);
+		kgem_bo_unmap(kgem, r->bo);
 }
 
 static const char *
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bb52770..44580be 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -187,9 +187,6 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
 	sna_damage_destroy(&priv->gpu_damage);
 	sna_damage_destroy(&priv->cpu_damage);
 
-	if (priv->mapped)
-		munmap(pixmap->devPrivate.ptr, priv->gpu_bo->size);
-
 	/* Always release the gpu bo back to the lower levels of caching */
 	if (priv->gpu_bo)
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
@@ -1407,9 +1404,10 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 	/* XXX performing the upload inplace is currently about 20x slower
 	 * for putimage10 on gen6 -- mostly due to slow page faulting in kernel.
+	 * So we try again with vma caching and only for pixmaps who will be
+	 * immediately flushed...
 	 */
-#if 0
-	if (priv->gpu_bo->rq == NULL &&
+	if (priv->flush &&
 	    sna_put_image_upload_blt(drawable, gc, region,
 				     x, y, w, h, bits, stride)) {
 		if (region_subsumes_drawable(region, &pixmap->drawable)) {
@@ -1425,7 +1423,6 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 		return true;
 	}
-#endif
 
 	if (priv->cpu_bo)
 		kgem_bo_sync(&sna->kgem, priv->cpu_bo, true);
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index aba636c..767824f 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -80,8 +80,6 @@ static void read_boxes_inplace(struct kgem *kgem,
 			   box->x2 - box->x1, box->y2 - box->y1);
 		box++;
 	} while (--n);
-
-	munmap(src, bo->size);
 }
 
 void sna_read_boxes(struct sna *sna,
@@ -283,8 +281,6 @@ static void write_boxes_inplace(struct kgem *kgem,
 			   box->x2 - box->x1, box->y2 - box->y1);
 		box++;
 	} while (--n);
-
-	munmap(dst, bo->size);
 }
 
 void sna_write_boxes(struct sna *sna,
@@ -464,7 +460,6 @@ struct kgem_bo *sna_replace(struct sna *sna,
 				   0, 0,
 				   pixmap->drawable.width,
 				   pixmap->drawable.height);
-			munmap(dst, bo->size);
 		}
 	}
 
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index bd5ff14..d6d56f4 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -481,7 +481,6 @@ sna_video_copy_data(struct sna *sna,
 	else
 		sna_copy_packed_data(video, frame, buf, dst);
 
-	munmap(dst, frame->bo->size);
 	return TRUE;
 }
 
commit 735a15208dd600eefa3090f344186df9cac0462d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 23:45:56 2011 +0000

    sna/gen5: Remove a redundant format check
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index e8a9606..d09d6bc 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1849,11 +1849,7 @@ gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 
 	DBG(("%s: dst=%p\n", __FUNCTION__, dst));
 
-	if (!gen5_check_dst_format(dst->format)) {
-		DBG(("%s: incompatible dst format %08x\n",
-		     __FUNCTION__, dst->format));
-		return FALSE;
-	}
+	assert(gen5_check_dst_format(dst->format));
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
 	priv = sna_pixmap(op->dst.pixmap);
commit c5584252c34b792313578cc31d56248d8990b571
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 23:34:16 2011 +0000

    sna: Remember to assign a new unique id for the replaced bo
    
    Missed from the previous patch.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 973afcb..68a1831 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2391,6 +2391,7 @@ kgem_replace_bo(struct kgem *kgem,
 		dst = __kgem_bo_alloc(handle, size);
 	}
 	dst->pitch = pitch;
+	dst->unique_id = kgem_get_unique_id(kgem);
 
 	kgem_set_mode(kgem, KGEM_BLT);
 	if (!kgem_check_batch(kgem, 8) ||
commit 9c764dc13be40fc14238a4c130f7d8b44dbdf7db
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 22:38:57 2011 +0000

    sna: Be more pessimistic with CPU sources
    
    Try to avoid a few more unnecessary context switches.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index b9997a2..9e3676a 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -77,7 +77,7 @@ static inline Bool
 is_cpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-	return !priv || priv->gpu_bo == NULL;
+	return !priv || priv->cpu_damage != NULL;
 }
 
 static inline Bool
commit 358aaef6dbff0737f026046b0c9a2e21d8cfdf2a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 22:38:33 2011 +0000

    sna/dri: Prefer using the BLT for DRICopyRegion on pre-SNB
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 941da87..e572a6b 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -428,6 +428,9 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		 * as well).
 		 */
 		kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	} else if (sna->kgem.mode == KGEM_NONE) {
+		/* Otherwise employ the BLT unless it means a context switch */
+		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 
 	damage(pixmap, region);
commit c295ad8da91e39c8fffa540901097651df5d24b2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 22:37:31 2011 +0000

    sna: Transfer the whole bo for a replacement XCopyArea
    
    If we are copying over the entire source onto the destination, just copy
    across the GPU bo. This is often used for caching images as pixmaps.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 09e8a0c..bb52770 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2015,6 +2015,31 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	     src_priv ? src_priv->cpu_bo : NULL,
 	     replaces));
 
+	if (replaces) {
+		if (dst_priv && src_priv && src_priv->gpu_damage == NULL) {
+			if (sna_pixmap_move_to_gpu(src_pixmap)) {
+				DBG(("%s: transferring src GPU bo to dst\n", __FUNCTION__));
+				if (dst_priv->gpu_bo)
+					kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
+				sna_damage_destroy(&dst_priv->cpu_damage);
+				sna_damage_all(&dst_priv->gpu_damage,
+					       dst_pixmap->drawable.width,
+					       dst_pixmap->drawable.height);
+
+				dst_priv->gpu_bo = src_priv->gpu_bo;
+				src_priv->gpu_bo = NULL;
+
+				sna_damage_all(&src_priv->cpu_damage,
+					       src_pixmap->drawable.width,
+					       src_pixmap->drawable.height);
+				return;
+			}
+		}
+
+		if (!src_priv || src_priv->gpu_bo == NULL || src_priv->cpu_damage)
+			goto fallback;
+	}
+
 	/* Try to maintain the data on the GPU */
 	if (dst_priv && dst_priv->gpu_bo == NULL &&
 	    src_priv && src_priv->gpu_bo != NULL) {
commit ece7fc8afeb8eefcf0ad1a054f02e7fac8db6327
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 21:04:08 2011 +0000

    sna: Only use the 64-byte pitch alignment for scanout
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f5ad111..973afcb 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -433,6 +433,7 @@ static uint32_t kgem_get_unique_id(struct kgem *kgem)
 
 static uint32_t kgem_surface_size(struct kgem *kgem,
 				  bool relaxed_fencing,
+				  bool scanout,
 				  uint32_t width,
 				  uint32_t height,
 				  uint32_t bpp,
@@ -447,13 +448,13 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 			tile_width = 512;
 			tile_height = 16;
 		} else {
-			tile_width = bpp > 8 ? 64 : 4;
+			tile_width = scanout ? 64 : 4;
 			tile_height = 2;
 		}
 	} else switch (tiling) {
 	default:
 	case I915_TILING_NONE:
-		tile_width = bpp > 8 ? 64 : 4;
+		tile_width = scanout ? 64 : 4;
 		tile_height = 2;
 		break;
 	case I915_TILING_X:
@@ -1373,7 +1374,7 @@ int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int
 
 	/* First check that we can fence the whole object */
 	if (tiling &&
-	    kgem_surface_size(kgem, false,
+	    kgem_surface_size(kgem, false, false,
 			      width, height, bpp, tiling,
 			      &pitch) > kgem->max_object_size) {
 		DBG(("%s: too large (%dx%d) to be fenced, discarding tiling\n",
@@ -1405,8 +1406,8 @@ int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int
 	}
 
 	if (tiling == I915_TILING_X && width * bpp < 8*512/2) {
-		DBG(("%s: too thin [%d] for TILING_X\n",
-		     __FUNCTION__, width));
+		DBG(("%s: too thin [width %d, %d bpp] for TILING_X\n",
+		     __FUNCTION__, width, bpp));
 		tiling = I915_TILING_NONE;
 		goto done;
 	}
@@ -1444,10 +1445,10 @@ static bool _kgem_can_create_2d(struct kgem *kgem,
 	if (tiling < 0)
 		tiling = -tiling;
 
-	size = kgem_surface_size(kgem, false,
+	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp, tiling, &pitch);
 	if (size == 0 || size > kgem->max_object_size)
-		size = kgem_surface_size(kgem, false,
+		size = kgem_surface_size(kgem, false, false,
 					 width, height, bpp,
 					 I915_TILING_NONE, &pitch);
 	return size > 0 && size <= kgem->max_object_size;
@@ -1504,11 +1505,14 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	if (tiling < 0)
 		tiling = -tiling, exact = 1;
 
-	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d)\n", __FUNCTION__,
-	     width, height, bpp, tiling, !!exact, !!(flags & CREATE_INACTIVE)));
+	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, scanout?=%d)\n", __FUNCTION__,
+	     width, height, bpp, tiling,
+	     !!exact, !!(flags & CREATE_INACTIVE), !!(flags & CREATE_SCANOUT)));
 
 	assert(_kgem_can_create_2d(kgem, width, height, bpp, exact ? -tiling : tiling));
-	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing,
+	size = kgem_surface_size(kgem,
+				 kgem->has_relaxed_fencing,
+				 flags & CREATE_SCANOUT,
 				 width, height, bpp, tiling, &pitch);
 	assert(size && size <= kgem->max_object_size);
 	if (flags & CREATE_INACTIVE)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8649512..e9e7cdc 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -181,6 +181,7 @@ kgem_replace_bo(struct kgem *kgem,
 enum {
 	CREATE_EXACT = 0x1,
 	CREATE_INACTIVE = 0x2,
+	CREATE_SCANOUT = 0x4,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index db22507..09e8a0c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1037,7 +1037,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap)
 		if (priv->cpu_damage)
 			flags |= CREATE_INACTIVE;
 		if (pixmap->usage_hint == SNA_CREATE_FB)
-			flags |= CREATE_EXACT;
+			flags |= CREATE_EXACT | CREATE_SCANOUT;
 
 		priv->gpu_bo = kgem_create_2d(&sna->kgem,
 					      pixmap->drawable.width,
commit b3816cf3a99d23d0c3ab4cd716b24ea544a07283
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Dec 10 20:32:56 2011 +0000

    sna: Remove assertions that external bo are not busy
    
    We have to be careful not to assume that bo exposed to external clients
    are under our full control, in particular not to assert their state. :(
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 34ce745..f5ad111 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -214,7 +214,6 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 	if (gem_write(kgem->fd, bo->handle, 0, length, data))
 		return FALSE;
 
-	assert(!kgem_busy(kgem, bo->handle));
 	bo->needs_flush = false;
 	if (bo->gpu)
 		kgem_retire(kgem);
@@ -728,7 +727,6 @@ bool kgem_retire(struct kgem *kgem)
 						DBG(("%s: moving %d to inactive\n",
 						     __FUNCTION__, bo->handle));
 						bo->purged = true;
-						assert(!kgem_busy(kgem,bo->handle));
 						list_move(&bo->list,
 							  inactive(kgem, bo->size));
 						retired = true;
@@ -1812,14 +1810,11 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo, int prot)
 {
 	void *ptr;
 
-	assert(prot == PROT_READ || !kgem_busy(kgem, bo->handle));
-
 	ptr = gem_mmap(kgem->fd, bo->handle, bo->size, prot);
 	if (ptr == NULL)
 		return NULL;
 
 	if (prot & PROT_WRITE) {
-		assert(!kgem_busy(kgem, bo->handle));
 		bo->needs_flush = false;
 		if (bo->gpu)
 			kgem_retire(kgem);

