xf86-video-intel: 3 commits - configure.ac src/sna/gen4_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Sat Jul 21 05:04:50 PDT 2012


 configure.ac          |   16 ++--
 src/sna/gen4_render.c |    2 
 src/sna/kgem.c        |  165 +++++++++++++++++++++++++-------------------------
 src/sna/kgem.h        |   12 +--
 src/sna/sna_accel.c   |    7 --
 src/sna/sna_render.c  |    4 -
 6 files changed, 103 insertions(+), 103 deletions(-)

New commits:
commit 37dfdb0e9e86effc3ca8b590c98aa2382e8f0cea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 13:01:09 2012 +0100

    sna: Correct assertion for __kgem_bo_size()
    
    Only proxies are measured in bytes, not pages.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3853379..dbe6423 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -127,7 +127,7 @@ static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
 
 static inline int bytes(struct kgem_bo *bo)
 {
-	return kgem_bo_size(bo);
+	return __kgem_bo_size(bo);
 }
 
 #define bucket(B) (B)->size.pages.bucket
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index f1ded24..7b388fb 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -423,7 +423,7 @@ static inline int __kgem_buffer_size(struct kgem_bo *bo)
 
 static inline int __kgem_bo_size(struct kgem_bo *bo)
 {
-	assert(!(bo->proxy && bo->io));
+	assert(bo->proxy == NULL);
 	return PAGE_SIZE * bo->size.pages.count;
 }
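
For reference, bytes() in kgem.c is only applied to objects whose size
is kept in pages, and bo->proxy == NULL implies the old condition, so
the new assertion is strictly tighter. A minimal sketch of the size
bookkeeping the two accessors rely on (the union layout is inferred
from the accessors and the bucket() macro, so treat it as an
assumption rather than the verbatim header):

	#include <assert.h>

	#define PAGE_SIZE 4096	/* x86 */

	struct kgem_bo {
		struct kgem_bo *proxy;	/* non-NULL for sub-allocations */
		/* ... */
		union {
			struct {
				int count;	/* size in pages */
				int bucket;	/* cache bucket index */
			} pages;
			int bytes;	/* proxies only: size in bytes */
		} size;
	};

	static inline int __kgem_buffer_size(struct kgem_bo *bo)
	{
		assert(bo->proxy != NULL);	/* bytes representation */
		return bo->size.bytes;
	}

	static inline int __kgem_bo_size(struct kgem_bo *bo)
	{
		assert(bo->proxy == NULL);	/* pages representation */
		return PAGE_SIZE * bo->size.pages.count;
	}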
 
commit 83ad661bc73e9d0094b669c5203e25afc3937bb7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 12:51:41 2012 +0100

    sna: Change the vmap interface name to userptr
    
    This brings the name in line with the other drivers and avoids the
    conflict with 'vmalloc/vmap', which the kernel uses for allocating
    contiguous virtual mappings.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index b3a786d..45157a8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -247,14 +247,14 @@ if test "x$accel" = xno; then
 	AC_MSG_ERROR([No default acceleration option])
 fi
 
-AC_ARG_ENABLE(vmap,
-	      AS_HELP_STRING([--enable-vmap],
-			     [Enable use of vmap (experimental) [default=no]]),
-	      [VMAP="$enableval"],
-	      [VMAP=no])
-AM_CONDITIONAL(USE_VMAP, test x$VMAP = xyes)
-if test "x$VMAP" = xyes; then
-	AC_DEFINE(USE_VMAP,1,[Assume VMAP support])
+AC_ARG_ENABLE(userptr,
+	      AS_HELP_STRING([--enable-userptr],
+			     [Enable use of userptr (experimental) [default=no]]),
+	      [USERPTR="$enableval"],
+	      [USERPTR=no])
+AM_CONDITIONAL(USE_USERPTR, test x$USERPTR = xyes)
+if test "x$USERPTR" = xyes; then
+	AC_DEFINE(USE_USERPTR,1,[Assume USERPTR support])
 fi
 
 AC_ARG_ENABLE(async-swap,
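
With this rename the experimental switch is now spelled
./configure --enable-userptr (defining USE_USERPTR); the old
--enable-vmap/USE_VMAP spelling is gone, so build scripts that passed
it need updating.
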
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 578c8e4..3853379 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -60,7 +60,7 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define DBG_NO_TILING 0
 #define DBG_NO_CACHE 0
 #define DBG_NO_CACHE_LEVEL 0
-#define DBG_NO_VMAP 0
+#define DBG_NO_USERPTR 0
 #define DBG_NO_LLC 0
 #define DBG_NO_SEMAPHORES 0
 #define DBG_NO_MADV 0
@@ -88,17 +88,17 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 #define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 #define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
-#define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
-#define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
+#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
+#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
 #define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
 
-#define LOCAL_I915_GEM_VMAP       0x32
-#define LOCAL_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_VMAP, struct local_i915_gem_vmap)
-struct local_i915_gem_vmap {
+#define LOCAL_I915_GEM_USERPTR       0x32
+#define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
+struct local_i915_gem_userptr {
 	uint64_t user_ptr;
 	uint32_t user_size;
 	uint32_t flags;
-#define I915_VMAP_READ_ONLY 0x1
+#define I915_USERPTR_READ_ONLY 0x1
 	uint32_t handle;
 };
 
@@ -195,24 +195,24 @@ static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
 	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
 }
 
-static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
+static uint32_t gem_userptr(int fd, void *ptr, int size, int read_only)
 {
-	struct local_i915_gem_vmap vmap;
+	struct local_i915_gem_userptr arg;
 
-	VG_CLEAR(vmap);
-	vmap.user_ptr = (uintptr_t)ptr;
-	vmap.user_size = size;
-	vmap.flags = 0;
+	VG_CLEAR(arg);
+	arg.user_ptr = (uintptr_t)ptr;
+	arg.user_size = size;
+	arg.flags = 0;
 	if (read_only)
-		vmap.flags |= I915_VMAP_READ_ONLY;
+		arg.flags |= I915_USERPTR_READ_ONLY;
 
-	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_VMAP, &vmap)) {
+	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg)) {
 		DBG(("%s: failed to map %p + %d bytes: %d\n",
 		     __FUNCTION__, ptr, size, errno));
 		return 0;
 	}
 
-	return vmap.handle;
+	return arg.handle;
 }
 
 static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
@@ -716,13 +716,13 @@ static bool test_has_cacheing(struct kgem *kgem)
 	return ret;
 }
 
-static bool test_has_vmap(struct kgem *kgem)
+static bool test_has_userptr(struct kgem *kgem)
 {
-#if defined(USE_VMAP)
+#if defined(USE_USERPTR)
 	uint32_t handle;
 	void *ptr;
 
-	if (DBG_NO_VMAP)
+	if (DBG_NO_USERPTR)
 		return false;
 
 	/* Incoherent blt and sampler hangs the GPU */
@@ -730,7 +730,7 @@ static bool test_has_vmap(struct kgem *kgem)
 		return false;
 
 	ptr = malloc(PAGE_SIZE);
-	handle = gem_vmap(kgem->fd, ptr, PAGE_SIZE, false);
+	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
 	gem_close(kgem->fd, handle);
 	free(ptr);
 
@@ -781,9 +781,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
 	     kgem->has_cacheing));
 
-	kgem->has_vmap = test_has_vmap(kgem);
-	DBG(("%s: has vmap? %d\n", __FUNCTION__,
-	     kgem->has_vmap));
+	kgem->has_userptr = test_has_userptr(kgem);
+	DBG(("%s: has userptr? %d\n", __FUNCTION__,
+	     kgem->has_userptr));
 
 	kgem->has_semaphores = false;
 	if (kgem->has_blt && test_has_semaphores_enabled())
@@ -846,9 +846,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->next_request = __kgem_request_alloc();
 
-	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, vmap? %d\n", __FUNCTION__,
-	     kgem->has_llc | kgem->has_vmap | kgem->has_cacheing,
-	     kgem->has_llc, kgem->has_cacheing, kgem->has_vmap));
+	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
+	     kgem->has_llc | kgem->has_userptr | kgem->has_cacheing,
+	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));
 
 	VG_CLEAR(aperture);
 	aperture.aper_size = 64*1024*1024;
@@ -919,7 +919,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->large_object_size = MAX_CACHE_SIZE;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
-	if (kgem->has_llc | kgem->has_cacheing | kgem->has_vmap) {
+	if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
 		if (kgem->large_object_size > kgem->max_cpu_size)
 			kgem->large_object_size = kgem->max_cpu_size;
 	} else
@@ -1174,7 +1174,7 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	int type = IS_CPU_MAP(bo->map);
 
-	assert(!IS_VMAP_MAP(bo->map));
+	assert(!IS_USER_MAP(bo->map));
 
 	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
 	     __FUNCTION__, type ? "CPU" : "GTT",
@@ -1204,7 +1204,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 
 	kgem_bo_binding_free(kgem, bo);
 
-	if (IS_VMAP_MAP(bo->map)) {
+	if (IS_USER_MAP(bo->map)) {
 		assert(bo->rq == NULL);
 		assert(MAP(bo->map) != bo || bo->io);
 		if (bo != MAP(bo->map)) {
@@ -3296,7 +3296,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		return bo;
 	}
 
-	if (kgem->has_vmap) {
+	if (kgem->has_userptr) {
 		void *ptr;
 
 		/* XXX */
@@ -3310,7 +3310,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 			return NULL;
 		}
 
-		bo->map = MAKE_VMAP_MAP(ptr);
+		bo->map = MAKE_USER_MAP(ptr);
 		bo->pitch = stride;
 		return bo;
 	}
@@ -3788,10 +3788,10 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	struct kgem_bo *bo;
 	uint32_t handle;
 
-	if (!kgem->has_vmap)
+	if (!kgem->has_userptr)
 		return NULL;
 
-	handle = gem_vmap(kgem->fd, ptr, size, read_only);
+	handle = gem_userptr(kgem->fd, ptr, size, read_only);
 	if (handle == 0)
 		return NULL;
 
@@ -4052,7 +4052,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		return bo;
 	}
 
-	if (kgem->has_vmap) {
+	if (kgem->has_userptr) {
 		bo = buffer_alloc();
 		if (bo == NULL)
 			return NULL;
@@ -4063,7 +4063,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 			return NULL;
 		}
 
-		handle = gem_vmap(kgem->fd, bo->mem, alloc * PAGE_SIZE, false);
+		handle = gem_userptr(kgem->fd, bo->mem, alloc * PAGE_SIZE, false);
 		if (handle == 0) {
 			free(bo->mem);
 			free(bo);
@@ -4081,7 +4081,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		bo->base.refcnt = 1;
 		bo->base.snoop = true;
 		bo->base.reusable = false;
-		bo->base.map = MAKE_VMAP_MAP(bo->mem);
+		bo->base.map = MAKE_USER_MAP(bo->mem);
 
 		return bo;
 	}
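
As a self-contained sketch of exercising the renamed interface
(mirroring test_has_userptr() above; struct local_i915_gem_userptr and
LOCAL_IOCTL_I915_GEM_USERPTR are the local definitions from this diff,
while the page-aligned allocation is an extra precaution assumed here,
not taken from the driver):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>
	#include <xf86drm.h>	/* drmIoctl(); pulls in struct drm_gem_close */

	#define PAGE_SIZE 4096

	/* local_i915_gem_userptr and LOCAL_IOCTL_I915_GEM_USERPTR as
	 * defined in kgem.c above */

	static bool probe_userptr(int fd)
	{
		struct local_i915_gem_userptr arg;
		struct drm_gem_close close_bo;
		void *ptr;
		bool ret;

		/* wrap exactly one page of anonymous memory */
		if (posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE))
			return false;

		memset(&arg, 0, sizeof(arg));
		arg.user_ptr = (uintptr_t)ptr;
		arg.user_size = PAGE_SIZE;

		ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg) == 0;
		if (ret) {
			/* drop the object before releasing its backing pages */
			memset(&close_bo, 0, sizeof(close_bo));
			close_bo.handle = arg.handle;
			drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
		}

		free(ptr);
		return ret;
	}
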
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 351aa32..f1ded24 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -149,7 +149,7 @@ struct kgem {
 	uint32_t need_throttle:1;
 	uint32_t busy:1;
 
-	uint32_t has_vmap :1;
+	uint32_t has_userptr :1;
 	uint32_t has_blt :1;
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_relaxed_delta :1;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f792430..32f7007 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -60,7 +60,6 @@
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
-#define USE_SHM_VMAP 1
 
 #define MIGRATE_ALL 0
 #define DBG_NO_CPU_UPLOAD 0
@@ -13053,7 +13052,7 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	assert(screen->SetWindowPixmap == NULL);
 	screen->SetWindowPixmap = sna_set_window_pixmap;
 
-	if (USE_SHM_VMAP && sna->kgem.has_vmap)
+	if (sna->kgem.has_userptr)
 		ShmRegisterFuncs(screen, &shm_funcs);
 	else
 		ShmRegisterFbFuncs(screen);
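
A note on the renamed MAP()/MAKE_USER_MAP()/IS_USER_MAP() macros:
bo->map caches a page-aligned mapping, so the low two bits of the
pointer are free to tag how the mapping was created. Read off the
macros, the encoding is:

	bits 1:0 == 00	GTT mapping (plain pointer)
	bits 1:0 == 01	CPU mmap    (MAKE_CPU_MAP, tested by IS_CPU_MAP)
	bits 1:0 == 11	user memory (MAKE_USER_MAP; bit 1 alone is what
			             IS_USER_MAP tests)

MAP() masks both bits off again before the pointer is dereferenced or
released, e.g. free(MAP(bo->map)) in kgem_bo_free().
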
commit 8dcccd308222bcf1b96f2ee15842b4558ea5f29e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 12:47:21 2012 +0100

    sna: s/vmap/snoop/ since we use the flag more generically
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
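
(The flag covers any CPU-cacheable, i.e. snooped, bo, whether made
cacheable via set-cache-level or wrapped from user memory, so the old
vmap name had become misleading; see create_snoopable_buffer() in the
diff below.)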

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index de6c8c4..bc37615 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -727,7 +727,7 @@ gen4_bind_bo(struct sna *sna,
 	uint32_t domains;
 	uint16_t offset;
 
-	assert(!kgem_bo_is_vmap(bo));
+	assert(!kgem_bo_is_snoop(bo));
 
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5af0a9e..578c8e4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -54,7 +54,7 @@ static struct kgem_bo *
 search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 static struct kgem_bo *
-search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
+search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 #define DBG_NO_HW 0
 #define DBG_NO_TILING 0
@@ -830,7 +830,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->large);
-	list_init(&kgem->vmap);
+	list_init(&kgem->snoop);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
 	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
@@ -1195,7 +1195,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 	assert(bo->refcnt == 0);
 	assert(bo->exec == NULL);
-	assert(!bo->vmap || bo->rq == NULL);
+	assert(!bo->snoop || bo->rq == NULL);
 
 #ifdef DEBUG_MEMORY
 	kgem->debug_memory.bo_allocs--;
@@ -1208,7 +1208,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 		assert(bo->rq == NULL);
 		assert(MAP(bo->map) != bo || bo->io);
 		if (bo != MAP(bo->map)) {
-			DBG(("%s: freeing vmap base\n", __FUNCTION__));
+			DBG(("%s: freeing snooped base\n", __FUNCTION__));
 			free(MAP(bo->map));
 		}
 		bo->map = NULL;
@@ -1320,7 +1320,7 @@ static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
 		io->used = bo->delta;
 }
 
-static void kgem_bo_move_to_vmap(struct kgem *kgem, struct kgem_bo *bo)
+static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
 		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
@@ -1332,18 +1332,18 @@ static void kgem_bo_move_to_vmap(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->tiling == I915_TILING_NONE);
 	assert(bo->rq == NULL);
 
-	DBG(("%s: moving %d to vmap\n", __FUNCTION__, bo->handle));
-	list_add(&bo->list, &kgem->vmap);
+	DBG(("%s: moving %d to snoop cache\n", __FUNCTION__, bo->handle));
+	list_add(&bo->list, &kgem->snoop);
 }
 
 static struct kgem_bo *
-search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
+search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 {
 	struct kgem_bo *bo, *first = NULL;
 
 	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));
 
-	if (list_is_empty(&kgem->vmap)) {
+	if (list_is_empty(&kgem->snoop)) {
 		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
 		if (!__kgem_throttle_retire(kgem, flags)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
@@ -1351,9 +1351,9 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		}
 	}
 
-	list_for_each_entry(bo, &kgem->vmap, list) {
+	list_for_each_entry(bo, &kgem->snoop, list) {
 		assert(bo->refcnt == 0);
-		assert(bo->vmap);
+		assert(bo->snoop);
 		assert(bo->proxy == NULL);
 		assert(bo->tiling == I915_TILING_NONE);
 		assert(bo->rq == NULL);
@@ -1371,7 +1371,7 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		bo->pitch = 0;
 		bo->delta = 0;
 
-		DBG(("  %s: found handle=%d (num_pages=%d) in vmap cache\n",
+		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
 		     __FUNCTION__, bo->handle, num_pages(bo)));
 		return bo;
 	}
@@ -1381,7 +1381,7 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		first->pitch = 0;
 		first->delta = 0;
 
-		DBG(("  %s: found handle=%d (num_pages=%d) in vmap cache\n",
+		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
 		     __FUNCTION__, first->handle, num_pages(first)));
 		return first;
 	}
@@ -1404,27 +1404,27 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (DBG_NO_CACHE)
 		goto destroy;
 
-	if (bo->vmap && !bo->flush) {
-		DBG(("%s: handle=%d is vmapped\n", __FUNCTION__, bo->handle));
+	if (bo->snoop && !bo->flush) {
+		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
 		assert(!bo->flush);
 		assert(list_is_empty(&bo->list));
 		if (bo->rq == NULL) {
 			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
-				DBG(("%s: handle=%d is vmapped, tracking until free\n",
+				DBG(("%s: handle=%d is snooped, tracking until free\n",
 				     __FUNCTION__, bo->handle));
 				list_add(&bo->request, &kgem->flushing);
 				bo->rq = &_kgem_static_request;
 			}
 		}
 		if (bo->rq == NULL)
-			kgem_bo_move_to_vmap(kgem, bo);
+			kgem_bo_move_to_snoop(kgem, bo);
 		return;
 	}
 
 	if (bo->io) {
 		struct kgem_bo *base;
 
-		assert(!bo->vmap);
+		assert(!bo->snoop);
 		base = malloc(sizeof(*base));
 		if (base) {
 			DBG(("%s: transferring io handle=%d to bo\n",
@@ -1452,7 +1452,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 
 	assert(list_is_empty(&bo->vma));
 	assert(list_is_empty(&bo->list));
-	assert(bo->vmap == false);
+	assert(bo->snoop == false);
 	assert(bo->io == false);
 	assert(bo->scanout == false);
 	assert(bo->flush == false);
@@ -1576,8 +1576,8 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 		list_del(&bo->request);
 
 		if (!bo->refcnt) {
-			if (bo->vmap) {
-				kgem_bo_move_to_vmap(kgem, bo);
+			if (bo->snoop) {
+				kgem_bo_move_to_snoop(kgem, bo);
 			} else if (kgem_bo_set_purgeable(kgem, bo)) {
 				assert(bo->reusable);
 				kgem_bo_move_to_inactive(kgem, bo);
@@ -1641,12 +1641,12 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			if (bo->refcnt)
 				continue;
 
-			if (bo->vmap) {
+			if (bo->snoop) {
 				if (bo->needs_flush) {
 					list_add(&bo->request, &kgem->flushing);
 					bo->rq = &_kgem_static_request;
 				} else {
-					kgem_bo_move_to_vmap(kgem, bo);
+					kgem_bo_move_to_snoop(kgem, bo);
 				}
 				continue;
 			}
@@ -1738,9 +1738,10 @@ static void kgem_commit(struct kgem *kgem)
 	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
 		assert(next->request.prev == &bo->request);
 
-		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d -> offset=%x\n",
+		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
 		     __FUNCTION__, bo->handle, bo->proxy != NULL,
-		     bo->dirty, bo->needs_flush, (unsigned)bo->exec->offset));
+		     bo->dirty, bo->needs_flush, bo->snoop,
+		     (unsigned)bo->exec->offset));
 
 		assert(!bo->purged);
 		assert(bo->rq == rq || (bo->proxy->rq == rq));
@@ -1748,7 +1749,7 @@ static void kgem_commit(struct kgem *kgem)
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
 
-		if (!bo->refcnt && !bo->reusable && !bo->vmap) {
+		if (!bo->refcnt && !bo->reusable && !bo->snoop) {
 			kgem_bo_free(kgem, bo);
 			continue;
 		}
@@ -1834,7 +1835,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
 			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
 				DBG(("%s: retaining upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
-				assert(!bo->base.vmap);
+				assert(!bo->base.snoop);
 				list_move(&bo->base.list,
 					  &kgem->active_buffers);
 				continue;
@@ -2285,7 +2286,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 
 
 	expire = 0;
-	list_for_each_entry(bo, &kgem->vmap, list) {
+	list_for_each_entry(bo, &kgem->snoop, list) {
 		if (bo->delta) {
 			expire = now - MAX_INACTIVE_TIME/2;
 			break;
@@ -2294,8 +2295,8 @@ bool kgem_expire_cache(struct kgem *kgem)
 		bo->delta = now;
 	}
 	if (expire) {
-		while (!list_is_empty(&kgem->vmap)) {
-			bo = list_last_entry(&kgem->vmap, struct kgem_bo, list);
+		while (!list_is_empty(&kgem->snoop)) {
+			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);
 
 			if (bo->delta > expire)
 				break;
@@ -2411,9 +2412,9 @@ void kgem_cleanup_cache(struct kgem *kgem)
 						     struct kgem_bo, list));
 	}
 
-	while (!list_is_empty(&kgem->vmap))
+	while (!list_is_empty(&kgem->snoop))
 		kgem_bo_free(kgem,
-			     list_last_entry(&kgem->vmap,
+			     list_last_entry(&kgem->snoop,
 					     struct kgem_bo, list));
 
 	while (__kgem_freed_bo) {
@@ -3262,10 +3263,10 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
 	     __FUNCTION__, width, height, bpp, stride));
 
-	bo = search_vmap_cache(kgem, NUM_PAGES(size), 0);
+	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
 	if (bo) {
 		assert(bo->tiling == I915_TILING_NONE);
-		assert(bo->vmap);
+		assert(bo->snoop);
 		bo->refcnt = 1;
 		bo->pitch = stride;
 		return bo;
@@ -3284,7 +3285,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		}
 
 		bo->reusable = false;
-		bo->vmap = true;
+		bo->snoop = true;
 
 		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
 			kgem_bo_destroy(kgem, bo);
@@ -3801,7 +3802,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	}
 
 	bo->reusable = false;
-	bo->vmap = true;
+	bo->snoop = true;
 
 	debug_alloc__bo(kgem, bo);
 
@@ -3964,7 +3965,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	struct kgem_buffer *bo;
 	struct kgem_bo *old;
 
-	old = search_vmap_cache(kgem, alloc, 0);
+	old = search_snoop_cache(kgem, alloc, 0);
 	if (old) {
 		if (!old->io) {
 			bo = buffer_alloc();
@@ -3980,7 +3981,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
 		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));
 
-		assert(bo->base.vmap);
+		assert(bo->base.snoop);
 		assert(bo->base.tiling == I915_TILING_NONE);
 		assert(num_pages(&bo->base) >= alloc);
 		assert(bo->mmapped == true);
@@ -4040,7 +4041,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		assert(bo->need_io == false);
 
 		bo->base.reusable = false;
-		bo->base.vmap = true;
+		bo->base.snoop = true;
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
 		if (bo->mem == NULL) {
@@ -4071,14 +4072,14 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 
 		debug_alloc(kgem, alloc);
 		__kgem_bo_init(&bo->base, handle, alloc);
-		DBG(("%s: created vmap handle=%d for buffer\n",
+		DBG(("%s: created snoop handle=%d for buffer\n",
 		     __FUNCTION__, bo->base.handle));
 
 		assert(bo->mmapped == true);
 		assert(bo->need_io == false);
 
 		bo->base.refcnt = 1;
-		bo->base.vmap = true;
+		bo->base.snoop = true;
 		bo->base.reusable = false;
 		bo->base.map = MAKE_VMAP_MAP(bo->mem);
 
@@ -4133,7 +4134,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (flags & KGEM_BUFFER_WRITE) {
 			if ((bo->write & KGEM_BUFFER_WRITE) == 0 ||
 			    (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
-			     !bo->base.vmap)) {
+			     !bo->base.snoop)) {
 				DBG(("%s: skip write %x buffer, need %x\n",
 				     __FUNCTION__, bo->write, flags));
 				continue;
@@ -4161,7 +4162,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			assert(bo->base.io);
 			assert(bo->base.refcnt >= 1);
 			assert(bo->mmapped);
-			assert(!bo->base.vmap);
+			assert(!bo->base.snoop);
 
 			if ((bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
 				DBG(("%s: skip write %x buffer, need %x\n",
@@ -4546,8 +4547,8 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 
 	bo = (struct kgem_buffer *)_bo;
 
-	DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
-	     offset, length, bo->base.vmap));
+	DBG(("%s(offset=%d, length=%d, snooped=%d)\n", __FUNCTION__,
+	     offset, length, bo->base.snoop));
 
 	if (bo->mmapped) {
 		struct drm_i915_gem_set_domain set_domain;
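
One detail in kgem_bo_move_to_snoop() above: with 4096-byte pages,
kgem->max_cpu_size >> 13 is half of max_cpu_size expressed in pages,
so a retired CPU buffer is kept on the snoop list only if it is no
larger than half the maximum CPU bo size (e.g. a 64 MiB max_cpu_size
gives an 8192-page, i.e. 32 MiB, cutoff); anything bigger is freed
outright.
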
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index ff592e0..351aa32 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -82,7 +82,7 @@ struct kgem_bo {
 	uint32_t dirty : 1;
 	uint32_t domain : 2;
 	uint32_t needs_flush : 1;
-	uint32_t vmap : 1;
+	uint32_t snoop : 1;
 	uint32_t io : 1;
 	uint32_t flush : 1;
 	uint32_t scanout : 1;
@@ -124,7 +124,7 @@ struct kgem {
 	struct list large;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
-	struct list vmap;
+	struct list snoop;
 	struct list batch_buffers, active_buffers;
 	struct list requests;
 	struct kgem_request *next_request;
@@ -503,11 +503,11 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
 }
 
-static inline bool kgem_bo_is_vmap(struct kgem_bo *bo)
+static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
 {
 	while (bo->proxy)
 		bo = bo->proxy;
-	return bo->vmap;
+	return bo->snoop;
 }
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
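
Note that the renamed kgem_bo_is_snoop() walks the bo->proxy chain
first, so a proxy reports the snoop status of the buffer it was
sub-allocated from rather than any flag of its own.
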
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dee8c02..f792430 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -422,8 +422,8 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 						  pixmap->drawable.bitsPerPixel,
 						  from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
 		if (priv->cpu_bo) {
-			DBG(("%s: allocated CPU handle=%d (vmap? %d)\n", __FUNCTION__,
-			     priv->cpu_bo->handle, priv->cpu_bo->vmap));
+			DBG(("%s: allocated CPU handle=%d (snooped? %d)\n", __FUNCTION__,
+			     priv->cpu_bo->handle, priv->cpu_bo->snoop));
 
 			priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
 			priv->stride = priv->cpu_bo->pitch;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index a8b5a06..17ac814 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -310,7 +310,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 		return NULL;
 	}
 
-	if (priv->cpu_bo->vmap && priv->source_count > SOURCE_BIAS) {
+	if (priv->cpu_bo->snoop && priv->source_count > SOURCE_BIAS) {
 		DBG(("%s: promoting snooped CPU bo due to reuse\n",
 		     __FUNCTION__));
 		return NULL;
@@ -558,7 +558,7 @@ sna_render_pixmap_bo(struct sna *sna,
 
 		if (priv->cpu_bo &&
 		    (DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
-		    !priv->cpu_bo->vmap && priv->cpu_bo->pitch < 4096) {
+		    !priv->cpu_bo->snoop && priv->cpu_bo->pitch < 4096) {
 			DBG(("%s: CPU all damaged\n", __FUNCTION__));
 			channel->bo = priv->cpu_bo;
 			goto done;
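
Finally, the sna_accel.c hunk in the userptr commit above means SHM
segments are served by the driver's own shm_funcs whenever userptr is
available. A hypothetical caller-side sketch of the primitive behind
that path (kgem_create_map() as shown in this series; shmaddr and size
stand in for a real SHM attach):

	/* Sketch: wrap existing client memory as a bo instead of
	 * copying it.  NULL means the kernel lacks userptr support and
	 * the caller must fall back to the copying fb path. */
	struct kgem_bo *bo;

	bo = kgem_create_map(&sna->kgem, shmaddr, size, false);
	if (bo == NULL) {
		/* no userptr: use the ShmRegisterFbFuncs()-style copies */
	}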


More information about the xorg-commit mailing list