xf86-video-intel: 5 commits - src/sna/gen3_render.c src/sna/gen4_vertex.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_dri.c src/sna/sna_driver.c src/sna/sna_io.c tools/virtual.c

Chris Wilson <ickle@kemper.freedesktop.org>
Mon Sep 23 02:51:03 PDT 2013


 src/sna/gen3_render.c |    2 
 src/sna/gen4_vertex.c |    2 
 src/sna/kgem.c        |  289 +++++++++++++++++---------------------------------
 src/sna/kgem.h        |   17 +-
 src/sna/sna_accel.c   |   79 ++++++-------
 src/sna/sna_dri.c     |  122 +++++++++++++++------
 src/sna/sna_driver.c  |    4 
 src/sna/sna_io.c      |    6 -
 tools/virtual.c       |   29 +++--
 9 files changed, 265 insertions(+), 285 deletions(-)

New commits:
commit bee26a8e2eaefdcb2072ce8ba92585d3bf713c82
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Sep 23 09:46:36 2013 +0100

    sna/dri: Sanitize the backbuffer flip cache
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
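
The diff below replaces the frame event's single spare-buffer slot with a
list, so any number of retired scanout bos can be recycled as future back
buffers rather than at most one. A self-contained sketch of the push/pop
pattern, with stub types and a plain next-pointer standing in for the
driver's intrusive struct list:

    #include <stdint.h>
    #include <stdlib.h>

    struct kgem_bo { int handle; int refcnt; };     /* stub */

    struct dri_bo {
            struct dri_bo *next;
            struct kgem_bo *bo;
            uint32_t name;
    };

    /* Retire a flipped-out scanout into the cache
     * (cf. sna_dri_flip_event below). */
    static void cache_put(struct dri_bo **cache,
                          struct kgem_bo *bo, uint32_t name)
    {
            struct dri_bo *c = malloc(sizeof(*c));
            if (c == NULL)
                    return; /* caller destroys the bo instead */
            c->bo = bo;
            c->name = name;
            c->next = *cache;
            *cache = c;
    }

    /* Pop a cached bo for reuse as the next back buffer
     * (cf. sna_dri_flip_get_back below). */
    static struct kgem_bo *cache_get(struct dri_bo **cache, uint32_t *name)
    {
            struct dri_bo *c = *cache;
            struct kgem_bo *bo;

            if (c == NULL)
                    return NULL; /* fall back to kgem_create_2d() */
            bo = c->bo;
            *name = c->name;
            *cache = c->next;
            free(c);
            return bo;
    }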

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index cf37f94..2f1ccff 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -93,6 +93,12 @@ static inline struct kgem_bo *ref(struct kgem_bo *bo)
 	return bo;
 }
 
+static inline void unref(struct kgem_bo *bo)
+{
+	assert(bo->refcnt > 1);
+	bo->refcnt--;
+}
+
 /* Prefer to enable TILING_Y if this buffer will never be a
  * candidate for pageflipping
  */
@@ -854,6 +860,12 @@ static inline int sna_wait_vblank(struct sna *sna, drmVBlank *vbl)
 
 #if DRI2INFOREC_VERSION >= 4
 
+struct dri_bo {
+	struct list link;
+	struct kgem_bo *bo;
+	uint32_t name;
+};
+
 struct sna_dri_frame_event {
 	DrawablePtr draw;
 	ClientPtr client;
@@ -874,15 +886,16 @@ struct sna_dri_frame_event {
 	unsigned int fe_tv_sec;
 	unsigned int fe_tv_usec;
 
-	struct dri_bo {
+	struct {
 		struct kgem_bo *bo;
 		uint32_t name;
-	} scanout[2], cache;
+	} scanout[2];
+
+	struct list cache;
 
 	int mode;
 };
 
-
 static inline struct sna_dri_frame_event *
 to_frame_event(uintptr_t  data)
 {
@@ -1000,8 +1013,19 @@ sna_dri_frame_event_info_free(struct sna *sna,
 		kgem_bo_destroy(&sna->kgem, info->scanout[0].bo);
 	}
 
-	if (info->cache.bo)
-		kgem_bo_destroy(&sna->kgem, info->cache.bo);
+	while (!list_is_empty(&info->cache)) {
+		struct dri_bo *c;
+
+		c = list_first_entry(&info->cache, struct dri_bo, link);
+		list_del(&c->link);
+
+		if (c->bo) {
+			assert(c->bo->refcnt == 1);
+			kgem_bo_destroy(&sna->kgem, c->bo);
+		}
+
+		free(c);
+	}
 
 	if (info->bo)
 		kgem_bo_destroy(&sna->kgem, info->bo);
@@ -1439,33 +1463,39 @@ sna_dri_immediate_blit(struct sna *sna,
 	return ret;
 }
 
-
 static void
 sna_dri_flip_get_back(struct sna *sna, struct sna_dri_frame_event *info)
 {
+	struct dri_bo *c;
 	struct kgem_bo *bo;
 	uint32_t name;
 
-	DBG(("%s: scanout=(%d, %d), back=%d, cache=%d\n",
+	DBG(("%s: scanout=(%d, %d), back=%d, cache?=%d\n",
 	     __FUNCTION__,
 	     info->scanout[0].bo ? info->scanout[0].bo->handle : 0,
 	     info->scanout[1].bo ? info->scanout[1].bo->handle : 0,
 	     get_private(info->back)->bo->handle,
-	     info->cache.bo ? info->cache.bo->handle : 0));
+	     !list_is_empty(&info->cache)));
 
 	bo = get_private(info->back)->bo;
 	assert(bo->refcnt);
 	assert(bo->flush);
-	if (!(bo == info->scanout[0].bo || bo == info->scanout[1].bo))
+	if (!(bo == info->scanout[0].bo || bo == info->scanout[1].bo)) {
+		DBG(("%s: reuse unattached back\n", __FUNCTION__));
 		return;
+	}
 
-	bo = info->cache.bo;
-	name = info->cache.name;
-	if (bo == NULL ||
-	    bo == info->scanout[0].bo ||
-	    bo == info->scanout[1].bo) {
-		struct kgem_bo *old_bo = bo;
-
+	bo = NULL;
+	if (!list_is_empty(&info->cache)) {
+		c = list_first_entry(&info->cache, struct dri_bo, link);
+		bo = c->bo;
+		name = c->name;
+		DBG(("%s: reuse cache handle=%d,name=%d\n", __FUNCTION__,
+		     bo->handle, name));
+		list_move_tail(&c->link, &info->cache);
+		c->bo = NULL;
+	}
+	if (bo == NULL) {
 		DBG(("%s: allocating new backbuffer\n", __FUNCTION__));
 		bo = kgem_create_2d(&sna->kgem,
 				    info->draw->width,
@@ -1481,25 +1511,19 @@ sna_dri_flip_get_back(struct sna *sna, struct sna_dri_frame_event *info)
 			kgem_bo_destroy(&sna->kgem, bo);
 			return;
 		}
-
-		if (old_bo) {
-			DBG(("%s: discarding old backbuffer\n", __FUNCTION__));
-			kgem_bo_destroy(&sna->kgem, old_bo);
-		}
 	}
 
-	info->cache.bo = get_private(info->back)->bo;
-	info->cache.name = info->back->name;
-	assert(info->cache.bo->refcnt);
-	assert(info->cache.name);
+	assert(!(bo == info->scanout[0].bo || bo == info->scanout[1].bo));
+	assert(name);
 
+	unref(get_private(info->back)->bo);
 	get_private(info->back)->bo = bo;
 	info->back->name = name;
 
 	assert(get_private(info->back)->bo != info->scanout[0].bo);
 	assert(get_private(info->back)->bo != info->scanout[1].bo);
 
-	assert(bo->refcnt);
+	assert(bo->refcnt == 1);
 	assert(bo->flush);
 }
 
@@ -1587,14 +1611,40 @@ static void sna_dri_flip_event(struct sna *sna,
 	     flip->fe_tv_usec,
 	     flip->type));
 
-	if (flip->cache.bo == NULL) {
-		flip->cache = flip->scanout[1];
-		flip->scanout[1].bo = NULL;
-	}
 	if (flip->scanout[1].bo) {
-		kgem_bo_destroy(&sna->kgem, flip->scanout[1].bo);
+		struct dri_bo *c = NULL;
+
+		DBG(("%s: retiring previous scanout handle=%d,name=%d\n",
+		     __FUNCTION__,
+		     flip->scanout[1].bo->handle,
+		     flip->scanout[1].name));
+
+		if (flip->scanout[1].bo != flip->scanout[0].bo) {
+			assert(flip->scanout[1].bo->refcnt == 1);
+
+			if (!list_is_empty(&flip->cache))
+				c = list_last_entry(&flip->cache, struct dri_bo, link);
+			if (c) {
+				if (c->bo == NULL)
+					_list_del(&c->link);
+				else
+					c = NULL;
+			}
+			if (c == NULL)
+				c = malloc(sizeof(*c));
+			if (c != NULL) {
+				c->bo = flip->scanout[1].bo;
+				c->name = flip->scanout[1].name;
+				list_add(&c->link, &flip->cache);
+			}
+		}
+
+		if (c == NULL)
+			kgem_bo_destroy(&sna->kgem, flip->scanout[1].bo);
+
 		flip->scanout[1].bo = NULL;
 	}
+
 	if (sna->dri.flip_pending == flip)
 		sna->dri.flip_pending = NULL;
 
@@ -1771,6 +1821,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw,
 		if (info == NULL)
 			return false;
 
+		list_init(&info->cache);
 		info->type = use_triple_buffer(sna, client);
 		info->draw = draw;
 		info->client = client;
@@ -1822,6 +1873,7 @@ out:
 	if (info == NULL)
 		return false;
 
+	list_init(&info->cache);
 	info->draw = draw;
 	info->client = client;
 	info->event_complete = func;
@@ -1993,6 +2045,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	if (!info)
 		goto blit;
 
+	list_init(&info->cache);
 	info->draw = draw;
 	info->client = client;
 	info->event_complete = func;
@@ -2202,6 +2255,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	if (!info)
 		goto out_complete;
 
+	list_init(&info->cache);
 	info->draw = draw;
 	info->client = client;
 	info->type = DRI2_WAITMSC;
commit c4f5da7ab9e02c3994fe9668630480f85e706e89
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Sep 23 09:45:13 2013 +0100

    sna/dri: Report saner target_msc values for vblank_mode=0
    
    If the desired target_msc is 0, report 0 rather than (CARD64)-1.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
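
The (CARD64)-1 in the old reply is plain unsigned wraparound: CARD64 is an
unsigned 64-bit type, so msc bookkeeping that steps back one frame from a
desired target of 0 wraps to the maximum value (the step-back is an
assumption here; the actual computation lives in sna_dri.c). A standalone
illustration of the arithmetic and of the guard the patch adds:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t CARD64;        /* normally from <X11/Xmd.h> */

    int main(void)
    {
            CARD64 target_msc = 0; /* vblank_mode=0: no target requested */

            /* Stepping back one frame from target 0 wraps: */
            CARD64 current_msc = target_msc - 1;
            printf("%llu\n", (unsigned long long)current_msc);
            /* -> 18446744073709551615, i.e. (CARD64)-1 */

            /* The guarded reply keeps a zero target at zero: */
            if (target_msc)
                    target_msc = current_msc + 1;
            printf("%llu\n", (unsigned long long)target_msc); /* 0 */
            return 0;
    }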

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index a146f74..cf37f94 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1757,6 +1757,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw,
 				     __FUNCTION__));
 				sna_dri_exchange_buffers(draw, front, back);
 				info->mode = 2;
+				current_msc = *target_msc;
 				goto new_back;
 			} else {
 				DBG(("%s: chaining flip\n", __FUNCTION__));
@@ -2014,7 +2015,8 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		bool sync = current_msc < *target_msc;
 		if (!sna_dri_immediate_blit(sna, info, sync, true))
 			sna_dri_frame_event_info_free(sna, draw, info);
-		*target_msc = current_msc + sync;
+		if (*target_msc)
+			*target_msc = current_msc + sync;
 		return TRUE;
 	}
 
commit f296872f373d3439b08e055b03584763e6c2ffec
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Sun Sep 22 17:29:33 2013 +0100

    sna/dri: Fix SwapbufferWait
    
    A regression from
    
    commit f99e49f7642545f75bac682274767c45c2e6192a
    Author: Chris Wilson <chris@chris-wilson.co.uk>
    Date:   Tue Sep 17 09:15:40 2013 +0100
    
        intel: Make the option to control VSync and PageFlip explict
    
    which inverted the sense of the option controlling whether to wait on
    swapbuffers.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
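
The trap is that the configuration option is positive ("SwapbuffersWait")
while the flag it sets is negative (SNA_NO_WAIT), so a bare test of the
flag reads inverted relative to the option. A minimal sketch of the
correct mapping; the bit value is illustrative, not the driver's real
flag:

    #include <stdbool.h>
    #include <stdio.h>

    #define SNA_NO_WAIT (1 << 0)    /* illustrative bit only */

    static unsigned parse_options(bool swapbuffers_wait)
    {
            unsigned flags = 0;
            if (!swapbuffers_wait) /* option off => negative flag on */
                    flags |= SNA_NO_WAIT;
            return flags;
    }

    int main(void)
    {
            unsigned flags = parse_options(true); /* default: wait */

            /* Regression: the pipe to wait on was looked up only when
             * the *negative* flag was set, so the default configuration
             * never waited: */
            if (flags & SNA_NO_WAIT)
                    printf("buggy: looks up pipe (waits) only here\n");

            /* Fix: look up the pipe when waiting is enabled: */
            if ((flags & SNA_NO_WAIT) == 0)
                    printf("fixed: wait for vblank before swapping\n");
            return 0;
    }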

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 6584e66..a146f74 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1973,7 +1973,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	/* Drawable not displayed... just complete the swap */
 	pipe = -1;
-	if (sna->flags & SNA_NO_WAIT)
+	if ((sna->flags & SNA_NO_WAIT) == 0)
 		pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1) {
 		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index cce8d2a..6d4420f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -579,18 +579,22 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 
 	if (!xf86ReturnOptValBool(sna->Options, OPTION_SWAPBUFFERS_WAIT, TRUE))
 		sna->flags |= SNA_NO_WAIT;
+	DBG(("%s: swapbuffer wait? %s\n", __FUNCTION__, sna->flags & SNA_NO_WAIT ? "disabled" : "enabled"));
 
 	if (!has_vsync(sna) ||
 	    !xf86ReturnOptValBool(sna->Options, OPTION_VSYNC, TRUE))
 		sna->flags |= SNA_NO_VSYNC;
+	DBG(("%s: vsync? %s\n", __FUNCTION__, sna->flags & SNA_NO_VSYNC ? "disabled" : "enabled"));
 
 	if (!has_pageflipping(sna) ||
 	    !xf86ReturnOptValBool(sna->Options, OPTION_PAGEFLIP, TRUE))
 		sna->flags |= SNA_NO_FLIP;
+	DBG(("%s: page flips? %s\n", __FUNCTION__, sna->flags & SNA_NO_FLIP ? "disabled" : "enabled"));
 
 	if ((sna->flags & (SNA_NO_VSYNC | SNA_NO_FLIP | SNA_NO_WAIT)) == 0 &&
 	    xf86ReturnOptValBool(sna->Options, OPTION_TRIPLE_BUFFER, TRUE))
 		sna->flags |= SNA_TRIPLE_BUFFER;
+	DBG(("%s: triple buffer? %s\n", __FUNCTION__, sna->flags & SNA_TRIPLE_BUFFER ? "enabled" : "disabled"));
 
 	if ((sna->flags & (SNA_NO_VSYNC | SNA_NO_FLIP)) == 0 &&
 	    xf86ReturnOptValBool(sna->Options, OPTION_TEAR_FREE, FALSE))
commit 42330fbae862cda9ca17ec62eb0d2e4fb86032b8
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Sun Sep 22 14:39:57 2013 +0100

    sna: Track CPU/GTT maps independently
    
    Now that we use CPU mmaps to read/write to tiled X surfaces, we find
    ourselves frequently switching between CPU and GTT mmaps and so wish to
    cache both.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
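
For context: until now each bo carried a single void *map with the map's
type tagged into the pointer's low bits (the IS_CPU_MAP/MAKE_CPU_MAP
machinery deleted below), so caching a CPU mmap meant dropping any cached
GTT mmap and vice versa. The commit splits the field in two so both
survive. A stripped-down sketch of the before and after, with the structs
reduced to the relevant members:

    #include <stdint.h>

    #define MAP(p) ((void *)((uintptr_t)(p) & ~3))

    /* Before: one slot, map type encoded in bit 0; the two map kinds
     * evict each other. */
    struct kgem_bo_old {
            void *map;
    };
    #define MAKE_CPU_MAP(p) ((void *)((uintptr_t)(p) | 1))
    #define IS_CPU_MAP(p)   ((uintptr_t)(p) & 1)

    /* After: both mappings cached side by side; only userptr bos still
     * tag bit 0 (MAKE_USER_MAP) on map__cpu. */
    struct kgem_bo_new {
            void *map__cpu;
            void *map__gtt;
    };

    /* Switching between CPU and GTT access no longer costs a
     * munmap()/mmap() round trip: */
    static void *bo_lookup_map(struct kgem_bo_new *bo, int for_cpu)
    {
            return for_cpu ? MAP(bo->map__cpu) : MAP(bo->map__gtt);
    }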

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 63dd5cc..cb8f046 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2305,7 +2305,7 @@ static void gen3_vertex_close(struct sna *sna)
 			sna->render.vertices = sna->render.vertex_data;
 			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 			free_bo = bo;
-		} else if (IS_CPU_MAP(bo->map)) {
+		} else if (sna->render.vertices == MAP(bo->map__cpu)) {
 			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
 			sna->render.vertices = kgem_bo_map__gtt(&sna->kgem, bo);
 			if (sna->render.vertices == NULL) {
diff --git a/src/sna/gen4_vertex.c b/src/sna/gen4_vertex.c
index 3c4911a..85e7413 100644
--- a/src/sna/gen4_vertex.c
+++ b/src/sna/gen4_vertex.c
@@ -169,7 +169,7 @@ void gen4_vertex_close(struct sna *sna)
 			sna->render.vertices = sna->render.vertex_data;
 			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 			free_bo = bo;
-		} else if (IS_CPU_MAP(bo->map) && !sna->kgem.has_llc) {
+		} else if (!sna->kgem.has_llc && sna->render.vertices == MAP(bo->map__cpu)) {
 			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
 			sna->render.vertices =
 				kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5863d2a..ce89658 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -110,10 +110,8 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define MAX_CPU_VMA_CACHE INT16_MAX
 #define MAP_PRESERVE_TIME 10
 
-#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
-#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
-#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
-#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
+#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
+#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 1)
 
 #define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))
 
@@ -176,7 +174,12 @@ struct kgem_buffer {
 	uint32_t used;
 	uint32_t need_io : 1;
 	uint32_t write : 2;
-	uint32_t mmapped : 1;
+	uint32_t mmapped : 2;
+};
+enum {
+	MMAPPED_NONE,
+	MMAPPED_GTT,
+	MMAPPED_CPU
 };
 
 static struct kgem_bo *__kgem_freed_bo;
@@ -1628,26 +1631,6 @@ static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
-static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
-{
-	int type = IS_CPU_MAP(bo->map);
-
-	assert(!IS_USER_MAP(bo->map));
-
-	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
-	     __FUNCTION__, type ? "CPU" : "GTT",
-	     bo->handle, kgem->vma[type].count));
-
-	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
-	munmap(MAP(bo->map), bytes(bo));
-	bo->map = NULL;
-
-	if (!list_is_empty(&bo->vma)) {
-		list_del(&bo->vma);
-		kgem->vma[type].count--;
-	}
-}
-
 static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
@@ -1663,21 +1646,31 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 
 	kgem_bo_binding_free(kgem, bo);
 
-	if (IS_USER_MAP(bo->map)) {
+	if (IS_USER_MAP(bo->map__cpu)) {
 		assert(bo->rq == NULL);
 		assert(!__kgem_busy(kgem, bo->handle));
-		assert(MAP(bo->map) != bo || bo->io || bo->flush);
+		assert(MAP(bo->map__cpu) != bo || bo->io || bo->flush);
 		if (!(bo->io || bo->flush)) {
 			DBG(("%s: freeing snooped base\n", __FUNCTION__));
-			assert(bo != MAP(bo->map));
-			free(MAP(bo->map));
+			assert(bo != MAP(bo->map__cpu));
+			free(MAP(bo->map__cpu));
 		}
-		bo->map = NULL;
+		bo->map__cpu = NULL;
+	}
+
+	DBG(("%s: releasing %p:%p vma for handle=%d, count=%d\n",
+	     __FUNCTION__, bo->map__gtt, bo->map__cpu,
+	     bo->handle, list_is_empty(&bo->vma) ? 0 : kgem->vma[bo->map__gtt == NULL].count));
+
+	if (!list_is_empty(&bo->vma)) {
+		_list_del(&bo->vma);
+		kgem->vma[bo->map__gtt == NULL].count--;
 	}
-	if (bo->map)
-		kgem_bo_release_map(kgem, bo);
-	assert(list_is_empty(&bo->vma));
-	assert(bo->map == NULL);
+
+	if (bo->map__gtt)
+		munmap(MAP(bo->map__gtt), bytes(bo));
+	if (bo->map__cpu)
+		munmap(MAP(bo->map__cpu), bytes(bo));
 
 	_list_del(&bo->list);
 	_list_del(&bo->request);
@@ -1719,18 +1712,21 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 
 	assert(bo->flush == false);
 	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
-	if (bo->map) {
-		int type = IS_CPU_MAP(bo->map);
+	if (bo->map__gtt) {
 		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
-		    (!type && !__kgem_bo_is_mappable(kgem, bo))) {
-			munmap(MAP(bo->map), bytes(bo));
-			bo->map = NULL;
+		    !__kgem_bo_is_mappable(kgem, bo)) {
+			munmap(MAP(bo->map__gtt), bytes(bo));
+			bo->map__gtt = NULL;
 		}
-		if (bo->map) {
-			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
-			kgem->vma[type].count++;
+		if (bo->map__gtt) {
+			list_add(&bo->vma, &kgem->vma[0].inactive[bucket(bo)]);
+			kgem->vma[0].count++;
 		}
 	}
+	if (bo->map__cpu && !bo->map__gtt) {
+		list_add(&bo->vma, &kgem->vma[1].inactive[bucket(bo)]);
+		kgem->vma[1].count++;
+	}
 }
 
 static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
@@ -1767,10 +1763,10 @@ inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
 	list_del(&bo->list);
 	assert(bo->rq == NULL);
 	assert(bo->exec == NULL);
-	if (bo->map) {
+	if (bo->map__gtt || bo->map__cpu) {
 		assert(!list_is_empty(&bo->vma));
 		list_del(&bo->vma);
-		kgem->vma[IS_CPU_MAP(bo->map)].count--;
+		kgem->vma[bo->map__gtt == NULL].count--;
 	}
 }
 
@@ -1971,7 +1967,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 			kgem_bo_move_to_snoop(kgem, bo);
 		return;
 	}
-	if (!IS_USER_MAP(bo->map))
+	if (!IS_USER_MAP(bo->map__cpu))
 		bo->flush = false;
 
 	if (bo->scanout) {
@@ -1987,9 +1983,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		goto destroy;
 	}
 
-	if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
-		kgem_bo_release_map(kgem, bo);
-
 	assert(list_is_empty(&bo->vma));
 	assert(list_is_empty(&bo->list));
 	assert(bo->flush == false);
@@ -2018,7 +2011,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->exec == NULL);
 	assert(list_is_empty(&bo->request));
 
-	if (!IS_CPU_MAP(bo->map)) {
+	if (bo->map__cpu == NULL) {
 		if (!kgem_bo_set_purgeable(kgem, bo))
 			goto destroy;
 
@@ -2053,9 +2046,9 @@ static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
 		assert(cached->proxy == &bo->base);
 		list_del(&cached->vma);
 
-		assert(*(struct kgem_bo **)cached->map == cached);
-		*(struct kgem_bo **)cached->map = NULL;
-		cached->map = NULL;
+		assert(*(struct kgem_bo **)cached->map__gtt == cached);
+		*(struct kgem_bo **)cached->map__gtt = NULL;
+		cached->map__gtt = NULL;
 
 		kgem_bo_destroy(kgem, cached);
 	}
@@ -2346,7 +2339,8 @@ static void kgem_commit(struct kgem *kgem)
 		kgem_retire(kgem);
 		assert(list_is_empty(&rq->buffers));
 
-		assert(rq->bo->map == NULL);
+		assert(rq->bo->map__gtt == NULL);
+		assert(rq->bo->map__cpu == NULL);
 		gem_close(kgem->fd, rq->bo->handle);
 		kgem_cleanup_cache(kgem);
 	} else {
@@ -2378,7 +2372,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
 	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
 		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%s\n",
 		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
-		     bo->write, bo->mmapped ? IS_CPU_MAP(bo->base.map) ? "cpu" : "gtt" : "no"));
+		     bo->write, bo->mmapped == MMAPPED_CPU ? "cpu" : bo->mmapped == MMAPPED_GTT ? "gtt" : "no"));
 
 		assert(next->base.list.prev == &bo->base.list);
 		assert(bo->base.io);
@@ -2403,7 +2397,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
 			used = ALIGN(bo->used, PAGE_SIZE);
 			if (!DBG_NO_UPLOAD_ACTIVE &&
 			    used + PAGE_SIZE <= bytes(&bo->base) &&
-			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
+			    (kgem->has_llc || bo->mmapped == MMAPPED_GTT || bo->base.snoop)) {
 				DBG(("%s: retaining upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
 				bo->used = used;
@@ -2412,7 +2406,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
 				continue;
 			}
 			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
-			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
+			     __FUNCTION__, bo->used, bo->mmapped));
 			goto decouple;
 		}
 
@@ -3157,7 +3151,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 				break;
 			}
 
-			if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
+			if (bo->map__cpu && bo->delta + MAP_PRESERVE_TIME > expire) {
 				idle = false;
 				list_move_tail(&bo->list, &preserve);
 			} else {
@@ -3349,7 +3343,7 @@ discard:
 		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
 		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
 		list_for_each_entry(bo, cache, vma) {
-			assert(IS_CPU_MAP(bo->map) == for_cpu);
+			assert(for_cpu ? bo->map__cpu : bo->map__gtt);
 			assert(bucket(bo) == cache_bucket(num_pages));
 			assert(bo->proxy == NULL);
 			assert(bo->rq == NULL);
@@ -3429,10 +3423,10 @@ discard:
 			bo->pitch = 0;
 		}
 
-		if (bo->map) {
+		if (bo->map__gtt || bo->map__cpu) {
 			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 				int for_cpu = !!(flags & CREATE_CPU_MAP);
-				if (IS_CPU_MAP(bo->map) != for_cpu) {
+				if (for_cpu ? bo->map__cpu : bo->map__gtt){
 					if (first != NULL)
 						break;
 
@@ -4067,8 +4061,7 @@ large_inactive:
 				assert(bucket(bo) == bucket);
 				assert(bo->refcnt == 0);
 				assert(!bo->scanout);
-				assert(bo->map);
-				assert(IS_CPU_MAP(bo->map) == for_cpu);
+				assert(for_cpu ? bo->map__cpu : bo->map__gtt);
 				assert(bo->rq == NULL);
 				assert(list_is_empty(&bo->request));
 				assert(bo->flush == false);
@@ -4319,9 +4312,6 @@ search_inactive:
 			if (!gem_set_tiling(kgem->fd, bo->handle,
 					    tiling, pitch))
 				continue;
-
-			if (bo->map)
-				kgem_bo_release_map(kgem, bo);
 		}
 
 		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
@@ -4859,6 +4849,7 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 	i = 0;
 	while (kgem->vma[type].count > 0) {
 		struct kgem_bo *bo = NULL;
+		void **ptr;
 
 		for (j = 0;
 		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
@@ -4871,15 +4862,14 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 			break;
 
 		DBG(("%s: discarding inactive %s vma cache for %d\n",
-		     __FUNCTION__,
-		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
-		assert(IS_CPU_MAP(bo->map) == type);
-		assert(bo->map);
+		     __FUNCTION__, type ? "CPU" : "GTT", bo->handle));
+
+		ptr = type ? &bo->map__cpu : &bo->map__gtt;
 		assert(bo->rq == NULL);
 
-		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
-		munmap(MAP(bo->map), bytes(bo));
-		bo->map = NULL;
+		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(*ptr), bytes(bo)));
+		munmap(MAP(*ptr), bytes(bo));
+		*ptr = NULL;
 		list_del(&bo->vma);
 		kgem->vma[type].count--;
 
@@ -4895,12 +4885,11 @@ void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
 {
 	void *ptr;
 
-	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
-	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
+	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
+	     bo->handle, bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));
 
 	assert(bo->proxy == NULL);
 	assert(list_is_empty(&bo->list));
-	assert(!IS_USER_MAP(bo->map));
 	assert_tiling(kgem, bo);
 
 	if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) {
@@ -4909,10 +4898,7 @@ void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
 		return kgem_bo_map__cpu(kgem, bo);
 	}
 
-	if (IS_CPU_MAP(bo->map))
-		kgem_bo_release_map(kgem, bo);
-
-	ptr = bo->map;
+	ptr = MAP(bo->map__gtt);
 	if (ptr == NULL) {
 		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
 
@@ -4927,7 +4913,7 @@ void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
 		 * issue with compositing managers which need to frequently
 		 * flush CPU damage to their GPU bo.
 		 */
-		bo->map = ptr;
+		bo->map__gtt = ptr;
 		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
 	}
 
@@ -4938,12 +4924,11 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	void *ptr;
 
-	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
-	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
+	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
+	     bo->handle, bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));
 
 	assert(bo->proxy == NULL);
 	assert(list_is_empty(&bo->list));
-	assert(!IS_USER_MAP(bo->map));
 	assert(bo->exec == NULL);
 	assert_tiling(kgem, bo);
 
@@ -4957,10 +4942,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 		return ptr;
 	}
 
-	if (IS_CPU_MAP(bo->map))
-		kgem_bo_release_map(kgem, bo);
-
-	ptr = bo->map;
+	ptr = MAP(bo->map__gtt);
 	if (ptr == NULL) {
 		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
 		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);
@@ -4976,7 +4958,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 		 * issue with compositing managers which need to frequently
 		 * flush CPU damage to their GPU bo.
 		 */
-		bo->map = ptr;
+		bo->map__gtt = ptr;
 		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
 	}
 
@@ -5006,18 +4988,14 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 {
 	void *ptr;
 
-	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
-	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
+	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
+	     bo->handle, bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));
 
 	assert(bo->exec == NULL);
 	assert(list_is_empty(&bo->list));
-	assert(!IS_USER_MAP(bo->map));
 	assert_tiling(kgem, bo);
 
-	if (IS_CPU_MAP(bo->map))
-		kgem_bo_release_map(kgem, bo);
-
-	ptr = bo->map;
+	ptr = MAP(bo->map__gtt);
 	if (ptr == NULL) {
 		assert(bytes(bo) <= kgem->aperture_mappable / 4);
 
@@ -5032,7 +5010,7 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 		 * issue with compositing managers which need to frequently
 		 * flush CPU damage to their GPU bo.
 		 */
-		bo->map = ptr;
+		bo->map__gtt = ptr;
 		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
 	}
 
@@ -5041,28 +5019,21 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 {
-	if (bo->map)
-		return MAP(bo->map);
-
-	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
-	return bo->map = __kgem_bo_map__gtt(kgem, bo);
+	return kgem_bo_map__async(kgem, bo);
 }
 
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_i915_gem_mmap mmap_arg;
 
-	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
-	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
+	DBG(("%s(handle=%d, size=%d, map=%p:%p)\n",
+	     __FUNCTION__, bo->handle, bytes(bo), bo->map__gtt, bo->map__cpu));
 	assert(!bo->purged);
 	assert(list_is_empty(&bo->list));
 	assert(bo->proxy == NULL);
 
-	if (IS_CPU_MAP(bo->map))
-		return MAP(bo->map);
-
-	if (bo->map)
-		kgem_bo_release_map(kgem, bo);
+	if (bo->map__cpu)
+		return MAP(bo->map__cpu);
 
 	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));
 
@@ -5092,71 +5063,7 @@ retry:
 	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
 
 	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
-	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
-	return (void *)(uintptr_t)mmap_arg.addr_ptr;
-}
-
-void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
-{
-	struct drm_i915_gem_mmap mmap_arg;
-
-	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
-	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
-        assert(bo->refcnt);
-	assert(!bo->purged);
-	assert(list_is_empty(&bo->list));
-	assert(bo->proxy == NULL);
-
-	if (IS_CPU_MAP(bo->map))
-		return MAP(bo->map);
-
-retry:
-	VG_CLEAR(mmap_arg);
-	mmap_arg.handle = bo->handle;
-	mmap_arg.offset = 0;
-	mmap_arg.size = bytes(bo);
-	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
-		int err = errno;
-
-		assert(err != EINVAL);
-
-		if (__kgem_throttle_retire(kgem, 0))
-			goto retry;
-
-		if (kgem->need_expire) {
-			kgem_cleanup_cache(kgem);
-			goto retry;
-		}
-
-		ErrorF("%s: failed to mmap handle=%d, %d bytes, into CPU domain: %d\n",
-		       __FUNCTION__, bo->handle, bytes(bo), err);
-		return NULL;
-	}
-
-	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
-	if (bo->map && bo->domain == DOMAIN_CPU) {
-		DBG(("%s: discarding GTT vma for %d\n", __FUNCTION__, bo->handle));
-		kgem_bo_release_map(kgem, bo);
-	}
-	if (bo->map == NULL) {
-		DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
-		bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
-	}
-	return (void *)(uintptr_t)mmap_arg.addr_ptr;
-}
-
-void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr)
-{
-	DBG(("%s(handle=%d, size=%d)\n",
-	     __FUNCTION__, bo->handle, bytes(bo)));
-        assert(bo->refcnt);
-
-	if (IS_CPU_MAP(bo->map)) {
-                assert(ptr == MAP(bo->map));
-                return;
-        }
-
-	munmap(ptr, bytes(bo));
+	return bo->map__cpu = (void *)(mmap_arg.addr_ptr);
 }
 
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
@@ -5237,7 +5144,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 		bo = proxy;
 	}
 
-	bo->map = MAKE_USER_MAP(ptr);
+	bo->map__cpu = MAKE_USER_MAP(ptr);
 
 	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d (proxy? %d)\n",
 	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle, bo->proxy != NULL));
@@ -5404,7 +5311,7 @@ buffer_alloc(void)
 
 	bo->mem = NULL;
 	bo->need_io = false;
-	bo->mmapped = true;
+	bo->mmapped = MMAPPED_CPU;
 
 	return bo;
 }
@@ -5479,7 +5386,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		assert(bo->base.snoop);
 		assert(bo->base.tiling == I915_TILING_NONE);
 		assert(num_pages(&bo->base) >= alloc);
-		assert(bo->mmapped == true);
+		assert(bo->mmapped == MMAPPED_CPU);
 		assert(bo->need_io == false);
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
@@ -5526,7 +5433,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		}
 
 		assert(bo->base.refcnt == 1);
-		assert(bo->mmapped == true);
+		assert(bo->mmapped == MMAPPED_CPU);
 		assert(bo->need_io == false);
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
@@ -5562,7 +5469,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		}
 
 		assert(bo->base.refcnt == 1);
-		assert(bo->mmapped == true);
+		assert(bo->mmapped == MMAPPED_CPU);
 		assert(bo->need_io == false);
 
 		if (!gem_set_caching(kgem->fd, bo->base.handle, SNOOPED))
@@ -5604,12 +5511,12 @@ free_caching:
 		DBG(("%s: created snoop handle=%d for buffer\n",
 		     __FUNCTION__, bo->base.handle));
 
-		assert(bo->mmapped == true);
+		assert(bo->mmapped == MMAPPED_CPU);
 		assert(bo->need_io == false);
 
 		bo->base.refcnt = 1;
 		bo->base.snoop = true;
-		bo->base.map = MAKE_USER_MAP(bo->mem);
+		bo->base.map__cpu = MAKE_USER_MAP(bo->mem);
 
 		return bo;
 	}
@@ -5642,7 +5549,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		/* We can reuse any write buffer which we can fit */
 		if (flags == KGEM_BUFFER_LAST &&
 		    bo->write == KGEM_BUFFER_WRITE &&
-		    bo->base.refcnt == 1 && !bo->mmapped &&
+		    bo->base.refcnt == 1 &&
+		    bo->mmapped == MMAPPED_NONE &&
 		    size <= bytes(&bo->base)) {
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
@@ -5687,7 +5595,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			assert(bo->base.io);
 			assert(bo->base.refcnt >= 1);
 			assert(bo->mmapped);
-			assert(!IS_CPU_MAP(bo->base.map) || kgem->has_llc || bo->base.snoop);
+			assert(bo->mmapped == MMAPPED_GTT || kgem->has_llc || bo->base.snoop);
 
 			if (!kgem->has_llc && (bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
 				DBG(("%s: skip write %x buffer, need %x\n",
@@ -5828,8 +5736,10 @@ skip_llc:
 
 			bo->mem = kgem_bo_map(kgem, &bo->base);
 			if (bo->mem) {
-				if (IS_CPU_MAP(bo->base.map))
+				if (bo->mem == MAP(bo->base.map__cpu))
 					flags &= ~KGEM_BUFFER_INPLACE;
+				else
+					bo->mmapped = MMAPPED_GTT;
 				goto init;
 			} else {
 				bo->base.refcnt = 0;
@@ -5948,7 +5858,8 @@ init:
 	assert(!bo->need_io || !bo->base.needs_flush);
 	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
 	assert(bo->mem);
-	assert(!bo->mmapped || bo->base.map != NULL);
+	assert(bo->mmapped != MMAPPED_GTT || MAP(bo->base.map__gtt) == bo->mem);
+	assert(bo->mmapped != MMAPPED_CPU || MAP(bo->base.map__cpu) == bo->mem);
 
 	bo->used = size;
 	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
@@ -6018,7 +5929,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		bo->size.bytes -= stride;
 	}
 
-	bo->map = MAKE_CPU_MAP(*ret);
+	bo->map__cpu = *ret;
 	bo->pitch = stride;
 	bo->unique_id = kgem_get_unique_id(kgem);
 	return bo;
@@ -6063,10 +5974,10 @@ void kgem_proxy_bo_attach(struct kgem_bo *bo,
 			  struct kgem_bo **ptr)
 {
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
-	assert(bo->map == NULL || IS_CPU_MAP(bo->map));
+	assert(bo->map__gtt == NULL);
 	assert(bo->proxy);
 	list_add(&bo->vma, &bo->proxy->vma);
-	bo->map = ptr;
+	bo->map__gtt = ptr;
 	*ptr = kgem_bo_reference(bo);
 }
 
@@ -6099,13 +6010,13 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 		     bo->base.domain,
 		     __kgem_busy(kgem, bo->base.handle)));
 
-		assert(!IS_CPU_MAP(bo->base.map) || bo->base.snoop || kgem->has_llc);
+		assert(bo->mmapped == MMAPPED_GTT || bo->base.snoop || kgem->has_llc);
 
 		VG_CLEAR(set_domain);
 		set_domain.handle = bo->base.handle;
 		set_domain.write_domain = 0;
 		set_domain.read_domains =
-			IS_CPU_MAP(bo->base.map) ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
+			bo->mmapped == MMAPPED_CPU ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
 
 		if (drmIoctl(kgem->fd,
 			     DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain))
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 1461557..83d9c74 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -55,9 +55,8 @@ struct kgem_bo {
 	struct list request;
 	struct list vma;
 
-	void *map;
-#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
-#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
+	void *map__cpu;
+	void *map__gtt;
 #define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 
 	struct kgem_bo_binding {
@@ -462,8 +461,6 @@ void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
-void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
-void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
 bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
@@ -553,14 +550,14 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 
 static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
 {
-	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
-	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
+	DBG(("%s: map=%p:%p, tiling=%d, domain=%d\n",
+	     __FUNCTION__, bo->map__gtt, bo->map__cpu, bo->tiling, bo->domain));
 	assert(bo->refcnt);
 
-	if (bo->map == NULL)
-		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;
+	if (bo->tiling == I915_TILING_NONE && (bo->domain == DOMAIN_CPU || kgem->has_llc))
+		return bo->map__cpu != NULL;
 
-	return IS_CPU_MAP(bo->map) == !bo->tiling;
+	return bo->map__gtt != NULL;
 }
 
 static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8a55e12..fb8a228 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -350,7 +350,7 @@ static void assert_pixmap_damage(PixmapPtr p)
 	}
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
-		assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map)));
+		assert(priv->cpu == false || (priv->mapped && p->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)));
 	}
 
 	assert(!DAMAGE_IS_ALL(priv->gpu_damage) || priv->cpu_damage == NULL);
@@ -1403,9 +1403,8 @@ static inline bool has_coherent_map(struct sna *sna,
 				    unsigned flags)
 {
 	assert(bo);
-	assert(bo->map);
 
-	if (!IS_CPU_MAP(bo->map))
+	if (kgem_bo_mapped(&sna->kgem, bo))
 		return true;
 
 	if (bo->tiling == I915_TILING_Y)
@@ -1414,7 +1413,7 @@ static inline bool has_coherent_map(struct sna *sna,
 	return kgem_bo_can_map__cpu(&sna->kgem, bo, flags & MOVE_WRITE);
 }
 
-static inline bool has_coherent_ptr(struct sna_pixmap *priv)
+static inline bool has_coherent_ptr(struct sna *sna, struct sna_pixmap *priv)
 {
 	if (priv == NULL)
 		return true;
@@ -1423,13 +1422,16 @@ static inline bool has_coherent_ptr(struct sna_pixmap *priv)
 		if (!priv->cpu_bo)
 			return true;
 
-		return priv->pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map);
+		return priv->pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu);
 	}
 
-	if (priv->cpu && !IS_CPU_MAP(priv->gpu_bo->map))
-		return false;
+	if (priv->pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu))
+		return priv->gpu_bo->tiling == I915_TILING_NONE && (priv->gpu_bo->domain == DOMAIN_CPU || sna->kgem.has_llc);
+
+	if (priv->pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__gtt))
+		return true;
 
-	return priv->pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map);
+	return false;
 }
 
 static inline bool pixmap_inplace(struct sna *sna,
@@ -1991,10 +1993,9 @@ skip_inplace_map:
 			}
 			priv->cpu = true;
 
-			assert(IS_CPU_MAP(priv->gpu_bo->map));
+			assert(pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu));
 			kgem_bo_sync__cpu_full(&sna->kgem, priv->gpu_bo,
 					       FORCE_FULL_SYNC || flags & MOVE_WRITE);
-			assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->gpu_bo->map & ~3));
 			assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->gpu_bo));
 			assert_pixmap_damage(pixmap);
 			DBG(("%s: operate inplace (CPU)\n", __FUNCTION__));
@@ -2024,7 +2025,7 @@ skip_inplace_map:
 		if (priv->cpu_bo) {
 			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
-			assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3));
+			assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu));
 		}
 
 		if (priv->clear_color == 0 ||
@@ -2069,7 +2070,7 @@ skip_inplace_map:
 							    box, n, COPY_LAST);
 			}
 			if (!ok) {
-				assert(has_coherent_ptr(sna_pixmap(pixmap)));
+				assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 				sna_read_boxes(sna, pixmap, priv->gpu_bo,
 					       box, n);
 			}
@@ -2111,10 +2112,9 @@ done:
 	if (priv->cpu_bo) {
 		if ((flags & MOVE_ASYNC_HINT) == 0) {
 			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
-			assert(IS_CPU_MAP(priv->cpu_bo->map));
+			assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu));
 			kgem_bo_sync__cpu_full(&sna->kgem, priv->cpu_bo,
 					       FORCE_FULL_SYNC || flags & MOVE_WRITE);
-			assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3));
 			assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->cpu_bo));
 		}
 	}
@@ -2124,7 +2124,7 @@ done:
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
-	assert(has_coherent_ptr(sna_pixmap(pixmap)));
+	assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 	return true;
 }
 
@@ -2210,7 +2210,7 @@ static inline bool region_inplace(struct sna *sna,
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		DBG(("%s: yes, already wholly damaged on the GPU\n", __FUNCTION__));
 		assert(priv->gpu_bo);
-		assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map)));
+		assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)));
 		return true;
 	}
 
@@ -2396,7 +2396,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			kgem_bo_map__cpu(&sna->kgem, priv->gpu_bo);
 		if (pixmap->devPrivate.ptr != NULL) {
 			assert(has_coherent_map(sna, priv->gpu_bo, flags));
-			assert(IS_CPU_MAP(priv->gpu_bo->map));
+			assert(pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu));
 			pixmap->devKind = priv->gpu_bo->pitch;
 			priv->cpu = true;
 			priv->mapped = true;
@@ -2420,7 +2420,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			assert_pixmap_damage(pixmap);
 			kgem_bo_sync__cpu_full(&sna->kgem, priv->gpu_bo,
 					       FORCE_FULL_SYNC || flags & MOVE_WRITE);
-			assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->gpu_bo->map & ~3));
+			assert(pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu));
 			assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->gpu_bo));
 			assert_pixmap_damage(pixmap);
 			if (dx | dy)
@@ -2468,7 +2468,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		if (priv->cpu_bo) {
 			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
-			assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3));
+			assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu));
 		}
 
 		do {
@@ -2516,7 +2516,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 							    box, n, COPY_LAST);
 			}
 			if (!ok) {
-				assert(has_coherent_ptr(sna_pixmap(pixmap)));
+				assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 				sna_read_boxes(sna, pixmap, priv->gpu_bo,
 					       box, n);
 			}
@@ -2630,7 +2630,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					}
 
 					if (!ok) {
-						assert(has_coherent_ptr(sna_pixmap(pixmap)));
+						assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 						sna_read_boxes(sna, pixmap, priv->gpu_bo,
 							       box, n);
 					}
@@ -2658,7 +2658,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 								    box, n, COPY_LAST);
 				}
 				if (!ok) {
-					assert(has_coherent_ptr(sna_pixmap(pixmap)));
+					assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 					sna_read_boxes(sna, pixmap, priv->gpu_bo,
 						       box, n);
 				}
@@ -2684,7 +2684,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 									    box, n, COPY_LAST);
 					}
 					if (!ok) {
-						assert(has_coherent_ptr(sna_pixmap(pixmap)));
+						assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 						sna_read_boxes(sna, pixmap, priv->gpu_bo,
 							       box, n);
 					}
@@ -2731,10 +2731,9 @@ out:
 	}
 	if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) {
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
-		assert(IS_CPU_MAP(priv->cpu_bo->map));
+		assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu));
 		kgem_bo_sync__cpu_full(&sna->kgem, priv->cpu_bo,
 				       FORCE_FULL_SYNC || flags & MOVE_WRITE);
-		assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3));
 		assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->cpu_bo));
 	}
 	priv->cpu =
@@ -2743,7 +2742,7 @@ out:
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
-	assert(has_coherent_ptr(sna_pixmap(pixmap)));
+	assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 	return true;
 }
 
@@ -2924,7 +2923,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 			      pixmap->drawable.height)) {
 		assert(priv->gpu_bo);
 		assert(priv->gpu_bo->proxy == NULL);
-		assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map)));
+		assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)));
 		sna_damage_destroy(&priv->cpu_damage);
 		list_del(&priv->flush_list);
 		goto done;
@@ -3210,7 +3209,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 		assert(priv->cpu_damage == NULL);
 		assert(priv->gpu_bo);
 		assert(priv->gpu_bo->proxy == NULL);
-		assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map)));
+		assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)));
 		goto use_gpu_bo;
 	}
 
@@ -3626,7 +3625,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		DBG(("%s: already all-damaged\n", __FUNCTION__));
 		assert(priv->gpu_bo);
 		assert(priv->gpu_bo->proxy == NULL);
-		assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map)));
+		assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)));
 		sna_damage_destroy(&priv->cpu_damage);
 		list_del(&priv->flush_list);
 		goto active;
@@ -4288,7 +4287,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		assert(box->x2 - x <= w);
 		assert(box->y2 - y <= h);
 
-		assert(has_coherent_ptr(sna_pixmap(pixmap)));
+		assert(has_coherent_ptr(to_sna_from_pixmap(pixmap), sna_pixmap(pixmap)));
 		memcpy_blt(bits, pixmap->devPrivate.ptr,
 			   pixmap->drawable.bitsPerPixel,
 			   stride, pixmap->devKind,
@@ -4710,7 +4709,7 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		assert(priv->gpu_bo);
-		assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map)));
+		assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)));
 		return true;
 	}
 
@@ -5520,8 +5519,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				assert(box[i].x2 + dx <= tmp->drawable.width);
 				assert(box[i].y2 + dy <= tmp->drawable.height);
 
-				assert(has_coherent_ptr(sna_pixmap(src_pixmap)));
-				assert(has_coherent_ptr(sna_pixmap(tmp)));
+				assert(has_coherent_ptr(sna, sna_pixmap(src_pixmap)));
+				assert(has_coherent_ptr(sna, sna_pixmap(tmp)));
 				memcpy_blt(src_pixmap->devPrivate.ptr,
 					   tmp->devPrivate.ptr,
 					   src_pixmap->drawable.bitsPerPixel,
@@ -5698,8 +5697,8 @@ fallback:
 				assert(box->y1 + src_dy >= 0);
 				assert(box->x2 + src_dx <= src_pixmap->drawable.width);
 				assert(box->y2 + src_dy <= src_pixmap->drawable.height);
-				assert(has_coherent_ptr(sna_pixmap(src_pixmap)));
-				assert(has_coherent_ptr(sna_pixmap(dst_pixmap)));
+				assert(has_coherent_ptr(sna, sna_pixmap(src_pixmap)));
+				assert(has_coherent_ptr(sna, sna_pixmap(dst_pixmap)));
 				memcpy_blt(src_bits, dst_bits, bpp,
 					   src_stride, dst_stride,
 					   box->x1, box->y1,
@@ -10752,7 +10751,7 @@ sna_pixmap_get_source_bo(PixmapPtr pixmap)
 		if (upload == NULL)
 			return NULL;
 
-		assert(has_coherent_ptr(sna_pixmap(pixmap)));
+		assert(has_coherent_ptr(sna, sna_pixmap(pixmap)));
 		memcpy_blt(pixmap->devPrivate.ptr, ptr,
 			   pixmap->drawable.bitsPerPixel,
 			   pixmap->devKind, upload->pitch,
@@ -11121,7 +11120,7 @@ sna_poly_fill_rect_tiled_nxm_blt(DrawablePtr drawable,
 
 	assert(tile->drawable.height && tile->drawable.height <= 8);
 	assert(tile->drawable.width && tile->drawable.width <= 8);
-	assert(has_coherent_ptr(sna_pixmap(tile)));
+	assert(has_coherent_ptr(sna, sna_pixmap(tile)));
 
 	cpp = tile->drawable.bitsPerPixel/8;
 	for (h = 0; h < tile->drawable.height; h++) {
@@ -14851,7 +14850,7 @@ sna_get_image(DrawablePtr drawable,
 		     __FUNCTION__,
 		     region.extents.x1, region.extents.y1,
 		     region.extents.x2, region.extents.y2));
-		assert(has_coherent_ptr(sna_pixmap(pixmap)));
+		assert(has_coherent_ptr(to_sna_from_pixmap(pixmap), sna_pixmap(pixmap)));
 		memcpy_blt(pixmap->devPrivate.ptr, dst, drawable->bitsPerPixel,
 			   pixmap->devKind, PixmapBytePad(w, drawable->depth),
 			   region.extents.x1, region.extents.y1, 0, 0, w, h);
@@ -15256,8 +15255,8 @@ fallback:
 				assert(box->x2 <= src->drawable.width);
 				assert(box->y2 <= src->drawable.height);
 
-				assert(has_coherent_ptr(sna_pixmap(src)));
-				assert(has_coherent_ptr(sna_pixmap(dst)));
+				assert(has_coherent_ptr(sna, sna_pixmap(src)));
+				assert(has_coherent_ptr(sna, sna_pixmap(dst)));
 				memcpy_blt(src->devPrivate.ptr,
 					   dst->devPrivate.ptr,
 					   src->drawable.bitsPerPixel,
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 2bd6b82..2a31820 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -108,7 +108,7 @@ read_boxes_inplace__cpu(struct kgem *kgem,
 	assert(kgem_bo_can_map__cpu(kgem, bo, false));
 	assert(bo->tiling != I915_TILING_Y);
 
-	src = __kgem_bo_map__cpu(kgem, bo);
+	src = kgem_bo_map__cpu(kgem, bo);
 	if (src == NULL)
 		return false;
 
@@ -131,7 +131,6 @@ read_boxes_inplace__cpu(struct kgem *kgem,
 			box++;
 		} while (--n);
 	}
-	__kgem_bo_unmap__cpu(kgem, bo, src);
 
 	return true;
 }
@@ -574,7 +573,7 @@ write_boxes_inplace__tiled(struct kgem *kgem,
 
 	assert(bo->tiling == I915_TILING_X);
 
-	dst = __kgem_bo_map__cpu(kgem, bo);
+	dst = kgem_bo_map__cpu(kgem, bo);
 	if (dst == NULL)
 		return false;
 
@@ -586,7 +585,6 @@ write_boxes_inplace__tiled(struct kgem *kgem,
 				  box->x2 - box->x1, box->y2 - box->y1);
 		box++;
 	} while (--n);
-	__kgem_bo_unmap__cpu(kgem, bo, dst);
 
 	return true;
 }
commit dd130d1b06e8828d7a2471761bac36093b9a2391
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Sun Sep 22 13:11:50 2013 +0100

    intel-virtual-overlay: Grab the server around mode manipulations
    
    Prevent other clients from seeing the intermediate states.
    
    Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
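
The pattern is the standard Xlib one: XGrabServer() makes the enclosed
sequence of RandR requests appear atomic to every other client, and the
grab must be released on every exit path, which is why the early returns
in claim_virtual() below become goto out. A minimal sketch:

    #include <X11/Xlib.h>

    /* Reconfigure outputs without other clients observing the
     * intermediate state. Keep the grabbed section short and always
     * ungrab, even on error. */
    static void reconfigure(Display *dpy)
    {
            XGrabServer(dpy);

            /* ... XRRSetCrtcConfig(), XRRAddOutputMode(), etc. ... */

            XUngrabServer(dpy);
            XFlush(dpy); /* the ungrab takes effect once it is sent */
    }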

diff --git a/tools/virtual.c b/tools/virtual.c
index 9987476..74f402d 100644
--- a/tools/virtual.c
+++ b/tools/virtual.c
@@ -452,6 +452,7 @@ static int clone_update_modes__randr(struct clone *clone)
 	clone->src.rr_crtc = 0;
 
 	/* Create matching modes for the real output on the virtual */
+	XGrabServer(clone->src.dpy);
 	for (i = 0; i < from_info->nmode; i++) {
 		XRRModeInfo *mode, *old;
 		RRMode id;
@@ -492,6 +493,7 @@ static int clone_update_modes__randr(struct clone *clone)
 
 		XRRAddOutputMode(clone->src.dpy, clone->src.rr_output, id);
 	}
+	XUngrabServer(clone->src.dpy);
 	ret = 0;
 
 err:
@@ -526,6 +528,8 @@ static int clone_update_modes__fixed(struct clone *clone)
 	if (info == NULL)
 		goto err;
 
+	XGrabServer(clone->src.dpy);
+
 	/* Clear all current UserModes on the output, including any active ones */
 	if (info->crtc) {
 		DBG(("%s(%s-%s): disabling active CRTC\n", __func__,
@@ -560,12 +564,14 @@ static int clone_update_modes__fixed(struct clone *clone)
 
 	XRRAddOutputMode(clone->src.dpy, clone->src.rr_output, id);
 
+	XUngrabServer(clone->src.dpy);
 	ret = 0;
 err:
 	if (info)
 		XRRFreeOutputInfo(info);
 	if (res)
 		XRRFreeScreenResources(res);
+
 	return ret;
 }
 
@@ -577,14 +583,15 @@ static RROutput claim_virtual(struct display *display, char *output_name, int nc
 	XRROutputInfo *output;
 	XRRModeInfo mode;
 	RRMode id;
-	RROutput rr_output;
+	RROutput rr_output = 0;
 	int i;
 
 	DBG(("%s(%d)\n", __func__, nclone));
+	XGrabServer(dpy);
 
 	res = _XRRGetScreenResourcesCurrent(dpy, display->root);
 	if (res == NULL)
-		return 0;
+		goto out;
 
 	sprintf(output_name, "VIRTUAL%d", nclone);
 
@@ -606,7 +613,7 @@ static RROutput claim_virtual(struct display *display, char *output_name, int nc
 
 	DBG(("%s(%s): rr_output=%ld\n", __func__, output_name, (long)rr_output));
 	if (rr_output == 0)
-		return 0;
+		goto out;
 
 	/* Set any mode on the VirtualHead to make the Xserver allocate another */
 	memset(&mode, 0, sizeof(mode));
@@ -622,7 +629,7 @@ static RROutput claim_virtual(struct display *display, char *output_name, int nc
 	/* Force a redetection for the ddx to spot the new outputs */
 	res = XRRGetScreenResources(dpy, display->root);
 	if (res == NULL)
-		return 0;
+		goto out;
 
 	/* Someone else may have interrupted us and installed that new mode! */
 	output = XRRGetOutputInfo(dpy, res, rr_output);
@@ -637,6 +644,9 @@ static RROutput claim_virtual(struct display *display, char *output_name, int nc
 	XRRDeleteOutputMode(dpy, rr_output, id);
 	XRRDestroyMode(dpy, id);
 
+out:
+	XUngrabServer(dpy);
+
 	return rr_output;
 }
 
@@ -837,7 +847,7 @@ static void context_update(struct context *ctx)
 			c = XRRGetCrtcInfo(dpy, res, o->crtc);
 		if (c) {
 			DBG(("%s-%s: (x=%d, y=%d, rotation=%d, mode=%ld) -> (x=%d, y=%d, rotation=%d, mode=%ld)\n",
-			     DisplayString(ctx->display->dpy), output->name,
+			     DisplayString(dpy), output->name,
 			     output->x, output->y, output->rotation, output->mode.id,
 			     c->x, c->y, output->rotation, c->mode));
 
@@ -855,7 +865,7 @@ static void context_update(struct context *ctx)
 			XRRFreeCrtcInfo(c);
 		} else {
 			DBG(("%s-%s: (x=%d, y=%d, rotation=%d, mode=%ld) -> off\n",
-			     DisplayString(ctx->display->dpy), output->name,
+			     DisplayString(dpy), output->name,
 			     output->x, output->y, output->rotation, output->mode.id));
 		}
 		output->rr_crtc = o->crtc;
@@ -884,7 +894,7 @@ static void context_update(struct context *ctx)
 	}
 	XRRFreeScreenResources(res);
 
-	DBG(("%s changed? %d\n", DisplayString(ctx->display->dpy), context_changed));
+	DBG(("%s changed? %d\n", DisplayString(dpy), context_changed));
 	if (!context_changed)
 		return;
 
@@ -933,6 +943,7 @@ static void context_update(struct context *ctx)
 		if (res == NULL)
 			continue;
 
+		XGrabServer(display->dpy);
 		for (clone = display->clone; clone; clone = clone->next) {
 			struct output *src = &clone->src;
 			struct output *dst = &clone->dst;
@@ -1018,6 +1029,7 @@ err:
 					 &dst->rr_output, 1);
 			dst->rr_crtc = rr_crtc;
 		}
+		XUngrabServer(display->dpy);
 
 		XRRFreeScreenResources(res);
 	}
@@ -2375,6 +2387,8 @@ static void context_cleanup(struct context *ctx)
 	if (res == NULL)
 		return;
 
+	XGrabServer(dpy);
+
 	for (i = 0; i < ctx->nclone; i++) {
 		struct clone *clone = &ctx->clones[i];
 		XRROutputInfo *output;
@@ -2407,6 +2421,7 @@ static void context_cleanup(struct context *ctx)
 		}
 	}
 
+	XUngrabServer(dpy);
 	XRRFreeScreenResources(res);
 
 	if (ctx->singleton)

