xf86-video-intel: 6 commits - src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna.h

Chris Wilson ickle at kemper.freedesktop.org
Fri Jul 20 03:43:16 PDT 2012


 src/sna/kgem.c      |  241 +++++++++++++++++++++++-----------------------------
 src/sna/kgem.h      |    7 -
 src/sna/sna.h       |    1 
 src/sna/sna_accel.c |   79 +++++++++--------
 4 files changed, 154 insertions(+), 174 deletions(-)

New commits:
commit 3b56588fbaa2c4ccdfb2f2a8f5656d2cda9dacd7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 10:19:25 2012 +0100

    sna: Update WIP userptr example usage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ab736ec..95d634a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -91,11 +91,10 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
 #define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
 
-#if defined(USE_VMAP) && !defined(I915_PARAM_HAS_VMAP)
-#define DRM_I915_GEM_VMAP       0x2d
-#define DRM_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap)
-#define I915_PARAM_HAS_VMAP              19
-struct drm_i915_gem_vmap {
+#if defined(USE_VMAP)
+#define LOCAL_I915_GEM_VMAP       0x32
+#define LOCAL_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_VMAP, struct local_i915_gem_vmap)
+struct local_i915_gem_vmap {
 	uint64_t user_ptr;
 	uint32_t user_size;
 	uint32_t flags;
@@ -699,6 +698,8 @@ static bool test_has_cacheing(struct kgem *kgem)
 static bool test_has_vmap(struct kgem *kgem)
 {
 #if defined(USE_VMAP)
+	uint32_t handle;
+
 	if (DBG_NO_VMAP)
 		return false;
 
@@ -706,7 +707,12 @@ static bool test_has_vmap(struct kgem *kgem)
 	if (kgem->gen == 40)
 		return false;
 
-	return gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
+	ptr = malloc(PAGE_SIZE);
+	handle = gem_vmap(kgem->fd, ptr, PAGE_SIZE, false);
+	gem_close(kgem->fd, handle);
+	free(ptr);
+
+	return handle != 0;
 #else
 	return false;
 #endif
@@ -3738,7 +3744,7 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 #if defined(USE_VMAP)
 static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 {
-	struct drm_i915_gem_vmap vmap;
+	struct local_i915_gem_vmap vmap;
 
 	VG_CLEAR(vmap);
 	vmap.user_ptr = (uintptr_t)ptr;
@@ -3747,7 +3753,7 @@ static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 	if (read_only)
 		vmap.flags |= I915_VMAP_READ_ONLY;
 
-	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap)) {
+	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_VMAP, &vmap)) {
 		DBG(("%s: failed to map %p + %d bytes: %d\n",
 		     __FUNCTION__, ptr, size, errno));
 		return 0;
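
For context, the new test_has_vmap() probes the WIP userptr/vmap ioctl by attempting a real mapping instead of trusting a parameter query. Below is a standalone sketch of the same probe, illustrative only: the local ioctl number and struct mirror the definitions in the hunk above (the trailing handle member, filled in by the kernel, is assumed), PAGE_SIZE is written out as 4096, and the void *ptr declaration that the new hunk relies on but does not show in its context lines is spelled out explicitly.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xf86drm.h>

#define LOCAL_I915_GEM_VMAP       0x32
#define LOCAL_IOCTL_I915_GEM_VMAP \
	DRM_IOWR(DRM_COMMAND_BASE + LOCAL_I915_GEM_VMAP, struct local_i915_gem_vmap)

struct local_i915_gem_vmap {
	uint64_t user_ptr;
	uint32_t user_size;
	uint32_t flags;
	uint32_t handle;	/* assumed: returned by the kernel */
};

/* Returns non-zero if the kernel accepts the vmap ioctl on this fd. */
static int probe_has_vmap(int fd)
{
	struct local_i915_gem_vmap vmap;
	struct drm_gem_close close_req;
	void *ptr;
	int ok;

	ptr = malloc(4096);		/* one ordinary page is enough */
	if (ptr == NULL)
		return 0;

	memset(&vmap, 0, sizeof(vmap));
	vmap.user_ptr = (uintptr_t)ptr;
	vmap.user_size = 4096;

	ok = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_VMAP, &vmap) == 0;
	if (ok) {
		/* don't leak the probe object */
		memset(&close_req, 0, sizeof(close_req));
		close_req.handle = vmap.handle;
		drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_req);
	}

	free(ptr);
	return ok;
}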
commit 473a1dfb683ed576d86b37aba36aaa0e379f4606
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 09:56:13 2012 +0100

    sna: Rename kgem_partial_bo to kgem_buffer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5361fa6..ab736ec 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -115,7 +115,7 @@ struct local_i915_gem_cacheing {
 #define LOCAL_I915_GEM_SET_CACHEING	0x2f
 #define LOCAL_IOCTL_I915_GEM_SET_CACHEING DRM_IOW(DRM_COMMAND_BASE + LOCAL_I915_GEM_SET_CACHEING, struct local_i915_gem_cacheing)
 
-struct kgem_partial_bo {
+struct kgem_buffer {
 	struct kgem_bo base;
 	void *mem;
 	uint32_t used;
@@ -797,8 +797,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
 	     kgem->half_cpu_cache_pages));
 
-	list_init(&kgem->batch_partials);
-	list_init(&kgem->active_partials);
+	list_init(&kgem->batch_buffers);
+	list_init(&kgem->active_buffers);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->sync_list);
@@ -841,11 +841,11 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
 	     kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));
 
-	kgem->partial_buffer_size = 64 * 1024;
-	while (kgem->partial_buffer_size < kgem->aperture_mappable >> 10)
-		kgem->partial_buffer_size *= 2;
-	DBG(("%s: partial buffer size=%d [%d KiB]\n", __FUNCTION__,
-	     kgem->partial_buffer_size, kgem->partial_buffer_size / 1024));
+	kgem->buffer_size = 64 * 1024;
+	while (kgem->buffer_size < kgem->aperture_mappable >> 10)
+		kgem->buffer_size *= 2;
+	DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
+	     kgem->buffer_size, kgem->buffer_size / 1024));
 
 	kgem->max_object_size = 2 * aperture.aper_size / 3;
 	kgem->max_gpu_size = kgem->max_object_size;
@@ -1277,9 +1277,9 @@ static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
 	bo->reusable = true;
 }
 
-static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
+static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
 {
-	struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
+	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
 
 	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
 	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
@@ -1479,8 +1479,7 @@ static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
 		__kgem_bo_destroy(kgem, bo);
 }
 
-static void kgem_partial_buffer_release(struct kgem *kgem,
-					struct kgem_partial_bo *bo)
+static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
 {
 	while (!list_is_empty(&bo->base.vma)) {
 		struct kgem_bo *cached;
@@ -1497,14 +1496,14 @@ static void kgem_partial_buffer_release(struct kgem *kgem,
 	}
 }
 
-static bool kgem_retire__partials(struct kgem *kgem)
+static bool kgem_retire__buffers(struct kgem *kgem)
 {
 	bool retired = false;
 
-	while (!list_is_empty(&kgem->active_partials)) {
-		struct kgem_partial_bo *bo =
-			list_last_entry(&kgem->active_partials,
-					struct kgem_partial_bo,
+	while (!list_is_empty(&kgem->active_buffers)) {
+		struct kgem_buffer *bo =
+			list_last_entry(&kgem->active_buffers,
+					struct kgem_buffer,
 					base.list);
 
 		if (bo->base.rq)
@@ -1513,7 +1512,7 @@ static bool kgem_retire__partials(struct kgem *kgem)
 		DBG(("%s: releasing upload cache for handle=%d? %d\n",
 		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
 		list_del(&bo->base.list);
-		kgem_partial_buffer_release(kgem, bo);
+		kgem_buffer_release(kgem, bo);
 		kgem_bo_unref(kgem, &bo->base);
 		retired = true;
 	}
@@ -1680,7 +1679,7 @@ bool kgem_retire(struct kgem *kgem)
 
 	retired |= kgem_retire__flushing(kgem);
 	retired |= kgem_retire__requests(kgem);
-	retired |= kgem_retire__partials(kgem);
+	retired |= kgem_retire__buffers(kgem);
 
 	kgem->need_retire =
 		!list_is_empty(&kgem->requests) ||
@@ -1766,12 +1765,12 @@ static void kgem_close_inactive(struct kgem *kgem)
 		kgem_close_list(kgem, &kgem->inactive[i]);
 }
 
-static void kgem_finish_partials(struct kgem *kgem)
+static void kgem_finish_buffers(struct kgem *kgem)
 {
-	struct kgem_partial_bo *bo, *next;
+	struct kgem_buffer *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->batch_partials, base.list) {
-		DBG(("%s: partial handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
+	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
+		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
 		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
 		     bo->write, bo->mmapped));
 
@@ -1795,11 +1794,11 @@ static void kgem_finish_partials(struct kgem *kgem)
 			if (!DBG_NO_UPLOAD_ACTIVE &&
 			    bo->used + PAGE_SIZE <= bytes(&bo->base) &&
 			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
-				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
+				DBG(("%s: retaining upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
 				assert(!bo->base.vmap);
 				list_move(&bo->base.list,
-					  &kgem->active_partials);
+					  &kgem->active_buffers);
 				continue;
 			}
 			goto decouple;
@@ -1808,7 +1807,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 		if (!bo->used) {
 			/* Unless we replace the handle in the execbuffer,
 			 * then this bo will become active. So decouple it
-			 * from the partial list and track it in the normal
+			 * from the buffer list and track it in the normal
 			 * manner.
 			 */
 			goto decouple;
@@ -2044,7 +2043,7 @@ void _kgem_submit(struct kgem *kgem)
 	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
 	assert(kgem->nfence <= kgem->fence_max);
 
-	kgem_finish_partials(kgem);
+	kgem_finish_buffers(kgem);
 
 #if HAS_DEBUG_FULL && SHOW_BATCH
 	__kgem_batch_debug(kgem, batch_end);
@@ -3280,7 +3279,7 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		_list_del(&bo->vma);
 		_list_del(&bo->request);
 		if (bo->io && bo->exec == NULL)
-			_kgem_bo_delete_partial(kgem, bo);
+			_kgem_bo_delete_buffer(kgem, bo);
 		kgem_bo_unref(kgem, bo->proxy);
 		kgem_bo_binding_free(kgem, bo);
 		free(bo);
@@ -3907,9 +3906,9 @@ struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
 	return bo;
 }
 
-static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
+static struct kgem_buffer *buffer_alloc(int num_pages)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 
 	bo = malloc(sizeof(*bo) + 2*UPLOAD_ALIGNMENT + num_pages * PAGE_SIZE);
 	if (bo) {
@@ -3929,10 +3928,10 @@ use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
 	return true;
 }
 
-static struct kgem_partial_bo *
+static struct kgem_buffer *
 search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 	struct kgem_bo *old;
 
 	old = search_vmap_cache(kgem, alloc, 0);
@@ -3973,7 +3972,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 }
 
 static void
-init_buffer_from_bo(struct kgem_partial_bo *bo, struct kgem_bo *old)
+init_buffer_from_bo(struct kgem_buffer *bo, struct kgem_bo *old)
 {
 	DBG(("%s: reusing handle=%d for buffer\n",
 	     __FUNCTION__, old->handle));
@@ -3991,10 +3990,10 @@ init_buffer_from_bo(struct kgem_partial_bo *bo, struct kgem_bo *old)
 	assert(bo->base.tiling == I915_TILING_NONE);
 }
 
-static struct kgem_partial_bo *
+static struct kgem_buffer *
 create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 
 	if (kgem->has_cacheing) {
 		struct kgem_bo *old;
@@ -4044,7 +4043,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	}
 
 	if (kgem->has_vmap) {
-		bo = partial_bo_alloc(alloc);
+		bo = buffer_alloc(alloc);
 		if (bo) {
 			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
 						   alloc * PAGE_SIZE, false);
@@ -4073,7 +4072,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				   uint32_t size, uint32_t flags,
 				   void **ret)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 	unsigned offset, alloc;
 	struct kgem_bo *old;
 
@@ -4090,7 +4089,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		flags &= ~KGEM_BUFFER_INPLACE;
 
 #if !DBG_NO_UPLOAD_CACHE
-	list_for_each_entry(bo, &kgem->batch_partials, base.list) {
+	list_for_each_entry(bo, &kgem->batch_buffers, base.list) {
 		assert(bo->base.io);
 		assert(bo->base.refcnt >= 1);
 
@@ -4103,7 +4102,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
 			gem_write(kgem->fd, bo->base.handle,
 				  0, bo->used, bo->mem);
-			kgem_partial_buffer_release(kgem, bo);
+			kgem_buffer_release(kgem, bo);
 			bo->need_io = 0;
 			bo->write = 0;
 			offset = 0;
@@ -4129,7 +4128,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 
 		if (bo->used + size <= bytes(&bo->base)) {
-			DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
+			DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n",
 			     __FUNCTION__, bo->used, size, bytes(&bo->base)));
 			offset = bo->used;
 			bo->used += size;
@@ -4138,7 +4137,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	}
 
 	if (flags & KGEM_BUFFER_WRITE) {
-		list_for_each_entry(bo, &kgem->active_partials, base.list) {
+		list_for_each_entry(bo, &kgem->active_buffers, base.list) {
 			assert(bo->base.io);
 			assert(bo->base.refcnt >= 1);
 			assert(bo->mmapped);
@@ -4151,11 +4150,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 
 			if (bo->used + size <= bytes(&bo->base)) {
-				DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
+				DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n",
 				     __FUNCTION__, bo->used, size, bytes(&bo->base)));
 				offset = bo->used;
 				bo->used += size;
-				list_move(&bo->base.list, &kgem->batch_partials);
+				list_move(&bo->base.list, &kgem->batch_buffers);
 				goto done;
 			}
 		}
@@ -4164,9 +4163,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
-	alloc = ALIGN(2*size, kgem->partial_buffer_size);
+	alloc = ALIGN(2*size, kgem->buffer_size);
 	if (alloc > MAX_CACHE_SIZE)
-		alloc = ALIGN(size, kgem->partial_buffer_size);
+		alloc = ALIGN(size, kgem->buffer_size);
 	if (alloc > MAX_CACHE_SIZE)
 		alloc = PAGE_ALIGN(size);
 	alloc /= PAGE_SIZE;
@@ -4319,7 +4318,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		DBG(("%s: reusing ordinary handle %d for io\n",
 		     __FUNCTION__, old->handle));
 		alloc = num_pages(old);
-		bo = partial_bo_alloc(alloc);
+		bo = buffer_alloc(alloc);
 		if (bo == NULL)
 			return NULL;
 
@@ -4370,7 +4369,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 		DBG(("%s: failing back to new pwrite buffer\n", __FUNCTION__));
 		old = &bo->base;
-		bo = partial_bo_alloc(alloc);
+		bo = buffer_alloc(alloc);
 		if (bo == NULL) {
 			free(old);
 			return NULL;
@@ -4401,7 +4400,7 @@ init:
 	offset = 0;
 
 	assert(list_is_empty(&bo->base.list));
-	list_add(&bo->base.list, &kgem->batch_partials);
+	list_add(&bo->base.list, &kgem->batch_buffers);
 
 	DBG(("%s(pages=%d) new handle=%d\n",
 	     __FUNCTION__, alloc, bo->base.handle));
@@ -4415,7 +4414,7 @@ done:
 
 bool kgem_buffer_is_inplace(struct kgem_bo *_bo)
 {
-	struct kgem_partial_bo *bo = (struct kgem_partial_bo *)_bo->proxy;
+	struct kgem_buffer *bo = (struct kgem_buffer *)_bo->proxy;
 	return bo->write & KGEM_BUFFER_WRITE_INPLACE;
 }
 
@@ -4444,7 +4443,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 	assert(*ret != NULL);
 
 	if (height & 1) {
-		struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
+		struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
 		int min;
 
 		assert(io->used);
@@ -4456,7 +4455,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		min = bo->delta + height * stride;
 		min = ALIGN(min, UPLOAD_ALIGNMENT);
 		if (io->used != min) {
-			DBG(("%s: trimming partial buffer from %d to %d\n",
+			DBG(("%s: trimming buffer from %d to %d\n",
 			     __FUNCTION__, io->used, min));
 			io->used = min;
 		}
@@ -4513,7 +4512,7 @@ void kgem_proxy_bo_attach(struct kgem_bo *bo,
 
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 	uint32_t offset = _bo->delta, length = _bo->size.bytes;
 
 	assert(_bo->io);
@@ -4525,7 +4524,7 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 	assert(_bo->proxy == NULL);
 	assert(_bo->exec == NULL);
 
-	bo = (struct kgem_partial_bo *)_bo;
+	bo = (struct kgem_buffer *)_bo;
 
 	DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
 	     offset, length, bo->base.vmap));
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index bd1219a..533a919 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -125,7 +125,7 @@ struct kgem {
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
 	struct list vmap;
-	struct list batch_partials, active_partials;
+	struct list batch_buffers, active_buffers;
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
@@ -167,7 +167,7 @@ struct kgem {
 	uint32_t max_upload_tile_size, max_copy_tile_size;
 	uint32_t max_gpu_size, max_cpu_size;
 	uint32_t large_object_size, max_object_size;
-	uint32_t partial_buffer_size;
+	uint32_t buffer_size;
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
 	void (*retire)(struct kgem *kgem);
commit 8e6e8a2fa8adda9ae9be8a88fbb14851e9d2df2e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 09:51:46 2012 +0100

    sna: Allow the snoopable upload buffer to take pages from the CPU vma cache
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index eeb6774..5361fa6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3972,26 +3972,54 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	return NULL;
 }
 
+static void
+init_buffer_from_bo(struct kgem_partial_bo *bo, struct kgem_bo *old)
+{
+	DBG(("%s: reusing handle=%d for buffer\n",
+	     __FUNCTION__, old->handle));
+
+	memcpy(&bo->base, old, sizeof(*old));
+	if (old->rq)
+		list_replace(&old->request, &bo->base.request);
+	else
+		list_init(&bo->base.request);
+	list_replace(&old->vma, &bo->base.vma);
+	list_init(&bo->base.list);
+	free(old);
+	bo->base.refcnt = 1;
+
+	assert(bo->base.tiling == I915_TILING_NONE);
+}
+
 static struct kgem_partial_bo *
 create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
 	struct kgem_partial_bo *bo;
 
 	if (kgem->has_cacheing) {
+		struct kgem_bo *old;
 		uint32_t handle;
 
-		handle = gem_create(kgem->fd, alloc);
-		if (handle == 0)
+		bo = malloc(sizeof(*bo));
+		if (bo == NULL)
 			return NULL;
 
-		if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
-			gem_close(kgem->fd, handle);
+		old = search_linear_cache(kgem, alloc,
+					 CREATE_INACTIVE | CREATE_CPU_MAP | CREATE_EXACT);
+		if (old) {
+			init_buffer_from_bo(bo, old);
+			return bo;
+		}
+
+		handle = gem_create(kgem->fd, alloc);
+		if (handle == 0) {
+			free(bo);
 			return NULL;
 		}
 
-		bo = malloc(sizeof(*bo));
-		if (bo == NULL) {
+		if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
 			gem_close(kgem->fd, handle);
+			free(bo);
 			return NULL;
 		}
 
@@ -4155,18 +4183,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old == NULL)
 			old = search_linear_cache(kgem, NUM_PAGES(size), CREATE_INACTIVE | CREATE_CPU_MAP);
 		if (old) {
-			DBG(("%s: reusing handle=%d for buffer\n",
-			     __FUNCTION__, old->handle));
-
-			memcpy(&bo->base, old, sizeof(*old));
-			if (old->rq)
-				list_replace(&old->request, &bo->base.request);
-			else
-				list_init(&bo->base.request);
-			list_replace(&old->vma, &bo->base.vma);
-			list_init(&bo->base.list);
-			free(old);
-			bo->base.refcnt = 1;
+			init_buffer_from_bo(bo, old);
 		} else {
 			uint32_t handle = gem_create(kgem->fd, alloc);
 			if (handle == 0 ||
@@ -4251,16 +4268,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			if (bo == NULL)
 				return NULL;
 
-			memcpy(&bo->base, old, sizeof(*old));
-			if (old->rq)
-				list_replace(&old->request, &bo->base.request);
-			else
-				list_init(&bo->base.request);
-			list_replace(&old->vma, &bo->base.vma);
-			list_init(&bo->base.list);
-			free(old);
-
-			assert(bo->base.tiling == I915_TILING_NONE);
+			init_buffer_from_bo(bo, old);
 			assert(num_pages(&bo->base) >= NUM_PAGES(size));
 
 			bo->mem = kgem_bo_map(kgem, &bo->base);
@@ -4268,11 +4276,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				bo->need_io = false;
 				bo->base.io = true;
 				bo->mmapped = true;
-				bo->base.refcnt = 1;
 
 				alloc = num_pages(&bo->base);
 				goto init;
 			} else {
+				bo->base.refcnt = 0;
 				kgem_bo_free(kgem, &bo->base);
 				bo = NULL;
 			}
@@ -4315,17 +4323,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (bo == NULL)
 			return NULL;
 
-		memcpy(&bo->base, old, sizeof(*old));
-		if (old->rq)
-			list_replace(&old->request,
-				     &bo->base.request);
-		else
-			list_init(&bo->base.request);
-		list_replace(&old->vma, &bo->base.vma);
-		list_init(&bo->base.list);
-		free(old);
-		bo->base.refcnt = 1;
-
+		init_buffer_from_bo(bo, old);
 		bo->need_io = flags & KGEM_BUFFER_WRITE;
 		bo->base.io = true;
 	} else {
@@ -4345,16 +4343,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			DBG(("%s: reusing cpu map handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
 			alloc = num_pages(old);
-
-			memcpy(&bo->base, old, sizeof(*old));
-			if (old->rq)
-				list_replace(&old->request, &bo->base.request);
-			else
-				list_init(&bo->base.request);
-			list_replace(&old->vma, &bo->base.vma);
-			list_init(&bo->base.list);
-			free(old);
-			bo->base.refcnt = 1;
+			init_buffer_from_bo(bo, old);
 		} else {
 			uint32_t handle = gem_create(kgem->fd, alloc);
 			if (handle == 0 ||
commit 979035bb9ce04db5fe30efa4f6daab0a40f6af57
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 20:06:28 2012 +0100

    sna: Remove topmost unused 'flush' attribute
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 3ced2b4..f274de9 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -192,7 +192,6 @@ struct sna {
 #define SNA_FORCE_SHADOW	0x20
 
 	unsigned watch_flush;
-	unsigned flush;
 
 	struct timeval timer_tv;
 	uint32_t timer_expire[NUM_TIMERS];
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6ec982e..4ef52d7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12428,7 +12428,6 @@ sna_accel_flush_callback(CallbackListPtr *list,
 			 pointer user_data, pointer call_data)
 {
 	struct sna *sna = user_data;
-	struct list preserve;
 
 	/* XXX we should be able to reduce the frequency of flushes further
 	 * by checking for outgoing damage events or sync replies. Tricky,
@@ -12438,26 +12437,32 @@ sna_accel_flush_callback(CallbackListPtr *list,
 		return;
 
 	/* flush any pending damage from shadow copies to tfp clients */
-	list_init(&preserve);
-	while (!list_is_empty(&sna->dirty_pixmaps)) {
-		struct sna_pixmap *priv = list_first_entry(&sna->dirty_pixmaps,
-							   struct sna_pixmap,
-							   list);
-		if (!sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ))
-			list_move(&priv->list, &preserve);
-	}
-	if (!list_is_empty(&preserve)) {
-		sna->dirty_pixmaps.next = preserve.next;
-		preserve.next->prev = &sna->dirty_pixmaps;
-		preserve.prev->next = &sna->dirty_pixmaps;
-		sna->dirty_pixmaps.prev = preserve.prev;
+	if (!list_is_empty(&sna->dirty_pixmaps)) {
+		struct list preserve;
+
+		list_init(&preserve);
+
+		do {
+			struct sna_pixmap *priv;
+
+			priv = list_first_entry(&sna->dirty_pixmaps,
+						struct sna_pixmap, list);
+			if (!sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ))
+				list_move(&priv->list, &preserve);
+
+		} while (!list_is_empty(&sna->dirty_pixmaps));
+
+		if (!list_is_empty(&preserve)) {
+			sna->dirty_pixmaps.next = preserve.next;
+			preserve.next->prev = &sna->dirty_pixmaps;
+			preserve.prev->next = &sna->dirty_pixmaps;
+			sna->dirty_pixmaps.prev = preserve.prev;
+		}
 	}
 
 	kgem_submit(&sna->kgem);
-
 	kgem_sync(&sna->kgem);
 
-	sna->flush = false;
 	sna->kgem.flush = false;
 }
 
@@ -13121,7 +13126,7 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 	if (sna_accel_do_debug_memory(sna))
 		sna_accel_debug_memory(sna);
 
-	if (sna->flush == 0 && sna->watch_flush == 1) {
+	if (sna->watch_flush == 1) {
 		DBG(("%s: removing watchers\n", __FUNCTION__));
 		DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
 		sna->watch_flush = 0;
commit b83011909aaf185f05fc2df743882c2410eff46d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 19:51:46 2012 +0100

    sna: Replace 'sync' flag with equivalent 'flush'
    
    The only difference is in semantics. Until now, 'sync' was used only on
    CPU buffers for shared memory segments with 2D clients, and 'flush' on
    GPU buffers shared with DRI clients.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f80690d..eeb6774 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1104,9 +1104,6 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 
 	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
-
-	if (bo->sync)
-		kgem->sync = kgem->next_request;
 }
 
 static uint32_t kgem_end_batch(struct kgem *kgem)
@@ -1650,9 +1647,6 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			}
 		}
 
-		if (kgem->sync == rq)
-			kgem->sync = NULL;
-
 		_list_del(&rq->list);
 		free(rq);
 	}
@@ -3853,31 +3847,23 @@ void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
 
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
+	assert(bo->vmap);
 	assert(!bo->reusable);
 	assert(list_is_empty(&bo->list));
 	list_add(&bo->list, &kgem->sync_list);
-	bo->sync = true;
+	bo->flush = true;
 }
 
 void kgem_sync(struct kgem *kgem)
 {
-	struct kgem_request *rq;
 	struct kgem_bo *bo;
 
 	DBG(("%s\n", __FUNCTION__));
 
-	rq = kgem->sync;
-	if (rq == NULL)
-		return;
-
-	if (rq == kgem->next_request)
-		_kgem_submit(kgem);
-
-	kgem_bo_sync__gtt(kgem, rq->bo);
-	list_for_each_entry(bo, &kgem->sync_list, list)
+	list_for_each_entry(bo, &kgem->sync_list, list) {
+		kgem_bo_submit(kgem, bo);
 		kgem_bo_sync__cpu(kgem, bo);
-
-	assert(kgem->sync == NULL);
+	}
 }
 
 void kgem_clear_dirty(struct kgem *kgem)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0117dcd..bd1219a 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -86,7 +86,6 @@ struct kgem_bo {
 	uint32_t io : 1;
 	uint32_t flush : 1;
 	uint32_t scanout : 1;
-	uint32_t sync : 1;
 	uint32_t purged : 1;
 };
 #define DOMAIN_NONE 0
@@ -130,7 +129,6 @@ struct kgem {
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
-	struct kgem_request *sync;
 
 	struct {
 		struct list inactive[NUM_CACHE_BUCKETS];
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f52c848..6ec982e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -468,7 +468,7 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 		sna->debug_memory.cpu_bo_allocs--;
 		sna->debug_memory.cpu_bo_bytes -= kgem_bo_size(priv->cpu_bo);
 #endif
-		if (priv->cpu_bo->sync) {
+		if (priv->cpu_bo->flush) {
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 			sna_accel_watch_flush(sna, -1);
 		}
@@ -1190,7 +1190,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			priv->cpu = false;
 			list_del(&priv->list);
 			if (priv->cpu_bo) {
-				assert(!priv->cpu_bo->sync);
+				assert(!priv->cpu_bo->flush);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 
@@ -1200,7 +1200,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 
 skip_inplace_map:
 		sna_damage_destroy(&priv->gpu_damage);
-		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo)) {
+		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo)) {
 			if (priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
@@ -1262,7 +1262,7 @@ skip_inplace_map:
 	}
 
 	if (priv->clear) {
-		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo))
+		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo))
 			sna_pixmap_free_cpu(sna, priv);
 		sna_damage_destroy(&priv->gpu_damage);
 	}
@@ -1333,8 +1333,10 @@ skip_inplace_map:
 		sna_pixmap_free_gpu(sna, priv);
 		priv->undamaged = false;
 
-		if (priv->flush)
+		if (priv->flush) {
 			list_move(&priv->list, &sna->dirty_pixmaps);
+			sna->kgem.flush |= 1;
+		}
 	}
 
 done:
@@ -1609,7 +1611,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 		}
 
-		if (priv->cpu_bo && !priv->cpu_bo->sync) {
+		if (priv->cpu_bo && !priv->cpu_bo->flush) {
 			if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 			if (sync_will_stall(priv->cpu_bo)) {
@@ -1971,8 +1973,10 @@ done:
 			}
 			priv->undamaged = false;
 		}
-		if (priv->flush)
+		if (priv->flush) {
 			list_move(&priv->list, &sna->dirty_pixmaps);
+			sna->kgem.flush |= 1;
+		}
 	}
 
 	if (dx | dy)
@@ -2762,7 +2766,7 @@ done:
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		priv->undamaged = false;
 		if (priv->ptr) {
-			assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
+			assert(priv->cpu_bo == NULL || !priv->cpu_bo->flush);
 			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
@@ -3177,7 +3181,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 			kgem_retire(&sna->kgem);
 		if (sync_will_stall(priv->cpu_bo)) {
-			if (priv->cpu_bo->sync) {
+			if (priv->cpu_bo->flush) {
 				if (sna_put_image_upload_blt(drawable, gc, region,
 							     x, y, w, h, bits, stride)) {
 					if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
@@ -3221,7 +3225,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 					list_del(&priv->list);
 					priv->undamaged = false;
 				}
-				assert(!priv->cpu_bo->sync);
+				assert(!priv->cpu_bo->flush);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -3291,8 +3295,10 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 				priv->undamaged = false;
 			}
 		}
-		if (priv->flush)
+		if (priv->flush) {
 			list_move(&priv->list, &sna->dirty_pixmaps);
+			sna->kgem.flush |= 1;
+		}
 	}
 	priv->cpu = true;
 
@@ -12428,13 +12434,8 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	 * by checking for outgoing damage events or sync replies. Tricky,
 	 * and doesn't appear to mitigate the performance loss.
 	 */
-	if (!(sna->kgem.flush ||
-	      sna->kgem.sync ||
-	      !list_is_empty(&sna->dirty_pixmaps)))
-	    return;
-
-	DBG(("%s: need_sync=%d, need_flush=%d, dirty? %d\n", __FUNCTION__,
-	     sna->kgem.sync!=NULL, sna->kgem.flush, !list_is_empty(&sna->dirty_pixmaps)));
+	if (!sna->kgem.flush)
+		return;
 
 	/* flush any pending damage from shadow copies to tfp clients */
 	list_init(&preserve);
@@ -12766,7 +12767,7 @@ static void sna_accel_inactive(struct sna *sna)
 			sna_damage_destroy(&priv->cpu_damage);
 			list_del(&priv->list);
 
-			assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
+			assert(priv->cpu_bo == NULL || !priv->cpu_bo->flush);
 			sna_pixmap_free_cpu(sna, priv);
 			priv->undamaged = false;
 			priv->cpu = false;
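
To illustrate the intended flow after this change, here is a sketch using the driver's own helpers as they appear in the diff; the shm_* wrapper names are hypothetical, introduced only to show where each call would sit. A CPU bo shared with a 2D client is tagged once, and the flush path then serialises every tagged bo with the GPU.

#include "kgem.h"	/* struct kgem, struct kgem_bo, kgem_* helpers */

static void shm_segment_attach(struct kgem *kgem, struct kgem_bo *cpu_bo)
{
	/* formerly set bo->sync; now sets bo->flush and links the bo
	 * onto kgem->sync_list */
	kgem_bo_set_sync(kgem, cpu_bo);
}

static void shm_segment_flush(struct kgem *kgem)
{
	/* push any batch still referencing the shared buffers... */
	kgem_submit(kgem);

	/* ...then kgem_sync() walks sync_list and waits on each bo in
	 * turn (kgem_bo_submit + kgem_bo_sync__cpu), instead of waiting
	 * on the single kgem->sync request that has been removed */
	kgem_sync(kgem);
}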
commit 88bee3caeaacbbb1b4d789ea3db9a3802a62b59d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 19:40:34 2012 +0100

    sna: Remove unused scanout-is-dirty? flag
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e5c97f6..f80690d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1104,7 +1104,6 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 
 	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
-	kgem->scanout |= bo->scanout;
 
 	if (bo->sync)
 		kgem->sync = kgem->next_request;
@@ -1990,8 +1989,6 @@ void kgem_reset(struct kgem *kgem)
 	kgem->nbatch = 0;
 	kgem->surface = kgem->batch_size;
 	kgem->mode = KGEM_NONE;
-	kgem->flush = 0;
-	kgem->scanout = 0;
 
 	kgem->next_request = __kgem_request_alloc();
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index f7ee5b4..0117dcd 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -150,7 +150,6 @@ struct kgem {
 	uint32_t need_purge:1;
 	uint32_t need_retire:1;
 	uint32_t need_throttle:1;
-	uint32_t scanout:1;
 	uint32_t busy:1;
 
 	uint32_t has_vmap :1;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d3dad62..f52c848 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12457,6 +12457,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	kgem_sync(&sna->kgem);
 
 	sna->flush = false;
+	sna->kgem.flush = false;
 }
 
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)

