xf86-video-intel: src/sna/kgem.c src/sna/kgem.h

Chris Wilson ickle at kemper.freedesktop.org
Fri Apr 20 05:23:58 PDT 2012


 src/sna/kgem.c |   41 ++++++++++++++++++++++++++++++-----------
 src/sna/kgem.h |    1 +
 2 files changed, 31 insertions(+), 11 deletions(-)

New commits:
commit aff3614efd5c12e658fa5723934e5bd50a83a316
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 20 13:21:40 2012 +0100

    sna: Always clear the mmapped domains when reusing partial upload buffers
    
    As we need to make sure that we do invalidate the caches appropriately
    on reuse. Mildly paranoid, but strictly required by the spec.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 72b6ad7..d97f559 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3414,6 +3414,29 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
+{
+	assert(bo->proxy == NULL);
+	kgem_bo_submit(kgem, bo);
+
+	if (bo->domain != DOMAIN_GTT) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
+		     bo->needs_flush, bo->domain, kgem_busy(kgem, bo->handle)));
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+
+		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
+			kgem_bo_retire(kgem, bo);
+			bo->domain = DOMAIN_GTT;
+		}
+	}
+}
+
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(!bo->reusable);
@@ -3424,7 +3447,6 @@ void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 
 void kgem_sync(struct kgem *kgem)
 {
-	struct drm_i915_gem_set_domain set_domain;
 	struct kgem_request *rq;
 	struct kgem_bo *bo;
 
@@ -3437,14 +3459,7 @@ void kgem_sync(struct kgem *kgem)
 	if (rq == kgem->next_request)
 		_kgem_submit(kgem);
 
-	VG_CLEAR(set_domain);
-	set_domain.handle = rq->bo->handle;
-	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
-	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-	kgem_retire(kgem);
-
+	kgem_bo_sync__gtt(kgem, rq->bo);
 	list_for_each_entry(bo, &kgem->sync_list, list)
 		kgem_bo_sync__cpu(kgem, bo);
 
@@ -3599,8 +3614,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->used = size;
 			list_move(&bo->base.list, &kgem->active_partials);
 
-			if (bo->base.vmap)
-				kgem_bo_sync__cpu(kgem, &bo->base);
+			if (bo->mmapped) {
+				if (IS_CPU_MAP(bo->base.map))
+					kgem_bo_sync__cpu(kgem, &bo->base);
+				else
+					kgem_bo_sync__gtt(kgem, &bo->base);
+			}
 
 			goto done;
 		} while (kgem_retire(kgem));
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 913e1a9..1235b83 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -365,6 +365,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);


More information about the xorg-commit mailing list