xf86-video-intel: 4 commits - src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_damage.c

Chris Wilson ickle at kemper.freedesktop.org
Wed Dec 14 06:34:58 PST 2011


 src/sna/kgem.c       |   71 +++++++++++++++++++++++++++++++++-------------
 src/sna/sna_accel.c  |   40 +++++++++++++-------------
 src/sna/sna_blt.c    |    3 +
 src/sna/sna_damage.c |   78 +++++++++++++++++++++++++++++++++++++++------------
 4 files changed, 135 insertions(+), 57 deletions(-)

New commits:
commit 43a22743124a83310379122d509c35840b583b2e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Dec 14 13:26:05 2011 +0000

    sna: Use the provided bo for blitting rather than assume priv->gpu_bo
    
    Reported-by: nkalkhof at web.de
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=43802
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b3ad150..a730006 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6564,9 +6564,9 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
 
 	br00 = 3 << 20;
-	br13 = priv->gpu_bo->pitch;
+	br13 = bo->pitch;
 	if (sna->kgem.gen >= 40) {
-		if (priv->gpu_bo->tiling)
+		if (bo->tiling)
 			br00 |= BLT_DST_TILED;
 		br13 >>= 2;
 	}
@@ -6598,7 +6598,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 			if (src_stride <= 128) {
 				src_stride = ALIGN(src_stride, 8) / 4;
 				if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-				    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+				    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 				    !kgem_check_reloc(&sna->kgem, 1)) {
 					_kgem_submit(&sna->kgem);
 					_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -6610,8 +6610,8 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 				b[1] = br13;
 				b[2] = (r->y + dy) << 16 | (r->x + dx);
 				b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
-				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-						      priv->gpu_bo,
+				b[4] = kgem_add_reloc(&sna->kgem,
+						      sna->kgem.nbatch + 4, bo,
 						      I915_GEM_DOMAIN_RENDER << 16 |
 						      I915_GEM_DOMAIN_RENDER |
 						      KGEM_RELOC_FENCED,
@@ -6640,7 +6640,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 				void *ptr;
 
 				if (!kgem_check_batch(&sna->kgem, 8) ||
-				    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+				    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 				    !kgem_check_reloc(&sna->kgem, 2)) {
 					_kgem_submit(&sna->kgem);
 					_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -6673,8 +6673,8 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 				b[1] = br13;
 				b[2] = (r->y + dy) << 16 | (r->x + dx);
 				b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
-				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-						      priv->gpu_bo,
+				b[4] = kgem_add_reloc(&sna->kgem,
+						      sna->kgem.nbatch + 4, bo,
 						      I915_GEM_DOMAIN_RENDER << 16 |
 						      I915_GEM_DOMAIN_RENDER |
 						      KGEM_RELOC_FENCED,
@@ -6740,7 +6740,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 				if (src_stride <= 128) {
 					src_stride = ALIGN(src_stride, 8) / 4;
 					if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-					    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+					    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 					    !kgem_check_reloc(&sna->kgem, 1)) {
 						_kgem_submit(&sna->kgem);
 						_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -6752,8 +6752,8 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 					b[1] = br13;
 					b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
 					b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
-					b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-							      priv->gpu_bo,
+					b[4] = kgem_add_reloc(&sna->kgem,
+							      sna->kgem.nbatch + 4, bo,
 							      I915_GEM_DOMAIN_RENDER << 16 |
 							      I915_GEM_DOMAIN_RENDER |
 							      KGEM_RELOC_FENCED,
@@ -6779,7 +6779,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 					} while (--bh);
 				} else {
 					if (!kgem_check_batch(&sna->kgem, 8) ||
-					    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+					    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 					    !kgem_check_reloc(&sna->kgem, 2)) {
 						_kgem_submit(&sna->kgem);
 						_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -6813,8 +6813,8 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 					b[1] = br13;
 					b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
 					b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
-					b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-							      priv->gpu_bo,
+					b[4] = kgem_add_reloc(&sna->kgem,
+							      sna->kgem.nbatch + 4, bo,
 							      I915_GEM_DOMAIN_RENDER << 16 |
 							      I915_GEM_DOMAIN_RENDER |
 							      KGEM_RELOC_FENCED,
@@ -6880,7 +6880,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 					if (src_stride <= 128) {
 						src_stride = ALIGN(src_stride, 8) / 4;
 						if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-						    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+						    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 						    !kgem_check_reloc(&sna->kgem, 1)) {
 							_kgem_submit(&sna->kgem);
 							_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -6892,8 +6892,8 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 						b[1] = br13;
 						b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
 						b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
-						b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-								      priv->gpu_bo,
+						b[4] = kgem_add_reloc(&sna->kgem,
+								      sna->kgem.nbatch + 4, bo,
 								      I915_GEM_DOMAIN_RENDER << 16 |
 								      I915_GEM_DOMAIN_RENDER |
 								      KGEM_RELOC_FENCED,
@@ -6919,7 +6919,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 						} while (--bh);
 					} else {
 						if (!kgem_check_batch(&sna->kgem, 8) ||
-						    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+						    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 						    !kgem_check_reloc(&sna->kgem, 2)) {
 							_kgem_submit(&sna->kgem);
 							_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -6953,8 +6953,8 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 						b[1] = br13;
 						b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
 						b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
-						b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-								      priv->gpu_bo,
+						b[4] = kgem_add_reloc(&sna->kgem,
+								      sna->kgem.nbatch + 4, bo,
 								      I915_GEM_DOMAIN_RENDER << 16 |
 								      I915_GEM_DOMAIN_RENDER |
 								      KGEM_RELOC_FENCED,
commit 30f5ee11f8ec3688807bbaded92561e96f9a439b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Dec 14 12:30:40 2011 +0000

    sna: Use a static request and synchronous rendering in case of malloc failure
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4887df5..65b600e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -307,14 +307,15 @@ static struct kgem_bo *__kgem_bo_alloc(int handle, int size)
 	return __kgem_bo_init(bo, handle, size);
 }
 
+static struct kgem_request _kgem_static_request;
+
 static struct kgem_request *__kgem_request_alloc(void)
 {
 	struct kgem_request *rq;
 
 	rq = malloc(sizeof(*rq));
-	assert(rq);
 	if (rq == NULL)
-		return rq;
+		rq = &_kgem_static_request;
 
 	list_init(&rq->buffers);
 
@@ -394,8 +395,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	if (gen < 40) {
 		if (!DBG_NO_RELAXED_FENCING) {
-			drm_i915_getparam_t gp;
-
 			v = 0;
 			VG_CLEAR(gp);
 			gp.param = I915_PARAM_HAS_RELAXED_FENCING;
@@ -834,9 +833,26 @@ static void kgem_commit(struct kgem *kgem)
 		}
 	}
 
-	list_add_tail(&rq->list, &kgem->requests);
-	kgem->next_request = __kgem_request_alloc();
-	kgem->need_retire = 1;
+	if (rq == &_kgem_static_request) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = rq->bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
+			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
+			kgem->wedged = 1;
+		}
+
+		kgem_retire(kgem);
+		gem_close(kgem->fd, rq->bo->handle);
+	} else {
+		list_add_tail(&rq->list, &kgem->requests);
+		kgem->need_retire = 1;
+	}
+
+	kgem->next_request = NULL;
 }
 
 static void kgem_close_list(struct kgem *kgem, struct list *head)
@@ -955,19 +971,33 @@ static int kgem_batch_write(struct kgem *kgem, uint32_t handle)
 
 void kgem_reset(struct kgem *kgem)
 {
-	struct kgem_request *rq = kgem->next_request;
-	struct kgem_bo *bo;
+	if (kgem->next_request) {
+		struct kgem_request *rq = kgem->next_request;
 
-	while (!list_is_empty(&rq->buffers)) {
-		bo = list_first_entry(&rq->buffers, struct kgem_bo, request);
+		while (!list_is_empty(&rq->buffers)) {
+			struct kgem_bo *bo =
+				list_first_entry(&rq->buffers,
+						 struct kgem_bo,
+						 request);
+
+			bo->binding.offset = 0;
+			bo->exec = NULL;
+			bo->dirty = false;
+			bo->cpu_read = false;
+			bo->cpu_write = false;
+			bo->rq = NULL;
 
-		bo->binding.offset = 0;
-		bo->exec = NULL;
-		bo->dirty = false;
-		bo->cpu_read = false;
-		bo->cpu_write = false;
+			list_del(&bo->request);
 
-		list_del(&bo->request);
+			if (!bo->refcnt) {
+				DBG(("%s: discarding handle=%d\n",
+				     __FUNCTION__, bo->handle));
+				kgem_bo_free(kgem, bo);
+			}
+		}
+
+		if (kgem->next_request != &_kgem_static_request)
+			free(kgem->next_request);
 	}
 
 	kgem->nfence = 0;
@@ -980,6 +1010,8 @@ void kgem_reset(struct kgem *kgem)
 	kgem->mode = KGEM_NONE;
 	kgem->flush = 0;
 
+	kgem->next_request = __kgem_request_alloc();
+
 	kgem_sna_reset(kgem);
 }
 
@@ -1127,7 +1159,6 @@ void _kgem_submit(struct kgem *kgem)
 
 			if (DEBUG_FLUSH_SYNC) {
 				struct drm_i915_gem_set_domain set_domain;
-				int ret;
 
 				VG_CLEAR(set_domain);
 				set_domain.handle = handle;
@@ -1141,9 +1172,9 @@ void _kgem_submit(struct kgem *kgem)
 				}
 			}
 		}
-	}
 
-	kgem_commit(kgem);
+		kgem_commit(kgem);
+	}
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
 
commit 23fb2cebbe1d4d7df7403a64635339efa08a4dfe
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Dec 14 12:34:03 2011 +0000

    sna/blt: Add a missing allocation check upon the source CPU pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 0fec71c..a4af20d 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1140,6 +1140,9 @@ prepare_blt_put(struct sna *sna,
 				       GXcopy))
 			return FALSE;
 	} else {
+		if (!sna_pixmap_move_to_cpu(src, false))
+			return FALSE;
+
 		op->blt   = blt_put_composite;
 		op->box   = blt_put_composite_box;
 		op->boxes = blt_put_composite_boxes;
commit 5b0f3ff9a83b7cc932e96400999dc5e4a49369cb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Dec 14 12:23:04 2011 +0000

    sna/damage: Guard against malloc failures
    
    In the event of failure, we choose to lose track of the damage and
    accept rendering corruption over crashing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index bc5740d..b194e93 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -159,11 +159,11 @@ static struct sna_damage *_sna_damage_create(void)
 	if (__freed_damage) {
 		damage = __freed_damage;
 		__freed_damage = NULL;
-	} else
+	} else {
 		damage = malloc(sizeof(*damage));
-	damage->n = 0;
-	damage->size = 16;
-	damage->elts = malloc(sizeof(*damage->elts) * damage->size);
+		if (damage == NULL)
+			return NULL;
+	}
 	list_init(&damage->boxes);
 	damage->last_box = NULL;
 	damage->mode = DAMAGE_ADD;
@@ -171,6 +171,14 @@ static struct sna_damage *_sna_damage_create(void)
 	damage->extents.x1 = damage->extents.y1 = MAXSHORT;
 	damage->extents.x2 = damage->extents.y2 = MINSHORT;
 
+	damage->n = 0;
+	damage->size = 16;
+	damage->elts = malloc(sizeof(*damage->elts) * damage->size);
+	if (damage->elts == NULL) {
+		__freed_damage = damage;
+		return NULL;
+	}
+
 	return damage;
 }
 
@@ -196,6 +204,9 @@ static BoxPtr _sna_damage_create_boxes(struct sna_damage *damage,
 	DBG(("    %s(%d->%d): new\n", __FUNCTION__, count, n));
 
 	box = malloc(sizeof(*box) + sizeof(BoxRec)*n);
+	if (box == NULL)
+		return NULL;
+
 	box->size = n;
 	box->remain = n - count;
 	list_add(&box->list, &damage->boxes);
@@ -249,8 +260,13 @@ _sna_damage_create_elt(struct sna_damage *damage,
 
 	elt = damage->elts + damage->n++;
 	elt->n = count;
-	elt->box = memcpy(_sna_damage_create_boxes(damage, count),
-			  boxes, count * sizeof(BoxRec));
+	elt->box = _sna_damage_create_boxes(damage, count);
+	if (elt->box == NULL){
+		damage->n--;
+		return;
+	}
+
+	 memcpy(elt->box, boxes, count * sizeof(BoxRec));
 }
 
 static void
@@ -308,6 +324,11 @@ _sna_damage_create_elt_from_boxes(struct sna_damage *damage,
 	elt = damage->elts + damage->n++;
 	elt->n = count;
 	elt->box = _sna_damage_create_boxes(damage, count);
+	if (elt->box == NULL) {
+		damage->n--;
+		return;
+	}
+
 	for (i = 0; i < count; i++) {
 		elt->box[i].x1 = boxes[i].x1 + dx;
 		elt->box[i].x2 = boxes[i].x2 + dx;
@@ -371,6 +392,11 @@ _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 	elt = damage->elts + damage->n++;
 	elt->n = count;
 	elt->box = _sna_damage_create_boxes(damage, count);
+	if (elt->box == NULL) {
+		damage->n--;
+		return;
+	}
+
 	for (i = 0; i < count; i++) {
 		elt->box[i].x1 = r[i].x + dx;
 		elt->box[i].x2 = elt->box[i].x1 + r[i].width;
@@ -434,6 +460,11 @@ _sna_damage_create_elt_from_points(struct sna_damage *damage,
 	elt = damage->elts + damage->n++;
 	elt->n = count;
 	elt->box = _sna_damage_create_boxes(damage, count);
+	if (elt->box == NULL) {
+		damage->n--;
+		return;
+	}
+
 	for (i = 0; i < count; i++) {
 		elt->box[i].x1 = p[i].x + dx;
 		elt->box[i].x2 = elt->box[i].x1 + 1;
@@ -506,9 +537,11 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 	if (!RegionNotEmpty(region))
 		return damage;
 
-	if (!damage)
+	if (!damage) {
 		damage = _sna_damage_create();
-	else switch (damage->mode) {
+		if (damage == NULL)
+			return NULL;
+	} else switch (damage->mode) {
 	case DAMAGE_ALL:
 		return damage;
 	case DAMAGE_SUBTRACT:
@@ -579,9 +612,11 @@ __sna_damage_add_boxes(struct sna_damage *damage,
 
 	assert(n);
 
-	if (!damage)
+	if (!damage) {
 		damage = _sna_damage_create();
-	else switch (damage->mode) {
+		if (damage == NULL)
+			return NULL;
+	} else switch (damage->mode) {
 	case DAMAGE_ALL:
 		return damage;
 	case DAMAGE_SUBTRACT:
@@ -699,9 +734,11 @@ __sna_damage_add_rectangles(struct sna_damage *damage,
 	extents.y1 += dy;
 	extents.y2 += dy;
 
-	if (!damage)
+	if (!damage) {
 		damage = _sna_damage_create();
-	else switch (damage->mode) {
+		if (damage == NULL)
+			return NULL;
+	} else switch (damage->mode) {
 	case DAMAGE_ALL:
 		return damage;
 	case DAMAGE_SUBTRACT:
@@ -791,9 +828,11 @@ __sna_damage_add_points(struct sna_damage *damage,
 	extents.y1 += dy;
 	extents.y2 += dy + 1;
 
-	if (!damage)
+	if (!damage) {
 		damage = _sna_damage_create();
-	else switch (damage->mode) {
+		if (damage == NULL)
+			return NULL;
+	} else switch (damage->mode) {
 	case DAMAGE_ALL:
 		return damage;
 	case DAMAGE_SUBTRACT:
@@ -860,9 +899,11 @@ inline static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
 	if (box->y2 <= box->y1 || box->x2 <= box->x1)
 		return damage;
 
-	if (!damage)
+	if (!damage) {
 		damage = _sna_damage_create();
-	else switch (damage->mode) {
+		if (damage == NULL)
+			return NULL;
+	} else switch (damage->mode) {
 	case DAMAGE_ALL:
 		return damage;
 	case DAMAGE_SUBTRACT:
@@ -936,8 +977,11 @@ struct sna_damage *_sna_damage_all(struct sna_damage *damage,
 		pixman_region_fini(&damage->region);
 		damage->n = 0;
 		damage->last_box = NULL;
-	} else
+	} else {
 		damage = _sna_damage_create();
+		if (damage == NULL)
+			return NULL;
+	}
 
 	pixman_region_init_rect(&damage->region, 0, 0, width, height);
 	damage->extents = damage->region.extents;


More information about the xorg-commit mailing list