xf86-video-intel: 5 commits - src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_io.c
Chris Wilson
ickle at kemper.freedesktop.org
Mon Jun 18 15:55:32 PDT 2012
src/sna/kgem.c | 17 ++++++---
src/sna/kgem.h | 9 ++++
src/sna/sna_accel.c | 94 +++++++++++++++++++++++++++++++++++++++-------------
src/sna/sna_io.c | 9 ++--
4 files changed, 97 insertions(+), 32 deletions(-)
New commits:
commit 17f3a83fdc8c0ef5c12fb4be34d86021c0c865e5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Jun 18 23:48:16 2012 +0100
sna: Review sna_copy_boxes
A couple of ordering issues and more assertions.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 73808f7..da50942 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3792,11 +3792,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
if (dst_priv == NULL)
goto fallback;
- if (src_priv == NULL &&
!copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) {
- DBG(("%s: fallback - unattached to source and not use dst gpu bo\n",
- __FUNCTION__));
- goto fallback;
+ if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
+ DBG(("%s: discarding cached upload\n", __FUNCTION__));
+ kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
+ dst_priv->gpu_bo = NULL;
}
if (replaces) {
@@ -3807,10 +3806,11 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
dst_priv->clear = false;
}
- if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
- DBG(("%s: discarding cached upload\n", __FUNCTION__));
- kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
- dst_priv->gpu_bo = NULL;
+ if (src_priv == NULL &&
!copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) {
+ DBG(("%s: fallback - unattached to source and not use dst gpu bo\n",
+ __FUNCTION__));
+ goto fallback;
}
/* Try to maintain the data on the GPU */
@@ -4023,13 +4023,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
}
assert_pixmap_damage(dst_pixmap);
} else {
- if (src_priv) {
- RegionTranslate(&region, src_dx, src_dy);
- if (!sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
- &region, MOVE_READ))
- goto out;
- RegionTranslate(&region, -src_dx, -src_dy);
- }
+ assert(!src_priv->gpu_bo);
if (!dst_priv->pinned && replaces) {
stride = src_pixmap->devKind;
@@ -4054,6 +4048,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
} else {
DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
__FUNCTION__));
+ assert(!DAMAGE_IS_ALL(dst_priv->cpu_damage));
if (!sna_write_boxes(sna, dst_pixmap,
dst_priv->gpu_bo, dst_dx, dst_dy,
src_pixmap->devPrivate.ptr,
@@ -4084,7 +4079,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
}
goto out;
- } else if (dst_priv->cpu_bo &&
+ } else if (use_cpu_bo_for_write(sna, dst_priv) &&
src_priv && DAMAGE_IS_ALL(src_priv->gpu_damage) && !src_priv->clear) {
assert(src_priv->gpu_bo != NULL); /* guaranteed by gpu_damage */
if (!sna->render.copy_boxes(sna, alu,
@@ -4188,7 +4183,7 @@ fallback:
dst_stride = dst_pixmap->devKind;
src_stride = src_pixmap->devKind;
- if (alu == GXcopy && !reverse && !upsidedown && bpp >= 8) {
+ if (alu == GXcopy && bpp >= 8) {
dst_bits = (FbBits *)
((char *)dst_pixmap->devPrivate.ptr +
dst_dy * dst_stride + dst_dx * bpp / 8);
commit a9045699b9cd66d0b0d96bfc964458c96845f97f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Jun 18 23:47:21 2012 +0100
sna: Reset region after transferring to cpu
If we adjust the region for the pixmap offset, be sure that we reset it
before returning it back to the caller.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2623e70..73808f7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1467,8 +1467,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
if (!kgem_bo_is_busy(priv->gpu_bo)) {
pixmap->devPrivate.ptr =
kgem_bo_map(&sna->kgem, priv->gpu_bo);
- if (pixmap->devPrivate.ptr == NULL)
+ if (pixmap->devPrivate.ptr == NULL) {
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
return false;
+ }
priv->mapped = true;
pixmap->devKind = priv->gpu_bo->pitch;
@@ -1486,6 +1489,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
priv->clear = false;
assert_pixmap_damage(pixmap);
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
return true;
}
}
@@ -1495,8 +1500,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
kgem_retire(&sna->kgem);
if (sync_will_stall(priv->cpu_bo)) {
sna_damage_subtract(&priv->cpu_damage, region);
- if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE))
+ if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE)) {
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
return false;
+ }
sna_pixmap_free_cpu(sna, priv);
}
@@ -1508,8 +1516,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
sna_pixmap_create_mappable_gpu(pixmap)) {
pixmap->devPrivate.ptr =
kgem_bo_map(&sna->kgem, priv->gpu_bo);
- if (pixmap->devPrivate.ptr == NULL)
+ if (pixmap->devPrivate.ptr == NULL) {
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
return false;
+ }
priv->mapped = true;
pixmap->devKind = priv->gpu_bo->pitch;
@@ -1526,6 +1537,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
assert_pixmap_damage(pixmap);
priv->clear = false;
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
return true;
}
}
@@ -1558,6 +1571,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
}
assert_pixmap_damage(pixmap);
priv->clear = false;
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
return true;
}
@@ -1577,8 +1592,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
}
if (pixmap->devPrivate.ptr == NULL &&
- !sna_pixmap_alloc_cpu(sna, pixmap, priv, priv->gpu_damage != NULL))
+ !sna_pixmap_alloc_cpu(sna, pixmap, priv, priv->gpu_damage != NULL)) {
+ if (dx | dy)
+ RegionTranslate(region, -dx, -dy);
return false;
+ }
if (priv->gpu_bo == NULL) {
assert(priv->gpu_damage == NULL);
commit 9f51311a7d95bf4bc23926b8e6bf8ee52afd180c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Jun 18 23:45:50 2012 +0100
sna: Check if the bo is truly busy before committing to an indirect upload
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index b747dc7..40730c7 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -486,6 +486,15 @@ static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
return bo->rq;
}
+static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
+{
+ DBG_HDR(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
+ bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
+ if (bo->rq && !bo->exec)
+ kgem_retire(kgem);
+ return kgem_bo_is_busy(bo);
+}
+
static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
if (bo == NULL)
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 3841e52..01b8d2c 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -42,7 +42,7 @@
#define PITCH(x, y) ALIGN((x)*(y), 4)
-#define FORCE_INPLACE 0
+#define FORCE_INPLACE 0 /* 1 upload directly, -1 force indirect */
/* XXX Need to avoid using GTT fenced access for I915_TILING_Y on 855GM */
@@ -545,7 +545,7 @@ static bool upload_inplace(struct kgem *kgem,
* able to almagamate a series of small writes into a single
* operation.
*/
- if (kgem_bo_is_busy(bo)) {
+ if (__kgem_bo_is_busy(kgem, bo)) {
unsigned int bytes = 0;
while (n--) {
bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
@@ -763,9 +763,8 @@ tile:
}
kgem_set_mode(kgem, KGEM_BLT);
- if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
- kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
- !kgem_check_batch(kgem, 8) ||
+ if (!kgem_check_batch(kgem, 8) ||
+ !kgem_check_reloc_and_exec(kgem, 2) ||
!kgem_check_bo_fenced(kgem, dst_bo)) {
_kgem_submit(kgem);
_kgem_set_mode(kgem, KGEM_BLT);
commit 291b3c4367b455bfc5a772f8caaeee73f36d826a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Jun 18 23:43:00 2012 +0100
sna: Align upload buffers to 128
This seems to be a restriction (observed on 965gm at least) that we
have incoherent sampler cache if we write within 128 bytes of a busy
buffer. This is either due to a restriction on neighbouring cachelines
(like the earlier BLT limitations) or an effect of sampler prefetch.
Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
References: https://bugs.freedesktop.org/show_bug.cgi?id=50477
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index aaddda4..016ff48 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -71,6 +71,13 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
#define DBG(x) ErrorF x
#endif
+/* Worst case seems to be 965gm where we cannot write within a cacheline that
+ * is being simultaneously being read by the GPU, or within the sampler
+ * prefetch. In general, the chipsets seem to have a requirement that sampler
+ * offsets be aligned to a cacheline (64 bytes).
+ */
+#define UPLOAD_ALIGNMENT 128
+
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
@@ -1134,7 +1141,7 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
DBG(("%s: size=%d, offset=%d, parent used=%d\n",
__FUNCTION__, bo->size.bytes, bo->delta, io->used));
- if (ALIGN(bo->delta + bo->size.bytes, 64) == io->used)
+ if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
io->used = bo->delta;
}
@@ -3619,9 +3626,9 @@ static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
{
struct kgem_partial_bo *bo;
- bo = malloc(sizeof(*bo) + 128 + num_pages * PAGE_SIZE);
+ bo = malloc(sizeof(*bo) + 2*UPLOAD_ALIGNMENT + num_pages * PAGE_SIZE);
if (bo) {
- bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), 64);
+ bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), UPLOAD_ALIGNMENT);
bo->mmapped = false;
}
@@ -4005,7 +4012,7 @@ init:
__FUNCTION__, alloc, bo->base.handle));
done:
- bo->used = ALIGN(bo->used, 64);
+ bo->used = ALIGN(bo->used, UPLOAD_ALIGNMENT);
assert(bo->mem);
*ret = (char *)bo->mem + offset;
return kgem_create_proxy(kgem, &bo->base, offset, size);
@@ -4052,7 +4059,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
* that it can be allocated to other pixmaps.
*/
min = bo->delta + height * stride;
- min = ALIGN(min, 64);
+ min = ALIGN(min, UPLOAD_ALIGNMENT);
if (io->used != min) {
DBG(("%s: trimming partial buffer from %d to %d\n",
__FUNCTION__, io->used, min));
commit 39e5c7491535999643c1761bb1602ad757ab486c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Jun 18 21:58:27 2012 +0100
sna: Assert damage is valid after every addition
Even more paranoia than just checking upon migration.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c0c8ca4..2623e70 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2967,6 +2967,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
}
}
+ assert_pixmap_damage(pixmap);
priv->clear = false;
return true;
}
@@ -3006,6 +3007,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
sna_damage_add(&priv->gpu_damage, region);
}
+ assert_pixmap_damage(pixmap);
priv->clear = false;
return true;
}
@@ -3145,6 +3147,7 @@ blt:
box++;
} while (--n);
+ assert_pixmap_damage(pixmap);
return true;
}
@@ -3195,6 +3198,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
assert_pixmap_contains_box(pixmap, RegionExtents(region));
if (damage)
sna_damage_add(damage, region);
+ assert_pixmap_damage(pixmap);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
@@ -3318,6 +3322,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
assert_pixmap_contains_box(pixmap, RegionExtents(region));
if (damage)
sna_damage_add(damage, region);
+ assert_pixmap_damage(pixmap);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
@@ -3605,6 +3610,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
if (!DAMAGE_IS_ALL(priv->gpu_damage))
sna_damage_add_boxes(&priv->gpu_damage, box, n, tx, ty);
+ assert_pixmap_damage(pixmap);
} else {
FbBits *dst_bits, *src_bits;
int stride, bpp;
@@ -3737,6 +3743,9 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
src_pixmap->drawable.width, src_pixmap->drawable.height,
dst_pixmap->drawable.width, dst_pixmap->drawable.height));
+ assert_pixmap_damage(dst_pixmap);
+ assert_pixmap_damage(src_pixmap);
+
pixman_region_init_rects(&region, box, n);
bpp = dst_pixmap->drawable.bitsPerPixel;
@@ -3870,6 +3879,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
assert_pixmap_contains_box(dst_pixmap,
RegionExtents(&region));
sna_damage_add(&dst_priv->gpu_damage, &region);
+ assert_pixmap_damage(dst_pixmap);
}
}
@@ -3905,6 +3915,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
sna_damage_add(&dst_priv->gpu_damage, &region);
RegionTranslate(&region, -dst_dx, -dst_dy);
}
+ assert_pixmap_damage(dst_pixmap);
}
} else if (copy_use_cpu_bo(src_priv, dst_priv->gpu_bo)) {
if (!sna->render.copy_boxes(sna, alu,
@@ -3931,6 +3942,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
sna_damage_add(&dst_priv->gpu_damage, &region);
RegionTranslate(&region, -dst_dx, -dst_dy);
}
+ assert_pixmap_damage(dst_pixmap);
}
} else if (alu != GXcopy) {
PixmapPtr tmp;
@@ -3991,6 +4003,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
sna_damage_add(&dst_priv->gpu_damage, &region);
RegionTranslate(&region, -dst_dx, -dst_dy);
}
+ assert_pixmap_damage(dst_pixmap);
} else {
if (src_priv) {
RegionTranslate(&region, src_dx, src_dy);
@@ -4047,6 +4060,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
&region);
RegionTranslate(&region, -dst_dx, -dst_dy);
}
+ assert_pixmap_damage(dst_pixmap);
}
}
}
@@ -4076,6 +4090,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
sna_damage_add(&dst_priv->cpu_damage, &region);
RegionTranslate(&region, -dst_dx, -dst_dy);
}
+ assert_pixmap_damage(dst_pixmap);
if (dst_priv->flush)
list_move(&dst_priv->list, &sna->dirty_pixmaps);
@@ -5088,6 +5103,7 @@ damage_clipped:
done:
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
return TRUE;
}
@@ -5387,6 +5403,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
assert_pixmap_contains_boxes(pixmap, box, n, dx, dy);
if (arg->damage)
sna_damage_add_boxes(arg->damage, box, n, dx, dy);
+ assert_pixmap_damage(pixmap);
br00 = 3 << 20;
br13 = arg->bo->pitch;
@@ -5548,6 +5565,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
assert_pixmap_contains_boxes(dst_pixmap, box, n, dx, dy);
if (arg->damage)
sna_damage_add_boxes(arg->damage, box, n, dx, dy);
+ assert_pixmap_damage(dst_pixmap);
br00 = XY_MONO_SRC_COPY;
if (drawable->bitsPerPixel == 32)
@@ -5937,6 +5955,7 @@ sna_poly_point_blt(DrawablePtr drawable,
}
}
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
return TRUE;
}
@@ -6388,6 +6407,7 @@ Y2_continue:
done:
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
RegionUninit(&clip);
return true;
@@ -6641,6 +6661,7 @@ sna_poly_line_blt(DrawablePtr drawable,
sna_damage_add_boxes(damage, boxes, b - boxes, 0, 0);
}
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
return TRUE;
}
@@ -7041,6 +7062,7 @@ spans_fallback:
pixman_region_translate(&data.region, data.dx, data.dy);
assert_pixmap_contains_box(data.pixmap, &data.region.extents);
sna_damage_add(data.damage, &data.region);
+ assert_pixmap_damage(data.pixmap);
}
RegionUninit(&data.region);
return;
@@ -7257,6 +7279,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
}
done:
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
return TRUE;
}
@@ -7553,6 +7576,7 @@ Y2_continue:
done:
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
RegionUninit(&clip);
return true;
@@ -7894,6 +7918,7 @@ spans_fallback:
assert_pixmap_contains_box(data.pixmap, &data.region.extents);
sna_damage_add(data.damage, &data.region);
}
+ assert_pixmap_damage(data.pixmap);
RegionUninit(&data.region);
return;
}
@@ -8435,6 +8460,7 @@ done:
sna_damage_add_boxes(damage, boxes, b-boxes, 0, 0);
}
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
return TRUE;
}
@@ -8669,6 +8695,7 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
assert_pixmap_contains_box(data.pixmap, &data.region.extents);
sna_damage_add(data.damage, &data.region);
}
+ assert_pixmap_damage(data.pixmap);
RegionUninit(&data.region);
return;
}
@@ -8755,6 +8782,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
} else
sna_damage_add_box(damage, &r);
}
+ assert_pixmap_damage(pixmap);
if ((gc->alu == GXcopy || gc->alu == GXclear) &&
r.x2 - r.x1 == pixmap->drawable.width &&
@@ -8897,6 +8925,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
}
done:
fill.done(sna, &fill);
+ assert_pixmap_damage(pixmap);
return TRUE;
}
@@ -9022,6 +9051,7 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
assert_pixmap_contains_box(data.pixmap, &data.region.extents);
sna_damage_add(data.damage, &data.region);
}
+ assert_pixmap_damage(data.pixmap);
RegionUninit(&data.region);
return;
}
@@ -9337,6 +9367,7 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
}
done:
copy.done(sna, &copy);
+ assert_pixmap_damage(pixmap);
kgem_bo_destroy(&sna->kgem, tile_bo);
return TRUE;
}
@@ -9528,6 +9559,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
}
}
+ assert_pixmap_damage(pixmap);
sna->blt_state.fill_bo = 0;
return true;
}
@@ -10248,6 +10280,7 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
}
}
+ assert_pixmap_damage(pixmap);
sna->blt_state.fill_bo = 0;
return true;
}
@@ -10616,6 +10649,7 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
assert_pixmap_contains_box(data.pixmap, &data.region.extents);
sna_damage_add(data.damage, &data.region);
}
+ assert_pixmap_damage(data.pixmap);
RegionUninit(&data.region);
return;
}
@@ -10888,6 +10922,7 @@ skip:
}
} while (1);
+ assert_pixmap_damage(pixmap);
sna->blt_state.fill_bo = 0;
return true;
}
@@ -11601,6 +11636,7 @@ skip:
}
} while (1);
+ assert_pixmap_damage(pixmap);
sna->blt_state.fill_bo = 0;
return true;
}
@@ -11803,6 +11839,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
assert_pixmap_contains_box(pixmap, RegionExtents(region));
if (damage)
sna_damage_add(damage, region);
+ assert_pixmap_damage(pixmap);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__,
region->extents.x1, region->extents.y1,
More information about the xorg-commit
mailing list