xf86-video-intel: 2 commits - configure.ac src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna.h src/sna/sna_render.c src/sna/sna_render_inline.h

Chris Wilson ickle at kemper.freedesktop.org
Mon Jan 30 07:52:19 PST 2012


 configure.ac                |    1 
 src/sna/kgem.c              |  115 ++++++++++++++++++++------------------------
 src/sna/kgem.h              |   10 ++-
 src/sna/sna.h               |    2 
 src/sna/sna_accel.c         |  111 +++++++++++++++++++-----------------------
 src/sna/sna_render.c        |    6 +-
 src/sna/sna_render_inline.h |    2 
 7 files changed, 118 insertions(+), 129 deletions(-)

New commits:
commit ed1c1a7468d78e99cb4f9a4a8b8a6b00c3257a75
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 13:43:59 2012 +0000

    sna: Track large objects and limit prefer-gpu hint to small objects
    
    As the GATT size is independent of the actual RAM size, we need to be
    careful not to be too generous when allocating GPU bos and their
    shadows. So first of all we limit default render targets to those
    small enough to fit comfortably in RAM alongside others, and secondly
    we try to keep only a single copy of large objects in memory.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
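
    For illustration, a minimal standalone sketch of the RAM-capping idea
    the patch below implements, assuming Linux sysinfo(2) is available;
    the aperture fallback value and the limit variables are placeholders
    for this sketch, not the driver's real kgem fields:

    /* Sketch only: cap object sizes against total RAM, as the patch does. */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/sysinfo.h>

    static size_t total_ram_size(void)
    {
        struct sysinfo info;
        if (sysinfo(&info) == 0)
            return (size_t)info.totalram * info.mem_unit;
        return 0; /* unknown */
    }

    int main(void)
    {
        size_t totalram = total_ram_size();
        size_t aperture_total = 256UL << 20; /* placeholder fallback */
        size_t max_object = SIZE_MAX, max_cpu = SIZE_MAX, max_gpu = SIZE_MAX;

        if (totalram == 0) /* no sysinfo: fall back to the aperture size */
            totalram = aperture_total;

        /* Mirror the thresholds used below: at most half of RAM for any
         * single object or CPU mapping, a quarter of RAM for GPU targets. */
        if (max_object > totalram / 2)
            max_object = totalram / 2;
        if (max_cpu > totalram / 2)
            max_cpu = totalram / 2;
        if (max_gpu > totalram / 4)
            max_gpu = totalram / 4;

        printf("object<=%zu cpu<=%zu gpu<=%zu bytes\n",
               max_object, max_cpu, max_gpu);
        return 0;
    }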

diff --git a/configure.ac b/configure.ac
index 63beb64..e953ae5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -132,6 +132,7 @@ required_pixman_version=0.24
 if test "x$SNA" != "xno"; then
 	required_xorg_xserver_version=1.10
 	AC_DEFINE(USE_SNA, 1, [Enable SNA support])
+	AC_CHECK_HEADERS([sys/sysinfo.h])
 fi
 AC_MSG_RESULT([$SNA])
 
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5ab5c83..ca7eafa 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -44,6 +44,10 @@
 #include <memcheck.h>
 #endif
 
+#if HAVE_SYS_SYSINFO_H
+#include <sys/sysinfo.h>
+#endif
+
 static struct kgem_bo *
 search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
@@ -498,6 +502,18 @@ agp_aperture_size(struct pci_device *dev, int gen)
 }
 
 static size_t
+total_ram_size(void)
+{
+#if HAVE_SYS_SYSINFO_H
+	struct sysinfo info;
+	if (sysinfo(&info) == 0)
+		return info.totalram * info.mem_unit;
+#endif
+
+	return 0;
+}
+
+static size_t
 cpu_cache_size(void)
 {
 	FILE *file = fopen("/proc/cpuinfo", "r");
@@ -556,6 +572,7 @@ static bool semaphores_enabled(void)
 void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
+	size_t totalram;
 	unsigned int i, j;
 
 	memset(kgem, 0, sizeof(*kgem));
@@ -679,6 +696,24 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	if (kgem->max_tile_size > kgem->max_gpu_size / 2)
 		kgem->max_tile_size = kgem->max_gpu_size / 2;
 
+	totalram = total_ram_size();
+	if (totalram == 0) {
+		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n", __FUNCTION__));
+		totalram = kgem->aperture_total;
+	}
+	if (kgem->max_object_size > totalram / 2)
+		kgem->max_object_size = totalram / 2;
+	if (kgem->max_cpu_size > totalram / 2)
+		kgem->max_cpu_size = totalram / 2;
+	if (kgem->max_gpu_size > totalram / 4)
+		kgem->max_gpu_size = totalram / 4;
+
+	kgem->large_object_size = MAX_CACHE_SIZE;
+	if (kgem->large_object_size > kgem->max_gpu_size)
+		kgem->large_object_size = kgem->max_gpu_size;
+
+	DBG(("%s: large object threshold=%d\n",
+	     __FUNCTION__, kgem->large_object_size));
 	DBG(("%s: max object size (gpu=%d, cpu=%d, tile=%d)\n",
 	     __FUNCTION__,
 	     kgem->max_gpu_size,
@@ -2179,83 +2214,40 @@ done:
 	return tiling;
 }
 
-bool kgem_can_create_2d(struct kgem *kgem,
-			 int width, int height, int depth)
+unsigned kgem_can_create_2d(struct kgem *kgem,
+			    int width, int height, int depth)
 {
 	int bpp = BitsPerPixel(depth);
 	uint32_t pitch, size;
+	unsigned flags = 0;
 
 	if (depth < 8 || kgem->wedged)
-		return false;
-
-	size = kgem_surface_size(kgem, false, false,
-				 width, height, bpp,
-				 I915_TILING_X, &pitch);
-	if (size > 0 && size <= kgem->max_object_size)
-		return true;
-
-	size = kgem_surface_size(kgem, false, false,
-				 width, height, bpp,
-				 I915_TILING_NONE, &pitch);
-	if (size > 0 && size <= kgem->max_object_size)
-		return true;
-
-	return false;
-}
-
-bool kgem_can_create_cpu(struct kgem *kgem,
-			 int width, int height, int bpp)
-{
-	uint32_t pitch, size;
-
-	if (bpp < 8 || kgem->wedged)
-		return false;
+		return 0;
 
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
 				 I915_TILING_NONE, &pitch);
-	DBG(("%s? %d, cpu size %d, max %d\n",
-	     __FUNCTION__,
-	     size > 0 && size <= kgem->max_cpu_size,
-	     size, kgem->max_cpu_size));
-	return size > 0 && size <= kgem->max_cpu_size;
-}
-
-static bool _kgem_can_create_gpu(struct kgem *kgem,
-				 int width, int height, int bpp)
-{
-	uint32_t pitch, size;
-
-	if (bpp < 8 || kgem->wedged)
-		return false;
+	if (size > 0 && size <= kgem->max_cpu_size)
+		flags |= KGEM_CAN_CREATE_CPU | KGEM_CAN_CREATE_GPU;
+	if (size > kgem->large_object_size)
+		flags |= KGEM_CAN_CREATE_LARGE;
+	if (size > kgem->max_object_size)
+		return 0;
 
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
-				 kgem_choose_tiling(kgem,
-						    I915_TILING_X,
+				 kgem_choose_tiling(kgem, I915_TILING_X,
 						    width, height, bpp),
 				 &pitch);
-	DBG(("%s? %d, gpu size %d, max %d\n",
-	     __FUNCTION__,
-	     size > 0 && size < kgem->max_gpu_size,
-	     size, kgem->max_gpu_size));
-	return size > 0 && size < kgem->max_gpu_size;
-}
+	if (size > 0 && size <= kgem->max_gpu_size)
+		flags |= KGEM_CAN_CREATE_GPU;
+	if (size > kgem->large_object_size)
+		flags |= KGEM_CAN_CREATE_LARGE;
+	if (size > kgem->max_object_size)
+		return 0;
 
-#if DEBUG_KGEM
-bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp)
-{
-	bool ret = _kgem_can_create_gpu(kgem, width, height, bpp);
-	DBG(("%s(%dx%d, bpp=%d) = %d\n", __FUNCTION__,
-	     width, height, bpp, ret));
-	return ret;
-}
-#else
-bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp)
-{
-	return _kgem_can_create_gpu(kgem, width, height, bpp);
+	return flags;
 }
-#endif
 
 inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
 {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index fea2d45..87dc386 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -161,7 +161,8 @@ struct kgem {
 	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
 	uint32_t aperture, aperture_fenced;
 	uint32_t min_alignment;
-	uint32_t max_tile_size, max_gpu_size, max_cpu_size, max_object_size;
+	uint32_t max_tile_size, max_gpu_size, max_cpu_size;
+	uint32_t large_object_size, max_object_size;
 	uint32_t partial_buffer_size;
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
@@ -201,9 +202,10 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 
 int kgem_choose_tiling(struct kgem *kgem,
 		       int tiling, int width, int height, int bpp);
-bool kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
-bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp);
-bool kgem_can_create_cpu(struct kgem *kgem, int width, int height, int bpp);
+unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
+#define KGEM_CAN_CREATE_GPU	0x1
+#define KGEM_CAN_CREATE_CPU	0x2
+#define KGEM_CAN_CREATE_LARGE	0x4
 
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index d9ba773..a0ea54f 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -145,7 +145,7 @@ struct sna_pixmap {
 	uint8_t flush :1;
 	uint8_t clear :1;
 	uint8_t undamaged :1;
-	uint8_t gpu :1;
+	uint8_t create :3;
 	uint8_t header :1;
 };
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5d0e042..26fd1ab 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -246,11 +246,8 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
 	assert(priv->stride);
 
-	if ((sna->kgem.has_cpu_bo || !priv->gpu) &&
-	    kgem_can_create_cpu(&sna->kgem,
-				pixmap->drawable.width,
-				pixmap->drawable.height,
-				pixmap->drawable.bitsPerPixel)) {
+	if ((sna->kgem.has_cpu_bo || (priv->create & KGEM_CAN_CREATE_GPU) == 0) &&
+	    (priv->create & KGEM_CAN_CREATE_CPU)) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
 
@@ -589,15 +586,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, tiling=%d)\n", __FUNCTION__,
 	     width, height, depth, tiling));
 
-	if (depth < 8)
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH);
-
 	bpp = BitsPerPixel(depth);
-	if (!kgem_can_create_gpu(&sna->kgem, width, height, bpp))
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH);
-
 	if (tiling == I915_TILING_Y && !sna->have_render)
 		tiling = I915_TILING_X;
 
@@ -672,46 +661,47 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 {
 	struct sna *sna = to_sna_from_screen(screen);
 	PixmapPtr pixmap;
+	unsigned flags;
 	int pad;
 
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
-	if (!kgem_can_create_2d(&sna->kgem, width, height, depth)) {
+	if (!sna->have_render)
+		goto fallback;
+
+	flags = kgem_can_create_2d(&sna->kgem, width, height, depth);
+	if (flags == 0) {
 		DBG(("%s: can not use GPU, just creating shadow\n",
 		     __FUNCTION__));
-		return create_pixmap(sna, screen, width, height, depth, usage);
+		goto fallback;
 	}
 
-	if (!sna->have_render)
-		return create_pixmap(sna, screen,
-				     width, height, depth,
-				     usage);
-
 #if FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER
 	if (width == 0 || height == 0)
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH_HEADER);
+		goto fallback;
 #endif
 
-	if (usage == CREATE_PIXMAP_USAGE_SCRATCH)
-#if USE_BO_FOR_SCRATCH_PIXMAP
-		return sna_pixmap_create_scratch(screen,
-						 width, height, depth,
-						 I915_TILING_X);
-#else
-	return create_pixmap(sna, screen,
-			     width, height, depth,
-			     usage);
-#endif
+	if (usage == CREATE_PIXMAP_USAGE_SCRATCH) {
+		if (flags & KGEM_CAN_CREATE_GPU)
+			return sna_pixmap_create_scratch(screen,
+							 width, height, depth,
+							 I915_TILING_X);
+		else
+			goto fallback;
+	}
 
-	if (usage == SNA_CREATE_SCRATCH)
-		return sna_pixmap_create_scratch(screen,
-						 width, height, depth,
-						 I915_TILING_Y);
+	if (usage == SNA_CREATE_SCRATCH) {
+		if (flags & KGEM_CAN_CREATE_GPU)
+			return sna_pixmap_create_scratch(screen,
+							 width, height, depth,
+							 I915_TILING_Y);
+		else
+			goto fallback;
+	}
 
 	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE)
-		return create_pixmap(sna, screen, width, height, depth, usage);
+		goto fallback;
 
 	pad = PixmapBytePad(width, depth);
 	if (pad * height <= 4096) {
@@ -741,17 +731,17 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		priv = __sna_pixmap_attach(sna, pixmap);
 		if (priv == NULL) {
 			free(pixmap);
-			return create_pixmap(sna, screen,
-					     width, height, depth, usage);
+			goto fallback;
 		}
 
 		priv->stride = pad;
-		priv->gpu = kgem_can_create_gpu(&sna->kgem,
-						width, height,
-						pixmap->drawable.bitsPerPixel);
+		priv->create = flags;
 	}
 
 	return pixmap;
+
+fallback:
+	return create_pixmap(sna, screen, width, height, depth, usage);
 }
 
 static Bool sna_destroy_pixmap(PixmapPtr pixmap)
@@ -844,7 +834,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->clear = false;
 
-		if (priv->gpu && pixmap_inplace(sna, pixmap, priv)) {
+		if (priv->create & KGEM_CAN_CREATE_GPU &&
+		    pixmap_inplace(sna, pixmap, priv)) {
 			DBG(("%s: write inplace\n", __FUNCTION__));
 			if (priv->gpu_bo) {
 				if (kgem_bo_is_busy(priv->gpu_bo) &&
@@ -1004,7 +995,7 @@ skip_inplace_map:
 		priv->undamaged = true;
 	}
 
-	if (flags & MOVE_WRITE) {
+	if (flags & MOVE_WRITE || priv->create & KGEM_CAN_CREATE_LARGE) {
 		DBG(("%s: marking as damaged\n", __FUNCTION__));
 		sna_damage_all(&priv->cpu_damage,
 			       pixmap->drawable.width,
@@ -1179,7 +1170,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	if (priv->clear)
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
 
-	if (priv->gpu_bo == NULL && !priv->gpu && flags & MOVE_WRITE)
+	if (priv->gpu_bo == NULL &&
+	    (priv->create & KGEM_CAN_CREATE_GPU) == 0 &&
+	    flags & MOVE_WRITE)
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -1692,7 +1685,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	}
 
 done:
-	if (!priv->pinned && priv->gpu)
+	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
 	return true;
@@ -1733,7 +1726,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		goto use_cpu_bo;
 
 	if (priv->gpu_bo == NULL) {
-		if (!priv->gpu) {
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0) {
 			DBG(("%s: untiled, will not force allocation\n",
 			     __FUNCTION__));
 			goto use_cpu_bo;
@@ -1832,7 +1825,7 @@ done:
 
 use_gpu_bo:
 	priv->clear = false;
-	if (!priv->pinned && priv->gpu)
+	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive,
 			  &to_sna_from_pixmap(pixmap)->active_pixmaps);
 	*damage = NULL;
@@ -1883,10 +1876,6 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d)\n", __FUNCTION__, width, height, depth));
 	assert(width);
 	assert(height);
-	if (!sna->have_render ||
-	    !kgem_can_create_gpu(&sna->kgem, width, height, bpp))
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH);
 
 	if (sna->freed_pixmap) {
 		pixmap = sna->freed_pixmap;
@@ -2000,7 +1989,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		return NULL;
 
 	/* For large bo, try to keep only a single copy around */
-	if (!priv->gpu && priv->ptr) {
+	if (priv->create & KGEM_CAN_CREATE_LARGE && priv->ptr) {
 		sna_damage_all(&priv->gpu_damage,
 			       pixmap->drawable.width,
 			       pixmap->drawable.height);
@@ -2043,7 +2032,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	sna_damage_reduce(&priv->cpu_damage);
 	DBG(("%s: CPU damage? %d\n", __FUNCTION__, priv->cpu_damage != NULL));
 	if (priv->gpu_bo == NULL) {
-		if (!wedged(sna) && priv->gpu)
+		if (!wedged(sna) && priv->create & KGEM_CAN_CREATE_GPU)
 			priv->gpu_bo =
 				kgem_create_2d(&sna->kgem,
 					       pixmap->drawable.width,
@@ -2128,10 +2117,13 @@ done:
 	sna_damage_reduce_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height);
-	if (DAMAGE_IS_ALL(priv->gpu_damage))
+	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		priv->undamaged = false;
+		if (priv->ptr)
+			sna_pixmap_free_cpu(sna, priv);
+	}
 active:
-	if (!priv->pinned && priv->gpu)
+	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
 	return priv;
@@ -2984,7 +2976,7 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 	if (priv->gpu_bo)
 		return TRUE;
 
-	if (!priv->gpu)
+	if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
 		return FALSE;
 
 	if (priv->cpu_bo) {
@@ -3241,7 +3233,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 		} else {
 			dst_priv->clear = false;
-			if (!dst_priv->pinned && dst_priv->gpu)
+			if (!dst_priv->pinned &&
+			    (dst_priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 				list_move(&dst_priv->inactive,
 					  &sna->active_pixmaps);
 		}
@@ -11557,7 +11550,7 @@ static void sna_accel_inactive(struct sna *sna)
 		priv = list_first_entry(&sna->inactive_clock[1],
 					struct sna_pixmap,
 					inactive);
-		assert(priv->gpu);
+		assert((priv->create & KGEM_CAN_CREATE_LARGE) == 0);
 		assert(priv->gpu_bo);
 
 		/* XXX Rather than discarding the GPU buffer here, we
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 43e8642..fc6e6df 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -332,7 +332,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		int w = box->x2 - box->x1;
 		int h = box->y2 - box->y1;
 
-		if (!priv->gpu)
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
 			goto done;
 
 		if (priv->source_count*w*h >= pixmap->drawable.width * pixmap->drawable.height &&
@@ -380,7 +380,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 			return false;
 
 		upload = true;
-		if (!priv->gpu ||
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
 		    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
 				       I915_TILING_X,
 				       pixmap->drawable.width,
@@ -405,7 +405,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 		return FALSE;
 
 	count = priv->source_count++;
-	if (!priv->gpu ||
+	if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
 	    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
 			       I915_TILING_X,
 			       pixmap->drawable.width,
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 489f215..2805a01 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -102,7 +102,7 @@ too_small(DrawablePtr drawable)
 	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
 		return false;
 
-	return !priv->gpu;
+	return (priv->create & KGEM_CAN_CREATE_GPU) == 0;
 }
 
 static inline Bool
commit d53d93ffa6e133f46c39595294ecf8e2182b5a68
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 15:11:34 2012 +0000

    sna: Update the partial buffer allocation size when reusing an old mapping
    
    Whilst the old mapping is guaranteed to be larger than the requested
    allocation size, keeping track of the actual size allows for better
    packing of future buffers. The code also performs a sanity check that
    the buffer is the size we claim it to be...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
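
    As a rough sketch of the fix (hypothetical structure and helper names,
    not kgem's real ones): when a larger old buffer is recycled, the
    recorded allocation size is bumped to the buffer's actual page count,
    so later requests pack against the true size rather than the smaller
    original request:

    /* Hypothetical sketch; the real kgem_bo carries far more state. */
    struct recycled_bo { int handle; unsigned num_pages; };

    unsigned
    buffer_alloc_pages(const struct recycled_bo *old, unsigned requested)
    {
        /* The reused mapping is at least as large as the request; record
         * its real size so future partial buffers can fill the slack. */
        if (old != NULL && old->num_pages > requested)
            return old->num_pages;
        return requested;
    }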

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d97a6ac..5ab5c83 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3365,6 +3365,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old) {
 			DBG(("%s: reusing cpu map handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
+			alloc = num_pages(old);
 
 			memcpy(&bo->base, old, sizeof(*old));
 			if (old->rq)

