xf86-video-intel: 5 commits - src/sna/kgem.c src/sna/sna_accel.c
Chris Wilson
ickle at kemper.freedesktop.org
Thu Jan 12 04:38:16 PST 2012
src/sna/kgem.c | 90 +++++++++++++++++++++-------------------------------
src/sna/sna_accel.c | 18 ++++++----
2 files changed, 50 insertions(+), 58 deletions(-)
New commits:
commit 978e1aeceab3c1a524f7d7a070fe04f37530c8d9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Jan 12 11:38:56 2012 +0000
sna: Only shrink a partial buffer if it is no longer used.
The condition for being able to shrink a buffer is more severe than just
whether we are reading from the buffer: we also cannot swap the
handles if the existing handle remains exposed via a proxy.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
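In kgem_finish_partials() the decision therefore keys off the reference
count rather than a dedicated flag. A minimal sketch of the tightened
test, using only the fields visible in the diff below (the helper name
can_shrink is hypothetical):

    /* A partial buffer may only be shrunk (and its handle swapped)
     * when nothing else can still see the old handle -- i.e. we hold
     * the sole reference -- and less than half of it was used.
     */
    static bool can_shrink(struct kgem_partial_bo *bo)
    {
        return bo->base.refcnt == 1 && bo->used < bo->base.size / 2;
    }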
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a9ccb41..c7ce777 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -112,7 +112,6 @@ struct kgem_partial_bo {
uint32_t used;
uint32_t need_io : 1;
uint32_t write : 1;
- uint32_t shrink : 1;
uint32_t mmapped : 1;
};
@@ -294,6 +293,8 @@ static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
kgem_retire(kgem);
if (bo->exec == NULL) {
+ DBG(("%s: retiring bo handle=%d (needed flush? %d)\n",
+ __FUNCTION__, bo->handle, bo->needs_flush));
bo->rq = NULL;
list_del(&bo->request);
bo->needs_flush = bo->flush;
@@ -1079,8 +1080,9 @@ static void kgem_commit(struct kgem *kgem)
struct kgem_bo *bo, *next;
list_for_each_entry_safe(bo, next, &rq->buffers, request) {
- DBG(("%s: release handle=%d (proxy? %d)\n",
- __FUNCTION__, bo->handle, bo->proxy != NULL));
+ DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d\n",
+ __FUNCTION__, bo->handle, bo->proxy != NULL,
+ bo->dirty, bo->needs_flush));
assert(!bo->purged);
assert(bo->proxy || bo->rq == rq);
@@ -1193,7 +1195,7 @@ static void kgem_finish_partials(struct kgem *kgem)
}
assert(bo->base.rq == kgem->next_request);
- if (bo->shrink && bo->used < bo->base.size / 2) {
+ if (bo->base.refcnt == 1 && bo->used < bo->base.size / 2) {
struct kgem_bo *shrink;
shrink = search_linear_cache(kgem,
@@ -1227,9 +1229,12 @@ static void kgem_finish_partials(struct kgem *kgem)
list_replace(&bo->base.request,
&shrink->request);
list_init(&bo->base.request);
+ shrink->needs_flush = bo->base.dirty;
bo->base.exec = NULL;
bo->base.rq = NULL;
+ bo->base.dirty = false;
+ bo->base.needs_flush = false;
bo->used = 0;
bubble_sort_partial(kgem, bo);
@@ -2497,8 +2502,11 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
kgem->reloc[index].target_handle = bo->handle;
kgem->reloc[index].presumed_offset = bo->presumed_offset;
- if (read_write_domain & 0x7fff)
+ if (read_write_domain & 0x7fff) {
+ DBG(("%s: marking handle=%d dirty\n",
+ __FUNCTION__, bo->handle));
bo->needs_flush = bo->dirty = true;
+ }
delta += bo->presumed_offset;
} else {
@@ -2898,13 +2906,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
__FUNCTION__, size, bo->used, bo->base.size));
offset = 0;
- bo->shrink = 0;
goto done;
} else if (bo->used + size <= bo->base.size) {
DBG(("%s: reusing unfinished write buffer for read of %d bytes? used=%d, total=%d\n",
__FUNCTION__, size, bo->used, bo->base.size));
offset = bo->used;
- bo->shrink = 0;
goto done;
}
}
@@ -3046,7 +3052,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
bo->used = size;
bo->write = write;
- bo->shrink = bo->need_io;
offset = 0;
list_add(&bo->base.list, &kgem->partial);
commit d3169154d18600e0d41db5f833fad52970e17b55
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Jan 12 11:36:05 2012 +0000
sna: Improve a DBG message
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7c62059..3576176 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -883,7 +883,7 @@ region_subsumes_damage(const RegionRec *region, struct sna_damage *damage)
re = &region->extents;
de = &DAMAGE_PTR(damage)->extents;
- DBG(("%s: region (%d, %d), (%d, %d), extents (%d, %d), (%d, %d)\n",
+ DBG(("%s: region (%d, %d), (%d, %d), damage (%d, %d), (%d, %d)\n",
__FUNCTION__,
re->x1, re->y1, re->x2, re->y2,
de->x1, de->y1, de->x2, de->y2));
commit 2a22990968aebd53a7bfeaf19e58ee609b980e5f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Jan 12 11:22:06 2012 +0000
sna: Prevent 60Hz wakeups if the client stops in mid-render
Only continue to wake up if the scanout remains active.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
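Condensed from the hunk below, the new policy is roughly: the flush
timer stays armed for one more tick after the pipeline drains, and is
disarmed only when two consecutive flushes find neither CPU damage nor
an outstanding GPU request:

    /* sketch, condensed from the hunk below */
    bool need_throttle = priv->gpu_bo->rq != NULL;
    bool busy = priv->cpu_damage || need_throttle;

    if (!sna->kgem.busy && !busy)   /* idle last tick and this one */
        _sna_accel_disarm_timer(sna, FLUSH_TIMER);
    sna->kgem.busy = busy;          /* remembered for the next tick */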
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b3bea6a..7c62059 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9757,20 +9757,26 @@ static void _sna_accel_disarm_timer(struct sna *sna, int id) { }
static bool sna_accel_flush(struct sna *sna)
{
struct sna_pixmap *priv = sna_accel_scanout(sna);
- bool nothing_to_do = priv->cpu_damage == NULL && sna->kgem.nbatch == 0;
bool need_throttle = priv->gpu_bo->rq;
+ bool busy = priv->cpu_damage || need_throttle;
- DBG(("%s (time=%ld), nothing_to_do=%d, busy? %d\n",
+ DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d, need_throttle=%d\n",
__FUNCTION__, (long)GetTimeInMillis(),
- nothing_to_do, sna->kgem.busy));
+ priv->cpu_damage,
+ priv->gpu_bo->exec != NULL,
+ sna->kgem.nbatch,
+ sna->kgem.busy, need_throttle));
- if (nothing_to_do && !sna->kgem.busy)
+ if (!sna->kgem.busy && !busy)
_sna_accel_disarm_timer(sna, FLUSH_TIMER);
+ sna->kgem.busy = busy;
+
if (priv->cpu_damage)
sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ);
- sna->kgem.busy = !nothing_to_do;
+
kgem_bo_flush(&sna->kgem, priv->gpu_bo);
sna->kgem.flush_now = 0;
+
return need_throttle;
}
commit 1c0e9916caef6594511009c8cb79071ac0d82090
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Jan 12 11:12:56 2012 +0000
sna: Align the partial buffer contents to cachelines
To enable Daniel's faster pwrite paths. Only one step removed from using
whole page alignment...
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
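The new partial_bo_alloc() over-allocates by 128 bytes so that the
payload can be rounded up to the next 64-byte cacheline boundary.
Assuming ALIGN is the usual round-up-to-a-power-of-two macro, the
arithmetic is:

    #define ALIGN(v, a) (((v) + (a) - 1) & ~((uintptr_t)(a) - 1))

    /* e.g. if the header ends at 0x1008, the payload starts at 0x1040 */
    bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), 64);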
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8fe6118..a9ccb41 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -113,6 +113,7 @@ struct kgem_partial_bo {
uint32_t need_io : 1;
uint32_t write : 1;
uint32_t shrink : 1;
+ uint32_t mmapped : 1;
};
static struct kgem_bo *__kgem_freed_bo;
@@ -306,6 +307,7 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
assert(!bo->purged);
assert(!kgem_busy(kgem, bo->handle));
+ assert(length <= bo->size);
if (gem_write(kgem->fd, bo->handle, 0, length, data))
return FALSE;
@@ -835,11 +837,6 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
free(bo);
}
-static bool is_mmaped_buffer(struct kgem_partial_bo *bo)
-{
- return bo->mem != bo+1;
-}
-
inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
struct kgem_bo *bo)
{
@@ -1244,6 +1241,7 @@ static void kgem_finish_partials(struct kgem *kgem)
DBG(("%s: handle=%d, uploading %d/%d\n",
__FUNCTION__, bo->base.handle, bo->used, bo->base.size));
assert(!kgem_busy(kgem, bo->base.handle));
+ assert(bo->used <= bo->base.size);
gem_write(kgem->fd, bo->base.handle,
0, bo->used, bo->mem);
bo->need_io = 0;
@@ -2866,6 +2864,19 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
return bo;
}
+static struct kgem_partial_bo *partial_bo_alloc(int size)
+{
+ struct kgem_partial_bo *bo;
+
+ bo = malloc(sizeof(*bo) + 128 + size);
+ if (bo) {
+ bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), 64);
+ bo->mmapped = false;
+ }
+
+ return bo;
+}
+
struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
uint32_t size, uint32_t flags,
void **ret)
@@ -2972,19 +2983,19 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
bo->need_io = false;
bo->base.io = true;
+ bo->mmapped = true;
alloc = bo->base.size;
} else if (HAVE_VMAP && kgem->has_vmap) {
- bo = malloc(sizeof(*bo) + alloc);
+ bo = partial_bo_alloc(alloc);
if (bo == NULL)
return NULL;
- handle = gem_vmap(kgem->fd, bo+1, alloc, write);
+ handle = gem_vmap(kgem->fd, bo->mem, alloc, write);
if (handle) {
__kgem_bo_init(&bo->base, handle, alloc);
bo->base.vmap = true;
bo->need_io = false;
- bo->mem = bo + 1;
} else {
free(bo);
return NULL;
@@ -2999,13 +3010,10 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
if (old) {
alloc = old->size;
- bo = malloc(sizeof(*bo) + alloc);
+ bo = partial_bo_alloc(alloc);
if (bo == NULL)
return NULL;
- bo->mem = bo + 1;
- bo->need_io = write;
-
memcpy(&bo->base, old, sizeof(*old));
if (old->rq)
list_replace(&old->request,
@@ -3017,7 +3025,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
free(old);
bo->base.refcnt = 1;
} else {
- bo = malloc(sizeof(*bo) + alloc);
+ bo = partial_bo_alloc(alloc);
if (bo == NULL)
return NULL;
@@ -3027,9 +3035,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
free(bo);
return NULL;
}
- bo->mem = bo + 1;
- bo->need_io = write;
}
+ bo->need_io = write;
bo->base.io = true;
}
bo->base.reusable = false;
@@ -3203,9 +3210,9 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
offset, length, bo->base.vmap));
- if (!bo->base.vmap && !is_mmaped_buffer(bo)) {
+ if (!bo->base.vmap && !bo->mmapped) {
gem_read(kgem->fd,
- bo->base.handle, (char *)(bo+1)+offset,
+ bo->base.handle, (char *)bo->mem+offset,
offset, length);
kgem_bo_retire(kgem, &bo->base);
bo->base.domain = DOMAIN_NONE;
commit 1e4080318f58fd51f37792eefd367e2e4c813ea3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Jan 12 10:47:19 2012 +0000
sna: Replace the open-coded bubble sort of the partial list
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
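The partial list is kept sorted by the space remaining in each buffer,
so an entry whose usage shrinks has to bubble back towards the head.
The body of bubble_sort_partial() is not part of this diff; the sketch
below is an assumption reconstructed from the open-coded loop being
removed, with list_move taken to be a kernel-style helper that
re-inserts an entry right after the given node:

    static void bubble_sort_partial(struct kgem *kgem,
                                    struct kgem_partial_bo *bo)
    {
        int remain = bo->base.size - bo->used;

        /* walk towards the head while the predecessor has less room free */
        while (bo->base.list.prev != &kgem->partial) {
            struct kgem_partial_bo *p;

            p = list_entry(bo->base.list.prev,
                           struct kgem_partial_bo, base.list);
            if (remain <= p->base.size - p->used)
                break;

            /* step over the predecessor: re-insert bo just before p */
            list_move(&bo->base.list, p->base.list.prev);
        }
    }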
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 37f28c8..8fe6118 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3091,7 +3091,6 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
if (height & 1) {
struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
- int remain;
/* Having padded this surface to ensure that accesses to
* the last pair of rows is valid, remove the padding so
@@ -3099,32 +3098,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
*/
io->used -= stride;
bo->size -= stride;
-
- /* And bubble-sort the partial back into place */
- remain = io->base.size - io->used;
- while (io->base.list.prev != &kgem->partial) {
- struct kgem_partial_bo *p;
-
- p = list_entry(io->base.list.prev,
- struct kgem_partial_bo,
- base.list);
- if (remain <= p->base.size - p->used)
- break;
-
- assert(p->base.list.next == &io->base.list);
- io->base.list.prev = p->base.list.prev;
- p->base.list.prev->next = &io->base.list;
- p->base.list.prev = &io->base.list;
-
- p->base.list.next = io->base.list.next;
- io->base.list.next->prev = &p->base.list;
- io->base.list.next = &p->base.list;
-
- assert(p->base.list.next->prev == &p->base.list);
- assert(io->base.list.prev->next == &io->base.list);
- }
-
- assert(validate_partials(kgem));
+ bubble_sort_partial(kgem, io);
}
bo->pitch = stride;