xf86-video-intel: 2 commits - src/sna/kgem.c
Chris Wilson
ickle at kemper.freedesktop.org
Tue Oct 29 18:24:49 CET 2013
src/sna/kgem.c | 45 +++++++++++++++++++++++++++++++++------------
1 file changed, 33 insertions(+), 12 deletions(-)
New commits:
commit 9f36e83d76eb6ef26539fc37ce9ebd08428c48a3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Tue Oct 29 16:59:57 2013 +0000
sna: Handle deferred retiring of active-buffers
Yikes, the new assertions found cases where we would retain an active
buffer even though it was still owned by the next batch. Fortunately,
this should only be a bookkeeping issue rather than lead to corruption.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 99a4cf9..772ddbd 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2095,6 +2095,10 @@ static bool kgem_retire__buffers(struct kgem *kgem)
struct kgem_buffer,
base.list);
+ DBG(("%s: handle=%d, busy? %d [%d]\n",
+ __FUNCTION__, bo->base.handle, bo->base.rq != NULL, bo->base.exec != NULL));
+
+ assert(bo->base.exec == NULL || RQ(bo->base.rq) == kgem->next_request);
if (bo->base.rq)
break;
@@ -2317,6 +2321,18 @@ bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
return true;
}
+#ifndef NDEBUG
+static void kgem_commit__check_buffers(struct kgem *kgem)
+{
+ struct kgem_buffer *bo;
+
+ list_for_each_entry(bo, &kgem->active_buffers, base.list)
+ assert(bo->base.exec == NULL);
+}
+#else
+#define kgem_commit__check_buffers(kgem)
+#endif
+
static void kgem_commit(struct kgem *kgem)
{
struct kgem_request *rq = kgem->next_request;
@@ -2350,7 +2366,6 @@ static void kgem_commit(struct kgem *kgem)
if (bo->proxy) {
/* proxies are not used for domain tracking */
- bo->exec = NULL;
__kgem_bo_clear_busy(bo);
}
@@ -2384,6 +2399,8 @@ static void kgem_commit(struct kgem *kgem)
}
kgem->next_request = NULL;
+
+ kgem_commit__check_buffers(kgem);
}
static void kgem_close_list(struct kgem *kgem, struct list *head)
@@ -2413,7 +2430,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
assert(bo->base.io);
assert(bo->base.refcnt >= 1);
- if (!bo->base.exec) {
+ if (bo->base.refcnt > 1 && !bo->base.exec) {
DBG(("%s: skipping unattached handle=%d, used=%d\n",
__FUNCTION__, bo->base.handle, bo->used));
continue;
@@ -2438,8 +2455,10 @@ static void kgem_finish_buffers(struct kgem *kgem)
assert(bo->base.rq);
assert(used >= bo->used);
bo->used = used;
- list_move(&bo->base.list,
- &kgem->active_buffers);
+ if (bo->base.refcnt == 1) {
+ list_move(&bo->base.list,
+ &kgem->active_buffers);
+ }
kgem->need_retire = true;
continue;
}
@@ -2850,7 +2869,7 @@ void _kgem_submit(struct kgem *kgem)
batch_end = kgem_end_batch(kgem);
kgem_sna_flush(kgem);
- DBG(("batch[%d/%d, flags=%x]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d [fenced=%]\n",
+ DBG(("batch[%d/%d, flags=%x]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d [fenced=%d]\n",
kgem->mode, kgem->ring, kgem->batch_flags,
batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, kgem->aperture_fenced));
@@ -2998,9 +3017,8 @@ void _kgem_submit(struct kgem *kgem)
#endif
}
}
-
- kgem_commit(kgem);
}
+ kgem_commit(kgem);
if (kgem->wedged)
kgem_cleanup(kgem);
@@ -5786,9 +5804,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
list_for_each_entry(bo, &kgem->active_buffers, base.list) {
assert(bo->base.io);
assert(bo->base.refcnt >= 1);
+ assert(bo->base.exec == NULL);
assert(bo->mmapped);
assert(bo->mmapped == MMAPPED_GTT || kgem->has_llc || bo->base.snoop);
- assert(bo->base.rq);
if ((bo->write & ~flags) & KGEM_BUFFER_INPLACE && !bo->base.snoop) {
DBG(("%s: skip write %x buffer, need %x\n",
@@ -5806,10 +5824,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
}
if (size <= bytes(&bo->base) &&
- !__kgem_busy(kgem, bo->base.handle)) {
+ (bo->base.rq == NULL ||
+ !__kgem_busy(kgem, bo->base.handle))) {
DBG(("%s: reusing whole buffer? size=%d, total=%d\n",
__FUNCTION__, size, bytes(&bo->base)));
__kgem_bo_clear_busy(&bo->base);
+ kgem_buffer_release(kgem, bo);
switch (bo->mmapped) {
case MMAPPED_CPU:
commit d26036d40e77e8f40c8d08659f95a8a5dfac07ed
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Tue Oct 29 16:20:14 2013 +0000
sna: Always run the retire_buffers during ring_idle
Since we know that we had a request to retire, we know that we may make
progress retiring active buffers.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 875dc53..99a4cf9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2178,7 +2178,7 @@ static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
DBG(("%s: moving %d to flushing\n",
__FUNCTION__, bo->handle));
list_add(&bo->request, &kgem->flushing);
- bo->rq = (void *)kgem;
+ bo->rq = MAKE_REQUEST(kgem, RQ_RING(bo->rq));
continue;
}
@@ -2310,8 +2310,8 @@ bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
DBG(("%s: ring=%d idle (handle=%d)\n",
__FUNCTION__, ring, rq->bo->handle));
- if (kgem_retire__requests_ring(kgem, ring))
- kgem_retire__buffers(kgem);
+ kgem_retire__requests_ring(kgem, ring);
+ kgem_retire__buffers(kgem);
assert(list_is_empty(&kgem->requests[ring]));
return true;
@@ -2435,6 +2435,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
(kgem->has_llc || bo->mmapped == MMAPPED_GTT || bo->base.snoop)) {
DBG(("%s: retaining upload buffer (%d/%d)\n",
__FUNCTION__, bo->used, bytes(&bo->base)));
+ assert(bo->base.rq);
assert(used >= bo->used);
bo->used = used;
list_move(&bo->base.list,
More information about the xorg-commit
mailing list