[PATCH 15/27] drm/radeon: add general purpose fence signaled callback
j.glisse at gmail.com
Tue May 1 10:19:25 PDT 2012
From: Christian König <deathsimple at vodafone.de>
Should be used to free resources that are protected by a fence.
Signed-off-by: Christian König <deathsimple at vodafone.de>
Reviewed-by: Jerome Glisse <jglisse at redhat.com>
---
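Illustrative usage (not part of this patch; free_tmp_on_signal and tmp are
hypothetical names): after a fence has been emitted, a caller can attach a
callback that frees a temporary buffer once the fence signals, falling back
to an immediate free if the callback cannot be installed.

    /* hypothetical callback: free a kmalloc'ed buffer once the GPU is done */
    static void free_tmp_on_signal(struct radeon_device *rdev, void *data)
    {
            kfree(data);
    }

    ...
    /* after radeon_fence_emit(rdev, fence) */
    if (!radeon_fence_set_signal_callback(fence, free_tmp_on_signal, tmp)) {
            /* fence not emitted or already signaled: free right away */
            kfree(tmp);
    }

Note that radeon_fence_set_signal_callback() only installs the callback for a
fence that has been emitted and has not yet signaled, hence the fallback path.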
drivers/gpu/drm/radeon/radeon.h | 8 ++++-
drivers/gpu/drm/radeon/radeon_fence.c | 50 +++++++++++++++++++++++++++++---
2 files changed, 52 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fbbbc5a..11cceb3 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -259,7 +259,6 @@ struct radeon_fence_driver {
wait_queue_head_t queue;
struct list_head created;
struct list_head emitted;
- struct list_head signaled;
bool initialized;
};
@@ -274,6 +273,10 @@ struct radeon_fence {
/* RB, DMA, etc. */
int ring;
struct radeon_semaphore *semaphore;
+
+ /* called when fence is signaled */
+ void (*signal_callback)(struct radeon_device *rdev, void *data);
+ void *callback_data;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -289,6 +292,9 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_set_signal_callback(struct radeon_fence *fence,
+ void (*callback)(struct radeon_device *, void *),
+ void *data);
/*
* Tiling registers
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 2d13843..c58660a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -83,7 +83,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring,
+ struct list_head *signaled)
{
struct radeon_fence *fence;
struct list_head *i, *n;
@@ -110,7 +111,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
i = n;
do {
n = i->prev;
- list_move_tail(i, &rdev->fence_drv[ring].signaled);
+ list_move_tail(i, signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
@@ -120,6 +121,20 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
return wake;
}
+static void radeon_fence_process_signaled(struct radeon_device *rdev, struct list_head *signaled)
+{
+ struct radeon_fence *fence;
+ struct list_head *i, *n;
+
+ list_for_each_safe(i, n, signaled) {
+ fence = list_entry(i, struct radeon_fence, list);
+ list_del_init(&fence->list);
+ if (fence->signal_callback) {
+ fence->signal_callback(rdev, fence->callback_data);
+ }
+ }
+}
+
static void radeon_fence_destroy(struct kref *kref)
{
unsigned long irq_flags;
@@ -152,6 +167,8 @@ int radeon_fence_create(struct radeon_device *rdev,
(*fence)->seq = 0;
(*fence)->ring = ring;
(*fence)->semaphore = NULL;
+ (*fence)->signal_callback = NULL;
+ (*fence)->callback_data = NULL;
INIT_LIST_HEAD(&(*fence)->list);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
@@ -164,6 +181,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
bool signaled = false;
+ LIST_HEAD(siglist);
if (!fence)
return true;
@@ -179,10 +197,12 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
signaled = true;
}
if (!signaled) {
- radeon_fence_poll_locked(fence->rdev, fence->ring);
+ radeon_fence_poll_locked(fence->rdev, fence->ring, &siglist);
signaled = fence->signaled;
}
write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+ radeon_fence_process_signaled(fence->rdev, &siglist);
+
return signaled;
}
@@ -341,10 +361,12 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
bool wake;
+ LIST_HEAD(signaled);
write_lock_irqsave(&rdev->fence_lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev, ring);
+ wake = radeon_fence_poll_locked(rdev, ring, &signaled);
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ radeon_fence_process_signaled(rdev, &signaled);
if (wake) {
wake_up_all(&rdev->fence_drv[ring].queue);
}
@@ -373,6 +395,25 @@ int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
return not_processed;
}
+bool radeon_fence_set_signal_callback(struct radeon_fence *fence,
+ void (*callback)(struct radeon_device *, void *),
+ void *data)
+{
+ struct radeon_device *rdev = fence->rdev;
+ unsigned long irq_flags;
+ bool isset = false;
+
+ /* a read lock is sufficient because this should be called only once */
+ read_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (fence->emitted && !fence->signaled) {
+ fence->signal_callback = callback;
+ fence->callback_data = data;
+ isset = true;
+ }
+ read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return isset;
+}
+
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
@@ -413,7 +454,6 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
atomic_set(&rdev->fence_drv[ring].seq, 0);
INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
init_waitqueue_head(&rdev->fence_drv[ring].queue);
rdev->fence_drv[ring].initialized = false;
}
--
1.7.7.6