[PATCH 2/3] drm/radeon: rip out the ib pool v4
j.glisse at gmail.com
Wed May 2 08:59:15 PDT 2012
From: Christian König <deathsimple at vodafone.de>
The IB pool isn't necessary anymore, and the suballocator
seems to perform even better.
v2: ignore ERESTARTSYS in error reporting,
split fence changes into separate patch,
use try_free SA callback to avoid lockups
v3: rebase on top of the new SA manager patch
v4: rebase on top of latest patchset
Signed-off-by: Christian König <deathsimple at vodafone.de>
Signed-off-by: Jerome Glisse <jglisse at redhat.com>
---
drivers/gpu/drm/radeon/evergreen.c | 2 +-
drivers/gpu/drm/radeon/ni.c | 2 +-
drivers/gpu/drm/radeon/r100.c | 2 +-
drivers/gpu/drm/radeon/r600.c | 6 +-
drivers/gpu/drm/radeon/r600_blit_kms.c | 2 +-
drivers/gpu/drm/radeon/radeon.h | 18 +--
drivers/gpu/drm/radeon/radeon_cs.c | 8 +-
drivers/gpu/drm/radeon/radeon_device.c | 1 -
drivers/gpu/drm/radeon/radeon_gart.c | 12 +-
drivers/gpu/drm/radeon/radeon_ring.c | 236 ++++++++---------------------
drivers/gpu/drm/radeon/radeon_semaphore.c | 7 +-
drivers/gpu/drm/radeon/si.c | 6 +-
12 files changed, 91 insertions(+), 211 deletions(-)
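For readers skimming the diff, the heart of the change is the new radeon_ib_get()
path in radeon_ring.c. The sketch below is condensed from the hunks further down
(error messages abbreviated, comments are mine); the old per-pool mutex, retry
loop and radeon_ib_try_free() are gone, and every IB is now carved out of the
shared SA manager in rdev->ring_tmp_bo:

    int radeon_ib_get(struct radeon_device *rdev, int ring,
                      struct radeon_ib **ib, unsigned size)
    {
            struct radeon_fence *fence;
            int r;

            r = radeon_fence_create(rdev, &fence, ring);
            if (r)
                    return r;

            *ib = kmalloc(sizeof(struct radeon_ib), GFP_KERNEL);
            if (*ib == NULL) {
                    radeon_fence_unref(&fence);
                    return -ENOMEM;
            }

            /* Allocate from the shared SA manager. Passing the fence lets the
             * suballocator reclaim this chunk once the IB has executed; the
             * 'true' requests a blocking allocation (see the SA manager patch
             * in this series). ERESTARTSYS is not reported as an error. */
            r = radeon_sa_bo_new(rdev, &(*ib)->sa_bo, &rdev->ring_tmp_bo,
                                 size, 256, true, fence);
            if (r) {
                    if (r != -ERESTARTSYS)
                            dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
                    kfree(*ib);
                    *ib = NULL;
                    radeon_fence_unref(&fence);
                    return r;
            }

            /* CPU and GPU addresses are derived from the SA offset. */
            (*ib)->ptr = rdev->ring_tmp_bo.cpu_ptr + ((*ib)->sa_bo->soffset >> 2);
            (*ib)->gpu_addr = rdev->ring_tmp_bo.gpu_addr + (*ib)->sa_bo->soffset;
            (*ib)->vm_id = 0;
            (*ib)->is_const_ib = false;
            radeon_fence_unref(&fence);
            return 0;
    }

The matching radeon_ib_free() simply frees the sa_bo and kfree()s the structure,
since the fence attached to the sa_bo now drives reclamation.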
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0e860c6..465026a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1366,7 +1366,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->sa_bo->fence->ring];
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9cd2657..e55ee7b 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1127,7 +1127,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->sa_bo->fence->ring];
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ee0103c..d47ffd5 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3711,7 +3711,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ib_free(rdev, &ib);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib->sa_bo->fence, false);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1cadf97..9bac947 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2672,7 +2672,7 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->sa_bo->fence->ring];
/* FIXME: implement */
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -2716,7 +2716,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib->sa_bo->fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
@@ -2728,7 +2728,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->sa_bo->fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index db38f58..9580b06 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -638,7 +638,7 @@ static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
static void r600_vb_ib_put(struct radeon_device *rdev)
{
- radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
+ radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->sa_bo->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index acbb642..2e83a66 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -635,27 +635,13 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
struct radeon_ib {
struct radeon_sa_bo *sa_bo;
- unsigned idx;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
- struct radeon_fence *fence;
unsigned vm_id;
bool is_const_ib;
};
-/*
- * locking -
- * mutex protects scheduled_ibs, ready, alloc_bm
- */
-struct radeon_ib_pool {
- struct radeon_mutex mutex;
- struct radeon_sa_manager sa_manager;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
-};
-
struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
@@ -798,7 +784,6 @@ struct si_rlc {
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib **ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
@@ -1523,7 +1508,8 @@ struct radeon_device {
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
struct radeon_semaphore_driver semaphore_drv;
struct radeon_ring ring[RADEON_NUM_RINGS];
- struct radeon_ib_pool ib_pool;
+ bool ib_pool_ready;
+ struct radeon_sa_manager ring_tmp_bo;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 3989015..0c0c9d1 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -138,12 +138,12 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
return 0;
}
- r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+ r = radeon_semaphore_create(p->rdev, &p->ib->sa_bo->fence->semaphore);
if (r) {
return r;
}
- return radeon_semaphore_sync_rings(p->rdev, p->ib->fence->semaphore,
+ return radeon_semaphore_sync_rings(p->rdev, p->ib->sa_bo->fence->semaphore,
sync_to_ring, p->ring);
}
@@ -297,7 +297,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
if (!error && parser->ib)
ttm_eu_fence_buffer_objects(&parser->validated,
- parser->ib->fence);
+ parser->ib->sa_bo->fence);
else
ttm_eu_backoff_reservation(&parser->validated);
@@ -483,7 +483,7 @@ out:
if (vm->fence) {
radeon_fence_unref(&vm->fence);
}
- vm->fence = radeon_fence_ref(parser->ib->fence);
+ vm->fence = radeon_fence_ref(parser->ib->sa_bo->fence);
}
mutex_unlock(&fpriv->vm.mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 9d6a85e..e316144 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -723,7 +723,6 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
- radeon_mutex_init(&rdev->ib_pool.mutex);
for (i = 0; i < RADEON_NUM_RINGS; ++i)
mutex_init(&rdev->ring[i].mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index cc5036c..cab012f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -437,8 +437,8 @@ retry_id:
rdev->vm_manager.use_bitmap |= 1 << id;
vm->id = id;
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
- &rdev->ib_pool.sa_manager.bo->tbo.mem);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
}
/* object have to be reserved */
@@ -636,7 +636,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
- r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
return r;
}
@@ -653,12 +653,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_mutex_unlock(&rdev->cs_mutex);
/* remove all bo */
- r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
- radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
if (!list_empty(&vm->va)) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index a7db890..b279a61 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
+ * Christian König
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -33,8 +34,10 @@
#include "radeon.h"
#include "atom.h"
-int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+/*
+ * IB.
+ */
+int radeon_debugfs_sa_init(struct radeon_device *rdev);
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
@@ -61,47 +64,11 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
- if (ring->count_dw <= 0) {
- DRM_ERROR("radeon: writting more dword to ring than expected !\n");
- }
-#endif
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
-}
-
-/*
- * IB.
- */
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- bool done = false;
-
- /* only free ib which have been emited */
- if (ib->fence && ib->fence->emitted) {
- if (radeon_fence_signaled(ib->fence)) {
- radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
- done = true;
- }
- }
- return done;
-}
-
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib **ib, unsigned size)
{
struct radeon_fence *fence;
- unsigned cretry = 0;
- int r = 0, i, idx;
-
- *ib = NULL;
- /* align size on 256 bytes */
- size = ALIGN(size, 256);
+ int r;
r = radeon_fence_create(rdev, &fence, ring);
if (r) {
@@ -109,60 +76,33 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
return r;
}
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- idx = rdev->ib_pool.head_id;
-retry:
- if (cretry > 5) {
- dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ *ib = kmalloc(sizeof(struct radeon_ib), GFP_KERNEL);
+ if (*ib == NULL) {
radeon_fence_unref(&fence);
return -ENOMEM;
}
- cretry++;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
- if (rdev->ib_pool.ibs[idx].fence == NULL) {
- r = radeon_sa_bo_new(rdev, &rdev->ib_pool.ibs[idx].sa_bo,
- &rdev->ib_pool.sa_manager,
- size, 256, false, NULL);
- if (!r) {
- *ib = &rdev->ib_pool.ibs[idx];
- (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- (*ib)->ptr += ((*ib)->sa_bo->soffset >> 2);
- (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- (*ib)->gpu_addr += (*ib)->sa_bo->soffset;
- (*ib)->fence = fence;
- (*ib)->vm_id = 0;
- (*ib)->is_const_ib = false;
- /* ib are most likely to be allocated in a ring fashion
- * thus rdev->ib_pool.head_id should be the id of the
- * oldest ib
- */
- rdev->ib_pool.head_id = (1 + idx);
- rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- return 0;
- }
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- }
- /* this should be rare event, ie all ib scheduled none signaled yet.
- */
- r = -ENOMEM;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
- r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
- if (!r) {
- goto retry;
- }
- /* an error happened */
- break;
+
+ r = radeon_sa_bo_new(rdev, &(*ib)->sa_bo, &rdev->ring_tmp_bo,
+ size, 256, true, fence);
+ if (r) {
+ if (r != -ERESTARTSYS) {
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
}
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ kfree(*ib);
+ *ib = NULL;
+ radeon_fence_unref(&fence);
+ return r;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+
+ (*ib)->ptr = rdev->ring_tmp_bo.cpu_ptr;
+ (*ib)->ptr += ((*ib)->sa_bo->soffset >> 2);
+ (*ib)->gpu_addr = rdev->ring_tmp_bo.gpu_addr;
+ (*ib)->gpu_addr += (*ib)->sa_bo->soffset;
+ (*ib)->vm_id = 0;
+ (*ib)->is_const_ib = false;
radeon_fence_unref(&fence);
- return r;
+
+ return 0;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -173,22 +113,19 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (tmp->fence && !tmp->fence->emitted) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
- radeon_fence_unref(&tmp->fence);
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+
+ radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ kfree(tmp);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->sa_bo->fence->ring];
int r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
- DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+ DRM_ERROR("radeon: couldn't schedule IB.\n");
return -EINVAL;
}
@@ -198,74 +135,53 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
- radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
- radeon_fence_emit(rdev, ib->fence);
+ radeon_ring_ib_execute(rdev, ib->sa_bo->fence->ring, ib);
+ radeon_fence_emit(rdev, ib->sa_bo->fence);
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- struct radeon_sa_manager tmp;
- int i, r;
+ int r;
+
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
- r = radeon_sa_bo_manager_init(rdev, &tmp,
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
}
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_sa_bo_manager_fini(rdev, &tmp);
- return 0;
+ if (radeon_debugfs_sa_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for SA !\n");
}
- rdev->ib_pool.sa_manager = tmp;
- INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- rdev->ib_pool.ibs[i].fence = NULL;
- rdev->ib_pool.ibs[i].idx = i;
- rdev->ib_pool.ibs[i].length_dw = 0;
- rdev->ib_pool.ibs[i].sa_bo = NULL;
- }
- rdev->ib_pool.head_id = 0;
- rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
+ rdev->ib_pool_ready = true;
- if (radeon_debugfs_ib_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for IB !\n");
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- unsigned i;
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
- radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
- }
- radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
- rdev->ib_pool.ready = false;
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_pool_start(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_ring_tests(struct radeon_device *rdev)
@@ -301,6 +217,21 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
/*
* Ring.
*/
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (ring->count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
+#endif
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* r1xx-r5xx only has CP ring */
@@ -578,37 +509,13 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
- unsigned i;
-
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
- }
- return 0;
-}
-
-static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
-static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
-
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- radeon_sa_bo_dump_debug_info(&rdev->ib_pool.sa_manager, m);
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
return 0;
}
@@ -643,21 +550,10 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
return 0;
}
-int radeon_debugfs_ib_init(struct radeon_device *rdev)
+int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
- radeon_debugfs_ib_idx[i] = i;
- radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
- radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
- radeon_debugfs_ib_list[i].driver_features = 0;
- radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
- }
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
- RADEON_IB_POOL_SIZE);
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 822723e..40a207a 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -54,9 +54,9 @@ static int radeon_semaphore_add_bo(struct radeon_device *rdev)
kfree(bo);
return r;
}
- gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ gpu_addr = rdev->ring_tmp_bo.gpu_addr;
gpu_addr += bo->ib->sa_bo->soffset;
- cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ cpu_ptr = rdev->ring_tmp_bo.cpu_ptr;
cpu_ptr += (bo->ib->sa_bo->soffset >> 2);
for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
bo->semaphores[i].gpu_addr = gpu_addr;
@@ -75,8 +75,7 @@ static int radeon_semaphore_add_bo(struct radeon_device *rdev)
static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
struct radeon_semaphore_bo *bo)
{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
- radeon_fence_unref(&bo->ib->fence);
+ radeon_ib_free(rdev, &bo->ib);
list_del(&bo->list);
kfree(bo);
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f8ee066..de37f09 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1915,7 +1915,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
*/
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->sa_bo->fence->ring];
u32 header;
if (ib->is_const_ib)
@@ -2854,7 +2854,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
- switch (ib->fence->ring) {
+ switch (ib->sa_bo->fence->ring) {
case RADEON_RING_TYPE_GFX_INDEX:
ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
break;
@@ -2863,7 +2863,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
break;
default:
- dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring);
+ dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->sa_bo->fence->ring);
ret = -EINVAL;
break;
}
--
1.7.7.6