[PATCH 3/5] drm/amdkfd: Renaming dqm->packets to dqm->dpm
Felix Kuehling
felix.kuehling at amd.com
Fri Jul 16 14:05:35 UTC 2021
Am 2021-07-15 um 9:34 p.m. schrieb Oak Zeng:
> Rename packets to dpm (device packet manager) to
> better reflect the meaning of this variable.
I don't think introducing another new acronym is helpful. Also "dpm" and
"dqm" are visually too similar. Other places use "pm" for packet
manager. If you feel "pm" is too short or too ambiguous, how about
"pmgr" or "packet_mgr"?
Regards,
Felix
>
> Signed-off-by: Oak Zeng <Oak.Zeng at amd.com>
> ---
> drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +-
> .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 26 +++++++++++-----------
> .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 2 +-
> drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +-
> 4 files changed, 16 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> index 9e4a05e..c51402b 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> @@ -1390,7 +1390,7 @@ int kfd_debugfs_hang_hws(struct kfd_dev *dev)
> return -EINVAL;
> }
>
> - r = pm_debugfs_hang_hws(&dev->dqm->packets);
> + r = pm_debugfs_hang_hws(&dev->dqm->dpm);
> if (!r)
> r = dqm_debugfs_execute_queues(dev->dqm);
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 16a1713..f2984d3 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -260,7 +260,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
> static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
> struct qcm_process_device *qpd)
> {
> - const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
> + const struct packet_manager_funcs *pmf = qpd->dqm->dpm.pmf;
> int ret;
>
> if (!qpd->ib_kaddr)
> @@ -1000,7 +1000,7 @@ static int start_nocpsch(struct device_queue_manager *dqm)
> init_interrupts(dqm);
>
> if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
> - return pm_init(&dqm->packets, dqm);
> + return pm_init(&dqm->dpm, dqm);
> dqm->sched_running = true;
>
> return 0;
> @@ -1009,7 +1009,7 @@ static int start_nocpsch(struct device_queue_manager *dqm)
> static int stop_nocpsch(struct device_queue_manager *dqm)
> {
> if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
> - pm_uninit(&dqm->packets, false);
> + pm_uninit(&dqm->dpm, false);
> dqm->sched_running = false;
>
> return 0;
> @@ -1124,7 +1124,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
> "queue mask: 0x%8llX\n",
> res.vmid_mask, res.queue_mask);
>
> - return pm_send_set_resources(&dqm->packets, &res);
> + return pm_send_set_resources(&dqm->dpm, &res);
> }
>
> static int initialize_cpsch(struct device_queue_manager *dqm)
> @@ -1164,7 +1164,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
>
> retval = 0;
>
> - retval = pm_init(&dqm->packets, dqm);
> + retval = pm_init(&dqm->dpm, dqm);
> if (retval)
> goto fail_packet_manager_init;
>
> @@ -1197,7 +1197,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
> return 0;
> fail_allocate_vidmem:
> fail_set_sched_resources:
> - pm_uninit(&dqm->packets, false);
> + pm_uninit(&dqm->dpm, false);
> fail_packet_manager_init:
> return retval;
> }
> @@ -1213,10 +1213,10 @@ static int stop_cpsch(struct device_queue_manager *dqm)
> dqm->sched_running = false;
> dqm_unlock(dqm);
>
> - pm_release_ib(&dqm->packets);
> + pm_release_ib(&dqm->dpm);
>
> kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
> - pm_uninit(&dqm->packets, hanging);
> + pm_uninit(&dqm->dpm, hanging);
>
> return 0;
> }
> @@ -1390,7 +1390,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
> if (dqm->active_runlist)
> return 0;
>
> - retval = pm_send_runlist(&dqm->packets, &dqm->queues);
> + retval = pm_send_runlist(&dqm->dpm, &dqm->queues);
> pr_debug("%s sent runlist\n", __func__);
> if (retval) {
> pr_err("failed to execute runlist\n");
> @@ -1416,13 +1416,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
> if (!dqm->active_runlist)
> return retval;
>
> - retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
> + retval = pm_send_unmap_queue(&dqm->dpm, KFD_QUEUE_TYPE_COMPUTE,
> filter, filter_param, false, 0);
> if (retval)
> return retval;
>
> *dqm->fence_addr = KFD_FENCE_INIT;
> - pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
> + pm_send_query_status(&dqm->dpm, dqm->fence_gpu_addr,
> KFD_FENCE_COMPLETED);
> /* should be timed out */
> retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
> @@ -1448,14 +1448,14 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
> * check those fields
> */
> mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
> - if (mqd_mgr->read_doorbell_id(dqm->packets.priv_queue->queue->mqd)) {
> + if (mqd_mgr->read_doorbell_id(dqm->dpm.priv_queue->queue->mqd)) {
> pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
> while (halt_if_hws_hang)
> schedule();
> return -ETIME;
> }
>
> - pm_release_ib(&dqm->packets);
> + pm_release_ib(&dqm->dpm);
> dqm->active_runlist = false;
>
> return retval;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> index 71e2fde..14479e8 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> @@ -169,7 +169,7 @@ struct device_queue_manager {
> struct device_queue_manager_asic_ops asic_ops;
>
> struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
> - struct packet_manager packets;
> + struct packet_manager dpm;
> struct kfd_dev *dev;
> struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
> struct list_head queues;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> index b1ce072..748e82b 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> @@ -1630,7 +1630,7 @@ int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
> }
>
> seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
> - r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
> + r = pm_debugfs_runlist(m, &dev->gpu->dqm->dpm);
> if (r)
> break;
> }
More information about the amd-gfx
mailing list