[Nouveau] [PATCH 20/30] drm/dp: Pass drm_dp_aux to drm_dp*_link_train_channel_eq_delay()
Lyude Paul
lyude at redhat.com
Fri Feb 19 21:53:16 UTC 2021
Pass the AUX channel down so that we can start using drm_dbg_*() in
drm_dp_link_train_channel_eq_delay() and
drm_dp_lttpr_link_train_channel_eq_delay().
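
For reference, a rough sketch (illustrative only, not part of this patch) of
how the debug message in __drm_dp_link_train_channel_eq_delay() could be
converted once the aux pointer is plumbed through; it assumes aux->drm_dev
and aux->name are available from earlier patches in this series:

static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
						  unsigned long rd_interval)
{
	if (rd_interval > 4)
		drm_dbg_kms(aux->drm_dev,
			    "%s: AUX interval %lu, out of range (max 4)\n",
			    aux->name, rd_interval);

	/* Delay handling itself is unchanged by this series. */
	if (rd_interval == 0)
		rd_interval = 400;
	else
		rd_interval *= 4 * USEC_PER_MSEC;

	usleep_range(rd_interval, rd_interval * 2);
}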
Signed-off-by: Lyude Paul <lyude at redhat.com>
---
 drivers/gpu/drm/amd/amdgpu/atombios_dp.c           |  2 +-
 drivers/gpu/drm/drm_dp_helper.c                    | 14 +++++++++-----
 .../gpu/drm/i915/display/intel_dp_link_training.c  |  4 ++--
 drivers/gpu/drm/msm/dp/dp_ctrl.c                   |  4 ++--
 drivers/gpu/drm/msm/edp/edp_ctrl.c                 |  4 ++--
 drivers/gpu/drm/radeon/atombios_dp.c               |  2 +-
 drivers/gpu/drm/xlnx/zynqmp_dp.c                   |  2 +-
 include/drm/drm_dp_helper.h                        |  6 ++++--
 8 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 4468f9d6b4dd..59ce6f620fdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -676,7 +676,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
dp_info->tries = 0;
channel_eq = false;
while (1) {
- drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ce08eb3bface..a9316c1ecb52 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -151,7 +151,8 @@ void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-static void __drm_dp_link_train_channel_eq_delay(unsigned long rd_interval)
+static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ unsigned long rd_interval)
{
if (rd_interval > 4)
DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
@@ -165,9 +166,11 @@ static void __drm_dp_link_train_channel_eq_delay(unsigned long rd_interval)
usleep_range(rd_interval, rd_interval * 2);
}
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
- __drm_dp_link_train_channel_eq_delay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ __drm_dp_link_train_channel_eq_delay(aux,
+ dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
@@ -183,13 +186,14 @@ static u8 dp_lttpr_phy_cap(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE], int r)
return phy_cap[r - DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1];
}
-void drm_dp_lttpr_link_train_channel_eq_delay(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE])
+void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE])
{
u8 interval = dp_lttpr_phy_cap(phy_cap,
DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) &
DP_TRAINING_AUX_RD_MASK;
- __drm_dp_link_train_channel_eq_delay(interval);
+ __drm_dp_link_train_channel_eq_delay(aux, interval);
}
EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 222073d46bdb..fe8b5a5d9d1a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -593,11 +593,11 @@ intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX) {
- drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
} else {
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
- drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
+ drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
}
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 2501a6b326a3..33df288dd4eb 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1184,7 +1184,7 @@ static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
{
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
- drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
}
static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
@@ -1215,7 +1215,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
for (tries = 0; tries <= maximum_retries; tries++) {
- drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 6501598448b4..4fb397ee7c84 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -665,7 +665,7 @@ static int edp_start_link_train_2(struct edp_ctrl *ctrl)
return ret;
while (1) {
- drm_dp_link_train_channel_eq_delay(ctrl->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
if (rlen < DP_LINK_STATUS_SIZE) {
@@ -743,7 +743,7 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
ret = edp_train_pattern_set_write(ctrl, 0);
- drm_dp_link_train_channel_eq_delay(ctrl->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
return ret;
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 299b9d8da376..4c1e551d9714 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -743,7 +743,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
channel_eq = false;
while (1) {
- drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 5cc295d8ba9f..f6f2293db18d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -778,7 +778,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
if (ret)
return ret;
- drm_dp_link_train_channel_eq_delay(dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
if (ret < 0)
return ret;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index e4681665231e..2151aeb6c279 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1479,8 +1479,10 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ
void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_lttpr_link_train_clock_recovery_delay(void);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_lttpr_link_train_channel_eq_delay(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
--
2.29.2