scsi: ufs: host: mediatek: Correct clock scaling with PM QoS flow

Correct the clock scaling and PM QoS flow across suspend and resume:
if the host is in clock scale-up mode, release the PM QoS request
during suspend and re-apply it after resume. This avoids holding the
CPU latency vote while the device is suspended and restores
performance once it is running again.

Signed-off-by: Peter Wang <peter.wang@mediatek.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Chun-Hung Wu <chun-hung.wu@mediatek.com>
Link: https://patch.msgid.link/20250924094527.2992256-2-peter.wang@mediatek.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
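
For orientation before the diff, here is a minimal sketch of the
suspend/resume pairing the patch introduces. The example_host struct
and the example_* hooks are hypothetical stand-ins for struct
ufs_mtk_host and the mediatek PM callbacks; only the PM QoS handling
is shown, and the clk_scale_up flag mirrors the field used in the
hunks below.

    /* Sketch only: not the driver code; see the ufs-mediatek hunks below. */
    #include <ufs/ufshcd.h>

    /* Hypothetical stand-in for struct ufs_mtk_host. */
    struct example_host {
            bool clk_scale_up;      /* true while clocks are scaled up */
    };

    static int example_suspend(struct ufs_hba *hba)
    {
            struct example_host *host = ufshcd_get_variant(hba);

            /* Drop the CPU-latency vote so CPUs can idle deeply while suspended. */
            if (ufshcd_is_clkscaling_supported(hba) && host->clk_scale_up)
                    ufshcd_pm_qos_update(hba, false);

            return 0;
    }

    static int example_resume(struct ufs_hba *hba)
    {
            struct example_host *host = ufshcd_get_variant(hba);

            /* Re-request low latency now that the host runs scaled up again. */
            if (ufshcd_is_clkscaling_supported(hba) && host->clk_scale_up)
                    ufshcd_pm_qos_update(hba, true);

            return 0;
    }

Exporting ufshcd_pm_qos_update() in the first hunk below is what makes
these calls possible from a vendor module.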

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1076,7 +1076,7 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
  * @hba: per adapter instance
  * @on: If True, vote for perf PM QoS mode otherwise power save mode
  */
-static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
+void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
 {
         guard(mutex)(&hba->pm_qos_mutex);
@@ -1085,6 +1085,7 @@ static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
         cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
 }
+EXPORT_SYMBOL_GPL(ufshcd_pm_qos_update);
 
 /**
  * ufshcd_set_clk_freq - set UFS controller clock frequencies

diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1744,6 +1744,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 {
         int err;
         struct arm_smccc_res res;
+        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
         if (status == PRE_CHANGE) {
                 if (ufshcd_is_auto_hibern8_supported(hba))
@@ -1773,6 +1774,10 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
         ufs_mtk_sram_pwr_ctrl(false, res);
 
+        /* Release pm_qos if in scale-up mode during suspend */
+        if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up))
+                ufshcd_pm_qos_update(hba, false);
+
         return 0;
 fail:
         /*
@@ -1788,6 +1793,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
         int err;
         struct arm_smccc_res res;
+        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
         if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
                 ufs_mtk_dev_vreg_set_lpm(hba, false);
@@ -1798,6 +1804,10 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
         if (err)
                 goto fail;
 
+        /* Request pm_qos if in scale-up mode after resume */
+        if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up))
+                ufshcd_pm_qos_update(hba, true);
+
         if (ufshcd_is_link_hibern8(hba)) {
                 err = ufs_mtk_link_set_hpm(hba);
                 if (err)

diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1487,5 +1487,6 @@ int ufshcd_write_ee_control(struct ufs_hba *hba);
 int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
                              const u16 *other_mask, u16 set, u16 clr);
 void ufshcd_force_error_recovery(struct ufs_hba *hba);
+void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on);
 
 #endif /* End of Header */