// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"

#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"

static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
	{ .wmanufacturerid = UFS_ANY_VENDOR,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
		   UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "H9HQ21AFAMZDAR",
	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
	{}
};

static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};

/*
 * Details of UIC Errors
 */
static const char *const ufs_uic_err_str[] = {
	"PHY Adapter Layer",
	"Data Link Layer",
	"Network Link Layer",
	"Transport Link Layer",
	"DME"
};

static const char *const ufs_uic_pa_err_str[] = {
	"PHY error on Lane 0",
	"PHY error on Lane 1",
	"PHY error on Lane 2",
	"PHY error on Lane 3",
	"Generic PHY Adapter Error. This should be the LINERESET indication"
};

static const char *const ufs_uic_dl_err_str[] = {
	"NAC_RECEIVED",
	"TCx_REPLAY_TIMER_EXPIRED",
	"AFCx_REQUEST_TIMER_EXPIRED",
	"FCx_PROTECTION_TIMER_EXPIRED",
	"CRC_ERROR",
	"RX_BUFFER_OVERFLOW",
	"MAX_FRAME_LENGTH_EXCEEDED",
	"WRONG_SEQUENCE_NUMBER",
	"AFC_FRAME_SYNTAX_ERROR",
	"NAC_FRAME_SYNTAX_ERROR",
	"EOF_SYNTAX_ERROR",
	"FRAME_SYNTAX_ERROR",
	"BAD_CTRL_SYMBOL_TYPE",
	"PA_INIT_ERROR",
	"PA_ERROR_IND_RECEIVED",
	"PA_INIT"
};

static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}

static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}

static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}

static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}

static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	u32 tmp;

	if (enable) {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp |
		      (1 << RX_SYMBOL_CLK_GATE_EN) |
		      (1 << SYS_CLK_GATE_EN) |
		      (1 << TX_CLK_GATE_EN);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	} else {
		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN));
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

		ufshcd_dme_get(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
		ufshcd_dme_set(hba,
			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
	}
}

static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	ufs_mtk_crypto_ctrl(res, 1);
	if (res.a0) {
		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
			 __func__, res.a0);
		hba->caps &= ~UFSHCD_CAP_CRYPTO;
	}
}
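
/*
 * Reset control helpers: assert all three host-side resets (HCI, crypto,
 * UniPro), hold them briefly, then release them in reverse order so the
 * controller comes back up in a clean state.
 */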
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	reset_control_assert(host->hci_reset);
	reset_control_assert(host->crypto_reset);
	reset_control_assert(host->unipro_reset);

	usleep_range(100, 110);

	reset_control_deassert(host->unipro_reset);
	reset_control_deassert(host->crypto_reset);
	reset_control_deassert(host->hci_reset);
}

static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
				       struct reset_control **rc,
				       char *str)
{
	*rc = devm_reset_control_get(hba->dev, str);
	if (IS_ERR(*rc)) {
		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
			 str, PTR_ERR(*rc));
		*rc = NULL;
	}
}

static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ufs_mtk_init_reset_control(hba, &host->hci_reset,
				   "hci_rst");
	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
				   "unipro_rst");
	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
				   "crypto_rst");
}

static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (status == PRE_CHANGE) {
		if (host->unipro_lpm) {
			hba->vps->hba_enable_delay_us = 0;
		} else {
			hba->vps->hba_enable_delay_us = 600;
			ufs_mtk_host_reset(hba);
		}

		if (hba->caps & UFSHCD_CAP_CRYPTO)
			ufs_mtk_crypto_enable(hba);

		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
			ufshcd_writel(hba, 0,
				      REG_AUTO_HIBERNATE_IDLE_TIMER);
			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
			hba->ahit = 0;
		}

		/*
		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
		 * to prevent host hang issue
		 */
		ufshcd_writel(hba,
			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
			      REG_UFS_XOUFS_CTRL);
	}

	return 0;
}
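
/*
 * Bind the first M-PHY described in the devicetree. A missing phy
 * (-ENODEV) is tolerated because not every platform needs dedicated
 * M-PHY control; -EPROBE_DEFER is propagated if the phy driver has
 * not probed yet.
 */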
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device *dev = hba->dev;
	struct device_node *np = dev->of_node;
	int err = 0;

	host->mphy = devm_of_phy_get_by_index(dev, np, 0);

	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_info(dev,
			 "%s: required phy hasn't probed yet. err = %d\n",
			 __func__, err);
	} else if (IS_ERR(host->mphy)) {
		err = PTR_ERR(host->mphy);
		if (err != -ENODEV) {
			dev_info(dev, "%s: PHY get failed %d\n", __func__,
				 err);
		}
	}

	if (err)
		host->mphy = NULL;
	/*
	 * Allow unbound mphy because not every platform needs specific
	 * mphy control.
	 */
	if (err == -ENODEV)
		err = 0;

	return err;
}
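
/*
 * Reference-clock control: request or release the device reference clock
 * through REG_UFS_REFCLK_CTRL and poll until the ack bit matches the
 * request bit, giving up after REFCLK_REQ_TIMEOUT_US.
 */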
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	if (host->ref_clk_enabled == on)
		return 0;

	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);

	if (on) {
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (on)
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);

	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);

	return 0;
}

static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (hba->dev_info.clk_gating_wait_us) {
		host->ref_clk_gating_wait_us =
			hba->dev_info.clk_gating_wait_us;
	} else {
		host->ref_clk_gating_wait_us = gating_us;
	}

	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}

static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
	} else {
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	}
}
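
/*
 * Poll the debug probe register until the host state machine leaves the
 * Hibern8 enter/confirm range and returns to its idle (HCE base) state.
 * The monotonic fast clock is used because this may run in the suspend
 * path, where plain ktime_get() is not usable.
 */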
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
				    unsigned long retry_ms)
{
	u64 timeout, time_checked;
	u32 val, sm;
	bool wait_idle;

	/* cannot use plain ktime_get() in suspend */
	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;

	/* wait a specific time after check base */
	udelay(10);
	wait_idle = false;

	do {
		time_checked = ktime_get_mono_fast_ns();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);

		sm = val & 0x1f;

		/*
		 * if state is in H8 enter and H8 enter confirm
		 * wait until return to idle state.
		 */
		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
			wait_idle = true;
			udelay(50);
			continue;
		} else if (!wait_idle)
			break;

		if (wait_idle && (sm == VS_HCE_BASE))
			break;
	} while (time_checked < timeout);

	if (wait_idle && sm != VS_HCE_BASE)
		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}

static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
				   unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufs_mtk_dbg_sel(hba);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	if (val == state)
		return 0;

	return -ETIMEDOUT;
}

static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct phy *mphy = host->mphy;
	struct arm_smccc_res res;
	int ret = 0;

	if (!mphy || !(on ^ host->mphy_powered_on))
		return 0;

	if (on) {
		if (ufs_mtk_is_va09_supported(hba)) {
			ret = regulator_enable(host->reg_va09);
			if (ret < 0)
				goto out;
			/* wait 200 us to stabilize VA09 */
			usleep_range(200, 210);
			ufs_mtk_va09_pwr_ctrl(res, 1);
		}
		phy_power_on(mphy);
	} else {
		phy_power_off(mphy);
		if (ufs_mtk_is_va09_supported(hba)) {
			ufs_mtk_va09_pwr_ctrl(res, 0);
			ret = regulator_disable(host->reg_va09);
		}
	}
out:
	if (ret) {
		dev_info(hba->dev,
			 "failed to %s va09: %d\n",
			 on ? "enable" : "disable",
			 ret);
	} else {
		host->mphy_powered_on = on;
	}

	return ret;
}
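
/*
 * Crypto boost support: ufs_mtk_boost_crypt() raises the vcore supply and
 * re-parents the crypt clock mux to its high-performance source when
 * boosting, and reverts to the low-power parent (and minimum vcore
 * request) otherwise. The clock helpers below fetch the named clocks
 * from the device.
 */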
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
				struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk))
		err = PTR_ERR(clk);
	else
		*clk_out = clk;

	return err;
}

static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct regulator *reg;
	int volt, ret;

	if (!ufs_mtk_is_boost_crypt_enabled(hba))
		return;

	cfg = host->crypt;
	volt = cfg->vcore_volt;
	reg = cfg->reg_vcore;

	ret = clk_prepare_enable(cfg->clk_crypt_mux);
	if (ret) {
		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
			 ret);
		return;
	}

	if (boost) {
		ret = regulator_set_voltage(reg, volt, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to %d\n", volt);
			goto out;
		}

		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_perf);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_perf\n");
			regulator_set_voltage(reg, 0, INT_MAX);
			goto out;
		}
	} else {
		ret = clk_set_parent(cfg->clk_crypt_mux,
				     cfg->clk_crypt_lp);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set clk_crypt_lp\n");
			goto out;
		}

		ret = regulator_set_voltage(reg, 0, INT_MAX);
		if (ret) {
			dev_info(hba->dev,
				 "failed to set vcore to MIN\n");
		}
	}
out:
	clk_disable_unprepare(cfg->clk_crypt_mux);
}

static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
				 struct clk **clk)
{
	int ret;

	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
	if (ret) {
		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
			 name, ret);
	}

	return ret;
}

static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_crypt_cfg *cfg;
	struct device *dev = hba->dev;
	struct regulator *reg;
	u32 volt;

	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
				   GFP_KERNEL);
	if (!host->crypt)
		goto disable_caps;

	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
	if (IS_ERR(reg)) {
		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
			 PTR_ERR(reg));
		goto disable_caps;
	}

	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
				 &volt)) {
		dev_info(dev, "failed to get boost-crypt-vcore-min");
		goto disable_caps;
	}

	cfg = host->crypt;
	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
				  &cfg->clk_crypt_mux))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
				  &cfg->clk_crypt_lp))
		goto disable_caps;

	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
				  &cfg->clk_crypt_perf))
		goto disable_caps;

	cfg->reg_vcore = reg;
	cfg->vcore_volt = volt;
	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;

disable_caps:
	return;
}

static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	host->reg_va09 = regulator_get(hba->dev, "va09");
	if (IS_ERR(host->reg_va09))
		dev_info(hba->dev, "failed to get va09");
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}

static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct device_node *np = hba->dev->of_node;

	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
		ufs_mtk_init_boost_crypt(hba);

	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
		ufs_mtk_init_va09_pwr_ctrl(hba);

	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
		host->caps |= UFS_MTK_CAP_DISABLE_AH8;

	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
		host->caps |= UFS_MTK_CAP_BROKEN_VCC;

	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;

	dev_info(hba->dev, "caps: 0x%x", host->caps);
}
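
/*
 * Performance scaling helpers: ufs_mtk_scale_perf() couples the crypto
 * engine boost with a CPU latency QoS request. It is driven from the
 * clk-scaling notifier, or from the link power-control path on platforms
 * where clk scaling is not supported.
 */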
static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (!host || !host->pm_qos_init)
		return;

	cpu_latency_qos_update_request(&host->pm_qos_req,
				       boost ? 0 : PM_QOS_DEFAULT_VALUE);
}

static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
	ufs_mtk_boost_crypt(hba, scale_up);
	ufs_mtk_boost_pm_qos(hba, scale_up);
}

static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	if (on) {
		phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
	} else {
		if (!ufshcd_is_clkscaling_supported(hba))
			ufs_mtk_scale_perf(hba, on);
		ufs_mtk_setup_ref_clk(hba, on);
		phy_power_off(host->mphy);
	}
}

/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	bool clk_pwr_off = false;
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (ufshcd_is_link_off(hba)) {
			clk_pwr_off = true;
		} else if (ufshcd_is_link_hibern8(hba) ||
			   (!ufshcd_can_hibern8_during_gating(hba) &&
			   ufshcd_is_auto_hibern8_enabled(hba))) {
			/*
			 * Gate ref-clk and poweroff mphy if link state is in
			 * OFF or Hibern8 by either Auto-Hibern8 or
			 * ufshcd_link_state_transition().
			 */
			ret = ufs_mtk_wait_link_state(hba,
						      VS_LINK_HIBERN8,
						      15);
			if (!ret)
				clk_pwr_off = true;
		}

		if (clk_pwr_off)
			ufs_mtk_pwr_ctrl(hba, false);
	} else if (on && status == POST_CHANGE) {
		ufs_mtk_pwr_ctrl(hba, true);
	}

	return ret;
}
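
/*
 * Derive the host controller generation from PA_LocalVerInfo: UniPro 1.8
 * or later implies a 3.x host, and the reported HCI version is raised to
 * 3.0 on platforms that advertise it incorrectly.
 */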
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret, ver = 0;

	if (host->hw_ver.major)
		return;

	/* Set default (minimum) version anyway */
	host->hw_ver.major = 2;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
	if (!ret) {
		if (ver >= UFS_UNIPRO_VER_1_8) {
			host->hw_ver.major = 3;
			/*
			 * Fix HCI version for some platforms with
			 * incorrect version
			 */
			if (hba->ufs_version < ufshci_version(3, 0))
				hba->ufs_version = ufshci_version(3, 0);
		}
	}
}

static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
	return hba->ufs_version;
}

/**
 * ufs_mtk_init_clocks - Init mtk driver private clocks
 *
 * @hba: per adapter instance
 */
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki, *clki_tmp;

	/*
	 * Find private clocks and store them in struct ufs_mtk_clk.
	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from list to avoid
	 * being switched on/off in clock gating.
	 */
	list_for_each_entry_safe(clki, clki_tmp, head, list) {
		if (!strcmp(clki->name, "ufs_sel")) {
			host->mclk.ufs_sel_clki = clki;
		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
			host->mclk.ufs_sel_max_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
			host->mclk.ufs_sel_min_clki = clki;
			clk_disable_unprepare(clki->clk);
			list_del(&clki->list);
		}
	}

	if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
	    !mclk->ufs_sel_min_clki) {
		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
		dev_info(hba->dev,
			 "%s: Clk-scaling not ready. Feature disabled.",
			 __func__);
	}
}

#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct device_node *np = hba->dev->of_node;
	struct device *dev = hba->dev;
	char vcc_name[MAX_VCC_NAME];
	struct arm_smccc_res res;
	int err, ver;

	if (hba->vreg_info.vcc)
		return 0;

	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
		ufs_mtk_get_vcc_num(res);
		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
		else
			return -ENODEV;
	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
	} else {
		return 0;
	}

	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
	if (err)
		return err;

	err = ufshcd_get_vreg(dev, info->vcc);
	if (err)
		return err;

	err = regulator_enable(info->vcc->reg);
	if (!err) {
		info->vcc->enabled = true;
		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
	}

	return err;
}

static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
	struct ufs_vreg_info *info = &hba->vreg_info;
	struct ufs_vreg **vreg_on, **vreg_off;

	if (hba->dev_info.wspecversion >= 0x0300) {
		vreg_on = &info->vccq;
		vreg_off = &info->vccq2;
	} else {
		vreg_on = &info->vccq2;
		vreg_off = &info->vccq;
	}

	if (*vreg_on)
		(*vreg_on)->always_on = true;

	if (*vreg_off) {
		regulator_disable((*vreg_off)->reg);
		devm_kfree(hba->dev, (*vreg_off)->name);
		devm_kfree(hba->dev, *vreg_off);
		*vreg_off = NULL;
	}
}

/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
	const struct of_device_id *id;
	struct device *dev = hba->dev;
	struct ufs_mtk_host *host;
	int err = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	ufshcd_set_variant(hba, host);

	id = of_match_device(ufs_mtk_of_match, dev);
	if (!id) {
		err = -EINVAL;
		goto out;
	}

	/* Initialize host capability */
	ufs_mtk_init_host_caps(hba);

	err = ufs_mtk_bind_mphy(hba);
	if (err)
		goto out_variant_clear;

	ufs_mtk_init_reset(hba);

	/* Enable runtime autosuspend */
	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	/* Enable clock-gating */
	hba->caps |= UFSHCD_CAP_CLK_GATING;

	/* Enable inline encryption */
	hba->caps |= UFSHCD_CAP_CRYPTO;

	/* Enable WriteBooster */
	hba->caps |= UFSHCD_CAP_WB_EN;

	/* Enable clk scaling */
	hba->caps |= UFSHCD_CAP_CLK_SCALING;

	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);

	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;

	ufs_mtk_init_clocks(hba);

	/*
	 * ufshcd_vops_init() is invoked after
	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
	 * phy clock setup is skipped.
	 *
	 * Enable phy clocks specifically here.
	 */
	ufs_mtk_mphy_power_on(hba, true);
	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

	host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);

	/* Initialize pm-qos request */
	cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
	host->pm_qos_init = true;

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
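
/*
 * Decide whether the power mode change should go through FASTAUTO mode
 * first: only when the platform opts in, the target HS rate differs from
 * the current one, and each direction either requests FAST mode or
 * targets HS-G4 or above.
 */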
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (!ufs_mtk_is_pmc_via_fastauto(hba))
		return false;

	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
		return false;

	if (dev_req_params->pwr_tx != FAST_MODE &&
	    dev_req_params->gear_tx < UFS_HS_G4)
		return false;

	if (dev_req_params->pwr_rx != FAST_MODE &&
	    dev_req_params->gear_rx < UFS_HS_G4)
		return false;

	return true;
}

static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_dev_params host_cap;
	int ret;

	ufshcd_init_pwr_dev_param(&host_cap);
	host_cap.hs_rx_gear = UFS_HS_G5;
	host_cap.hs_tx_gear = UFS_HS_G5;

	ret = ufshcd_get_pwr_dev_param(&host_cap,
				       dev_max_params,
				       dev_req_params);
	if (ret) {
		pr_info("%s: failed to determine capabilities\n",
			__func__);
	}

	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
			       dev_req_params->lane_tx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
			       dev_req_params->lane_rx);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       dev_req_params->hs_rate);

		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			       PA_NO_ADAPT);

		ret = ufshcd_uic_change_pwr_mode(hba,
						 FASTAUTO_MODE << 4 | FASTAUTO_MODE);
		if (ret) {
			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
				__func__, ret);
		}
	}

	if (host->hw_ver.major >= 3) {
		ret = ufshcd_dme_configure_adapt(hba,
						 dev_req_params->gear_tx,
						 PA_INITIAL_ADAPT);
	}

	return ret;
}

static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
					     dev_req_params);
		break;
	case POST_CHANGE:
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
	int ret;
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm ? 1 : 0);
	if (!ret || !lpm) {
		/*
		 * Update the LPM flag only if the UIC command succeeded or
		 * we are leaving LPM, so that a failed LPM entry keeps the
		 * host in non-LPM mode and the default hba_enable_delay_us
		 * value is used for re-enabling the host.
		 */
		host->unipro_lpm = lpm;
	}

	return ret;
}
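
/*
 * Pre link-startup setup: leave UniPro low-power mode, disable the host
 * TX LCC, and clear the deep-stall bit in VS_SAVEPOWERCONTROL.
 */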
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	int ret;
	u32 tmp;

	ufs_mtk_get_controller_version(hba);

	ret = ufs_mtk_unipro_set_lpm(hba, false);
	if (ret)
		return ret;

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
	if (ret)
		return ret;

	tmp &= ~(1 << 6);

	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

	return ret;
}

static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
	u32 ah_ms;

	if (ufshcd_is_clkgating_allowed(hba)) {
		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
					  hba->ahit);
		else
			ah_ms = 10;
		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
	}
}

static void ufs_mtk_post_link(struct ufs_hba *hba)
{
	/* enable unipro clock gating feature */
	ufs_mtk_cfg_unipro_cg(hba, true);

	/* will be configured during probe hba */
	if (ufshcd_is_auto_hibern8_supported(hba))
		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);

	ufs_mtk_setup_clk_gating(hba);
}

static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	int ret = 0;

	switch (stage) {
	case PRE_CHANGE:
		ret = ufs_mtk_pre_link(hba);
		break;
	case POST_CHANGE:
		ufs_mtk_post_link(hba);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* disable hba before device reset */
	ufshcd_hba_stop(hba);

	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");

	return 0;
}
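
/*
 * Link power-mode helpers used on the suspend/resume paths: set_hpm
 * re-enables the host, exits Hibern8 and brings the HBA back to an
 * operational state; set_lpm disables the UniPro reset-confirm feature
 * and drops the UniPro state into low power.
 */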
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_lpm(hba, false);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (!err)
		ufshcd_set_link_active(hba);
	else
		return err;

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}

static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	/* Disable reset confirm feature by UniPro */
	ufshcd_writel(hba,
		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
		      REG_UFS_XOUFS_CTRL);

	err = ufs_mtk_unipro_set_lpm(hba, true);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_lpm(hba, false);
		return err;
	}

	return 0;
}

static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct ufs_vreg *vccqx = NULL;

	if (hba->vreg_info.vccq)
		vccqx = hba->vreg_info.vccq;
	else
		vccqx = hba->vreg_info.vccq2;

	regulator_set_mode(vccqx->reg,
			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}

static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
	struct arm_smccc_res res;

	ufs_mtk_device_pwr_ctrl(!lpm,
				(unsigned long)hba->dev_info.wspecversion,
				res);
}

static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
		return;

	/* Skip if VCC is assumed always-on */
	if (!hba->vreg_info.vcc)
		return;

	/* Bypass LPM when device is still active */
	if (lpm && ufshcd_is_ufs_dev_active(hba))
		return;

	/* Bypass LPM if VCC is enabled */
	if (lpm && hba->vreg_info.vcc->enabled)
		return;

	if (lpm) {
		ufs_mtk_vccqx_set_lpm(hba, lpm);
		ufs_mtk_vsx_set_lpm(hba, lpm);
	} else {
		ufs_mtk_vsx_set_lpm(hba, lpm);
		ufs_mtk_vccqx_set_lpm(hba, lpm);
	}
}
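
/*
 * Disable auto-hibern8 and wait for the host state machine to settle
 * back to the link-up state; used from the suspend PRE_CHANGE step so
 * the link is not mid-transition when power is removed.
 */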
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
	int ret;

	/* disable auto-hibern8 */
	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);

	/* wait host return to idle state when auto-hibern8 off */
	ufs_mtk_wait_idle_state(hba, 5);

	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
	if (ret)
		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}

static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
			   enum ufs_notify_change_status status)
{
	int err;
	struct arm_smccc_res res;

	if (status == PRE_CHANGE) {
		if (ufshcd_is_auto_hibern8_supported(hba))
			ufs_mtk_auto_hibern8_disable(hba);
		return 0;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_lpm(hba);
		if (err)
			goto fail;
	}

	if (!ufshcd_is_link_active(hba)) {
		/*
		 * Make sure no error will be returned to prevent
		 * ufshcd_suspend() re-enabling regulators while vreg is still
		 * in low-power mode.
		 */
		err = ufs_mtk_mphy_power_on(hba, false);
		if (err)
			goto fail;
	}

	if (ufshcd_is_link_off(hba))
		ufs_mtk_device_reset_ctrl(0, res);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);

	return 0;
fail:
	/*
	 * Set link as off state enforcedly to trigger
	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
	 * for completed host reset.
	 */
	ufshcd_set_link_off(hba);
	return -EAGAIN;
}

static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int err;
	struct arm_smccc_res res;

	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		ufs_mtk_dev_vreg_set_lpm(hba, false);

	ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);

	err = ufs_mtk_mphy_power_on(hba, true);
	if (err)
		goto fail;

	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
fail:
	return ufshcd_link_recovery(hba);
}

static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	/* Dump ufshci register 0x140 ~ 0x14C */
	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
			 "XOUFS Ctrl (0x140): ");

	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

	/* Dump ufshci register 0x2200 ~ 0x22AC */
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl (0x2200): ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufs_mtk_dbg_sel(hba);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}

static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u16 mid = dev_info->wmanufacturerid;

	if (mid == UFS_VENDOR_SAMSUNG) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
	}

	/*
	 * Decide waiting time before gating reference clock and
	 * after ungating reference clock according to vendors'
	 * requirements.
	 */
	if (mid == UFS_VENDOR_SAMSUNG)
		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
	else if (mid == UFS_VENDOR_SKHYNIX)
		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
	else if (mid == UFS_VENDOR_TOSHIBA)
		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
	else
		ufs_mtk_setup_ref_clk_wait_us(hba,
					      REFCLK_DEFAULT_WAIT_US);

	return 0;
}

static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);

	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
		hba->vreg_info.vcc->always_on = true;
		/*
		 * VCC will be kept always-on thus we don't
		 * need any delay during regulator operations
		 */
		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
	}

	ufs_mtk_vreg_fix_vcc(hba);
	ufs_mtk_vreg_fix_vccqx(hba);
}

static void ufs_mtk_event_notify(struct ufs_hba *hba,
				 enum ufs_event_type evt, void *data)
{
	unsigned int val = *(u32 *)data;
	unsigned long reg;
	u8 bit;

	trace_ufs_mtk_event(evt, val);

	/* Print details of UIC Errors */
	if (evt <= UFS_EVT_DME_ERR) {
		dev_info(hba->dev,
			 "Host UIC Error Code (%s): %08x\n",
			 ufs_uic_err_str[evt], val);
		reg = val;
	}

	if (evt == UFS_EVT_PA_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
	}

	if (evt == UFS_EVT_DL_ERR) {
		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
	}
}
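
/*
 * devfreq tuning for clk scaling: keep at least HS-G4, poll every 200 ms
 * and use a 50%/20% up/down threshold pair for the ondemand governor.
 */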
static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
					 struct devfreq_dev_profile *profile,
					 struct devfreq_simple_ondemand_data *data)
{
	/* Customize min gear in clk scaling */
	hba->clk_scaling.min_gear = UFS_HS_G4;

	hba->vps->devfreq_profile.polling_ms = 200;
	hba->vps->ondemand_data.upthreshold = 50;
	hba->vps->ondemand_data.downdifferential = 20;
}

/**
 * ufs_mtk_clk_scale - Internal clk scaling operation
 *
 * MTK platform supports clk scaling by switching parent of ufs_sel (mux).
 * The ufs_sel downstream to ufs_ck which feeds directly to UFS hardware.
 * Max and min clocks rate of ufs_sel defined in dts should match rate of
 * "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
 * This prevents changing the rate of the pll clock that is shared between
 * modules.
 *
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 */
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct ufs_mtk_clk *mclk = &host->mclk;
	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
	int ret = 0;

	ret = clk_prepare_enable(clki->clk);
	if (ret) {
		dev_info(hba->dev,
			 "clk_prepare_enable() fail, ret: %d\n", ret);
		return;
	}

	if (scale_up) {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
		clki->curr_freq = clki->max_freq;
	} else {
		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
		clki->curr_freq = clki->min_freq;
	}

	if (ret) {
		dev_info(hba->dev,
			 "Failed to set ufs_sel_clki, ret: %d\n", ret);
	}

	clk_disable_unprepare(clki->clk);

	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}

static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return 0;

	if (status == PRE_CHANGE) {
		/* Switch parent before clk_set_rate() */
		ufs_mtk_clk_scale(hba, scale_up);
	} else {
		/* Request interrupt latency QoS accordingly */
		ufs_mtk_scale_perf(hba, scale_up);
	}

	return 0;
}

/*
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                 = "mediatek.ufshci",
	.init                 = ufs_mtk_init,
	.get_ufs_hci_version  = ufs_mtk_get_ufs_hci_version,
	.setup_clocks         = ufs_mtk_setup_clocks,
	.hce_enable_notify    = ufs_mtk_hce_enable_notify,
	.link_startup_notify  = ufs_mtk_link_startup_notify,
	.pwr_change_notify    = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks     = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks     = ufs_mtk_fixup_dev_quirks,
	.suspend              = ufs_mtk_suspend,
	.resume               = ufs_mtk_resume,
	.dbg_register_dump    = ufs_mtk_dbg_register_dump,
	.device_reset         = ufs_mtk_device_reset,
	.event_notify         = ufs_mtk_event_notify,
	.config_scaling_param = ufs_mtk_config_scaling_param,
	.clk_scale_notify     = ufs_mtk_clk_scale_notify,
};

/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;
	struct device_node *reset_node;
	struct platform_device *reset_pdev;
	struct device_link *link;

	reset_node = of_find_compatible_node(NULL, NULL,
					     "ti,syscon-reset");
	if (!reset_node) {
		dev_notice(dev, "find ti,syscon-reset fail\n");
		goto skip_reset;
	}
	reset_pdev = of_find_device_by_node(reset_node);
	if (!reset_pdev) {
		dev_notice(dev, "find reset_pdev fail\n");
		goto skip_reset;
	}
	link = device_link_add(dev, &reset_pdev->dev,
		DL_FLAG_AUTOPROBE_CONSUMER);
	put_device(&reset_pdev->dev);
	if (!link) {
		dev_notice(dev, "add reset device_link fail\n");
		goto skip_reset;
	}
	/* supplier is not probed */
	if (link->status == DL_STATE_DORMANT) {
		err = -EPROBE_DEFER;
		goto out;
	}

skip_reset:
	/* perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);

out:
	if (err)
		dev_info(dev, "probe failed %d\n", err);

	of_node_put(reset_node);
	return err;
}

/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}
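
/*
 * System and runtime PM callbacks wrap the generic ufshcd handlers and
 * additionally put the device regulators into low-power mode after
 * suspend and restore them before resume.
 */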
#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	ret = ufshcd_system_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_system_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_system_resume(dev);
}
#endif

#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret = 0;

	ret = ufshcd_runtime_suspend(dev);
	if (ret)
		return ret;

	ufs_mtk_dev_vreg_set_lpm(hba, true);

	return 0;
}

static int ufs_mtk_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	ufs_mtk_dev_vreg_set_lpm(hba, false);

	return ufshcd_runtime_resume(dev);
}
#endif

static const struct dev_pm_ops ufs_mtk_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
				ufs_mtk_system_resume)
	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
			   ufs_mtk_runtime_resume, NULL)
	.prepare	 = ufshcd_suspend_prepare,
	.complete	 = ufshcd_resume_complete,
};

static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};

MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);