2019-03-16 13:04:47 +08:00
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */
2019-12-30 13:32:26 +08:00
# include <linux/arm-smccc.h>
2019-12-30 13:32:29 +08:00
# include <linux/bitfield.h>
2019-03-16 13:04:47 +08:00
# include <linux/of.h>
# include <linux/of_address.h>
# include <linux/phy/phy.h>
# include <linux/platform_device.h>
2019-12-30 13:32:26 +08:00
# include <linux/soc/mediatek/mtk_sip_svc.h>
2019-03-16 13:04:47 +08:00
# include "ufshcd.h"
# include "ufshcd-pltfrm.h"
2020-01-11 15:11:47 +08:00
# include "ufs_quirks.h"
2019-03-16 13:04:47 +08:00
# include "unipro.h"
# include "ufs-mediatek.h"
2019-12-30 13:32:26 +08:00
/*
 * Issue an SMC call into the MediaTek SIP (secure world) service that
 * owns UFS platform controls; @res receives the SMC result.
 */
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

/* Tell secure world the reference clock is being ungated (on) or gated */
#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

/* Drive the device reset line via secure world: high = de-asserted */
#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
2020-05-08 16:01:11 +08:00
/* Per-device quirk table, applied by ufs_mtk_fixup_dev_quirks() */
static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
	UFS_FIX(UFS_VENDOR_SKHYNIX, "H9HQ21AFAMZDAR",
		UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES),
	END_FIX
};
2019-03-20 22:24:58 +08:00
/*
 * Enable or disable the UniPro clock-gating feature by updating the
 * vendor-specific save-power and debug-clock DME attributes.
 */
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
	const u32 gate_mask = (1 << RX_SYMBOL_CLK_GATE_EN) |
			      (1 << SYS_CLK_GATE_EN) |
			      (1 << TX_CLK_GATE_EN);
	u32 val;

	/* Set or clear the three clock-gate enable bits together. */
	ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &val);
	if (enable)
		val |= gate_mask;
	else
		val &= ~gate_mask;
	ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), val);

	/*
	 * TX_SYMBOL_CLK_REQ_FORCE is the inverse control: it must be
	 * cleared when gating is enabled and forced when it is not.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &val);
	if (enable)
		val &= ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
	else
		val |= (1 << TX_SYMBOL_CLK_REQ_FORCE);
	ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), val);
}
2020-03-18 18:40:16 +08:00
static int ufs_mtk_hce_enable_notify ( struct ufs_hba * hba ,
enum ufs_notify_change_status status )
{
struct ufs_mtk_host * host = ufshcd_get_variant ( hba ) ;
if ( status = = PRE_CHANGE ) {
if ( host - > unipro_lpm )
2020-05-09 17:37:13 +08:00
hba - > vps - > hba_enable_delay_us = 0 ;
2020-03-18 18:40:16 +08:00
else
2020-05-09 17:37:13 +08:00
hba - > vps - > hba_enable_delay_us = 600 ;
2020-03-18 18:40:16 +08:00
}
return 0 ;
}
2019-03-20 22:24:58 +08:00
static int ufs_mtk_bind_mphy ( struct ufs_hba * hba )
2019-03-16 13:04:47 +08:00
{
struct ufs_mtk_host * host = ufshcd_get_variant ( hba ) ;
struct device * dev = hba - > dev ;
struct device_node * np = dev - > of_node ;
int err = 0 ;
host - > mphy = devm_of_phy_get_by_index ( dev , np , 0 ) ;
if ( host - > mphy = = ERR_PTR ( - EPROBE_DEFER ) ) {
/*
* UFS driver might be probed before the phy driver does .
* In that case we would like to return EPROBE_DEFER code .
*/
err = - EPROBE_DEFER ;
dev_info ( dev ,
" %s: required phy hasn't probed yet. err = %d \n " ,
__func__ , err ) ;
} else if ( IS_ERR ( host - > mphy ) ) {
err = PTR_ERR ( host - > mphy ) ;
dev_info ( dev , " %s: PHY get failed %d \n " , __func__ , err ) ;
}
if ( err )
host - > mphy = NULL ;
return err ;
}
2019-12-30 13:32:27 +08:00
/*
 * Request (on) or release (!on) the reference clock.
 *
 * Secure world is notified before ungating and after gating so it can
 * sequence the clock source; the host then handshakes with the controller
 * through REG_UFS_REFCLK_CTRL and polls until the ack bit mirrors the
 * request bit, or REFCLK_REQ_TIMEOUT_US elapses.
 *
 * Returns 0 on success, -ETIMEDOUT if the controller never acks.
 */
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	struct arm_smccc_res res;
	ktime_t timeout, time_checked;
	u32 value;

	/* Nothing to do if the ref-clk is already in the requested state. */
	if (host->ref_clk_enabled == on)
		return 0;

	if (on) {
		/* Notify first, then wait the vendor-required ungating time. */
		ufs_mtk_ref_clk_notify(on, res);
		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
	} else {
		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
	}

	/* Wait for ack */
	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
	do {
		time_checked = ktime_get();
		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

		/* Wait until ack bit equals to req bit */
		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
			goto out;

		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

	/* Roll secure world back to the previous (unchanged) state. */
	ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

	return -ETIMEDOUT;

out:
	host->ref_clk_enabled = on;
	if (!on) {
		/* Wait the vendor-required gating time before notifying. */
		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
		ufs_mtk_ref_clk_notify(on, res);
	}

	return 0;
}
2020-02-20 21:48:48 +08:00
/*
 * Record how long to wait before gating and after ungating the reference
 * clock. A non-zero gating time reported by the device itself takes
 * precedence over the vendor default passed in @gating_us.
 */
static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
					  u16 gating_us, u16 ungating_us)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	u32 dev_gating_us = hba->dev_info.clk_gating_wait_us;

	host->ref_clk_gating_wait_us = dev_gating_us ? dev_gating_us
						     : gating_us;
	host->ref_clk_ungating_wait_us = ungating_us;
}
2020-06-01 18:46:43 +08:00
/*
 * Poll the link-state field (top nibble of REG_UFS_PROBE, selected via
 * REG_UFS_DEBUG_SEL) until it equals @state or @max_wait_ms elapses.
 *
 * Returns 0 when the requested state is reached, -ETIMEDOUT otherwise.
 */
int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
			    unsigned long max_wait_ms)
{
	ktime_t timeout, time_checked;
	u32 val;

	/*
	 * Fix: the old ktime_add_us(ktime_get(), ms_to_ktime(max_wait_ms))
	 * fed a nanosecond-valued ktime_t into a microsecond parameter,
	 * inflating the timeout ~1000x. Add milliseconds directly.
	 */
	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
	do {
		time_checked = ktime_get();
		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
		val = ufshcd_readl(hba, REG_UFS_PROBE);
		val = val >> 28;

		if (val == state)
			return 0;

		/* Sleep for max. 200us */
		usleep_range(100, 200);
	} while (ktime_before(time_checked, timeout));

	/* Re-check once more: state may have changed during the last sleep. */
	if (val == state)
		return 0;

	return -ETIMEDOUT;
}
2019-03-16 13:04:47 +08:00
/**
 * ufs_mtk_setup_clocks - enables/disable clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Gating happens on PRE_CHANGE (before the controller clocks go away)
 * and ungating on POST_CHANGE (after they are back), so the ref-clk and
 * M-PHY are only touched while the controller is still clocked.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
				enum ufs_notify_change_status status)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret = 0;

	/*
	 * In case ufs_mtk_init() is not yet done, simply ignore.
	 * This ufs_mtk_setup_clocks() shall be called from
	 * ufs_mtk_init() after init is done.
	 */
	if (!host)
		return 0;

	if (!on && status == PRE_CHANGE) {
		if (!ufshcd_is_link_active(hba)) {
			/* Link is down: safe to gate ref-clk and phy. */
			ufs_mtk_setup_ref_clk(hba, on);
			ret = phy_power_off(host->mphy);
		} else {
			/*
			 * Gate ref-clk if link state is in Hibern8
			 * triggered by Auto-Hibern8.
			 */
			if (!ufshcd_can_hibern8_during_gating(hba) &&
			    ufshcd_is_auto_hibern8_enabled(hba)) {
				/* Wait up to 15 ms for the link to enter Hibern8. */
				ret = ufs_mtk_wait_link_state(hba,
							      VS_LINK_HIBERN8,
							      15);
				if (!ret)
					ufs_mtk_setup_ref_clk(hba, on);
			}
		}
	} else if (on && status == POST_CHANGE) {
		/* Ungate in reverse order: phy first, then ref-clk. */
		ret = phy_power_on(host->mphy);
		ufs_mtk_setup_ref_clk(hba, on);
	}

	return ret;
}
/**
* ufs_mtk_init - find other essential mmio bases
* @ hba : host controller instance
*
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators .
*
* Returns - EPROBE_DEFER if binding fails , returns negative error
* on phy power up failure and returns zero on success .
*/
static int ufs_mtk_init ( struct ufs_hba * hba )
{
struct ufs_mtk_host * host ;
struct device * dev = hba - > dev ;
int err = 0 ;
host = devm_kzalloc ( dev , sizeof ( * host ) , GFP_KERNEL ) ;
if ( ! host ) {
err = - ENOMEM ;
dev_info ( dev , " %s: no memory for mtk ufs host \n " , __func__ ) ;
goto out ;
}
host - > hba = hba ;
ufshcd_set_variant ( hba , host ) ;
err = ufs_mtk_bind_mphy ( hba ) ;
if ( err )
goto out_variant_clear ;
2019-09-16 23:56:51 +08:00
/* Enable runtime autosuspend */
hba - > caps | = UFSHCD_CAP_RPM_AUTOSUSPEND ;
2019-12-30 13:32:30 +08:00
/* Enable clock-gating */
hba - > caps | = UFSHCD_CAP_CLK_GATING ;
2020-05-08 16:01:14 +08:00
/* Enable WriteBooster */
hba - > caps | = UFSHCD_CAP_WB_EN ;
2020-05-09 17:37:16 +08:00
hba - > vps - > wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT ( 80 ) ;
2020-05-08 16:01:14 +08:00
2019-03-16 13:04:47 +08:00
/*
* ufshcd_vops_init ( ) is invoked after
* ufshcd_setup_clock ( true ) in ufshcd_hba_init ( ) thus
* phy clock setup is skipped .
*
* Enable phy clocks specifically here .
*/
ufs_mtk_setup_clocks ( hba , true , POST_CHANGE ) ;
goto out ;
out_variant_clear :
ufshcd_set_variant ( hba , NULL ) ;
out :
return err ;
}
static int ufs_mtk_pre_pwr_change ( struct ufs_hba * hba ,
struct ufs_pa_layer_attr * dev_max_params ,
struct ufs_pa_layer_attr * dev_req_params )
{
struct ufs_dev_params host_cap ;
int ret ;
host_cap . tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX ;
host_cap . rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX ;
host_cap . hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX ;
host_cap . hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX ;
host_cap . pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX ;
host_cap . pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX ;
host_cap . rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM ;
host_cap . tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM ;
host_cap . rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS ;
host_cap . tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS ;
host_cap . hs_rate = UFS_MTK_LIMIT_HS_RATE ;
host_cap . desired_working_mode =
UFS_MTK_LIMIT_DESIRED_MODE ;
ret = ufshcd_get_pwr_dev_param ( & host_cap ,
dev_max_params ,
dev_req_params ) ;
if ( ret ) {
pr_info ( " %s: failed to determine capabilities \n " ,
__func__ ) ;
}
return ret ;
}
/*
 * Power-mode change notification: negotiate capabilities on PRE_CHANGE,
 * nothing to do on POST_CHANGE, -EINVAL for any unknown stage.
 */
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
				     enum ufs_notify_change_status stage,
				     struct ufs_pa_layer_attr *dev_max_params,
				     struct ufs_pa_layer_attr *dev_req_params)
{
	if (stage == PRE_CHANGE)
		return ufs_mtk_pre_pwr_change(hba, dev_max_params,
					      dev_req_params);

	if (stage == POST_CHANGE)
		return 0;

	return -EINVAL;
}
2020-03-18 18:40:16 +08:00
/*
 * Write the UniPro power-down control attribute and, on success, remember
 * the requested LPM state so later HCE-enable delays can depend on it.
 */
static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
	int ret;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
			     lpm);
	if (!ret)
		host->unipro_lpm = lpm;

	return ret;
}
2019-03-16 13:04:47 +08:00
/* Host-side preparation performed just before link startup. */
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
	u32 val;
	int ret;

	/* Make sure UniPro is out of low-power mode first. */
	ufs_mtk_unipro_set_pm(hba, 0);

	/*
	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
	 * to make sure that both host and device TX LCC are disabled
	 * once link startup is completed.
	 */
	ret = ufshcd_disable_host_tx_lcc(hba);
	if (ret)
		return ret;

	/* disable deep stall */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &val);
	if (ret)
		return ret;

	val &= ~(1 << 6);

	return ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), val);
}
2019-12-30 13:32:30 +08:00
static void ufs_mtk_setup_clk_gating ( struct ufs_hba * hba )
{
unsigned long flags ;
u32 ah_ms ;
if ( ufshcd_is_clkgating_allowed ( hba ) ) {
if ( ufshcd_is_auto_hibern8_supported ( hba ) & & hba - > ahit )
ah_ms = FIELD_GET ( UFSHCI_AHIBERN8_TIMER_MASK ,
hba - > ahit ) ;
else
ah_ms = 10 ;
spin_lock_irqsave ( hba - > host - > host_lock , flags ) ;
hba - > clk_gating . delay_ms = ah_ms + 5 ;
spin_unlock_irqrestore ( hba - > host - > host_lock , flags ) ;
}
}
2019-03-16 13:04:47 +08:00
static int ufs_mtk_post_link ( struct ufs_hba * hba )
{
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg ( hba , true ) ;
2019-12-30 13:32:29 +08:00
/* configure auto-hibern8 timer to 10ms */
if ( ufshcd_is_auto_hibern8_supported ( hba ) ) {
ufshcd_auto_hibern8_update ( hba ,
FIELD_PREP ( UFSHCI_AHIBERN8_TIMER_MASK , 10 ) |
FIELD_PREP ( UFSHCI_AHIBERN8_SCALE_MASK , 3 ) ) ;
}
2019-12-30 13:32:30 +08:00
ufs_mtk_setup_clk_gating ( hba ) ;
2019-03-16 13:04:47 +08:00
return 0 ;
}
/* Dispatch link-startup notifications to the pre/post handlers. */
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status stage)
{
	if (stage == PRE_CHANGE)
		return ufs_mtk_pre_link(hba);

	if (stage == POST_CHANGE)
		return ufs_mtk_post_link(hba);

	return -EINVAL;
}
2019-12-30 13:32:26 +08:00
/*
 * Pulse the device reset line (via secure world) and give the device
 * time to come back; timing follows the UFS RST_n requirements below.
 */
static void ufs_mtk_device_reset(struct ufs_hba *hba)
{
	struct arm_smccc_res res;

	/* Assert reset: RST_n is active low. */
	ufs_mtk_device_reset_ctrl(0, res);

	/*
	 * The reset signal is active low. UFS devices shall detect
	 * more than or equal to 1us of positive or negative RST_n
	 * pulse width.
	 *
	 * To be on safe side, keep the reset low for at least 10us.
	 */
	usleep_range(10, 15);

	/* De-assert reset. */
	ufs_mtk_device_reset_ctrl(1, res);

	/* Some devices may need time to respond to rst_n */
	usleep_range(10000, 15000);

	dev_info(hba->dev, "device reset done\n");
}
2020-01-17 11:51:08 +08:00
/*
 * Bring the link back to high-power mode on resume: re-enable the host,
 * leave UniPro LPM, exit Hibern8, then restore an operational controller.
 */
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_pm(hba, 0);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (err)
		return err;

	ufshcd_set_link_active(hba);

	return ufshcd_make_hba_operational(hba);
}
/* Put UniPro into low-power mode for suspend; undo on failure. */
static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err = ufs_mtk_unipro_set_pm(hba, 1);

	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_pm(hba, 0);
	}

	return err;
}
2019-03-16 13:04:47 +08:00
static int ufs_mtk_suspend ( struct ufs_hba * hba , enum ufs_pm_op pm_op )
{
2020-01-17 11:51:08 +08:00
int err ;
2019-03-16 13:04:47 +08:00
struct ufs_mtk_host * host = ufshcd_get_variant ( hba ) ;
2019-12-30 13:32:27 +08:00
if ( ufshcd_is_link_hibern8 ( hba ) ) {
2020-01-17 11:51:08 +08:00
err = ufs_mtk_link_set_lpm ( hba ) ;
2020-03-27 17:53:29 +08:00
if ( err ) {
/*
* Set link as off state enforcedly to trigger
* ufshcd_host_reset_and_restore ( ) in ufshcd_suspend ( )
* for completed host reset .
*/
ufshcd_set_link_off ( hba ) ;
2020-01-17 11:51:08 +08:00
return - EAGAIN ;
2020-03-27 17:53:29 +08:00
}
2019-12-30 13:32:27 +08:00
}
2019-03-16 13:04:47 +08:00
2020-01-29 18:52:49 +08:00
if ( ! ufshcd_is_link_active ( hba ) )
phy_power_off ( host - > mphy ) ;
2019-03-16 13:04:47 +08:00
return 0 ;
}
static int ufs_mtk_resume ( struct ufs_hba * hba , enum ufs_pm_op pm_op )
{
struct ufs_mtk_host * host = ufshcd_get_variant ( hba ) ;
2020-01-17 11:51:08 +08:00
int err ;
2019-03-16 13:04:47 +08:00
2020-01-29 18:52:49 +08:00
if ( ! ufshcd_is_link_active ( hba ) )
2019-03-16 13:04:47 +08:00
phy_power_on ( host - > mphy ) ;
2020-01-29 18:52:49 +08:00
if ( ufshcd_is_link_hibern8 ( hba ) ) {
2020-01-17 11:51:08 +08:00
err = ufs_mtk_link_set_hpm ( hba ) ;
2020-03-27 17:53:29 +08:00
if ( err ) {
err = ufshcd_link_recovery ( hba ) ;
2020-01-17 11:51:08 +08:00
return err ;
2020-03-27 17:53:29 +08:00
}
2019-12-30 13:32:27 +08:00
}
2019-03-16 13:04:47 +08:00
return 0 ;
}
2020-01-17 11:51:06 +08:00
/* Dump the MediaTek-specific UFS registers for debugging. */
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");
	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
			 "MPHY Ctrl ");

	/* Direct debugging information to REG_MTK_PROBE */
	ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
2020-01-20 14:08:14 +01:00
static int ufs_mtk_apply_dev_quirks ( struct ufs_hba * hba )
2020-01-11 15:11:47 +08:00
{
2020-01-20 14:08:14 +01:00
struct ufs_dev_info * dev_info = & hba - > dev_info ;
2020-02-20 21:48:48 +08:00
u16 mid = dev_info - > wmanufacturerid ;
2020-01-20 14:08:14 +01:00
2020-05-08 16:01:11 +08:00
if ( mid = = UFS_VENDOR_SAMSUNG )
2020-01-11 15:11:47 +08:00
ufshcd_dme_set ( hba , UIC_ARG_MIB ( PA_TACTIVATE ) , 6 ) ;
2020-02-20 21:48:48 +08:00
/*
* Decide waiting time before gating reference clock and
* after ungating reference clock according to vendors '
* requirements .
*/
if ( mid = = UFS_VENDOR_SAMSUNG )
ufs_mtk_setup_ref_clk_wait_us ( hba , 1 , 1 ) ;
else if ( mid = = UFS_VENDOR_SKHYNIX )
ufs_mtk_setup_ref_clk_wait_us ( hba , 30 , 30 ) ;
else if ( mid = = UFS_VENDOR_TOSHIBA )
ufs_mtk_setup_ref_clk_wait_us ( hba , 100 , 32 ) ;
2020-01-11 15:11:47 +08:00
return 0 ;
}
2020-05-14 09:26:55 +08:00
static void ufs_mtk_fixup_dev_quirks ( struct ufs_hba * hba )
2020-05-08 16:01:11 +08:00
{
struct ufs_dev_info * dev_info = & hba - > dev_info ;
u16 mid = dev_info - > wmanufacturerid ;
ufshcd_fixup_dev_quirks ( hba , ufs_mtk_dev_fixups ) ;
if ( mid = = UFS_VENDOR_SAMSUNG )
hba - > dev_quirks & = ~ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE ;
}
2019-03-16 13:04:47 +08:00
/**
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
	.name                = "mediatek.ufshci",
	.init                = ufs_mtk_init,
	.setup_clocks        = ufs_mtk_setup_clocks,
	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
	.link_startup_notify = ufs_mtk_link_startup_notify,
	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
	.suspend             = ufs_mtk_suspend,
	.resume              = ufs_mtk_resume,
	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
	.device_reset        = ufs_mtk_device_reset,
};
/**
* ufs_mtk_probe - probe routine of the driver
* @ pdev : pointer to Platform device handle
*
* Return zero for success and non - zero for failure
*/
static int ufs_mtk_probe ( struct platform_device * pdev )
{
int err ;
struct device * dev = & pdev - > dev ;
/* perform generic probe */
err = ufshcd_pltfrm_init ( pdev , & ufs_hba_mtk_vops ) ;
if ( err )
dev_info ( dev , " probe failed %d \n " , err ) ;
return err ;
}
/**
* ufs_mtk_remove - set driver_data of the device to NULL
* @ pdev : pointer to platform device handle
*
* Always return 0
*/
static int ufs_mtk_remove ( struct platform_device * pdev )
{
struct ufs_hba * hba = platform_get_drvdata ( pdev ) ;
pm_runtime_get_sync ( & ( pdev ) - > dev ) ;
ufshcd_remove ( hba ) ;
return 0 ;
}
2019-03-20 22:24:58 +08:00
static const struct of_device_id ufs_mtk_of_match[] = {
	{ .compatible = "mediatek,mt8183-ufshci" },
	{},
};
/*
 * Export the OF match table so userspace (modprobe/udev) can autoload
 * this driver from the device-tree modalias; without it the module is
 * only usable when built in or loaded manually.
 */
MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
/* System and runtime PM callbacks, all delegated to the generic ufshcd-pltfrm layer */
static const struct dev_pm_ops ufs_mtk_pm_ops = {
	.suspend         = ufshcd_pltfrm_suspend,
	.resume          = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};
/* Platform driver glue for the MediaTek UFS host controller */
static struct platform_driver ufs_mtk_pltform = {
	.probe      = ufs_mtk_probe,
	.remove     = ufs_mtk_remove,
	.shutdown   = ufshcd_pltfrm_shutdown,
	.driver = {
		.name   = "ufshcd-mtk",
		.pm     = &ufs_mtk_pm_ops,
		.of_match_table = ufs_mtk_of_match,
	},
};
2019-03-21 14:24:44 +01:00
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);