/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode);
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);

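/*
 * Read how many TX data lanes are connected on the link by querying
 * PA_CONNECTEDTXDATALANES through a DME get.
 */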
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

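/*
 * Gate the per-lane TX/RX symbol clocks, in the reverse order of
 * ufs_qcom_enable_lane_clks().
 */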
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

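/*
 * Ungate the per-lane TX/RX symbol clocks. On failure, any clock that was
 * already enabled is disabled again before returning the error.
 */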
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
		host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
		host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

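/* Look up the per-lane TX/RX symbol clocks by name. */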
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
		&host->rx_l1_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
		&host->tx_l1_sync_clk);
out:
	return err;
}

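/*
 * After a successful link startup, program the PHY's TX lane enable with
 * the number of TX data lanes that were actually connected.
 */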
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

out:
	return err;
}

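/*
 * Poll the M-PHY TX FSM state for up to HBRN8_POLL_TOUT_MS, expecting it
 * to settle in HIBERN8; any other final state is reported as an error.
 */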
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * we might have scheduled out for a long time during polling so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

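/*
 * PHY power up sequence: assert reset and calibrate the PHY, then
 * de-assert reset, start the SerDes and wait for the physical coding
 * sublayer (PCS) to become ready.
 */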
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are disabled by default, and
 * this function enables them (after every UFS link startup) to save some
 * leakage power.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

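/*
 * Host controller enable notification. Before the controller is enabled,
 * power up the PHY and the lane clocks it sources; once it is enabled,
 * verify the PHY reached HIBERN8 and turn on hardware clock gating.
 */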
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
{
	struct ufs_qcom_host *host = hba->priv;
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/*
 * Returns the core_clk rate (non-zero) on success and 0 in case of a
 * failure.
 */
static unsigned long
ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
{
	struct ufs_qcom_host *host = hba->priv;
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
	};

	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER
	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
	 * Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	/* both fields of this register shall be written at once */
	ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
						REG_UFS_TX_SYMBOL_CLK_NS_US);
	goto out;

out_error:
	core_clk_rate = 0;
out:
	return core_clk_rate;
}

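/*
 * Link startup notification. Before startup, program the PA link startup
 * timer in units of core_clk cycles per 100ms; after startup, update the
 * PHY TX lane enable.
 */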
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
{
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_100ms;

	switch (status) {
	case PRE_CHANGE:
		core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
						    SLOWAUTO_MODE, 0);
		if (!core_clk_rate) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			return -EINVAL;
		}
		core_clk_cycles_per_100ms =
			(core_clk_rate / MSEC_PER_SEC) * 100;
		ufshcd_writel(hba, core_clk_cycles_per_100ms,
					REG_UFS_PA_LINK_STARTUP_TIMER);
		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

	return 0;
}

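/*
 * If the link is off, gate the lane clocks, power the PHY off and hold it
 * in reset; if the link is merely inactive, powering the PHY off is
 * sufficient as its ref_clk and analog rails are no longer needed.
 */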
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba))
		phy_power_off(phy);

out:
	return ret;
}

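/* Power the PHY back on when resuming from suspend. */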
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	hba->is_sys_suspended = false;

out:
	return err;
}

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

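/*
 * Negotiate the power mode (FAST/SLOW and their AUTO variants), gear,
 * lane count and HS rate between the device's maximum capabilities
 * (dev_max) and the vendor limits (qcom_param); the result is written to
 * agreed_pwr.
 */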
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM then set the minimum gear to be the chosen
	 * working gear.
	 * if one is PWM and one is HS then the one that is PWM gets to decide
	 * what the gear is, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;

	return 0;
}

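/*
 * Recompute the bus bandwidth vote from the current power mode and apply
 * it; the applied vote is cached so it can be restored when clocks are
 * re-enabled.
 */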
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

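/*
 * Power mode change notification. Before the change, clamp the device's
 * maximum capabilities to the vendor limits; after the change, program
 * the UTP timers, PHY TX lane enable and bus bandwidth vote to match the
 * agreed parameters.
 */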
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				bool status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = hba->priv;
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		break;
	case POST_CHANGE:
		if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * QCOM UFS host controller may have some non-standard behaviours (quirks)
 * compared to what is specified by the UFSHCI specification. Advertise all
 * such quirks to the standard UFS host controller driver so that it takes
 * them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
	}

	if (host->hw_ver.major >= 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	if (host->hw_ver.major >= 0x2)
		host->caps = UFS_QCOM_CAP_QUNIPRO;
}

static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote)
		host->bus_vote.curr_vote = vote;

	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}

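/*
 * Gate or ungate the PHY interface, PHY ref and device ref clocks along
 * with the controller clocks, and update the bus bandwidth vote to match.
 */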
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = hba->priv;
	int err = 0;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		/* enable the device ref clock */
		ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		if (!ufs_qcom_is_link_active(hba)) {
			/* turn off UFS local PHY ref_clk */
			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
			/* disable device ref_clk */
			ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
		}
		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

out:
	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = hba->priv;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = hba->priv;
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}

#define	ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds the PHY with the controller and powers up the PHY, enabling its
 * clocks and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, a negative error on PHY power
 * up failure, and zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct ufs_qcom_host *host;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	host->hba = hba;
	hba->priv = (void *)host;

	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_host_free;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_phy;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	ufs_qcom_setup_clocks(hba, true);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	goto out;

out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
out_host_free:
	devm_kfree(dev, host);
	hba->priv = NULL;
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
}

static
void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = hba->priv;
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;

	if (!dev_req_params)
		return;

	ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
			    dev_req_params->pwr_rx,
			    dev_req_params->hs_rate);
}

/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
};
EXPORT_SYMBOL(ufs_hba_qcom_vops);

MODULE_LICENSE("GPL v2");