2013-02-25 20:14:33 +04:00
/*
* Universal Flash Storage Host controller driver
*
* This code is based on drivers / scsi / ufs / ufshcd . h
* Copyright ( C ) 2011 - 2013 Samsung India Software Operations
*
* Authors :
* Santosh Yaraganavi < santosh . sy @ samsung . com >
* Vinayak Holikatti < h . vinayak @ samsung . com >
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation ; either version 2
* of the License , or ( at your option ) any later version .
* See the COPYING file in the top - level directory or visit
* < http : //www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* This program is provided " AS IS " and " WITH ALL FAULTS " and
* without warranty of any kind . You are solely responsible for
* determining the appropriateness of using and distributing
* the program and assume all risks associated with your exercise
* of rights with respect to the program , including but not limited
* to infringement of third party rights , the risks and costs of
* program errors , damage to or loss of data , programs or equipment ,
* and unavailability or interruption of operations . Under no
* circumstances will the contributor of this Program be liable for
* any damages of any kind arising from your use or distribution of
* this program .
*/
# ifndef _UFSHCD_H
# define _UFSHCD_H
# include <linux/module.h>
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/interrupt.h>
# include <linux/io.h>
# include <linux/delay.h>
# include <linux/slab.h>
# include <linux/spinlock.h>
# include <linux/workqueue.h>
# include <linux/errno.h>
# include <linux/types.h>
# include <linux/wait.h>
# include <linux/bitops.h>
# include <linux/pm_runtime.h>
# include <linux/clk.h>
2013-06-26 21:09:29 +04:00
# include <linux/completion.h>
2014-09-25 16:32:22 +04:00
# include <linux/regulator/consumer.h>
2013-02-25 20:14:33 +04:00
# include <asm/irq.h>
# include <asm/byteorder.h>
# include <scsi/scsi.h>
# include <scsi/scsi_cmnd.h>
# include <scsi/scsi_host.h>
# include <scsi/scsi_tcq.h>
# include <scsi/scsi_dbg.h>
# include <scsi/scsi_eh.h>
# include "ufs.h"
# include "ufshci.h"
# define UFSHCD "ufshcd"
# define UFSHCD_DRIVER_VERSION "0.2"
2014-09-25 16:32:21 +04:00
struct ufs_hba ;
2013-07-29 23:05:57 +04:00
/* Device management command type: NOP OUT or Query request */
enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,
	DEV_CMD_TYPE_QUERY	= 0x1,
};
2013-02-25 20:14:33 +04:00
/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @result: UIC command result
 * @done: UIC command completion
 */
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};
2014-09-25 16:32:30 +04:00
/* Used to differentiate the power management options */
enum ufs_pm_op {
UFS_RUNTIME_PM ,
UFS_SYSTEM_PM ,
UFS_SHUTDOWN_PM ,
} ;
# define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
# define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
# define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
/* Host <-> Device UniPro Link state */
enum uic_link_state {
UIC_LINK_OFF_STATE = 0 , /* Link powered down or disabled */
UIC_LINK_ACTIVE_STATE = 1 , /* Link is in Fast/Slow/Sleep state */
UIC_LINK_HIBERN8_STATE = 2 , /* Link is in Hibernate state */
} ;
# define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
# define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
UIC_LINK_ACTIVE_STATE )
# define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
UIC_LINK_HIBERN8_STATE )
# define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
# define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
UIC_LINK_ACTIVE_STATE )
# define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
UIC_LINK_HIBERN8_STATE )
/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
	UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
	UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
	UFS_PM_LVL_MAX
};

/* Device power mode / link state pair that defines one PM level */
struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
2013-02-25 20:14:33 +04:00
/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 */
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun; /* UPIU LUN id field is only 8-bit wide */
	bool intr_cmd;
};
2013-07-29 23:05:58 +04:00
/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
2013-07-29 23:05:57 +04:00
/**
 * struct ufs_dev_cmd - all associated fields with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @tag_wq: wait queue until free command slot is available
 * @query: query request/response bookkeeping
 */
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};
2013-02-25 20:14:33 +04:00
2014-09-25 16:32:23 +04:00
/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @enabled: variable to check against multiple enable/disable
 */
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};
2014-09-25 16:32:21 +04:00
# define PRE_CHANGE 0
# define POST_CHANGE 1
2014-09-25 16:32:31 +04:00
/* UniPro PA-layer attributes describing a link power mode configuration */
struct ufs_pa_layer_attr {
	u32 gear_rx;	/* RX gear */
	u32 gear_tx;	/* TX gear */
	u32 lane_rx;	/* number of RX lanes */
	u32 lane_tx;	/* number of TX lanes */
	u32 pwr_rx;	/* RX power mode */
	u32 pwr_tx;	/* TX power mode */
	u32 hs_rate;	/* high speed rate series */
};

/* A PA-layer configuration plus a flag saying whether it has been filled in */
struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
2014-09-25 16:32:21 +04:00
/**
* struct ufs_hba_variant_ops - variant specific callbacks
* @ name : variant name
* @ init : called when the driver is initialized
* @ exit : called to cleanup everything done in init
2015-05-17 18:55:05 +03:00
* @ get_ufs_hci_version : called to get UFS HCI version
2014-09-25 16:32:34 +04:00
* @ clk_scale_notify : notifies that clks are scaled up / down
2014-09-25 16:32:21 +04:00
* @ setup_clocks : called before touching any of the controller registers
* @ setup_regulators : called before accessing the host controller
* @ hce_enable_notify : called before and after HCE enable bit is set to allow
* variant specific Uni - Pro initialization .
* @ link_startup_notify : called before and after Link startup is carried out
* to allow variant specific Uni - Pro initialization .
2014-09-25 16:32:31 +04:00
* @ pwr_change_notify : called before and after a power mode change
* is carried out to allow vendor spesific capabilities
* to be set .
2014-09-25 16:32:30 +04:00
* @ suspend : called during host controller PM callback
* @ resume : called during host controller PM callback
2014-09-25 16:32:21 +04:00
*/
struct ufs_hba_variant_ops {
const char * name ;
int ( * init ) ( struct ufs_hba * ) ;
void ( * exit ) ( struct ufs_hba * ) ;
2015-05-17 18:55:05 +03:00
u32 ( * get_ufs_hci_version ) ( struct ufs_hba * ) ;
2014-09-25 16:32:34 +04:00
void ( * clk_scale_notify ) ( struct ufs_hba * ) ;
2014-09-25 16:32:21 +04:00
int ( * setup_clocks ) ( struct ufs_hba * , bool ) ;
int ( * setup_regulators ) ( struct ufs_hba * , bool ) ;
int ( * hce_enable_notify ) ( struct ufs_hba * , bool ) ;
int ( * link_startup_notify ) ( struct ufs_hba * , bool ) ;
2014-09-25 16:32:31 +04:00
int ( * pwr_change_notify ) ( struct ufs_hba * ,
bool , struct ufs_pa_layer_attr * ,
struct ufs_pa_layer_attr * ) ;
2014-09-25 16:32:30 +04:00
int ( * suspend ) ( struct ufs_hba * , enum ufs_pm_op ) ;
int ( * resume ) ( struct ufs_hba * , enum ufs_pm_op ) ;
2014-09-25 16:32:21 +04:00
} ;
2014-09-25 16:32:32 +04:00
/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,	/* clocks are gated */
	CLKS_ON,	/* clocks are running */
	REQ_CLKS_OFF,	/* gating has been requested */
	REQ_CLKS_ON,	/* ungating has been requested */
};

/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 *	       delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 *		 interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 *		  during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @active_reqs: number of requests that are pending and should be waited for
 *		 completion before gating clocks.
 */
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	int active_reqs;
};
2014-09-25 16:32:34 +04:00
/**
 * struct ufs_clk_scaling - bookkeeping for devfreq-driven clock scaling
 * @busy_start_t: timestamp when the controller last became busy
 * @is_busy_started: whether a busy interval is currently being timed
 * @tot_busy_t: accumulated busy time in the current monitoring window
 * @window_start_t: start time of the current monitoring window
 *
 * NOTE(review): exact accounting semantics are maintained by the devfreq
 * callbacks in ufshcd.c - confirm there.
 */
struct ufs_clk_scaling {
	ktime_t busy_start_t;
	bool is_busy_started;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
};
2014-09-25 16:32:27 +04:00
/**
 * struct ufs_init_prefetch - contains data that is pre-fetched once during
 * initialization
 * @icc_level: icc level which was read during initialization
 */
struct ufs_init_prefetch {
	u32 icc_level;
};
2013-02-25 20:14:33 +04:00
/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @sdev_ufs_device: scsi_device reference of the "UFS device" W-LU
 * @curr_dev_pwr_mode: current power mode of the UFS device
 * @uic_link_state: cached state of the UniPro link
 * @rpm_lvl: desired UFS power management level during runtime PM
 * @spm_lvl: desired UFS power management level during system PM
 * @pm_op_in_progress: whether a PM operation is currently in progress
 * @lrb: local reference block
 * @lrb_in_use: lrb in use
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @priv: pointer to variant specific private data
 * @irq: Irq number of the controller
 * @is_irq_enabled: whether the controller IRQ is currently enabled
 * @quirks: deviations from the standard UFSHCI spec (UFSHCD_QUIRK_* bits)
 * @tm_wq: wait queue for task management
 * @tm_tag_wq: wait queue for free task management slots
 * @tm_condition: condition variable for task management
 * @tm_slots_in_use: bit map of task management request slots in use
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for uic command
 * @uic_async_done: completion for asynchronous UIC command processing
 * @ufshcd_state: UFSHCD states
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
 * @is_init_prefetch: flag to check if data was pre-fetched in initialization
 * @init_prefetch_data: data pre-fetched during initialization
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @dev_info: information of the UFS device connected to this host
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @wlun_dev_clr_ua: flag for clearing UNIT ATTENTION on the device W-LU
 *		     (NOTE(review): confirm exact semantics in ufshcd.c)
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @clk_gating: clock gating related info
 * @caps: enabled host capabilities (UFSHCD_CAP_* bits)
 * @devfreq: devfreq instance used for clock scaling
 * @clk_scaling: clock scaling related info
 * @is_sys_suspended: whether the HBA is in system suspend
 */
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	int pm_op_in_progress;

	struct ufshcd_lrb *lrb;
	unsigned long lrb_in_use;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	struct ufs_hba_variant_ops *vops;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;

	/* Interrupt aggregation support is broken */
#define UFSHCD_QUIRK_BROKEN_INTR_AGGR			UFS_BIT(0)

	/*
	 * delay before each dme command is required as the unipro
	 * layer has shown instabilities
	 */
#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		UFS_BIT(1)

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
#define UFSHCD_QUIRK_BROKEN_LCC				UFS_BIT(2)

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		UFS_BIT(3)

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		UFS_BIT(4)

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		UFS_BIT(5)

	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	wait_queue_head_t tm_wq;
	wait_queue_head_t tm_tag_wq;
	unsigned long tm_condition;
	unsigned long tm_slots_in_use;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;
	bool is_init_prefetch;
	struct ufs_init_prefetch init_prefetch_data;

	/* Work Queues */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;
	/* Allow dynamic clk gating */
#define UFSHCD_CAP_CLK_GATING	(1 << 0)
	/* Allow hibern8 with clk gating */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING	(1 << 2)
	/* Allow auto bkops to be enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;
};
2014-09-25 16:32:32 +04:00
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed ( struct ufs_hba * hba )
{
return hba - > caps & UFSHCD_CAP_CLK_GATING ;
}
static inline bool ufshcd_can_hibern8_during_gating ( struct ufs_hba * hba )
{
return hba - > caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING ;
}
2014-09-25 16:32:34 +04:00
static inline int ufshcd_is_clkscaling_enabled ( struct ufs_hba * hba )
{
return hba - > caps & UFSHCD_CAP_CLK_SCALING ;
}
2014-09-25 16:32:35 +04:00
static inline bool ufshcd_can_autobkops_during_suspend ( struct ufs_hba * hba )
{
return hba - > caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND ;
}
2015-05-17 18:54:57 +03:00
static inline bool ufshcd_is_intr_aggr_allowed ( struct ufs_hba * hba )
{
if ( ( hba - > caps & UFSHCD_CAP_INTR_AGGR ) & &
! ( hba - > quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR ) )
return true ;
else
return false ;
}
2013-06-26 21:09:26 +04:00
# define ufshcd_writel(hba, val, reg) \
writel ( ( val ) , ( hba ) - > mmio_base + ( reg ) )
# define ufshcd_readl(hba, reg) \
readl ( ( hba ) - > mmio_base + ( reg ) )
2014-09-25 16:32:36 +04:00
/**
 * ufshcd_rmwl - read modify write into a register
 * @hba - per adapter instance
 * @mask - mask to apply on read value
 * @val - actual value to write
 * @reg - register address
 */
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 cur = ufshcd_readl(hba, reg);

	/* clear the masked bits, then merge in the new (masked) value */
	cur = (cur & ~mask) | (val & mask);
	ufshcd_writel(hba, cur, reg);
}
2014-09-25 16:32:21 +04:00
int ufshcd_alloc_host ( struct device * , struct ufs_hba * * ) ;
2015-10-28 14:15:49 +03:00
void ufshcd_dealloc_host ( struct ufs_hba * ) ;
2014-09-25 16:32:21 +04:00
int ufshcd_init ( struct ufs_hba * , void __iomem * , unsigned int ) ;
2013-02-25 20:14:33 +04:00
void ufshcd_remove ( struct ufs_hba * ) ;
/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 *
 * Clearing the HCE enable bit disables the host controller.
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
}
2013-07-29 23:05:58 +04:00
/*
 * Compile-time check: an aligned UPIU slot must be large enough to hold a
 * general UPIU request plus the largest query descriptor.
 */
static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
2015-10-28 14:15:47 +03:00
/**
* ufshcd_set_variant - set variant specific data to the hba
* @ hba - per adapter instance
* @ variant - pointer to variant specific data
*/
static inline void ufshcd_set_variant ( struct ufs_hba * hba , void * variant )
{
BUG_ON ( ! hba ) ;
hba - > priv = variant ;
}
/**
* ufshcd_get_variant - get variant specific data from the hba
* @ hba - per adapter instance
*/
static inline void * ufshcd_get_variant ( struct ufs_hba * hba )
{
BUG_ON ( ! hba ) ;
return hba - > priv ;
}
2013-07-29 23:05:59 +04:00
/* Runtime PM hooks */
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
/* System PM and shutdown hooks */
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
2013-08-31 20:10:21 +04:00
/* Low-level DME attribute accessors implemented in ufshcd.c */
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0	/* attribute of the local UniPro stack */
#define DME_PEER	1	/* attribute of the peer (device) stack */
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */
/* Set a local DME attribute using the NORMAL set type. */
static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}
/* Set a local DME attribute using the STATIC set type. */
static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}
/* Set a peer (device side) DME attribute using the NORMAL set type. */
static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}
/* Set a peer (device side) DME attribute using the STATIC set type. */
static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}
/* Read a local DME attribute into *mib_val. */
static inline int ufshcd_dme_get(struct ufs_hba *hba, u32 attr_sel,
				 u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}
/* Read a peer (device side) DME attribute into *mib_val. */
static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, u32 attr_sel,
				      u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
2014-09-25 16:32:32 +04:00
/* Clock gating reference counting: hold keeps clocks on, release allows gating */
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
2015-10-28 14:15:48 +03:00
/* Wrapper functions for safely calling variant operations */
static inline const char * ufshcd_get_var_name ( struct ufs_hba * hba )
{
if ( hba - > vops )
return hba - > vops - > name ;
return " " ;
}
static inline int ufshcd_vops_init ( struct ufs_hba * hba )
{
if ( hba - > vops & & hba - > vops - > init )
return hba - > vops - > init ( hba ) ;
return 0 ;
}
static inline void ufshcd_vops_exit ( struct ufs_hba * hba )
{
if ( hba - > vops & & hba - > vops - > exit )
return hba - > vops - > exit ( hba ) ;
}
static inline u32 ufshcd_vops_get_ufs_hci_version ( struct ufs_hba * hba )
{
if ( hba - > vops & & hba - > vops - > get_ufs_hci_version )
return hba - > vops - > get_ufs_hci_version ( hba ) ;
return ufshcd_readl ( hba , REG_UFS_VERSION ) ;
}
static inline void ufshcd_vops_clk_scale_notify ( struct ufs_hba * hba )
{
if ( hba - > vops & & hba - > vops - > clk_scale_notify )
return hba - > vops - > clk_scale_notify ( hba ) ;
}
/* Invoke the variant's setup_clocks hook if registered; success otherwise. */
static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
{
	if (!hba->vops || !hba->vops->setup_clocks)
		return 0;

	return hba->vops->setup_clocks(hba, on);
}
/* Invoke the variant's setup_regulators hook if registered. */
static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (!hba->vops || !hba->vops->setup_regulators)
		return 0;

	return hba->vops->setup_regulators(hba, status);
}
/* Notify the variant around HCE enable (status = PRE_CHANGE/POST_CHANGE). */
static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (!hba->vops || !hba->vops->hce_enable_notify)
		return 0;

	return hba->vops->hce_enable_notify(hba, status);
}
/* Notify the variant around link startup (status = PRE_CHANGE/POST_CHANGE). */
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						  bool status)
{
	if (!hba->vops || !hba->vops->link_startup_notify)
		return 0;

	return hba->vops->link_startup_notify(hba, status);
}
/*
 * Forward a power mode change notification to the variant.
 * Returns -ENOTSUPP when no handler is registered so callers can tell
 * "unsupported" apart from a handler failure.
 */
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				bool status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	if (!hba->vops || !hba->vops->pwr_change_notify)
		return -ENOTSUPP;

	return hba->vops->pwr_change_notify(hba, status, dev_max_params,
					    dev_req_params);
}
static inline int ufshcd_vops_suspend ( struct ufs_hba * hba , enum ufs_pm_op op )
{
if ( hba - > vops & & hba - > vops - > suspend )
return hba - > vops - > suspend ( hba , op ) ;
return 0 ;
}
static inline int ufshcd_vops_resume ( struct ufs_hba * hba , enum ufs_pm_op op )
{
if ( hba - > vops & & hba - > vops - > resume )
return hba - > vops - > resume ( hba , op ) ;
return 0 ;
}
2013-02-25 20:14:33 +04:00
# endif /* End of Header */