/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author:
 *  Zhigang.Wei <zhigang.wei@mediatek.com>
 *  Chunfeng.Yun <chunfeng.yun@mediatek.com>
 */

#ifndef _XHCI_MTK_H_
#define _XHCI_MTK_H_

#include "xhci.h"
/*
 * To simplify the scheduler algorithm, set an upper limit for ESIT;
 * if a synchronous ep's ESIT is larger than @XHCI_MTK_MAX_ESIT, round it
 * down to the limit value, which means allocating more bandwidth to it.
 */
#define XHCI_MTK_MAX_ESIT	64
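/*
 * Illustrative sketch only (not part of the driver API): how an endpoint's
 * ESIT could be rounded down to the scheduler limit described above. The
 * helper name is hypothetical; min_t() is assumed available via "xhci.h"
 * includes.
 */
static inline u32 xhci_mtk_example_clamp_esit(u32 esit)
{
	return min_t(u32, esit, XHCI_MTK_MAX_ESIT);
}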
/**
 * struct mu3h_sch_tt: schedule information for the transaction translator (TT)
 *
 * @ss_bit_map: used to avoid overlap of start-split microframes
 * @fs_bus_bw: array to keep track of bandwidth already used for FS
 * @ep_list: Endpoints using this TT
 */
struct mu3h_sch_tt {
	DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
	u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
	struct list_head ep_list;
};
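/*
 * Illustrative sketch only: how a scheduler might claim a start-split
 * microframe in @ss_bit_map so that two endpoints behind the same TT do not
 * overlap their start splits. The helper name and the modulo indexing are
 * assumptions, not driver API; test_bit()/set_bit() are assumed available
 * via "xhci.h" includes.
 */
static inline bool xhci_mtk_example_claim_ss(struct mu3h_sch_tt *tt, u32 uframe)
{
	u32 idx = uframe % XHCI_MTK_MAX_ESIT;

	if (test_bit(idx, tt->ss_bit_map))
		return false;	/* already used by another endpoint */

	set_bit(idx, tt->ss_bit_map);
	return true;
}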
/**
 * struct mu3h_sch_bw_info: schedule information for bandwidth domain
 *
 * @bus_bw: array to keep track of bandwidth already used in each uframe
 * @bw_ep_list: eps in the bandwidth domain
 *
 * A HS root port is treated as a single bandwidth domain, while a SS root
 * port is treated as two bandwidth domains, one for IN eps and another for
 * OUT eps.
 */
struct mu3h_sch_bw_info {
	u32 bus_bw[XHCI_MTK_MAX_ESIT];
	struct list_head bw_ep_list;
};
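/*
 * Illustrative sketch only: how used bandwidth could be charged into a
 * domain's @bus_bw once per service interval inside the ESIT window. The
 * helper name is hypothetical and only mirrors the bookkeeping described
 * above.
 */
static inline void xhci_mtk_example_account_bw(struct mu3h_sch_bw_info *bw_info,
					       u32 offset, u32 esit, u32 cost)
{
	u32 i;

	for (i = offset; i < XHCI_MTK_MAX_ESIT; i += esit)
		bw_info->bus_bw[i] += cost;
}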
/**
 * struct mu3h_sch_ep_info: schedule information for endpoint
 *
 * @esit: unit is 125us, equal to 1 << Interval field in ep-context
 * @num_budget_microframes: number of continuous uframes
 *		(when @repeat == 1) scheduled within the interval
 * @bw_cost_per_microframe: bandwidth cost per microframe
 * @endpoint: linked into the bandwidth domain it belongs to
 * @tt_endpoint: linked into the list of the mu3h_sch_tt it belongs to
 * @sch_tt: the mu3h_sch_tt this endpoint is linked into
 * @ep_type: endpoint type
 * @maxpkt: max packet size of endpoint
 * @ep: address of usb_host_endpoint struct
 * @speed: device speed of the endpoint's device
 * @allocated: the bandwidth is already allocated from bus_bw
 * @offset: which uframe of the interval the transfer is scheduled
 *		at for the first time within the interval
 * @repeat: the time gap between two uframes that transfers are
 *		scheduled at within an interval. In the simple algorithm,
 *		only assign 0 or 1 to it; 0 means using only one uframe in
 *		an interval, and 1 means using @num_budget_microframes
 *		continuous uframes
 * @pkts: number of packets to be transferred in the scheduled uframes
 * @cs_count: number of CS (complete split) transactions the host will trigger
 * @burst_mode: burst mode for scheduling. 0: normal burst mode,
 *		distribute the bMaxBurst+1 packets for a single burst
 *		according to @pkts and @repeat, and repeat the burst
 *		multiple times; 1: distribute the (bMaxBurst+1)*(Mult+1)
 *		packets according to @pkts and @repeat. Normal mode is
 *		used by default
 * @bw_budget_table: table to record bandwidth budget per microframe
 */
struct mu3h_sch_ep_info {
	u32 esit;
	u32 num_budget_microframes;
	u32 bw_cost_per_microframe;
	struct list_head endpoint;
	struct list_head tt_endpoint;
	struct mu3h_sch_tt *sch_tt;
	u32 ep_type;
	u32 maxpkt;
	struct usb_host_endpoint *ep;
	enum usb_device_speed speed;
	bool allocated;
	/*
	 * mtk xHCI scheduling information put into reserved DWs
	 * in ep context
	 */
	u32 offset;
	u32 repeat;
	u32 pkts;
	u32 cs_count;
	u32 burst_mode;
	u32 bw_budget_table[];
};
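/*
 * Illustrative sketch only: how many uframes of one interval an endpoint
 * occupies, following the @repeat/@num_budget_microframes description above,
 * and the total bandwidth it asks for in that interval. The helper name is
 * hypothetical.
 */
static inline u32 xhci_mtk_example_interval_bw(struct mu3h_sch_ep_info *sch_ep)
{
	/* @repeat == 0: one uframe; @repeat == 1: continuous uframes */
	u32 uframes = sch_ep->repeat ? sch_ep->num_budget_microframes : 1;

	return uframes * sch_ep->bw_cost_per_microframe;
}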
#define MU3C_U3_PORT_MAX	4
#define MU3C_U2_PORT_MAX	5

/**
 * struct mu3c_ippc_regs: MTK ssusb ip port control registers
 * @ip_pw_ctr0~3: ip power and clock control registers
 * @ip_pw_sts1~2: ip power and clock status registers
 * @ip_xhci_cap: ip xHCI capability register
 * @u3_ctrl_p[x]: ip usb3 port x control register, only low 4 bytes are used
 * @u2_ctrl_p[x]: ip usb2 port x control register, only low 4 bytes are used
 * @u2_phy_pll: usb2 phy pll control register
 */
struct mu3c_ippc_regs {
	__le32 ip_pw_ctr0;
	__le32 ip_pw_ctr1;
	__le32 ip_pw_ctr2;
	__le32 ip_pw_ctr3;
	__le32 ip_pw_sts1;
	__le32 ip_pw_sts2;
	__le32 reserved0[3];
	__le32 ip_xhci_cap;
	__le32 reserved1[2];
	__le64 u3_ctrl_p[MU3C_U3_PORT_MAX];
	__le64 u2_ctrl_p[MU3C_U2_PORT_MAX];
	__le32 reserved2;
	__le32 u2_phy_pll;
	__le32 reserved3[33]; /* 0x80 ~ 0xff */
};
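/*
 * Illustrative sketch only: per the layout comment above, only the low
 * 4 bytes of @u3_ctrl_p[x] are used, so a 32-bit MMIO read of the low word
 * is sufficient. The helper name is hypothetical; readl() is assumed
 * available via "xhci.h" includes.
 */
static inline u32
xhci_mtk_example_read_u3_ctrl(struct mu3c_ippc_regs __iomem *ippc, int port)
{
	return readl(&ippc->u3_ctrl_p[port]);
}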
struct xhci_hcd_mtk {
	struct device *dev;
	struct usb_hcd *hcd;
	struct mu3h_sch_bw_info *sch_array;
	struct list_head bw_ep_chk_list;
	struct mu3c_ippc_regs __iomem *ippc_regs;
	bool has_ippc;
	int num_u2_ports;
	int num_u3_ports;
	int u3p_dis_msk;
	struct regulator *vusb33;
	struct regulator *vbus;
	struct clk *sys_clk;	/* sys and mac clock */
	struct clk *xhci_clk;
	struct clk *ref_clk;
	struct clk *mcu_clk;
	struct clk *dma_clk;
	struct regmap *pericfg;
	struct phy **phys;
	int num_phys;
	bool lpm_support;
	/* usb remote wakeup */
	bool uwk_en;
	struct regmap *uwk;
	u32 uwk_reg_base;
	u32 uwk_vers;
};
static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
{
	return dev_get_drvdata(hcd->self.controller);
}
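/*
 * Usage sketch only: a glue-layer callback would typically recover the
 * MediaTek private data from the HCD like this. The function name is
 * hypothetical.
 */
static inline bool xhci_mtk_example_has_ippc(struct usb_hcd *hcd)
{
	struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);

	return mtk->has_ippc;
}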
int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk);
void xhci_mtk_sch_exit(struct xhci_hcd_mtk *mtk);
int xhci_mtk_add_ep(struct usb_hcd *hcd, struct usb_device *udev,
		    struct usb_host_endpoint *ep);
int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
		     struct usb_host_endpoint *ep);
int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);

#endif /* _XHCI_MTK_H_ */