/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#ifndef DSL3510_H_
#define DSL3510_H_

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/**
* struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *        interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *              this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *                  MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
*/
struct tb_nhi {
	struct mutex lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};

/**
* struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *        nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptor array
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
*/
struct tb_ring {
	struct mutex lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
};
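
/*
 * Illustrative sketch only, not part of this interface: the kernel-doc above
 * states that ring->lock must be acquired after nhi->lock, so a caller that
 * needs both locks nests them as shown below. The function name is made up
 * for the example.
 */
static inline void tb_example_lock_ring(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);	/* NHI-wide lock first */
	mutex_lock(&ring->lock);	/* then the per-ring lock */

	/* ... operate on the ring ... */

	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);
}
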
/* Leave ring interrupt enabled on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * struct ring_frame - for use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame buffer
 * @callback: Callback invoked once the frame has been transferred or canceled
 * @list: Entry used to link the frame to the ring queues
 * @size: Size of the frame in bytes (TX: in, RX: out)
 * @flags: Flags for the frame (RX: out)
 * @eof: End of frame field (TX: in, RX: out)
 * @sof: Start of frame field (TX: in, RX: out)
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;	/* TX: in, RX: out */
	u32 flags:12;	/* RX: out */
	u32 eof:4;	/* TX: in, RX: out */
	u32 sof:4;	/* TX: in, RX: out */
};

#define TB_FRAME_SIZE		0x100	/* minimum size for ring_rx */

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags);
struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags);
void ring_start(struct tb_ring *ring);
void ring_stop(struct tb_ring *ring);
void ring_free(struct tb_ring *ring);

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * ring_rx() - enqueue a frame on an RX ring
 *
 * frame->buffer, frame->buffer_phy and frame->callback have to be set. The
 * buffer must contain at least TB_FRAME_SIZE bytes.
 *
 * frame->callback will be invoked with frame->size, frame->flags, frame->eof,
 * frame->sof set once the frame has been received.
 *
 * If ring_stop() is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: -ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __ring_enqueue(ring, frame);
}
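
/*
 * Illustrative sketch only, not part of this interface: posting a single
 * receive buffer. The hop number, ring size, flags, the caller-provided DMA
 * address and completion callback, and the function name are assumptions
 * made for the example; -ENOMEM is assumed to be available through the
 * existing includes. Ring teardown (ring_stop()/ring_free()) is omitted for
 * brevity.
 */
static inline int tb_example_post_rx(struct tb_nhi *nhi, struct ring_frame *frame,
				     dma_addr_t buf_dma, ring_cb rx_done)
{
	/* Hop 0 and 16 descriptors are arbitrary example values. */
	struct tb_ring *ring = ring_alloc_rx(nhi, 0, 16, 0);

	if (!ring)
		return -ENOMEM;

	frame->buffer_phy = buf_dma;	/* buffer must hold >= TB_FRAME_SIZE bytes */
	frame->callback = rx_done;	/* invoked once the frame has been received */

	ring_start(ring);
	return ring_rx(ring, frame);	/* 0 on success, -ESHUTDOWN if stopped */
}
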
/**
 * ring_tx() - enqueue a frame on a TX ring
 *
 * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
 * and frame->sof have to be set.
 *
 * frame->callback will be invoked once the frame has been transmitted.
 *
 * If ring_stop() is called after the packet has been enqueued frame->callback
 * will be called with canceled set to true.
 *
 * Return: -ESHUTDOWN if ring_stop() has been called. Zero otherwise.
 */
static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __ring_enqueue(ring, frame);
}
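
/*
 * Illustrative sketch only, not part of this interface: transmitting one
 * already DMA-mapped frame. The hop number, ring size, use of
 * RING_FLAG_NO_SUSPEND, the eof/sof values and the function name are
 * assumptions made purely for the example.
 */
static inline int tb_example_post_tx(struct tb_nhi *nhi, struct ring_frame *frame,
				     dma_addr_t buf_dma, u32 len, ring_cb tx_done)
{
	/* Hop 0 and 16 descriptors are arbitrary example values. */
	struct tb_ring *ring = ring_alloc_tx(nhi, 0, 16, RING_FLAG_NO_SUSPEND);

	if (!ring)
		return -ENOMEM;

	frame->buffer_phy = buf_dma;
	frame->callback = tx_done;	/* invoked once the frame is transmitted */
	frame->size = len;		/* TX: in */
	frame->eof = 0;			/* protocol specific, example value */
	frame->sof = 0;			/* protocol specific, example value */

	ring_start(ring);
	return ring_tx(ring, frame);	/* 0 on success, -ESHUTDOWN if stopped */
}
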
enum nhi_fw_mode {
	NHI_FW_SAFE_MODE,
	NHI_FW_AUTH_MODE,
	NHI_FW_EP_MODE,
	NHI_FW_CM_MODE,
};

enum nhi_mailbox_cmd {
	NHI_MAILBOX_SAVE_DEVS = 0x05,
	NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
	NHI_MAILBOX_DRV_UNLOADS = 0x07,
	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
};

int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
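
/*
 * Illustrative sketch only, not part of this interface: querying the
 * firmware mode before issuing a mailbox command. The choice of
 * NHI_MAILBOX_DRV_UNLOADS, the data value 0 and the function name are
 * assumptions made for the example.
 */
static inline int tb_example_notify_fw(struct tb_nhi *nhi)
{
	/* Only talk to the mailbox when the firmware connection manager runs. */
	if (nhi_mailbox_mode(nhi) != NHI_FW_CM_MODE)
		return 0;

	return nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
}
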
/*
* PCI IDs used in this driver from Win Ridge forward . There is no
* need for the PCI quirk anymore as we will use ICM also on Apple
* hardware .
*/
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI			0x157d
#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE			0x157e
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI			0x15bf
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE		0x15c0
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI		0x15d2
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE		0x15d3
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI		0x15d9
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE		0x15da
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI		0x15dc
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI		0x15dd
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI		0x15de

#endif