// SPDX-License-Identifier: GPL-2.0+
/*
 * Rockchip AXI PCIe endpoint controller driver
 *
 * Copyright (c) 2018 Rockchip, Inc.
 *
 * Author: Shawn Lin <shawn.lin@rock-chips.com>
 *         Simon Xue <xxm@rock-chips.com>
 */

#include <linux/configfs.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>

#include "pcie-rockchip.h"
/**
 * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver
 * @rockchip: Rockchip PCIe controller
 * @epc: PCI EPC device
 * @max_regions: maximum number of regions supported by hardware
 * @ob_region_map: bitmask of mapped outbound regions
 * @ob_addr: base addresses in the AXI bus where the outbound regions start
 * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *		   dedicated outbound region is mapped.
 * @irq_cpu_addr: base address in the CPU space where a write access triggers
 *		  the sending of a memory write (MSI) / normal message (INTX
 *		  IRQ) TLP through the PCIe bus.
 * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
 *		  dedicated outbound region.
 * @irq_pci_fn: the latest PCI function that has updated the mapping of
 *		the MSI/INTX IRQ dedicated outbound region.
 * @irq_pending: bitmask of asserted INTX IRQs.
 */
struct rockchip_pcie_ep {
	struct rockchip_pcie	rockchip;
	struct pci_epc		*epc;
	u32			max_regions;
	unsigned long		ob_region_map;
	phys_addr_t		*ob_addr;
	phys_addr_t		irq_phys_addr;
	void __iomem		*irq_cpu_addr;
	u64			irq_pci_addr;
	u8			irq_pci_fn;
	u8			irq_pending;
};

static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
					  u32 region)
{
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
}
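
/*
 * Program one outbound ATU entry. The low PCI address word also encodes the
 * number of address bits passed through unchanged (clamped to a minimum of
 * 8, i.e. a 256-byte window), and the region descriptor selects
 * AXI_WRAPPER_MEM_WRITE and tags outgoing TLPs with the originating function
 * number.
 */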
static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
					 u32 r, u64 cpu_addr, u64 pci_addr,
					 size_t size)
{
	int num_pass_bits = fls64(size - 1);
	u32 addr0, addr1, desc0;

	if (num_pass_bits < 8)
		num_pass_bits = 8;

	addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
		(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
	addr1 = upper_32_bits(pci_addr);
	desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE;

	/* PCI bus address region */
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r));
	rockchip_pcie_write(rockchip, desc0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r));
	rockchip_pcie_write(rockchip, 0,
			    ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r));
}

static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
					 struct pci_epf_header *hdr)
{
	u32 reg;
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;

	/* All functions share the same vendor ID with function 0 */
	if (fn == 0) {
		rockchip_pcie_write(rockchip,
				    hdr->vendorid | hdr->subsys_vendor_id << 16,
				    PCIE_CORE_CONFIG_VENDOR);
	}

	reg = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_DID_VID);
	reg = (reg & 0xFFFF) | (hdr->deviceid << 16);
	rockchip_pcie_write(rockchip, reg, PCIE_EP_CONFIG_DID_VID);

	rockchip_pcie_write(rockchip,
			    hdr->revid |
			    hdr->progif_code << 8 |
			    hdr->subclass_code << 16 |
			    hdr->baseclass_code << 24,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID);
	rockchip_pcie_write(rockchip, hdr->cache_line_size,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_CACHE_LINE_SIZE);
	rockchip_pcie_write(rockchip, hdr->subsys_id << 16,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_SUBSYSTEM_VENDOR_ID);
	rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    PCI_INTERRUPT_LINE);

	return 0;
}
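
/*
 * BAR configuration for a function is split across two registers: BARs 0-3
 * live in BAR_CFG0 and BARs 4-5 in BAR_CFG1. The aperture field encodes the
 * BAR size as 2^(aperture + 7), so the requested size is rounded up to a
 * power of two before programming.
 */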
static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				    struct pci_epf_bar *epf_bar)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE);

	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64-bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && is_prefetch)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS;
	}

	if (bar < BAR_4) {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	cfg = rockchip_pcie_read(rockchip, reg);
	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
		ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));

	rockchip_pcie_write(rockchip, cfg, reg);
	rockchip_pcie_write(rockchip, addr0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
	rockchip_pcie_write(rockchip, addr1,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));

	return 0;
}

static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				       struct pci_epf_bar *epf_bar)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 reg, cfg, b, ctrl;
	enum pci_barno bar = epf_bar->barno;

	if (bar < BAR_4) {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn);
		b = bar;
	} else {
		reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn);
		b = bar - BAR_4;
	}

	ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED;
	cfg = rockchip_pcie_read(rockchip, reg);
	cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
		 ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
	cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);

	rockchip_pcie_write(rockchip, cfg, reg);
	rockchip_pcie_write(rockchip, 0x0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar));
	rockchip_pcie_write(rockchip, 0x0,
			    ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar));
}
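
/*
 * Outbound regions are fixed 1 MB windows; the region index is taken from
 * address bits [24:20], giving at most 32 regions.
 */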
static inline u32 rockchip_ob_region(phys_addr_t addr)
{
	return (addr >> ilog2(SZ_1M)) & 0x1f;
}

static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				     phys_addr_t addr, u64 pci_addr,
				     size_t size)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *pcie = &ep->rockchip;
	u32 r = rockchip_ob_region(addr);

	rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
					phys_addr_t addr)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 r;

	for (r = 0; r < ep->max_regions; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions)
		return;

	rockchip_pcie_clear_ep_ob_atu(rockchip, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}
static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
				    u8 multi_msg_cap)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags;

	flags = rockchip_pcie_read(rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
	flags |= (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
		 (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
	flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
	rockchip_pcie_write(rockchip, flags,
			    ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
			    ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	return 0;
}

static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags;

	flags = rockchip_pcie_read(rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
		return -EINVAL;

	return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}
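
/*
 * Assert or deassert the legacy interrupt through the client interrupt
 * control register; ep->irq_pending tracks which of the four INTX lines are
 * currently asserted.
 */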
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 intx, bool do_assert)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;

	intx &= 3;

	if (do_assert) {
		ep->irq_pending |= BIT(intx);
		rockchip_pcie_write(rockchip,
				    PCIE_CLIENT_INT_IN_ASSERT |
				    PCIE_CLIENT_INT_PEND_ST_PEND,
				    PCIE_CLIENT_LEGACY_INT_CTRL);
	} else {
		ep->irq_pending &= ~BIT(intx);
		rockchip_pcie_write(rockchip,
				    PCIE_CLIENT_INT_IN_DEASSERT |
				    PCIE_CLIENT_INT_PEND_ST_NORMAL,
				    PCIE_CLIENT_LEGACY_INT_CTRL);
	}
}
static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
					  u8 intx)
{
	u16 cmd;

	cmd = rockchip_pcie_read(&ep->rockchip,
				 ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				 ROCKCHIP_PCIE_EP_CMD_STATUS);

	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	/*
	 * The TRM vaguely requires some delay between asserting and
	 * deasserting INTx, on the order of a few AHB bus clock cycles.
	 * A 1 ms delay is more than sufficient.
	 */
	rockchip_pcie_ep_assert_intx(ep, fn, intx, true);
	mdelay(1);
	rockchip_pcie_ep_assert_intx(ep, fn, intx, false);

	return 0;
}
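
/*
 * Send an MSI for the given function: read the MSI capability that the host
 * programmed into the function's config space, compose the message data for
 * interrupt_num, remap the dedicated IRQ outbound window if the target
 * address or function changed since the last MSI, then issue a CPU write
 * that the ATU turns into a Memory Write TLP towards the host.
 */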
static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
					 u8 interrupt_num)
{
	struct rockchip_pcie *rockchip = &ep->rockchip;
	u32 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr;
	u32 r;

	/* Check MSI enable bit */
	flags = rockchip_pcie_read(&ep->rockchip,
				   ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				   ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
	if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
		return -EINVAL;

	/* Get MSI numbers from MME */
	mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
			ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Set MSI private data */
	data_mask = msi_count - 1;
	data = rockchip_pcie_read(rockchip,
				  ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				  ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				  PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get MSI PCI address */
	pci_addr = rockchip_pcie_read(rockchip,
				      ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				      ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				      PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= rockchip_pcie_read(rockchip,
				       ROCKCHIP_PCIE_EP_FUNC_BASE(fn) +
				       ROCKCHIP_PCIE_EP_MSI_CTRL_REG +
				       PCI_MSI_ADDRESS_LO);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
		     ep->irq_pci_fn != fn)) {
		r = rockchip_ob_region(ep->irq_phys_addr);
		rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
					     ep->irq_phys_addr,
					     pci_addr & PCIE_ADDR_MASK,
					     ~PCIE_ADDR_MASK + 1);
		ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
		ep->irq_pci_fn = fn;
	}

	writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
	return 0;
}

static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				      unsigned int type, u16 interrupt_num)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);

	switch (type) {
	case PCI_IRQ_INTX:
		return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
	case PCI_IRQ_MSI:
		return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
	default:
		return -EINVAL;
	}
}
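
/*
 * Enable the functions bound to this EPC in PCIE_CORE_PHY_FUNC_CFG: function 0
 * is always enabled, and one additional bit is set per attached endpoint
 * function.
 */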
static int rockchip_pcie_ep_start(struct pci_epc *epc)
{
	struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
	struct rockchip_pcie *rockchip = &ep->rockchip;
	struct pci_epf *epf;
	u32 cfg;

	cfg = BIT(0);
	list_for_each_entry(epf, &epc->pci_epf, list)
		cfg |= BIT(epf->func_no);

	rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);

	return 0;
}

static const struct pci_epc_features rockchip_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = false,
	.align = 256,
};

static const struct pci_epc_features *
rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	return &rockchip_pcie_epc_features;
}

static const struct pci_epc_ops rockchip_pcie_epc_ops = {
	.write_header	= rockchip_pcie_ep_write_header,
	.set_bar	= rockchip_pcie_ep_set_bar,
	.clear_bar	= rockchip_pcie_ep_clear_bar,
	.map_addr	= rockchip_pcie_ep_map_addr,
	.unmap_addr	= rockchip_pcie_ep_unmap_addr,
	.set_msi	= rockchip_pcie_ep_set_msi,
	.get_msi	= rockchip_pcie_ep_get_msi,
	.raise_irq	= rockchip_pcie_ep_raise_irq,
	.start		= rockchip_pcie_ep_start,
	.get_features	= rockchip_pcie_ep_get_features,
};
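
/*
 * Parse the endpoint-specific DT properties: "rockchip,max-outbound-regions"
 * (clamped to the hardware limit and defaulting to it when absent) and
 * "max-functions" (defaulting to a single function).
 */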
static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
				     struct rockchip_pcie_ep *ep)
{
	struct device *dev = rockchip->dev;
	int err;

	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_get_phys(rockchip);
	if (err)
		return err;

	err = of_property_read_u32(dev->of_node,
				   "rockchip,max-outbound-regions",
				   &ep->max_regions);
	if (err < 0 || ep->max_regions > MAX_REGION_LIMIT)
		ep->max_regions = MAX_REGION_LIMIT;

	ep->ob_region_map = 0;

	err = of_property_read_u8(dev->of_node, "max-functions",
				  &ep->epc->max_functions);
	if (err < 0)
		ep->epc->max_functions = 1;

	return 0;
}

static const struct of_device_id rockchip_pcie_ep_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie-ep"},
	{},
};
static int rockchip_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie_ep *ep;
	struct rockchip_pcie *rockchip;
	struct pci_epc *epc;
	size_t max_regions;
	struct pci_epc_mem_window *windows = NULL;
	int err, i;
	u32 cfg_msi, cfg_msix_cp;

	ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	rockchip = &ep->rockchip;
	rockchip->is_rc = false;
	rockchip->dev = dev;

	epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	ep->epc = epc;
	epc_set_drvdata(epc, ep);

	err = rockchip_pcie_parse_ep_dt(rockchip, ep);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_init_port(rockchip);
	if (err)
		goto err_disable_clocks;

	/* Establish the link automatically */
	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
			    PCIE_CLIENT_CONFIG);

	max_regions = ep->max_regions;
	ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr) {
		err = -ENOMEM;
		goto err_uninit_port;
	}

	/* Only enable function 0 by default */
	rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
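
	/*
	 * Split the EPC address space into ep->max_regions windows of 1 MB,
	 * matching the fixed granularity of the controller's outbound ATU
	 * regions.
	 */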
	windows = devm_kcalloc(dev, ep->max_regions,
			       sizeof(struct pci_epc_mem_window), GFP_KERNEL);
	if (!windows) {
		err = -ENOMEM;
		goto err_uninit_port;
	}
	for (i = 0; i < ep->max_regions; i++) {
		windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
		windows[i].size = SZ_1M;
		windows[i].page_size = SZ_1M;
	}
	err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
	devm_kfree(dev, windows);

	if (err < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		goto err_uninit_port;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_1M);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		err = -ENOMEM;
		goto err_epc_mem_exit;
	}

	ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;

	/*
	 * MSI-X is not supported but the controller still advertises the MSI-X
	 * capability by default, which can lead to the Root Complex side
	 * allocating MSI-X vectors which cannot be used. Avoid this by skipping
	 * the MSI-X capability entry in the PCIe capabilities linked-list: get
	 * the next pointer from the MSI-X entry and set that in the MSI
	 * capability entry (which is the previous entry). This way the MSI-X
	 * entry is skipped (left out of the linked-list) and not advertised.
	 */
	cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
				     ROCKCHIP_PCIE_EP_MSI_CTRL_REG);

	cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK;

	cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE +
					 ROCKCHIP_PCIE_EP_MSIX_CAP_REG) &
					 ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK;

	cfg_msi |= cfg_msix_cp;

	rockchip_pcie_write(rockchip, cfg_msi,
			    PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);

	rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
			    PCIE_CLIENT_CONFIG);

	pci_epc_init_notify(epc);

	return 0;
err_epc_mem_exit:
	pci_epc_mem_exit(epc);
err_uninit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
	rockchip_pcie_disable_clocks(rockchip);
	return err;
}

static struct platform_driver rockchip_pcie_ep_driver = {
	.driver = {
		.name = "rockchip-pcie-ep",
		.of_match_table = rockchip_pcie_ep_of_match,
	},
	.probe = rockchip_pcie_ep_probe,
};

builtin_platform_driver(rockchip_pcie_ep_driver);