2017-02-09 09:17:38 -08:00
/*
* Copyright ( C ) 2015 - 2017 Netronome Systems , Inc .
*
* This software is dual licensed under the GNU General License Version 2 ,
* June 1991 as shown in the file COPYING in the top - level directory of this
* source tree or the BSD 2 - Clause License provided below . You have the
* option to license this software under the complete terms of either license .
*
* The BSD 2 - Clause License :
*
* Redistribution and use in source and binary forms , with or
* without modification , are permitted provided that the following
* conditions are met :
*
* 1. Redistributions of source code must retain the above
* copyright notice , this list of conditions and the following
* disclaimer .
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice , this list of conditions and the following
* disclaimer in the documentation and / or other materials
* provided with the distribution .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
* EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
* ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE .
*/
/*
* nfp_net_main . c
* Netronome network device driver : Main entry point
* Authors : Jakub Kicinski < jakub . kicinski @ netronome . com >
* Alejandro Lucero < alejandro . lucero @ netronome . com >
* Jason McMullan < jason . mcmullan @ netronome . com >
* Rolf Neugebauer < rolf . neugebauer @ netronome . com >
*/
# include <linux/etherdevice.h>
# include <linux/kernel.h>
# include <linux/init.h>
2017-05-26 01:03:36 -07:00
# include <linux/lockdep.h>
2017-02-09 09:17:38 -08:00
# include <linux/pci.h>
# include <linux/pci_regs.h>
# include <linux/msi.h>
# include <linux/random.h>
2017-04-04 16:12:26 -07:00
# include <linux/rtnetlink.h>
2017-02-09 09:17:38 -08:00
# include "nfpcore/nfp.h"
# include "nfpcore/nfp_cpp.h"
# include "nfpcore/nfp_nffw.h"
2017-04-04 16:12:30 -07:00
# include "nfpcore/nfp_nsp.h"
2017-02-09 09:17:38 -08:00
# include "nfpcore/nfp6000_pcie.h"
2017-05-22 10:59:26 -07:00
# include "nfp_app.h"
2017-02-09 09:17:38 -08:00
# include "nfp_net_ctrl.h"
2017-08-24 21:31:49 -07:00
# include "nfp_net_sriov.h"
2017-02-09 09:17:38 -08:00
# include "nfp_net.h"
# include "nfp_main.h"
2017-05-22 10:59:28 -07:00
# include "nfp_port.h"
2017-02-09 09:17:38 -08:00
# define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
2017-04-04 16:12:23 -07:00
/**
* nfp_net_get_mac_addr ( ) - Get the MAC address .
2017-06-08 20:56:12 -07:00
* @ pf : NFP PF handle
2018-04-25 11:21:08 -07:00
* @ netdev : net_device to set MAC address on
2017-06-23 22:12:05 +02:00
* @ port : NFP port structure
2017-04-04 16:12:23 -07:00
*
* First try to get the MAC address from NSP ETH table . If that
2017-07-04 02:27:20 -07:00
* fails generate a random address .
2017-04-04 16:12:23 -07:00
*/
2018-04-25 11:21:08 -07:00
void
nfp_net_get_mac_addr ( struct nfp_pf * pf , struct net_device * netdev ,
struct nfp_port * port )
2017-02-09 09:17:38 -08:00
{
2017-05-22 10:59:28 -07:00
struct nfp_eth_table_port * eth_port ;
2017-02-09 09:17:38 -08:00
2017-06-23 22:12:05 +02:00
eth_port = __nfp_port_get_eth_port ( port ) ;
2017-07-04 02:27:20 -07:00
if ( ! eth_port ) {
2018-04-25 11:21:08 -07:00
eth_hw_addr_random ( netdev ) ;
2017-02-09 09:17:38 -08:00
return ;
}
2018-04-25 11:21:08 -07:00
ether_addr_copy ( netdev - > dev_addr , eth_port - > mac_addr ) ;
ether_addr_copy ( netdev - > perm_addr , eth_port - > mac_addr ) ;
2017-02-09 09:17:38 -08:00
}
2017-07-04 02:27:19 -07:00
static struct nfp_eth_table_port *
nfp_net_find_port ( struct nfp_eth_table * eth_tbl , unsigned int index )
2017-02-09 09:17:38 -08:00
{
int i ;
2017-04-22 20:17:56 -07:00
for ( i = 0 ; eth_tbl & & i < eth_tbl - > count ; i + + )
2017-07-04 02:27:19 -07:00
if ( eth_tbl - > ports [ i ] . index = = index )
2017-04-22 20:17:56 -07:00
return & eth_tbl - > ports [ i ] ;
2017-03-08 08:57:02 -08:00
2017-04-04 16:12:23 -07:00
return NULL ;
2017-02-09 09:17:38 -08:00
}
2017-05-31 08:06:45 -07:00
/* Number of data vNICs advertised by firmware; defaults to 1 when the
 * run-time symbol is not present.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
static int nfp_net_pf_get_app_id ( struct nfp_pf * pf )
{
2018-05-21 22:12:43 -07:00
return nfp_pf_rtsym_read_optional ( pf , " _pf%u_net_app_id " ,
NFP_APP_CORE_NIC ) ;
2017-02-09 09:17:38 -08:00
}
2017-05-22 10:59:25 -07:00
static void nfp_net_pf_free_vnic ( struct nfp_pf * pf , struct nfp_net * nn )
{
2017-09-02 18:26:00 -07:00
if ( nfp_net_is_data_vnic ( nn ) )
nfp_app_vnic_free ( pf - > app , nn ) ;
2017-05-22 10:59:28 -07:00
nfp_port_free ( nn - > port ) ;
2017-05-22 10:59:25 -07:00
list_del ( & nn - > vnic_list ) ;
pf - > num_vnics - - ;
nfp_net_free ( nn ) ;
}
2017-05-22 10:59:24 -07:00
static void nfp_net_pf_free_vnics ( struct nfp_pf * pf )
2017-02-09 09:17:38 -08:00
{
2017-06-05 17:01:56 -07:00
struct nfp_net * nn , * next ;
2017-02-09 09:17:38 -08:00
2017-06-05 17:01:56 -07:00
list_for_each_entry_safe ( nn , next , & pf - > vnics , vnic_list )
if ( nfp_net_is_data_vnic ( nn ) )
nfp_net_pf_free_vnic ( pf , nn ) ;
2017-02-09 09:17:38 -08:00
}
static struct nfp_net *
2017-06-05 17:01:48 -07:00
nfp_net_pf_alloc_vnic ( struct nfp_pf * pf , bool needs_netdev ,
2017-06-05 17:01:52 -07:00
void __iomem * ctrl_bar , void __iomem * qc_bar ,
2017-07-04 02:27:19 -07:00
int stride , unsigned int id )
2017-02-09 09:17:38 -08:00
{
2017-06-05 17:01:52 -07:00
u32 tx_base , rx_base , n_tx_rings , n_rx_rings ;
2017-02-09 09:17:38 -08:00
struct nfp_net * nn ;
2017-05-31 08:06:46 -07:00
int err ;
2017-02-09 09:17:38 -08:00
2017-06-05 17:01:52 -07:00
tx_base = readl ( ctrl_bar + NFP_NET_CFG_START_TXQ ) ;
rx_base = readl ( ctrl_bar + NFP_NET_CFG_START_RXQ ) ;
2017-02-09 09:17:38 -08:00
n_tx_rings = readl ( ctrl_bar + NFP_NET_CFG_MAX_TXRINGS ) ;
n_rx_rings = readl ( ctrl_bar + NFP_NET_CFG_MAX_RXRINGS ) ;
2017-05-22 10:59:24 -07:00
/* Allocate and initialise the vNIC */
2017-06-05 17:01:48 -07:00
nn = nfp_net_alloc ( pf - > pdev , needs_netdev , n_tx_rings , n_rx_rings ) ;
2017-02-09 09:17:38 -08:00
if ( IS_ERR ( nn ) )
return nn ;
2017-05-22 10:59:26 -07:00
nn - > app = pf - > app ;
2017-06-05 17:01:53 -07:00
nfp_net_get_fw_version ( & nn - > fw_ver , ctrl_bar ) ;
2017-03-10 10:38:28 -08:00
nn - > dp . ctrl_bar = ctrl_bar ;
2017-06-05 17:01:52 -07:00
nn - > tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ ;
nn - > rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ ;
2017-03-10 10:38:27 -08:00
nn - > dp . is_vf = 0 ;
2017-02-09 09:17:38 -08:00
nn - > stride_rx = stride ;
nn - > stride_tx = stride ;
2017-05-22 10:59:28 -07:00
2017-06-05 17:01:56 -07:00
if ( needs_netdev ) {
2017-09-02 18:26:00 -07:00
err = nfp_app_vnic_alloc ( pf - > app , nn , id ) ;
2017-06-05 17:01:56 -07:00
if ( err ) {
nfp_net_free ( nn ) ;
return ERR_PTR ( err ) ;
}
2017-05-22 10:59:28 -07:00
}
2017-05-22 10:59:27 -07:00
pf - > num_vnics + + ;
list_add_tail ( & nn - > vnic_list , & pf - > vnics ) ;
2017-02-09 09:17:38 -08:00
return nn ;
}
static int
2017-05-22 10:59:24 -07:00
nfp_net_pf_init_vnic ( struct nfp_pf * pf , struct nfp_net * nn , unsigned int id )
2017-02-09 09:17:38 -08:00
{
int err ;
2018-05-21 22:12:55 -07:00
nn - > id = id ;
2017-05-22 10:59:23 -07:00
err = nfp_net_init ( nn ) ;
2017-02-09 09:17:38 -08:00
if ( err )
return err ;
2018-05-21 22:12:55 -07:00
nfp_net_debugfs_vnic_add ( nn , pf - > ddir ) ;
2017-02-09 09:17:38 -08:00
2017-05-26 01:03:34 -07:00
if ( nn - > port ) {
err = nfp_devlink_port_register ( pf - > app , nn - > port ) ;
if ( err )
goto err_dfs_clean ;
}
2017-02-09 09:17:38 -08:00
nfp_net_info ( nn ) ;
2017-09-02 18:26:00 -07:00
if ( nfp_net_is_data_vnic ( nn ) ) {
err = nfp_app_vnic_init ( pf - > app , nn ) ;
if ( err )
goto err_devlink_port_clean ;
}
2017-02-09 09:17:38 -08:00
return 0 ;
2017-05-26 01:03:34 -07:00
2017-09-02 18:26:00 -07:00
err_devlink_port_clean :
if ( nn - > port )
nfp_devlink_port_unregister ( nn - > port ) ;
2017-05-26 01:03:34 -07:00
err_dfs_clean :
nfp_net_debugfs_dir_clean ( & nn - > debugfs_dir ) ;
nfp_net_clean ( nn ) ;
return err ;
2017-02-09 09:17:38 -08:00
}
static int
2017-05-22 10:59:24 -07:00
nfp_net_pf_alloc_vnics ( struct nfp_pf * pf , void __iomem * ctrl_bar ,
2017-06-05 17:01:53 -07:00
void __iomem * qc_bar , int stride )
2017-02-09 09:17:38 -08:00
{
struct nfp_net * nn ;
unsigned int i ;
int err ;
2017-05-22 10:59:24 -07:00
for ( i = 0 ; i < pf - > max_data_vnics ; i + + ) {
2017-06-05 17:01:52 -07:00
nn = nfp_net_pf_alloc_vnic ( pf , true , ctrl_bar , qc_bar ,
2017-06-05 17:01:53 -07:00
stride , i ) ;
2017-05-22 10:59:27 -07:00
if ( IS_ERR ( nn ) ) {
err = PTR_ERR ( nn ) ;
goto err_free_prev ;
2017-02-09 09:17:38 -08:00
}
ctrl_bar + = NFP_PF_CSR_SLICE_SIZE ;
2017-05-22 10:59:27 -07:00
2017-05-31 08:06:46 -07:00
/* Kill the vNIC if app init marked it as invalid */
if ( nn - > port & & nn - > port - > type = = NFP_PORT_INVALID ) {
2017-05-22 10:59:27 -07:00
nfp_net_pf_free_vnic ( pf , nn ) ;
continue ;
}
2017-02-09 09:17:38 -08:00
}
2017-05-22 10:59:24 -07:00
if ( list_empty ( & pf - > vnics ) )
2017-04-04 16:12:23 -07:00
return - ENODEV ;
2017-02-09 09:17:38 -08:00
return 0 ;
err_free_prev :
2017-05-22 10:59:24 -07:00
nfp_net_pf_free_vnics ( pf ) ;
2017-02-09 09:17:38 -08:00
return err ;
}
2017-05-26 01:03:33 -07:00
static void nfp_net_pf_clean_vnic ( struct nfp_pf * pf , struct nfp_net * nn )
{
2017-09-02 18:26:00 -07:00
if ( nfp_net_is_data_vnic ( nn ) )
nfp_app_vnic_clean ( pf - > app , nn ) ;
2017-05-26 01:03:34 -07:00
if ( nn - > port )
nfp_devlink_port_unregister ( nn - > port ) ;
2017-05-26 01:03:33 -07:00
nfp_net_debugfs_dir_clean ( & nn - > debugfs_dir ) ;
nfp_net_clean ( nn ) ;
}
2017-06-05 17:01:54 -07:00
static int nfp_net_pf_alloc_irqs ( struct nfp_pf * pf )
2017-02-09 09:17:38 -08:00
{
2017-06-05 17:01:54 -07:00
unsigned int wanted_irqs , num_irqs , vnics_left , irqs_left ;
2017-02-09 09:17:38 -08:00
struct nfp_net * nn ;
/* Get MSI-X vectors */
wanted_irqs = 0 ;
2017-05-22 10:59:24 -07:00
list_for_each_entry ( nn , & pf - > vnics , vnic_list )
2017-03-10 10:38:27 -08:00
wanted_irqs + = NFP_NET_NON_Q_VECTORS + nn - > dp . num_r_vecs ;
2017-02-09 09:17:38 -08:00
pf - > irq_entries = kcalloc ( wanted_irqs , sizeof ( * pf - > irq_entries ) ,
GFP_KERNEL ) ;
2017-06-05 17:01:54 -07:00
if ( ! pf - > irq_entries )
return - ENOMEM ;
2017-02-09 09:17:38 -08:00
num_irqs = nfp_net_irqs_alloc ( pf - > pdev , pf - > irq_entries ,
2017-05-22 10:59:24 -07:00
NFP_NET_MIN_VNIC_IRQS * pf - > num_vnics ,
2017-02-09 09:17:38 -08:00
wanted_irqs ) ;
if ( ! num_irqs ) {
2017-06-05 17:01:54 -07:00
nfp_warn ( pf - > cpp , " Unable to allocate MSI-X vectors \n " ) ;
kfree ( pf - > irq_entries ) ;
return - ENOMEM ;
2017-02-09 09:17:38 -08:00
}
2017-05-22 10:59:24 -07:00
/* Distribute IRQs to vNICs */
2017-02-09 09:17:38 -08:00
irqs_left = num_irqs ;
2017-05-22 10:59:24 -07:00
vnics_left = pf - > num_vnics ;
list_for_each_entry ( nn , & pf - > vnics , vnic_list ) {
2017-02-09 09:17:38 -08:00
unsigned int n ;
2017-06-05 17:01:55 -07:00
n = min ( NFP_NET_NON_Q_VECTORS + nn - > dp . num_r_vecs ,
DIV_ROUND_UP ( irqs_left , vnics_left ) ) ;
2017-02-09 09:17:38 -08:00
nfp_net_irqs_assign ( nn , & pf - > irq_entries [ num_irqs - irqs_left ] ,
n ) ;
irqs_left - = n ;
2017-05-22 10:59:24 -07:00
vnics_left - - ;
2017-02-09 09:17:38 -08:00
}
2017-06-05 17:01:54 -07:00
return 0 ;
}
static void nfp_net_pf_free_irqs ( struct nfp_pf * pf )
{
nfp_net_irqs_disable ( pf - > pdev ) ;
kfree ( pf - > irq_entries ) ;
}
static int nfp_net_pf_init_vnics ( struct nfp_pf * pf )
{
struct nfp_net * nn ;
unsigned int id ;
int err ;
2017-05-22 10:59:24 -07:00
/* Finish vNIC init and register */
2017-02-09 09:17:38 -08:00
id = 0 ;
2017-05-22 10:59:24 -07:00
list_for_each_entry ( nn , & pf - > vnics , vnic_list ) {
2017-06-05 17:01:56 -07:00
if ( ! nfp_net_is_data_vnic ( nn ) )
continue ;
2017-05-22 10:59:24 -07:00
err = nfp_net_pf_init_vnic ( pf , nn , id ) ;
2017-02-09 09:17:38 -08:00
if ( err )
goto err_prev_deinit ;
id + + ;
}
return 0 ;
err_prev_deinit :
2017-05-26 01:03:33 -07:00
list_for_each_entry_continue_reverse ( nn , & pf - > vnics , vnic_list )
2017-06-05 17:01:56 -07:00
if ( nfp_net_is_data_vnic ( nn ) )
nfp_net_pf_clean_vnic ( pf , nn ) ;
2017-02-09 09:17:38 -08:00
return err ;
}
2017-06-05 17:01:56 -07:00
/* Allocate and initialise the app, and - if the app needs one - map the
 * ctrl vNIC BAR and allocate the (netdev-less) ctrl vNIC.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	mutex_lock(&pf->lock);
	err = nfp_app_init(pf->app);
	mutex_unlock(&pf->lock);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}
static void nfp_net_pf_app_clean ( struct nfp_pf * pf )
{
2017-06-05 17:01:56 -07:00
if ( pf - > ctrl_vnic ) {
nfp_net_pf_free_vnic ( pf , pf - > ctrl_vnic ) ;
nfp_cpp_area_release_free ( pf - > ctrl_vnic_bar ) ;
}
2018-01-17 18:50:57 -08:00
mutex_lock ( & pf - > lock ) ;
2017-06-27 00:50:19 -07:00
nfp_app_clean ( pf - > app ) ;
2018-01-17 18:50:57 -08:00
mutex_unlock ( & pf - > lock ) ;
2017-05-22 10:59:26 -07:00
nfp_app_free ( pf - > app ) ;
2017-05-26 01:03:32 -07:00
pf - > app = NULL ;
2017-05-22 10:59:26 -07:00
}
2017-06-05 17:01:56 -07:00
static int nfp_net_pf_app_start_ctrl ( struct nfp_pf * pf )
{
int err ;
if ( ! pf - > ctrl_vnic )
return 0 ;
err = nfp_net_pf_init_vnic ( pf , pf - > ctrl_vnic , 0 ) ;
if ( err )
return err ;
err = nfp_ctrl_open ( pf - > ctrl_vnic ) ;
if ( err )
goto err_clean_ctrl ;
return 0 ;
err_clean_ctrl :
nfp_net_pf_clean_vnic ( pf , pf - > ctrl_vnic ) ;
return err ;
}
static void nfp_net_pf_app_stop_ctrl ( struct nfp_pf * pf )
{
if ( ! pf - > ctrl_vnic )
return ;
nfp_ctrl_close ( pf - > ctrl_vnic ) ;
nfp_net_pf_clean_vnic ( pf , pf - > ctrl_vnic ) ;
}
static int nfp_net_pf_app_start ( struct nfp_pf * pf )
{
int err ;
2017-09-02 18:26:02 -07:00
err = nfp_net_pf_app_start_ctrl ( pf ) ;
2017-06-05 17:01:56 -07:00
if ( err )
2017-08-29 22:15:16 +03:00
return err ;
2017-06-05 17:01:56 -07:00
2017-09-02 18:26:02 -07:00
err = nfp_app_start ( pf - > app , pf - > ctrl_vnic ) ;
if ( err )
goto err_ctrl_stop ;
2017-06-27 00:50:26 -07:00
if ( pf - > num_vfs ) {
err = nfp_app_sriov_enable ( pf - > app , pf - > num_vfs ) ;
if ( err )
goto err_app_stop ;
}
2017-06-05 17:01:56 -07:00
return 0 ;
2017-06-27 00:50:26 -07:00
err_app_stop :
nfp_app_stop ( pf - > app ) ;
2017-09-02 18:26:02 -07:00
err_ctrl_stop :
nfp_net_pf_app_stop_ctrl ( pf ) ;
2017-06-05 17:01:56 -07:00
return err ;
}
static void nfp_net_pf_app_stop ( struct nfp_pf * pf )
{
2017-06-27 00:50:26 -07:00
if ( pf - > num_vfs )
nfp_app_sriov_disable ( pf - > app ) ;
2017-06-05 17:01:56 -07:00
nfp_app_stop ( pf - > app ) ;
2017-09-02 18:26:02 -07:00
nfp_net_pf_app_stop_ctrl ( pf ) ;
2017-06-05 17:01:56 -07:00
}
2017-06-23 22:12:01 +02:00
static void nfp_net_pci_unmap_mem ( struct nfp_pf * pf )
{
2017-08-24 21:31:49 -07:00
if ( pf - > vfcfg_tbl2_area )
nfp_cpp_area_release_free ( pf - > vfcfg_tbl2_area ) ;
2017-06-23 22:12:01 +02:00
if ( pf - > vf_cfg_bar )
nfp_cpp_area_release_free ( pf - > vf_cfg_bar ) ;
if ( pf - > mac_stats_bar )
nfp_cpp_area_release_free ( pf - > mac_stats_bar ) ;
nfp_cpp_area_release_free ( pf - > qc_area ) ;
nfp_cpp_area_release_free ( pf - > data_vnic_bar ) ;
}
static int nfp_net_pci_map_mem ( struct nfp_pf * pf )
{
u8 __iomem * mem ;
2017-06-27 00:50:17 -07:00
u32 min_size ;
2017-06-23 22:12:01 +02:00
int err ;
2017-06-27 00:50:17 -07:00
min_size = pf - > max_data_vnics * NFP_PF_CSR_SLICE_SIZE ;
2018-05-21 22:12:43 -07:00
mem = nfp_pf_map_rtsym ( pf , " net.bar0 " , " _pf%d_net_bar0 " ,
min_size , & pf - > data_vnic_bar ) ;
2017-06-23 22:12:01 +02:00
if ( IS_ERR ( mem ) ) {
nfp_err ( pf - > cpp , " Failed to find data vNIC memory symbol \n " ) ;
2017-06-27 00:50:15 -07:00
return PTR_ERR ( mem ) ;
2017-06-23 22:12:01 +02:00
}
2018-04-25 11:21:08 -07:00
if ( pf - > eth_tbl ) {
min_size = NFP_MAC_STATS_SIZE * ( pf - > eth_tbl - > max_index + 1 ) ;
pf - > mac_stats_mem = nfp_rtsym_map ( pf - > rtbl , " _mac_stats " ,
" net.macstats " , min_size ,
& pf - > mac_stats_bar ) ;
if ( IS_ERR ( pf - > mac_stats_mem ) ) {
if ( PTR_ERR ( pf - > mac_stats_mem ) ! = - ENOENT ) {
err = PTR_ERR ( pf - > mac_stats_mem ) ;
goto err_unmap_ctrl ;
}
pf - > mac_stats_mem = NULL ;
2017-06-23 22:12:01 +02:00
}
}
2018-05-21 22:12:43 -07:00
pf - > vf_cfg_mem = nfp_pf_map_rtsym ( pf , " net.vfcfg " , " _pf%d_net_vf_bar " ,
NFP_NET_CFG_BAR_SZ * pf - > limit_vfs ,
& pf - > vf_cfg_bar ) ;
2017-06-23 22:12:01 +02:00
if ( IS_ERR ( pf - > vf_cfg_mem ) ) {
if ( PTR_ERR ( pf - > vf_cfg_mem ) ! = - ENOENT ) {
err = PTR_ERR ( pf - > vf_cfg_mem ) ;
goto err_unmap_mac_stats ;
}
pf - > vf_cfg_mem = NULL ;
}
2017-08-24 21:31:49 -07:00
min_size = NFP_NET_VF_CFG_SZ * pf - > limit_vfs + NFP_NET_VF_CFG_MB_SZ ;
2018-05-21 22:12:43 -07:00
pf - > vfcfg_tbl2 = nfp_pf_map_rtsym ( pf , " net.vfcfg_tbl2 " ,
" _pf%d_net_vf_cfg2 " ,
min_size , & pf - > vfcfg_tbl2_area ) ;
2017-08-24 21:31:49 -07:00
if ( IS_ERR ( pf - > vfcfg_tbl2 ) ) {
if ( PTR_ERR ( pf - > vfcfg_tbl2 ) ! = - ENOENT ) {
err = PTR_ERR ( pf - > vfcfg_tbl2 ) ;
goto err_unmap_vf_cfg ;
}
pf - > vfcfg_tbl2 = NULL ;
}
2017-06-27 00:50:16 -07:00
mem = nfp_cpp_map_area ( pf - > cpp , " net.qc " , 0 , 0 ,
2017-06-23 22:12:01 +02:00
NFP_PCIE_QUEUE ( 0 ) , NFP_QCP_QUEUE_AREA_SZ ,
& pf - > qc_area ) ;
if ( IS_ERR ( mem ) ) {
nfp_err ( pf - > cpp , " Failed to map Queue Controller area. \n " ) ;
err = PTR_ERR ( mem ) ;
2017-08-24 21:31:49 -07:00
goto err_unmap_vfcfg_tbl2 ;
2017-06-23 22:12:01 +02:00
}
return 0 ;
2017-08-24 21:31:49 -07:00
err_unmap_vfcfg_tbl2 :
if ( pf - > vfcfg_tbl2_area )
nfp_cpp_area_release_free ( pf - > vfcfg_tbl2_area ) ;
2017-06-23 22:12:01 +02:00
err_unmap_vf_cfg :
if ( pf - > vf_cfg_bar )
nfp_cpp_area_release_free ( pf - > vf_cfg_bar ) ;
err_unmap_mac_stats :
if ( pf - > mac_stats_bar )
nfp_cpp_area_release_free ( pf - > mac_stats_bar ) ;
err_unmap_ctrl :
nfp_cpp_area_release_free ( pf - > data_vnic_bar ) ;
return err ;
}
2017-05-22 10:59:29 -07:00
static int
nfp_net_eth_port_update ( struct nfp_cpp * cpp , struct nfp_port * port ,
struct nfp_eth_table * eth_table )
{
struct nfp_eth_table_port * eth_port ;
ASSERT_RTNL ( ) ;
eth_port = nfp_net_find_port ( eth_table , port - > eth_id ) ;
if ( ! eth_port ) {
2017-05-22 10:59:33 -07:00
set_bit ( NFP_PORT_CHANGED , & port - > flags ) ;
2017-05-22 10:59:29 -07:00
nfp_warn ( cpp , " Warning: port #%d not present after reconfig \n " ,
port - > eth_id ) ;
return - EIO ;
}
if ( eth_port - > override_changed ) {
2017-11-04 16:48:55 +01:00
nfp_warn ( cpp , " Port #%d config changed, unregistering. Driver reload required before port will be operational again. \n " , port - > eth_id ) ;
2017-05-22 10:59:29 -07:00
port - > type = NFP_PORT_INVALID ;
}
memcpy ( port - > eth_port , eth_port , sizeof ( * eth_port ) ) ;
return 0 ;
}
2017-05-26 01:03:36 -07:00
int nfp_net_refresh_port_table_sync ( struct nfp_pf * pf )
2017-04-04 16:12:26 -07:00
{
2017-04-22 20:17:56 -07:00
struct nfp_eth_table * eth_table ;
2017-04-04 16:12:26 -07:00
struct nfp_net * nn , * next ;
2017-05-22 10:59:31 -07:00
struct nfp_port * port ;
2017-11-04 16:48:57 +01:00
int err ;
2017-04-04 16:12:26 -07:00
2017-05-26 01:03:36 -07:00
lockdep_assert_held ( & pf - > lock ) ;
2017-04-04 16:12:26 -07:00
/* Check for nfp_net_pci_remove() racing against us */
2017-05-22 10:59:24 -07:00
if ( list_empty ( & pf - > vnics ) )
2017-05-26 01:03:36 -07:00
return 0 ;
2017-04-04 16:12:26 -07:00
2017-05-22 10:59:30 -07:00
/* Update state of all ports */
rtnl_lock ( ) ;
2017-05-22 10:59:31 -07:00
list_for_each_entry ( port , & pf - > ports , port_list )
clear_bit ( NFP_PORT_CHANGED , & port - > flags ) ;
2017-04-22 20:17:56 -07:00
eth_table = nfp_eth_read_ports ( pf - > cpp ) ;
if ( ! eth_table ) {
2017-05-22 10:59:33 -07:00
list_for_each_entry ( port , & pf - > ports , port_list )
if ( __nfp_port_get_eth_port ( port ) )
set_bit ( NFP_PORT_CHANGED , & port - > flags ) ;
2017-05-22 10:59:30 -07:00
rtnl_unlock ( ) ;
2017-04-22 20:17:56 -07:00
nfp_err ( pf - > cpp , " Error refreshing port config! \n " ) ;
2017-05-26 01:03:36 -07:00
return - EIO ;
2017-04-22 20:17:56 -07:00
}
2017-05-22 10:59:31 -07:00
list_for_each_entry ( port , & pf - > ports , port_list )
if ( __nfp_port_get_eth_port ( port ) )
nfp_net_eth_port_update ( pf - > cpp , port , eth_table ) ;
2017-04-22 20:17:56 -07:00
rtnl_unlock ( ) ;
2017-05-22 10:59:29 -07:00
kfree ( eth_table ) ;
2017-04-22 20:17:56 -07:00
2017-11-04 16:48:57 +01:00
/* Resync repr state. This may cause reprs to be removed. */
err = nfp_reprs_resync_phys_ports ( pf - > app ) ;
if ( err )
return err ;
2017-05-22 10:59:30 -07:00
/* Shoot off the ports which became invalid */
2017-05-22 10:59:24 -07:00
list_for_each_entry_safe ( nn , next , & pf - > vnics , vnic_list ) {
2017-05-22 10:59:28 -07:00
if ( ! nn - > port | | nn - > port - > type ! = NFP_PORT_INVALID )
2017-04-04 16:12:26 -07:00
continue ;
2017-05-26 01:03:33 -07:00
nfp_net_pf_clean_vnic ( pf , nn ) ;
2017-05-22 10:59:25 -07:00
nfp_net_pf_free_vnic ( pf , nn ) ;
2017-04-04 16:12:26 -07:00
}
2017-05-26 01:03:36 -07:00
return 0 ;
}
static void nfp_net_refresh_vnics ( struct work_struct * work )
{
struct nfp_pf * pf = container_of ( work , struct nfp_pf ,
port_refresh_work ) ;
mutex_lock ( & pf - > lock ) ;
nfp_net_refresh_port_table_sync ( pf ) ;
2017-05-22 10:59:24 -07:00
mutex_unlock ( & pf - > lock ) ;
2017-04-04 16:12:26 -07:00
}
2017-05-22 10:59:28 -07:00
void nfp_net_refresh_port_table ( struct nfp_port * port )
2017-04-04 16:12:26 -07:00
{
2017-05-22 10:59:28 -07:00
struct nfp_pf * pf = port - > app - > pf ;
2017-04-04 16:12:26 -07:00
2017-05-22 10:59:32 -07:00
set_bit ( NFP_PORT_CHANGED , & port - > flags ) ;
2017-06-27 00:50:27 -07:00
queue_work ( pf - > wq , & pf - > port_refresh_work ) ;
2017-04-22 20:17:56 -07:00
}
2017-04-04 16:12:26 -07:00
2017-05-22 10:59:28 -07:00
int nfp_net_refresh_eth_port ( struct nfp_port * port )
2017-04-22 20:17:56 -07:00
{
2017-05-22 10:59:28 -07:00
struct nfp_cpp * cpp = port - > app - > cpp ;
2017-04-22 20:17:56 -07:00
struct nfp_eth_table * eth_table ;
2017-05-22 10:59:29 -07:00
int ret ;
2017-04-04 16:12:26 -07:00
2017-05-22 10:59:30 -07:00
clear_bit ( NFP_PORT_CHANGED , & port - > flags ) ;
2017-05-22 10:59:28 -07:00
eth_table = nfp_eth_read_ports ( cpp ) ;
2017-04-22 20:17:56 -07:00
if ( ! eth_table ) {
2017-05-22 10:59:33 -07:00
set_bit ( NFP_PORT_CHANGED , & port - > flags ) ;
2017-05-22 10:59:28 -07:00
nfp_err ( cpp , " Error refreshing port state table! \n " ) ;
2017-04-22 20:17:56 -07:00
return - EIO ;
}
2017-04-04 16:12:26 -07:00
2017-05-22 10:59:29 -07:00
ret = nfp_net_eth_port_update ( cpp , port , eth_table ) ;
2017-04-04 16:12:26 -07:00
2017-04-22 20:17:56 -07:00
kfree ( eth_table ) ;
2017-04-04 16:12:26 -07:00
2017-05-22 10:59:29 -07:00
return ret ;
2017-04-04 16:12:26 -07:00
}
2017-02-09 09:17:38 -08:00
/*
* PCI device functions
*/
int nfp_net_pci_probe ( struct nfp_pf * pf )
{
2018-01-17 18:50:57 -08:00
struct devlink * devlink = priv_to_devlink ( pf ) ;
2017-02-09 09:17:38 -08:00
struct nfp_net_fw_version fw_ver ;
2017-06-05 17:01:52 -07:00
u8 __iomem * ctrl_bar , * qc_bar ;
2017-02-09 09:17:38 -08:00
int stride ;
int err ;
2017-05-22 10:59:24 -07:00
INIT_WORK ( & pf - > port_refresh_work , nfp_net_refresh_vnics ) ;
2017-04-04 16:12:24 -07:00
2017-06-27 00:50:15 -07:00
if ( ! pf - > rtbl ) {
nfp_err ( pf - > cpp , " No %s, giving up. \n " ,
pf - > fw_loaded ? " symbol table " : " firmware found " ) ;
2017-07-26 11:09:46 -07:00
return - EINVAL ;
2017-06-27 00:50:15 -07:00
}
2017-05-22 10:59:24 -07:00
pf - > max_data_vnics = nfp_net_pf_get_num_ports ( pf ) ;
2018-01-17 18:50:57 -08:00
if ( ( int ) pf - > max_data_vnics < 0 )
return pf - > max_data_vnics ;
2017-02-09 09:17:38 -08:00
2017-06-23 22:12:01 +02:00
err = nfp_net_pci_map_mem ( pf ) ;
if ( err )
2018-01-17 18:50:57 -08:00
return err ;
2017-06-23 22:12:01 +02:00
ctrl_bar = nfp_cpp_area_iomem ( pf - > data_vnic_bar ) ;
qc_bar = nfp_cpp_area_iomem ( pf - > qc_area ) ;
if ( ! ctrl_bar | | ! qc_bar ) {
err = - EIO ;
goto err_unmap ;
2017-04-04 16:12:24 -07:00
}
2017-02-09 09:17:38 -08:00
nfp_net_get_fw_version ( & fw_ver , ctrl_bar ) ;
if ( fw_ver . resv | | fw_ver . class ! = NFP_NET_CFG_VERSION_CLASS_GENERIC ) {
nfp_err ( pf - > cpp , " Unknown Firmware ABI %d.%d.%d.%d \n " ,
fw_ver . resv , fw_ver . class , fw_ver . major , fw_ver . minor ) ;
err = - EINVAL ;
2017-06-23 22:12:01 +02:00
goto err_unmap ;
2017-02-09 09:17:38 -08:00
}
/* Determine stride */
if ( nfp_net_fw_ver_eq ( & fw_ver , 0 , 0 , 0 , 1 ) ) {
stride = 2 ;
nfp_warn ( pf - > cpp , " OBSOLETE Firmware detected - VF isolation not available \n " ) ;
} else {
switch ( fw_ver . major ) {
2017-06-05 17:01:57 -07:00
case 1 . . . 5 :
2017-02-09 09:17:38 -08:00
stride = 4 ;
break ;
default :
nfp_err ( pf - > cpp , " Unsupported Firmware ABI %d.%d.%d.%d \n " ,
fw_ver . resv , fw_ver . class ,
fw_ver . major , fw_ver . minor ) ;
err = - EINVAL ;
2017-06-23 22:12:01 +02:00
goto err_unmap ;
2017-02-09 09:17:38 -08:00
}
}
2017-06-05 17:01:56 -07:00
err = nfp_net_pf_app_init ( pf , qc_bar , stride ) ;
2017-05-22 10:59:26 -07:00
if ( err )
2017-06-23 22:12:01 +02:00
goto err_unmap ;
2017-05-22 10:59:26 -07:00
2018-01-17 18:50:57 -08:00
err = devlink_register ( devlink , & pf - > pdev - > dev ) ;
if ( err )
goto err_app_clean ;
2018-05-21 22:12:45 -07:00
err = nfp_shared_buf_register ( pf ) ;
if ( err )
goto err_devlink_unreg ;
2018-01-17 18:50:57 -08:00
mutex_lock ( & pf - > lock ) ;
2017-02-09 09:17:38 -08:00
pf - > ddir = nfp_net_debugfs_device_add ( pf - > pdev ) ;
2017-06-05 17:01:54 -07:00
/* Allocate the vnics and do basic init */
err = nfp_net_pf_alloc_vnics ( pf , ctrl_bar , qc_bar , stride ) ;
2017-02-09 09:17:38 -08:00
if ( err )
goto err_clean_ddir ;
2017-06-05 17:01:54 -07:00
err = nfp_net_pf_alloc_irqs ( pf ) ;
if ( err )
goto err_free_vnics ;
2017-09-02 18:26:02 -07:00
err = nfp_net_pf_app_start ( pf ) ;
2017-06-05 17:01:54 -07:00
if ( err )
goto err_free_irqs ;
2017-06-05 17:01:56 -07:00
err = nfp_net_pf_init_vnics ( pf ) ;
if ( err )
goto err_stop_app ;
2017-05-22 10:59:24 -07:00
mutex_unlock ( & pf - > lock ) ;
2017-04-04 16:12:24 -07:00
2017-02-09 09:17:38 -08:00
return 0 ;
2017-06-05 17:01:56 -07:00
err_stop_app :
2017-09-02 18:26:02 -07:00
nfp_net_pf_app_stop ( pf ) ;
2017-06-05 17:01:54 -07:00
err_free_irqs :
nfp_net_pf_free_irqs ( pf ) ;
err_free_vnics :
nfp_net_pf_free_vnics ( pf ) ;
2017-02-09 09:17:38 -08:00
err_clean_ddir :
nfp_net_debugfs_dir_clean ( & pf - > ddir ) ;
2018-01-17 18:50:57 -08:00
mutex_unlock ( & pf - > lock ) ;
2018-05-21 22:12:45 -07:00
nfp_shared_buf_unregister ( pf ) ;
err_devlink_unreg :
2018-01-17 18:50:57 -08:00
cancel_work_sync ( & pf - > port_refresh_work ) ;
devlink_unregister ( devlink ) ;
err_app_clean :
2017-05-22 10:59:26 -07:00
nfp_net_pf_app_clean ( pf ) ;
2017-06-23 22:12:01 +02:00
err_unmap :
nfp_net_pci_unmap_mem ( pf ) ;
2017-02-09 09:17:38 -08:00
return err ;
}
void nfp_net_pci_remove ( struct nfp_pf * pf )
{
2018-01-17 18:50:56 -08:00
struct nfp_net * nn , * next ;
2017-02-09 09:17:38 -08:00
2017-05-22 10:59:24 -07:00
mutex_lock ( & pf - > lock ) ;
2018-01-17 18:50:56 -08:00
list_for_each_entry_safe ( nn , next , & pf - > vnics , vnic_list ) {
if ( ! nfp_net_is_data_vnic ( nn ) )
continue ;
nfp_net_pf_clean_vnic ( pf , nn ) ;
nfp_net_pf_free_vnic ( pf , nn ) ;
}
2017-04-04 16:12:24 -07:00
2018-01-17 18:50:56 -08:00
nfp_net_pf_app_stop ( pf ) ;
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean ( & pf - > ddir ) ;
2017-02-09 09:17:38 -08:00
2018-01-17 18:50:57 -08:00
mutex_unlock ( & pf - > lock ) ;
2018-05-21 22:12:45 -07:00
nfp_shared_buf_unregister ( pf ) ;
2018-01-17 18:50:57 -08:00
devlink_unregister ( priv_to_devlink ( pf ) ) ;
2018-01-17 18:50:56 -08:00
nfp_net_pf_free_irqs ( pf ) ;
nfp_net_pf_app_clean ( pf ) ;
nfp_net_pci_unmap_mem ( pf ) ;
2017-02-09 09:17:38 -08:00
2017-04-04 16:12:26 -07:00
cancel_work_sync ( & pf - > port_refresh_work ) ;
2017-02-09 09:17:38 -08:00
}