// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* */
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, continuous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

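/* Allocate a "long term" buffer: a coherent DMA mapping that is registered
 * with the VNIC server via a REQUEST_MAP request and then reused for the
 * lifetime of the pool.  Waits for the firmware completion before returning.
 */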
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		return -1;
	}
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	int rc;

	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

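/* Replenish an rx pool: allocate skbs for any free slots, associate each
 * with an offset in the pool's long-term DMA buffer, and post rx_add
 * descriptors on the receive sub-CRQ so the server can fill them.
 */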
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				      be32_to_cpu(adapter->login_rsp_buf->
				      off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

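/* Re-initialize the rx pools after a reset: reallocate the long-term buffer
 * if the server-advertised buffer size changed, otherwise just re-register
 * the existing one, then reset the free map and buffer bookkeeping.
 */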
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

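/* Allocate one tx pool: the tx_buff tracking array, the free map, and a
 * long-term DMA buffer large enough for num_entries slots of buf_size bytes.
 * init_tx_pools() builds a regular pool and a TSO pool per tx sub-CRQ.
 */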
static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	int i, rc;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      adapter->req_mtu + VLAN_HLEN);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

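/* Log in to the VNIC server.  On PARTIALSUCCESS the server wants the
 * capabilities renegotiated, so the sub-CRQs are released, capabilities are
 * re-queried and the sub-CRQs rebuilt before the login is retried (bounded
 * by IBMVNIC_MAX_QUEUES attempts).
 */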
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	int retry_count = 0;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > IBMVNIC_MAX_QUEUES) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			netdev_warn(netdev, "Unable to login\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

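/* Set the logical link state (up or down) via a LOGICAL_LINK_STATE CRQ and
 * wait for the response; a partial-success response is retried after a
 * short delay.
 */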
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

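/* Fetch Vital Product Data from firmware: first a GET_VPD_SIZE CRQ to learn
 * the length, then a GET_VPD CRQ with a DMA-mapped buffer to receive the
 * contents.
 */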
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return rc;
	}
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

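/* Bring the interface up: replenish the rx pools, enable napi and the
 * sub-CRQ interrupts, then ask the server to set the logical link up.
 */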
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			return rc;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);

	return rc;
}

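/* Free any skbs still held in the rx and tx buffer pools; used while
 * closing the device or tearing it down for a reset.
 */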
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

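/* Main transmit path: copy the skb into a slot of the per-queue long-term
 * DMA buffer, build a TX descriptor (plus optional L2/L3/L4 header
 * descriptors) and post it on the tx sub-CRQ, stopping the queue when it
 * fills up.
 */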
2018-09-18 14:35:47 +08:00
static netdev_tx_t ibmvnic_xmit ( struct sk_buff * skb , struct net_device * netdev )
2015-12-21 11:26:06 -06:00
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
int queue_num = skb_get_queue_mapping ( skb ) ;
2016-04-01 17:20:34 -05:00
u8 * hdrs = ( u8 * ) & adapter - > tx_rx_desc_req ;
2015-12-21 11:26:06 -06:00
struct device * dev = & adapter - > vdev - > dev ;
struct ibmvnic_tx_buff * tx_buff = NULL ;
2017-03-05 12:18:41 -06:00
struct ibmvnic_sub_crq_queue * tx_scrq ;
2015-12-21 11:26:06 -06:00
struct ibmvnic_tx_pool * tx_pool ;
unsigned int tx_send_failed = 0 ;
unsigned int tx_map_failed = 0 ;
unsigned int tx_dropped = 0 ;
unsigned int tx_packets = 0 ;
unsigned int tx_bytes = 0 ;
dma_addr_t data_dma_addr ;
struct netdev_queue * txq ;
unsigned long lpar_rc ;
union sub_crq tx_crq ;
unsigned int offset ;
2016-04-01 17:20:34 -05:00
int num_entries = 1 ;
2015-12-21 11:26:06 -06:00
unsigned char * dst ;
u64 * handle_array ;
int index = 0 ;
2018-01-18 19:29:48 -06:00
u8 proto = 0 ;
2018-09-18 14:35:47 +08:00
netdev_tx_t ret = NETDEV_TX_OK ;
2015-12-21 11:26:06 -06:00
2019-09-20 16:11:23 -04:00
if ( test_bit ( 0 , & adapter - > resetting ) ) {
2017-04-21 15:39:16 -04:00
if ( ! netif_subqueue_stopped ( netdev , skb ) )
netif_stop_subqueue ( netdev , queue_num ) ;
dev_kfree_skb_any ( skb ) ;
2015-12-21 11:26:06 -06:00
tx_send_failed + + ;
tx_dropped + + ;
2017-04-21 15:39:16 -04:00
ret = NETDEV_TX_OK ;
2015-12-21 11:26:06 -06:00
goto out ;
}
2018-03-12 21:05:26 -05:00
if ( ibmvnic_xmit_workarounds ( skb , netdev ) ) {
2018-03-12 11:51:04 -05:00
tx_dropped + + ;
tx_send_failed + + ;
ret = NETDEV_TX_OK ;
goto out ;
}
2018-03-16 20:00:28 -05:00
if ( skb_is_gso ( skb ) )
tx_pool = & adapter - > tso_pool [ queue_num ] ;
else
tx_pool = & adapter - > tx_pool [ queue_num ] ;
2018-03-12 11:51:04 -05:00
2017-05-03 14:05:08 -04:00
tx_scrq = adapter - > tx_scrq [ queue_num ] ;
txq = netdev_get_tx_queue ( netdev , skb_get_queue_mapping ( skb ) ) ;
handle_array = ( u64 * ) ( ( u8 * ) ( adapter - > login_rsp_buf ) +
be32_to_cpu ( adapter - > login_rsp_buf - > off_txsubm_subcrqs ) ) ;
2015-12-21 11:26:06 -06:00
index = tx_pool - > free_map [ tx_pool - > consumer_index ] ;
2017-10-17 12:36:55 -05:00
2018-03-16 20:00:29 -05:00
if ( index = = IBMVNIC_INVALID_MAP ) {
dev_kfree_skb_any ( skb ) ;
tx_send_failed + + ;
tx_dropped + + ;
ret = NETDEV_TX_OK ;
goto out ;
}
tx_pool - > free_map [ tx_pool - > consumer_index ] = IBMVNIC_INVALID_MAP ;
2018-03-16 20:00:28 -05:00
offset = index * tx_pool - > buf_size ;
dst = tx_pool - > long_term_buff . buff + offset ;
memset ( dst , 0 , tx_pool - > buf_size ) ;
data_dma_addr = tx_pool - > long_term_buff . addr + offset ;
2015-12-21 11:26:06 -06:00
2017-10-17 12:36:54 -05:00
if ( skb_shinfo ( skb ) - > nr_frags ) {
int cur , i ;
/* Copy the head */
skb_copy_from_linear_data ( skb , dst , skb_headlen ( skb ) ) ;
cur = skb_headlen ( skb ) ;
/* Copy the frags */
for ( i = 0 ; i < skb_shinfo ( skb ) - > nr_frags ; i + + ) {
const skb_frag_t * frag = & skb_shinfo ( skb ) - > frags [ i ] ;
memcpy ( dst + cur ,
page_address ( skb_frag_page ( frag ) ) +
2019-07-30 07:40:33 -07:00
skb_frag_off ( frag ) , skb_frag_size ( frag ) ) ;
2017-10-17 12:36:54 -05:00
cur + = skb_frag_size ( frag ) ;
}
} else {
skb_copy_from_linear_data ( skb , dst , skb - > len ) ;
}
2015-12-21 11:26:06 -06:00
tx_pool - > consumer_index =
2018-03-16 20:00:28 -05:00
( tx_pool - > consumer_index + 1 ) % tx_pool - > num_buffers ;
2015-12-21 11:26:06 -06:00
tx_buff = & tx_pool - > tx_buff [ index ] ;
tx_buff - > skb = skb ;
tx_buff - > data_dma [ 0 ] = data_dma_addr ;
tx_buff - > data_len [ 0 ] = skb - > len ;
tx_buff - > index = index ;
tx_buff - > pool_index = queue_num ;
tx_buff - > last_frag = true ;
memset ( & tx_crq , 0 , sizeof ( tx_crq ) ) ;
tx_crq . v1 . first = IBMVNIC_CRQ_CMD ;
tx_crq . v1 . type = IBMVNIC_TX_DESC ;
tx_crq . v1 . n_crq_elem = 1 ;
tx_crq . v1 . n_sge = 1 ;
tx_crq . v1 . flags1 = IBMVNIC_TX_COMP_NEEDED ;
2018-03-16 20:00:28 -05:00
2017-10-17 12:36:55 -05:00
if ( skb_is_gso ( skb ) )
2018-03-16 20:00:28 -05:00
tx_crq . v1 . correlator =
cpu_to_be32 ( index | IBMVNIC_TSO_POOL_MASK ) ;
2017-10-17 12:36:55 -05:00
else
2018-03-16 20:00:28 -05:00
tx_crq . v1 . correlator = cpu_to_be32 ( index ) ;
tx_crq . v1 . dma_reg = cpu_to_be16 ( tx_pool - > long_term_buff . map_id ) ;
2015-12-21 11:26:06 -06:00
tx_crq . v1 . sge_len = cpu_to_be32 ( skb - > len ) ;
tx_crq . v1 . ioba = cpu_to_be64 ( data_dma_addr ) ;
2018-11-07 17:50:52 +01:00
if ( adapter - > vlan_header_insertion & & skb_vlan_tag_present ( skb ) ) {
2015-12-21 11:26:06 -06:00
tx_crq . v1 . flags2 | = IBMVNIC_TX_VLAN_INSERT ;
tx_crq . v1 . vlan_id = cpu_to_be16 ( skb - > vlan_tci ) ;
}
if ( skb - > protocol = = htons ( ETH_P_IP ) ) {
2018-01-18 19:29:48 -06:00
tx_crq . v1 . flags1 | = IBMVNIC_TX_PROT_IPV4 ;
proto = ip_hdr ( skb ) - > protocol ;
} else if ( skb - > protocol = = htons ( ETH_P_IPV6 ) ) {
tx_crq . v1 . flags1 | = IBMVNIC_TX_PROT_IPV6 ;
proto = ipv6_hdr ( skb ) - > nexthdr ;
2015-12-21 11:26:06 -06:00
}
2018-01-18 19:29:48 -06:00
if ( proto = = IPPROTO_TCP )
tx_crq . v1 . flags1 | = IBMVNIC_TX_PROT_TCP ;
else if ( proto = = IPPROTO_UDP )
tx_crq . v1 . flags1 | = IBMVNIC_TX_PROT_UDP ;
2016-04-01 17:20:34 -05:00
if ( skb - > ip_summed = = CHECKSUM_PARTIAL ) {
2015-12-21 11:26:06 -06:00
tx_crq . v1 . flags1 | = IBMVNIC_TX_CHKSUM_OFFLOAD ;
2016-04-01 17:20:34 -05:00
hdrs + = 2 ;
}
2017-10-17 12:36:55 -05:00
if ( skb_is_gso ( skb ) ) {
tx_crq . v1 . flags1 | = IBMVNIC_TX_LSO ;
tx_crq . v1 . mss = cpu_to_be16 ( skb_shinfo ( skb ) - > gso_size ) ;
hdrs + = 2 ;
}
2016-04-01 17:20:34 -05:00
/* determine if l2/3/4 headers are sent to firmware */
2018-02-09 13:19:46 -06:00
if ( ( * hdrs > > 7 ) & 1 ) {
2016-04-01 17:20:34 -05:00
build_hdr_descs_arr ( tx_buff , & num_entries , * hdrs ) ;
tx_crq . v1 . n_crq_elem = num_entries ;
2018-02-26 18:10:55 -06:00
tx_buff - > num_entries = num_entries ;
2016-04-01 17:20:34 -05:00
tx_buff - > indir_arr [ 0 ] = tx_crq ;
tx_buff - > indir_dma = dma_map_single ( dev , tx_buff - > indir_arr ,
sizeof ( tx_buff - > indir_arr ) ,
DMA_TO_DEVICE ) ;
if ( dma_mapping_error ( dev , tx_buff - > indir_dma ) ) {
2017-04-21 15:39:16 -04:00
dev_kfree_skb_any ( skb ) ;
tx_buff - > skb = NULL ;
2016-04-01 17:20:34 -05:00
if ( ! firmware_has_feature ( FW_FEATURE_CMO ) )
dev_err ( dev , " tx: unable to map descriptor array \n " ) ;
tx_map_failed + + ;
tx_dropped + + ;
2017-04-21 15:39:16 -04:00
ret = NETDEV_TX_OK ;
2018-03-16 20:00:29 -05:00
goto tx_err_out ;
2016-04-01 17:20:34 -05:00
}
2016-04-06 11:49:55 -05:00
lpar_rc = send_subcrq_indirect ( adapter , handle_array [ queue_num ] ,
2016-04-01 17:20:34 -05:00
( u64 ) tx_buff - > indir_dma ,
( u64 ) num_entries ) ;
2019-08-14 14:57:05 -05:00
dma_unmap_single ( dev , tx_buff - > indir_dma ,
sizeof ( tx_buff - > indir_arr ) , DMA_TO_DEVICE ) ;
2016-04-01 17:20:34 -05:00
} else {
2018-02-26 18:10:55 -06:00
tx_buff - > num_entries = num_entries ;
2016-04-06 11:49:55 -05:00
lpar_rc = send_subcrq ( adapter , handle_array [ queue_num ] ,
& tx_crq ) ;
2016-04-01 17:20:34 -05:00
}
2015-12-21 11:26:06 -06:00
if ( lpar_rc ! = H_SUCCESS ) {
2018-07-13 12:03:32 -05:00
if ( lpar_rc ! = H_CLOSED & & lpar_rc ! = H_PARAMETER )
dev_err_ratelimited ( dev , " tx: send failed \n " ) ;
2017-04-21 15:39:16 -04:00
dev_kfree_skb_any ( skb ) ;
tx_buff - > skb = NULL ;
2018-04-06 18:37:05 -05:00
if ( lpar_rc = = H_CLOSED | | adapter - > failover_pending ) {
/* Disable TX and report carrier off if queue is closed
* or pending failover .
2017-05-26 10:30:42 -04:00
* Firmware guarantees that a signal will be sent to the
* driver , triggering a reset or some other action .
*/
netif_tx_stop_all_queues ( netdev ) ;
netif_carrier_off ( netdev ) ;
}
2017-04-21 15:39:16 -04:00
2015-12-21 11:26:06 -06:00
tx_send_failed + + ;
tx_dropped + + ;
2017-04-21 15:39:16 -04:00
ret = NETDEV_TX_OK ;
2018-03-16 20:00:29 -05:00
goto tx_err_out ;
2015-12-21 11:26:06 -06:00
}
2017-03-05 12:18:41 -06:00
2018-02-18 10:08:41 -06:00
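/* Flow control: stop this subqueue once the number of descriptors in
 * flight reaches the negotiated ring size; ibmvnic_complete_tx() wakes
 * it again when the queue drains below half capacity.
 */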
if ( atomic_add_return ( num_entries , & tx_scrq - > used )
2017-04-19 13:44:47 -04:00
> = adapter - > req_tx_entries_per_subcrq ) {
2018-02-26 18:10:58 -06:00
netdev_dbg ( netdev , " Stopping queue %d \n " , queue_num ) ;
2017-03-05 12:18:41 -06:00
netif_stop_subqueue ( netdev , queue_num ) ;
}
2015-12-21 11:26:06 -06:00
tx_packets + + ;
tx_bytes + = skb - > len ;
txq - > trans_start = jiffies ;
ret = NETDEV_TX_OK ;
2018-03-16 20:00:29 -05:00
goto out ;
2015-12-21 11:26:06 -06:00
2018-03-16 20:00:29 -05:00
tx_err_out :
/* roll back the consumer index and return the buffer to the free map */
if ( tx_pool - > consumer_index = = 0 )
tx_pool - > consumer_index =
tx_pool - > num_buffers - 1 ;
else
tx_pool - > consumer_index - - ;
tx_pool - > free_map [ tx_pool - > consumer_index ] = index ;
2015-12-21 11:26:06 -06:00
out :
netdev - > stats . tx_dropped + = tx_dropped ;
netdev - > stats . tx_bytes + = tx_bytes ;
netdev - > stats . tx_packets + = tx_packets ;
adapter - > tx_send_failed + = tx_send_failed ;
adapter - > tx_map_failed + = tx_map_failed ;
2017-08-02 16:44:14 -05:00
adapter - > tx_stats_buffers [ queue_num ] . packets + = tx_packets ;
adapter - > tx_stats_buffers [ queue_num ] . bytes + = tx_bytes ;
adapter - > tx_stats_buffers [ queue_num ] . dropped_packets + = tx_dropped ;
2015-12-21 11:26:06 -06:00
return ret ;
}
static void ibmvnic_set_multi ( struct net_device * netdev )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
struct netdev_hw_addr * ha ;
union ibmvnic_crq crq ;
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . request_capability . first = IBMVNIC_CRQ_CMD ;
crq . request_capability . cmd = REQUEST_CAPABILITY ;
if ( netdev - > flags & IFF_PROMISC ) {
if ( ! adapter - > promisc_supported )
return ;
} else {
if ( netdev - > flags & IFF_ALLMULTI ) {
/* Accept all multicast */
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . multicast_ctrl . first = IBMVNIC_CRQ_CMD ;
crq . multicast_ctrl . cmd = MULTICAST_CTRL ;
crq . multicast_ctrl . flags = IBMVNIC_ENABLE_ALL ;
ibmvnic_send_crq ( adapter , & crq ) ;
} else if ( netdev_mc_empty ( netdev ) ) {
/* Reject all multicast */
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . multicast_ctrl . first = IBMVNIC_CRQ_CMD ;
crq . multicast_ctrl . cmd = MULTICAST_CTRL ;
crq . multicast_ctrl . flags = IBMVNIC_DISABLE_ALL ;
ibmvnic_send_crq ( adapter , & crq ) ;
} else {
/* Accept one or more multicast(s) */
netdev_for_each_mc_addr ( ha , netdev ) {
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . multicast_ctrl . first = IBMVNIC_CRQ_CMD ;
crq . multicast_ctrl . cmd = MULTICAST_CTRL ;
crq . multicast_ctrl . flags = IBMVNIC_ENABLE_MC ;
ether_addr_copy ( & crq . multicast_ctrl . mac_addr [ 0 ] ,
ha - > addr ) ;
ibmvnic_send_crq ( adapter , & crq ) ;
}
}
}
}
2019-05-09 23:13:43 -05:00
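/* __ibmvnic_set_mac issues a synchronous CHANGE_MAC_ADDR CRQ: the
 * fw_done completion is armed before the request is sent, the caller
 * sleeps until the response arrives, and fw_done_rc carries the result.
 * On any failure the cached adapter->mac_addr is restored from
 * netdev->dev_addr.
 */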
static int __ibmvnic_set_mac ( struct net_device * netdev , u8 * dev_addr )
2015-12-21 11:26:06 -06:00
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
union ibmvnic_crq crq ;
2018-05-23 13:37:57 -05:00
int rc ;
2015-12-21 11:26:06 -06:00
2019-05-09 23:13:43 -05:00
if ( ! is_valid_ether_addr ( dev_addr ) ) {
rc = - EADDRNOTAVAIL ;
goto err ;
}
2015-12-21 11:26:06 -06:00
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . change_mac_addr . first = IBMVNIC_CRQ_CMD ;
crq . change_mac_addr . cmd = CHANGE_MAC_ADDR ;
2019-05-09 23:13:43 -05:00
ether_addr_copy ( & crq . change_mac_addr . mac_addr [ 0 ] , dev_addr ) ;
2018-01-29 13:45:05 -06:00
init_completion ( & adapter - > fw_done ) ;
2018-05-23 13:37:57 -05:00
rc = ibmvnic_send_crq ( adapter , & crq ) ;
2019-05-09 23:13:43 -05:00
if ( rc ) {
rc = - EIO ;
goto err ;
}
2018-01-29 13:45:05 -06:00
wait_for_completion ( & adapter - > fw_done ) ;
2015-12-21 11:26:06 -06:00
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
2019-05-09 23:13:43 -05:00
if ( adapter - > fw_done_rc ) {
rc = - EIO ;
goto err ;
}
return 0 ;
err :
ether_addr_copy ( adapter - > mac_addr , netdev - > dev_addr ) ;
return rc ;
2015-12-21 11:26:06 -06:00
}
2017-10-26 16:23:25 -05:00
static int ibmvnic_set_mac ( struct net_device * netdev , void * p )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
struct sockaddr * addr = p ;
2018-01-29 13:45:05 -06:00
int rc ;
2017-10-26 16:23:25 -05:00
2019-05-09 23:13:43 -05:00
rc = 0 ;
ether_addr_copy ( adapter - > mac_addr , addr - > sa_data ) ;
if ( adapter - > state ! = VNIC_PROBED )
rc = __ibmvnic_set_mac ( netdev , addr - > sa_data ) ;
2017-10-26 16:23:25 -05:00
2018-01-29 13:45:05 -06:00
return rc ;
2017-10-26 16:23:25 -05:00
}
2019-09-20 16:11:22 -04:00
/**
* do_change_param_reset returns zero if we are able to keep processing reset
* events , or non - zero if we hit a fatal error and must halt .
*/
static int do_change_param_reset ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_rwi * rwi ,
u32 reset_state )
{
struct net_device * netdev = adapter - > netdev ;
int i , rc ;
netdev_dbg ( adapter - > netdev , " Change param resetting driver (%d) \n " ,
rwi - > reset_reason ) ;
netif_carrier_off ( netdev ) ;
adapter - > reset_reason = rwi - > reset_reason ;
ibmvnic_cleanup ( netdev ) ;
if ( reset_state = = VNIC_OPEN ) {
rc = __ibmvnic_close ( netdev ) ;
if ( rc )
return rc ;
}
release_resources ( adapter ) ;
release_sub_crqs ( adapter , 1 ) ;
release_crq_queue ( adapter ) ;
adapter - > state = VNIC_PROBED ;
rc = init_crq_queue ( adapter ) ;
if ( rc ) {
netdev_err ( adapter - > netdev ,
" Couldn't initialize crq. rc=%d \n " , rc ) ;
return rc ;
}
rc = ibmvnic_reset_init ( adapter ) ;
if ( rc )
return IBMVNIC_INIT_FAILED ;
/* If the adapter was in PROBE state prior to the reset,
* exit here .
*/
if ( reset_state = = VNIC_PROBED )
return 0 ;
rc = ibmvnic_login ( netdev ) ;
if ( rc ) {
adapter - > state = reset_state ;
return rc ;
}
rc = init_resources ( adapter ) ;
if ( rc )
return rc ;
ibmvnic_disable_irqs ( adapter ) ;
adapter - > state = VNIC_CLOSED ;
if ( reset_state = = VNIC_CLOSED )
return 0 ;
rc = __ibmvnic_open ( netdev ) ;
if ( rc )
return IBMVNIC_OPEN_FAILED ;
/* refresh device's multicast list */
ibmvnic_set_multi ( netdev ) ;
/* kick napi */
for ( i = 0 ; i < adapter - > req_rx_queues ; i + + )
napi_schedule ( & adapter - > napi [ i ] ) ;
return 0 ;
}
2017-05-03 14:04:38 -04:00
/**
* do_reset returns zero if we are able to keep processing reset events , or
* non - zero if we hit a fatal error and must halt .
*/
static int do_reset ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_rwi * rwi , u32 reset_state )
2015-12-21 11:26:06 -06:00
{
2018-01-18 16:26:31 -06:00
u64 old_num_rx_queues , old_num_tx_queues ;
2018-11-21 11:17:59 -06:00
u64 old_num_rx_slots , old_num_tx_slots ;
2017-05-03 14:04:38 -04:00
struct net_device * netdev = adapter - > netdev ;
int i , rc ;
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Re-setting driver (%d) \n " ,
rwi - > reset_reason ) ;
2019-09-20 16:11:22 -04:00
rtnl_lock ( ) ;
2017-05-03 14:04:38 -04:00
netif_carrier_off ( netdev ) ;
adapter - > reset_reason = rwi - > reset_reason ;
2018-01-18 16:26:31 -06:00
old_num_rx_queues = adapter - > req_rx_queues ;
old_num_tx_queues = adapter - > req_tx_queues ;
2018-11-21 11:17:59 -06:00
old_num_rx_slots = adapter - > req_rx_add_entries_per_subcrq ;
old_num_tx_slots = adapter - > req_tx_entries_per_subcrq ;
2018-01-18 16:26:31 -06:00
2018-04-06 18:37:06 -05:00
ibmvnic_cleanup ( netdev ) ;
2019-06-07 16:03:53 -05:00
if ( reset_state = = VNIC_OPEN & &
adapter - > reset_reason ! = VNIC_RESET_MOBILITY & &
2018-04-06 18:37:06 -05:00
adapter - > reset_reason ! = VNIC_RESET_FAILOVER ) {
2019-09-20 16:11:22 -04:00
adapter - > state = VNIC_CLOSING ;
/* Release the RTNL lock before link state change and
* re - acquire after the link state change to allow
* linkwatch_event to grab the RTNL lock and run during
* a reset .
*/
rtnl_unlock ( ) ;
rc = set_link_state ( adapter , IBMVNIC_LOGICAL_LNK_DN ) ;
rtnl_lock ( ) ;
2018-03-07 17:51:47 -06:00
if ( rc )
2019-09-20 16:11:22 -04:00
goto out ;
2017-05-03 14:04:38 -04:00
2019-09-20 16:11:22 -04:00
if ( adapter - > state ! = VNIC_CLOSING ) {
rc = - 1 ;
goto out ;
}
adapter - > state = VNIC_CLOSED ;
2017-10-26 16:23:25 -05:00
}
2017-05-26 10:30:37 -04:00
if ( adapter - > reset_reason ! = VNIC_RESET_NON_FATAL ) {
/* remove the closed state so when we call open it appears
* we are coming from the probed state .
*/
adapter - > state = VNIC_PROBED ;
2015-12-21 11:26:06 -06:00
2019-09-20 16:11:22 -04:00
if ( adapter - > reset_reason = = VNIC_RESET_MOBILITY ) {
2018-04-06 18:37:06 -05:00
rc = ibmvnic_reenable_crq_queue ( adapter ) ;
release_sub_crqs ( adapter , 1 ) ;
} else {
rc = ibmvnic_reset_crq ( adapter ) ;
if ( ! rc )
rc = vio_enable_interrupts ( adapter - > vdev ) ;
}
if ( rc ) {
netdev_err ( adapter - > netdev ,
" Couldn't initialize crq. rc=%d \n " , rc ) ;
2019-09-20 16:11:22 -04:00
goto out ;
2018-04-06 18:37:06 -05:00
}
2018-05-23 13:38:00 -05:00
rc = ibmvnic_reset_init ( adapter ) ;
2019-09-20 16:11:22 -04:00
if ( rc ) {
rc = IBMVNIC_INIT_FAILED ;
goto out ;
}
2017-05-03 14:04:38 -04:00
2017-05-26 10:30:37 -04:00
/* If the adapter was in PROBE state prior to the reset,
* exit here .
*/
2019-09-20 16:11:22 -04:00
if ( reset_state = = VNIC_PROBED ) {
rc = 0 ;
goto out ;
}
2017-05-03 14:04:38 -04:00
2017-05-26 10:30:37 -04:00
rc = ibmvnic_login ( netdev ) ;
if ( rc ) {
2018-07-16 10:29:30 -05:00
adapter - > state = reset_state ;
2019-09-20 16:11:22 -04:00
goto out ;
2017-05-26 10:30:37 -04:00
}
2017-05-03 14:04:38 -04:00
2019-09-20 16:11:22 -04:00
if ( adapter - > req_rx_queues ! = old_num_rx_queues | |
adapter - > req_tx_queues ! = old_num_tx_queues | |
adapter - > req_rx_add_entries_per_subcrq ! =
old_num_rx_slots | |
adapter - > req_tx_entries_per_subcrq ! =
old_num_tx_slots ) {
2018-01-18 16:26:31 -06:00
release_rx_pools ( adapter ) ;
release_tx_pools ( adapter ) ;
2018-02-19 13:30:39 -06:00
release_napi ( adapter ) ;
2018-11-19 15:59:22 -06:00
release_vpd_data ( adapter ) ;
rc = init_resources ( adapter ) ;
2018-08-30 13:19:53 -05:00
if ( rc )
2019-09-20 16:11:22 -04:00
goto out ;
2018-11-19 15:59:22 -06:00
2017-10-26 16:23:25 -05:00
} else {
rc = reset_tx_pools ( adapter ) ;
if ( rc )
2019-09-20 16:11:22 -04:00
goto out ;
2017-05-26 10:31:06 -04:00
2017-10-26 16:23:25 -05:00
rc = reset_rx_pools ( adapter ) ;
if ( rc )
2019-09-20 16:11:22 -04:00
goto out ;
2017-10-26 16:23:25 -05:00
}
2018-05-16 15:49:04 -05:00
ibmvnic_disable_irqs ( adapter ) ;
2017-05-26 10:30:37 -04:00
}
2018-03-14 10:41:29 -05:00
adapter - > state = VNIC_CLOSED ;
2019-09-20 16:11:22 -04:00
if ( reset_state = = VNIC_CLOSED ) {
rc = 0 ;
goto out ;
}
2018-03-14 10:41:29 -05:00
2017-05-03 14:04:38 -04:00
rc = __ibmvnic_open ( netdev ) ;
if ( rc ) {
2019-09-20 16:11:22 -04:00
rc = IBMVNIC_OPEN_FAILED ;
goto out ;
2017-05-03 14:04:38 -04:00
}
2019-06-07 16:03:54 -05:00
/* refresh device's multicast list */
ibmvnic_set_multi ( netdev ) ;
2017-05-03 14:04:38 -04:00
/* kick napi */
for ( i = 0 ; i < adapter - > req_rx_queues ; i + + )
napi_schedule ( & adapter - > napi [ i ] ) ;
2019-09-20 16:11:22 -04:00
if ( adapter - > reset_reason ! = VNIC_RESET_FAILOVER )
2018-11-30 10:59:08 -06:00
call_netdevice_notifiers ( NETDEV_NOTIFY_PEERS , netdev ) ;
2017-06-12 20:47:45 -04:00
2019-09-20 16:11:22 -04:00
rc = 0 ;
out :
rtnl_unlock ( ) ;
return rc ;
2017-05-03 14:04:38 -04:00
}
2018-05-23 13:38:02 -05:00
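/* do_hard_reset is the heavyweight path, typically taken after a
 * transport event (failover or partition mobility): unlike do_reset, it
 * releases all resources and sub-CRQs, re-registers the main CRQ from
 * scratch and rebuilds state as if coming from the probed state.
 */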
static int do_hard_reset ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_rwi * rwi , u32 reset_state )
{
struct net_device * netdev = adapter - > netdev ;
int rc ;
netdev_dbg ( adapter - > netdev , " Hard resetting driver (%d) \n " ,
rwi - > reset_reason ) ;
netif_carrier_off ( netdev ) ;
adapter - > reset_reason = rwi - > reset_reason ;
ibmvnic_cleanup ( netdev ) ;
release_resources ( adapter ) ;
release_sub_crqs ( adapter , 0 ) ;
release_crq_queue ( adapter ) ;
/* remove the closed state so when we call open it appears
* we are coming from the probed state .
*/
adapter - > state = VNIC_PROBED ;
/* The init_done completion is initialized once at probe time and only
 * reinitialized here. This avoids a race, seen during kdump, where the
 * CRQ tasklet could complete() a not-yet-initialized completion
 * structure and oops.
 */
reinit_completion ( & adapter - > init_done ) ;
2018-05-23 13:38:02 -05:00
rc = init_crq_queue ( adapter ) ;
if ( rc ) {
netdev_err ( adapter - > netdev ,
" Couldn't initialize crq. rc=%d \n " , rc ) ;
return rc ;
}
rc = ibmvnic_init ( adapter ) ;
if ( rc )
return rc ;
/* If the adapter was in PROBE state prior to the reset,
* exit here .
*/
if ( reset_state = = VNIC_PROBED )
return 0 ;
rc = ibmvnic_login ( netdev ) ;
if ( rc ) {
adapter - > state = VNIC_PROBED ;
return 0 ;
}
2018-11-19 15:59:22 -06:00
rc = init_resources ( adapter ) ;
2018-05-23 13:38:02 -05:00
if ( rc )
return rc ;
ibmvnic_disable_irqs ( adapter ) ;
adapter - > state = VNIC_CLOSED ;
if ( reset_state = = VNIC_CLOSED )
return 0 ;
rc = __ibmvnic_open ( netdev ) ;
2019-09-20 16:11:22 -04:00
if ( rc )
return IBMVNIC_OPEN_FAILED ;
2018-05-23 13:38:02 -05:00
return 0 ;
}
2017-05-03 14:04:38 -04:00
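/* Reset work items (rwi) are queued on adapter->rwi_list by
 * ibmvnic_reset() and drained here by the reset worker. rwi_lock is
 * taken with irqsave since resets may be requested from atomic context.
 */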
static struct ibmvnic_rwi * get_next_rwi ( struct ibmvnic_adapter * adapter )
{
struct ibmvnic_rwi * rwi ;
2018-12-10 15:22:22 -06:00
unsigned long flags ;
2017-05-03 14:04:38 -04:00
2018-12-10 15:22:22 -06:00
spin_lock_irqsave ( & adapter - > rwi_lock , flags ) ;
2017-05-03 14:04:38 -04:00
if ( ! list_empty ( & adapter - > rwi_list ) ) {
rwi = list_first_entry ( & adapter - > rwi_list , struct ibmvnic_rwi ,
list ) ;
list_del ( & rwi - > list ) ;
} else {
rwi = NULL ;
}
2018-12-10 15:22:22 -06:00
spin_unlock_irqrestore ( & adapter - > rwi_lock , flags ) ;
2017-05-03 14:04:38 -04:00
return rwi ;
}
static void free_all_rwi ( struct ibmvnic_adapter * adapter )
{
struct ibmvnic_rwi * rwi ;
rwi = get_next_rwi ( adapter ) ;
while ( rwi ) {
kfree ( rwi ) ;
rwi = get_next_rwi ( adapter ) ;
}
}
static void __ibmvnic_reset ( struct work_struct * work )
{
struct ibmvnic_rwi * rwi ;
struct ibmvnic_adapter * adapter ;
u32 reset_state ;
2017-10-26 16:23:25 -05:00
int rc = 0 ;
2017-05-03 14:04:38 -04:00
adapter = container_of ( work , struct ibmvnic_adapter , ibmvnic_reset ) ;
2019-09-20 16:11:23 -04:00
if ( test_and_set_bit_lock ( 0 , & adapter - > resetting ) ) {
schedule_delayed_work ( & adapter - > ibmvnic_delayed_reset ,
IBMVNIC_RESET_DELAY ) ;
return ;
}
2017-05-03 14:04:38 -04:00
reset_state = adapter - > state ;
rwi = get_next_rwi ( adapter ) ;
while ( rwi ) {
2019-08-27 11:10:04 -05:00
if ( adapter - > state = = VNIC_REMOVING | |
2019-09-09 22:44:51 +02:00
adapter - > state = = VNIC_REMOVED ) {
2019-09-05 17:30:01 -04:00
kfree ( rwi ) ;
rc = EBUSY ;
break ;
}
2019-08-27 11:10:04 -05:00
2019-09-20 16:11:22 -04:00
if ( rwi - > reset_reason = = VNIC_RESET_CHANGE_PARAM ) {
/* CHANGE_PARAM requestor holds rtnl_lock */
rc = do_change_param_reset ( adapter , rwi , reset_state ) ;
} else if ( adapter - > force_reset_recovery ) {
/* Transport event occurred during previous reset */
if ( adapter - > wait_for_reset ) {
/* Previous was CHANGE_PARAM; caller locked */
adapter - > force_reset_recovery = false ;
rc = do_hard_reset ( adapter , rwi , reset_state ) ;
} else {
rtnl_lock ( ) ;
adapter - > force_reset_recovery = false ;
rc = do_hard_reset ( adapter , rwi , reset_state ) ;
rtnl_unlock ( ) ;
}
2018-05-23 13:38:02 -05:00
} else {
rc = do_reset ( adapter , rwi , reset_state ) ;
}
2017-05-03 14:04:38 -04:00
kfree ( rwi ) ;
2019-09-20 16:11:22 -04:00
if ( rc = = IBMVNIC_OPEN_FAILED ) {
if ( list_empty ( & adapter - > rwi_list ) )
adapter - > state = VNIC_CLOSED ;
else
adapter - > state = reset_state ;
rc = 0 ;
} else if ( rc & & rc ! = IBMVNIC_INIT_FAILED & &
2018-05-23 13:38:02 -05:00
! adapter - > force_reset_recovery )
2017-05-03 14:04:38 -04:00
break ;
rwi = get_next_rwi ( adapter ) ;
2019-09-20 16:11:23 -04:00
if ( rwi & & ( rwi - > reset_reason = = VNIC_RESET_FAILOVER | |
rwi - > reset_reason = = VNIC_RESET_MOBILITY ) )
adapter - > force_reset_recovery = true ;
2017-05-03 14:04:38 -04:00
}
2017-10-26 16:23:25 -05:00
if ( adapter - > wait_for_reset ) {
adapter - > reset_done_rc = rc ;
complete ( & adapter - > reset_done ) ;
}
2017-05-03 14:04:38 -04:00
if ( rc ) {
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Reset failed \n " ) ;
2017-05-03 14:04:38 -04:00
free_all_rwi ( adapter ) ;
}
2019-09-05 17:30:01 -04:00
2019-09-20 16:11:23 -04:00
clear_bit_unlock ( 0 , & adapter - > resetting ) ;
}
static void __ibmvnic_delayed_reset ( struct work_struct * work )
{
struct ibmvnic_adapter * adapter ;
adapter = container_of ( work , struct ibmvnic_adapter ,
ibmvnic_delayed_reset . work ) ;
__ibmvnic_reset ( & adapter - > ibmvnic_reset ) ;
2017-05-03 14:04:38 -04:00
}
2018-04-06 18:37:04 -05:00
static int ibmvnic_reset ( struct ibmvnic_adapter * adapter ,
enum ibmvnic_reset_reason reason )
2017-05-03 14:04:38 -04:00
{
2018-05-23 13:38:02 -05:00
struct list_head * entry , * tmp_entry ;
2017-05-03 14:04:38 -04:00
struct ibmvnic_rwi * rwi , * tmp ;
struct net_device * netdev = adapter - > netdev ;
2018-12-10 15:22:22 -06:00
unsigned long flags ;
2018-04-06 18:37:04 -05:00
int ret ;
2017-05-03 14:04:38 -04:00
if ( adapter - > state = = VNIC_REMOVING | |
2018-04-06 18:37:05 -05:00
adapter - > state = = VNIC_REMOVED | |
adapter - > failover_pending ) {
2018-04-06 18:37:04 -05:00
ret = EBUSY ;
2018-04-06 18:37:05 -05:00
netdev_dbg ( netdev , " Adapter removing or pending failover, skipping reset \n " ) ;
2018-04-06 18:37:04 -05:00
goto err ;
2017-05-03 14:04:38 -04:00
}
2017-06-15 14:48:09 -04:00
if ( adapter - > state = = VNIC_PROBING ) {
netdev_warn ( netdev , " Adapter reset during probe \n " ) ;
2018-04-06 18:37:04 -05:00
ret = adapter - > init_done_rc = EAGAIN ;
goto err ;
2017-06-15 14:48:09 -04:00
}
2018-12-10 15:22:22 -06:00
spin_lock_irqsave ( & adapter - > rwi_lock , flags ) ;
2017-05-03 14:04:38 -04:00
list_for_each ( entry , & adapter - > rwi_list ) {
tmp = list_entry ( entry , struct ibmvnic_rwi , list ) ;
if ( tmp - > reset_reason = = reason ) {
2017-08-08 15:24:05 -05:00
netdev_dbg ( netdev , " Skipping matching reset \n " ) ;
2018-12-10 15:22:22 -06:00
spin_unlock_irqrestore ( & adapter - > rwi_lock , flags ) ;
2018-04-06 18:37:04 -05:00
ret = EBUSY ;
goto err ;
2017-05-03 14:04:38 -04:00
}
}
2018-12-10 15:22:23 -06:00
rwi = kzalloc ( sizeof ( * rwi ) , GFP_ATOMIC ) ;
2017-05-03 14:04:38 -04:00
if ( ! rwi ) {
2018-12-10 15:22:22 -06:00
spin_unlock_irqrestore ( & adapter - > rwi_lock , flags ) ;
2017-05-03 14:04:38 -04:00
ibmvnic_close ( netdev ) ;
2018-04-06 18:37:04 -05:00
ret = ENOMEM ;
goto err ;
2017-05-03 14:04:38 -04:00
}
2018-05-23 13:38:02 -05:00
/* if we just received a transport event,
* flush reset queue and process this reset
*/
if ( adapter - > force_reset_recovery & & ! list_empty ( & adapter - > rwi_list ) ) {
list_for_each_safe ( entry , tmp_entry , & adapter - > rwi_list )
list_del ( entry ) ;
}
2017-05-03 14:04:38 -04:00
rwi - > reset_reason = reason ;
list_add_tail ( & rwi - > list , & adapter - > rwi_list ) ;
2018-12-10 15:22:22 -06:00
spin_unlock_irqrestore ( & adapter - > rwi_lock , flags ) ;
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Scheduling reset (reason %d) \n " , reason ) ;
2017-05-03 14:04:38 -04:00
schedule_work ( & adapter - > ibmvnic_reset ) ;
2018-04-06 18:37:04 -05:00
return 0 ;
err :
return - ret ;
2017-05-03 14:04:38 -04:00
}
static void ibmvnic_tx_timeout ( struct net_device * dev )
{
struct ibmvnic_adapter * adapter = netdev_priv ( dev ) ;
ibmvnic_reset ( adapter , VNIC_RESET_TIMEOUT ) ;
2015-12-21 11:26:06 -06:00
}
static void remove_buff_from_pool ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_rx_buff * rx_buff )
{
struct ibmvnic_rx_pool * pool = & adapter - > rx_pool [ rx_buff - > pool_index ] ;
rx_buff - > skb = NULL ;
pool - > free_map [ pool - > next_alloc ] = ( int ) ( rx_buff - pool - > rx_buff ) ;
pool - > next_alloc = ( pool - > next_alloc + 1 ) % pool - > size ;
atomic_dec ( & pool - > available ) ;
}
static int ibmvnic_poll ( struct napi_struct * napi , int budget )
{
struct net_device * netdev = napi - > dev ;
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
int scrq_num = ( int ) ( napi - adapter - > napi ) ;
int frames_processed = 0 ;
2017-05-26 10:30:54 -04:00
2015-12-21 11:26:06 -06:00
restart_poll :
while ( frames_processed < budget ) {
struct sk_buff * skb ;
struct ibmvnic_rx_buff * rx_buff ;
union sub_crq * next ;
u32 length ;
u16 offset ;
u8 flags = 0 ;
2019-09-20 16:11:23 -04:00
if ( unlikely ( test_bit ( 0 , & adapter - > resetting ) & &
2018-02-06 16:21:49 -06:00
adapter - > reset_reason ! = VNIC_RESET_NON_FATAL ) ) {
2017-06-14 23:50:09 -05:00
enable_scrq_irq ( adapter , adapter - > rx_scrq [ scrq_num ] ) ;
napi_complete_done ( napi , frames_processed ) ;
return frames_processed ;
}
2015-12-21 11:26:06 -06:00
if ( ! pending_scrq ( adapter , adapter - > rx_scrq [ scrq_num ] ) )
break ;
next = ibmvnic_next_scrq ( adapter , adapter - > rx_scrq [ scrq_num ] ) ;
rx_buff =
( struct ibmvnic_rx_buff * ) be64_to_cpu ( next - >
rx_comp . correlator ) ;
/* do error checking */
if ( next - > rx_comp . rc ) {
2017-08-07 15:42:30 -05:00
netdev_dbg ( netdev , " rx buffer returned with rc %x \n " ,
be16_to_cpu ( next - > rx_comp . rc ) ) ;
2015-12-21 11:26:06 -06:00
/* free the entry */
next - > rx_comp . first = 0 ;
2018-02-13 18:23:42 -06:00
dev_kfree_skb_any ( rx_buff - > skb ) ;
2015-12-21 11:26:06 -06:00
remove_buff_from_pool ( adapter , rx_buff ) ;
2017-05-03 14:05:14 -04:00
continue ;
2018-02-19 20:12:57 -06:00
} else if ( ! rx_buff - > skb ) {
/* free the entry */
next - > rx_comp . first = 0 ;
remove_buff_from_pool ( adapter , rx_buff ) ;
continue ;
2015-12-21 11:26:06 -06:00
}
length = be32_to_cpu ( next - > rx_comp . len ) ;
offset = be16_to_cpu ( next - > rx_comp . off_frame_data ) ;
flags = next - > rx_comp . flags ;
skb = rx_buff - > skb ;
skb_copy_to_linear_data ( skb , rx_buff - > data + offset ,
length ) ;
2017-04-21 15:38:46 -04:00
/* VLAN Header has been stripped by the system firmware and
* needs to be inserted by the driver
*/
if ( adapter - > rx_vlan_header_insertion & &
( flags & IBMVNIC_VLAN_STRIPPED ) )
__vlan_hwaccel_put_tag ( skb , htons ( ETH_P_8021Q ) ,
ntohs ( next - > rx_comp . vlan_tci ) ) ;
2015-12-21 11:26:06 -06:00
/* free the entry */
next - > rx_comp . first = 0 ;
remove_buff_from_pool ( adapter , rx_buff ) ;
skb_put ( skb , length ) ;
skb - > protocol = eth_type_trans ( skb , netdev ) ;
2017-05-03 14:05:20 -04:00
skb_record_rx_queue ( skb , scrq_num ) ;
2015-12-21 11:26:06 -06:00
if ( flags & IBMVNIC_IP_CHKSUM_GOOD & &
flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD ) {
skb - > ip_summed = CHECKSUM_UNNECESSARY ;
}
length = skb - > len ;
napi_gro_receive ( napi , skb ) ; /* send it up */
netdev - > stats . rx_packets + + ;
netdev - > stats . rx_bytes + = length ;
2017-08-02 16:44:14 -05:00
adapter - > rx_stats_buffers [ scrq_num ] . packets + + ;
adapter - > rx_stats_buffers [ scrq_num ] . bytes + = length ;
2015-12-21 11:26:06 -06:00
frames_processed + + ;
}
2017-05-26 10:30:54 -04:00
if ( adapter - > state ! = VNIC_CLOSING )
replenish_rx_pool ( adapter , & adapter - > rx_pool [ scrq_num ] ) ;
2015-12-21 11:26:06 -06:00
if ( frames_processed < budget ) {
enable_scrq_irq ( adapter , adapter - > rx_scrq [ scrq_num ] ) ;
2017-01-30 08:22:01 -08:00
napi_complete_done ( napi , frames_processed ) ;
2015-12-21 11:26:06 -06:00
if ( pending_scrq ( adapter , adapter - > rx_scrq [ scrq_num ] ) & &
napi_reschedule ( napi ) ) {
disable_scrq_irq ( adapter , adapter - > rx_scrq [ scrq_num ] ) ;
goto restart_poll ;
}
}
return frames_processed ;
}
2017-10-26 16:23:25 -05:00
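/* wait_for_reset saves the current ring, queue and MTU requests as a
 * fallback, triggers a CHANGE_PARAM reset and sleeps on reset_done.
 * If that reset fails, the fallback values are restored and a second
 * CHANGE_PARAM reset is issued to bring the adapter back up with the
 * old settings.
 */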
static int wait_for_reset ( struct ibmvnic_adapter * adapter )
{
2018-04-06 18:37:04 -05:00
int rc , ret ;
2017-10-26 16:23:25 -05:00
adapter - > fallback . mtu = adapter - > req_mtu ;
adapter - > fallback . rx_queues = adapter - > req_rx_queues ;
adapter - > fallback . tx_queues = adapter - > req_tx_queues ;
adapter - > fallback . rx_entries = adapter - > req_rx_add_entries_per_subcrq ;
adapter - > fallback . tx_entries = adapter - > req_tx_entries_per_subcrq ;
init_completion ( & adapter - > reset_done ) ;
adapter - > wait_for_reset = true ;
2018-04-06 18:37:04 -05:00
rc = ibmvnic_reset ( adapter , VNIC_RESET_CHANGE_PARAM ) ;
if ( rc )
return rc ;
2017-10-26 16:23:25 -05:00
wait_for_completion ( & adapter - > reset_done ) ;
2018-04-06 18:37:04 -05:00
ret = 0 ;
2017-10-26 16:23:25 -05:00
if ( adapter - > reset_done_rc ) {
2018-04-06 18:37:04 -05:00
ret = - EIO ;
2017-10-26 16:23:25 -05:00
adapter - > desired . mtu = adapter - > fallback . mtu ;
adapter - > desired . rx_queues = adapter - > fallback . rx_queues ;
adapter - > desired . tx_queues = adapter - > fallback . tx_queues ;
adapter - > desired . rx_entries = adapter - > fallback . rx_entries ;
adapter - > desired . tx_entries = adapter - > fallback . tx_entries ;
init_completion ( & adapter - > reset_done ) ;
2018-04-06 18:37:04 -05:00
adapter - > wait_for_reset = true ;
rc = ibmvnic_reset ( adapter , VNIC_RESET_CHANGE_PARAM ) ;
if ( rc )
return ret ;
2017-10-26 16:23:25 -05:00
wait_for_completion ( & adapter - > reset_done ) ;
}
adapter - > wait_for_reset = false ;
2018-04-06 18:37:04 -05:00
return ret ;
2017-10-26 16:23:25 -05:00
}
2017-06-06 16:55:52 -05:00
static int ibmvnic_change_mtu ( struct net_device * netdev , int new_mtu )
{
2017-10-26 16:23:25 -05:00
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
adapter - > desired . mtu = new_mtu + ETH_HLEN ;
return wait_for_reset ( adapter ) ;
2017-06-06 16:55:52 -05:00
}
2018-03-12 11:51:05 -05:00
static netdev_features_t ibmvnic_features_check ( struct sk_buff * skb ,
struct net_device * dev ,
netdev_features_t features )
{
/* Some backing hardware adapters cannot handle packets with an
 * MSS smaller than 224 bytes or with only one segment.
 */
if ( skb_is_gso ( skb ) ) {
if ( skb_shinfo ( skb ) - > gso_size < 224 | |
skb_shinfo ( skb ) - > gso_segs = = 1 )
features & = ~ NETIF_F_GSO_MASK ;
}
return features ;
}
2015-12-21 11:26:06 -06:00
static const struct net_device_ops ibmvnic_netdev_ops = {
. ndo_open = ibmvnic_open ,
. ndo_stop = ibmvnic_close ,
. ndo_start_xmit = ibmvnic_xmit ,
. ndo_set_rx_mode = ibmvnic_set_multi ,
. ndo_set_mac_address = ibmvnic_set_mac ,
. ndo_validate_addr = eth_validate_addr ,
. ndo_tx_timeout = ibmvnic_tx_timeout ,
2017-06-06 16:55:52 -05:00
. ndo_change_mtu = ibmvnic_change_mtu ,
2018-03-12 11:51:05 -05:00
. ndo_features_check = ibmvnic_features_check ,
2015-12-21 11:26:06 -06:00
} ;
/* ethtool functions */
2017-01-07 22:37:29 +01:00
static int ibmvnic_get_link_ksettings ( struct net_device * netdev ,
struct ethtool_link_ksettings * cmd )
2015-12-21 11:26:06 -06:00
{
2019-03-19 10:28:51 -03:00
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
int rc ;
rc = send_query_phys_parms ( adapter ) ;
if ( rc ) {
adapter - > speed = SPEED_UNKNOWN ;
adapter - > duplex = DUPLEX_UNKNOWN ;
}
cmd - > base . speed = adapter - > speed ;
cmd - > base . duplex = adapter - > duplex ;
2017-01-07 22:37:29 +01:00
cmd - > base . port = PORT_FIBRE ;
cmd - > base . phy_address = 0 ;
cmd - > base . autoneg = AUTONEG_ENABLE ;
2015-12-21 11:26:06 -06:00
return 0 ;
}
2017-11-13 15:59:19 -02:00
static void ibmvnic_get_drvinfo ( struct net_device * netdev ,
2015-12-21 11:26:06 -06:00
struct ethtool_drvinfo * info )
{
2017-11-13 15:59:19 -02:00
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
2015-12-21 11:26:06 -06:00
strlcpy ( info - > driver , ibmvnic_driver_name , sizeof ( info - > driver ) ) ;
strlcpy ( info - > version , IBMVNIC_DRIVER_VERSION , sizeof ( info - > version ) ) ;
2017-11-13 15:59:19 -02:00
strlcpy ( info - > fw_version , adapter - > fw_version ,
sizeof ( info - > fw_version ) ) ;
2015-12-21 11:26:06 -06:00
}
static u32 ibmvnic_get_msglevel ( struct net_device * netdev )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
return adapter - > msg_enable ;
}
static void ibmvnic_set_msglevel ( struct net_device * netdev , u32 data )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
adapter - > msg_enable = data ;
}
static u32 ibmvnic_get_link ( struct net_device * netdev )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
/* Don't need to send a query because we request a logical link up at
* init and then we wait for link state indications
*/
return adapter - > logical_link_state ;
}
static void ibmvnic_get_ringparam ( struct net_device * netdev ,
struct ethtool_ringparam * ring )
{
2017-08-02 16:46:30 -05:00
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
2018-09-28 18:38:26 -05:00
if ( adapter - > priv_flags & IBMVNIC_USE_SERVER_MAXES ) {
ring - > rx_max_pending = adapter - > max_rx_add_entries_per_subcrq ;
ring - > tx_max_pending = adapter - > max_tx_entries_per_subcrq ;
} else {
ring - > rx_max_pending = IBMVNIC_MAX_QUEUE_SZ ;
ring - > tx_max_pending = IBMVNIC_MAX_QUEUE_SZ ;
}
2015-12-21 11:26:06 -06:00
ring - > rx_mini_max_pending = 0 ;
ring - > rx_jumbo_max_pending = 0 ;
2017-08-02 16:46:30 -05:00
ring - > rx_pending = adapter - > req_rx_add_entries_per_subcrq ;
ring - > tx_pending = adapter - > req_tx_entries_per_subcrq ;
2015-12-21 11:26:06 -06:00
ring - > rx_mini_pending = 0 ;
ring - > rx_jumbo_pending = 0 ;
}
2017-10-26 16:23:25 -05:00
static int ibmvnic_set_ringparam ( struct net_device * netdev ,
struct ethtool_ringparam * ring )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
2018-09-28 18:38:26 -05:00
int ret ;
2017-10-26 16:23:25 -05:00
2018-09-28 18:38:26 -05:00
ret = 0 ;
2017-10-26 16:23:25 -05:00
adapter - > desired . rx_entries = ring - > rx_pending ;
adapter - > desired . tx_entries = ring - > tx_pending ;
2018-09-28 18:38:26 -05:00
ret = wait_for_reset ( adapter ) ;
if ( ! ret & &
( adapter - > req_rx_add_entries_per_subcrq ! = ring - > rx_pending | |
adapter - > req_tx_entries_per_subcrq ! = ring - > tx_pending ) )
netdev_info ( netdev ,
" Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu \n " ,
ring - > rx_pending , ring - > tx_pending ,
adapter - > req_rx_add_entries_per_subcrq ,
adapter - > req_tx_entries_per_subcrq ) ;
return ret ;
2017-10-26 16:23:25 -05:00
}
2017-08-02 16:47:17 -05:00
static void ibmvnic_get_channels ( struct net_device * netdev ,
struct ethtool_channels * channels )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
2018-09-28 18:38:26 -05:00
if ( adapter - > priv_flags & IBMVNIC_USE_SERVER_MAXES ) {
channels - > max_rx = adapter - > max_rx_queues ;
channels - > max_tx = adapter - > max_tx_queues ;
} else {
channels - > max_rx = IBMVNIC_MAX_QUEUES ;
channels - > max_tx = IBMVNIC_MAX_QUEUES ;
}
2017-08-02 16:47:17 -05:00
channels - > max_other = 0 ;
channels - > max_combined = 0 ;
channels - > rx_count = adapter - > req_rx_queues ;
channels - > tx_count = adapter - > req_tx_queues ;
channels - > other_count = 0 ;
channels - > combined_count = 0 ;
}
2017-10-26 16:23:25 -05:00
static int ibmvnic_set_channels ( struct net_device * netdev ,
struct ethtool_channels * channels )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
2018-09-28 18:38:26 -05:00
int ret ;
2017-10-26 16:23:25 -05:00
2018-09-28 18:38:26 -05:00
ret = 0 ;
2017-10-26 16:23:25 -05:00
adapter - > desired . rx_queues = channels - > rx_count ;
adapter - > desired . tx_queues = channels - > tx_count ;
2018-09-28 18:38:26 -05:00
ret = wait_for_reset ( adapter ) ;
if ( ! ret & &
( adapter - > req_rx_queues ! = channels - > rx_count | |
adapter - > req_tx_queues ! = channels - > tx_count ) )
netdev_info ( netdev ,
" Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu \n " ,
channels - > rx_count , channels - > tx_count ,
adapter - > req_rx_queues , adapter - > req_tx_queues ) ;
return ret ;
2017-10-26 16:23:25 -05:00
}
2015-12-21 11:26:06 -06:00
static void ibmvnic_get_strings ( struct net_device * dev , u32 stringset , u8 * data )
{
2017-08-02 16:44:14 -05:00
struct ibmvnic_adapter * adapter = netdev_priv ( dev ) ;
2015-12-21 11:26:06 -06:00
int i ;
2018-09-28 18:38:26 -05:00
switch ( stringset ) {
case ETH_SS_STATS :
for ( i = 0 ; i < ARRAY_SIZE ( ibmvnic_stats ) ;
i + + , data + = ETH_GSTRING_LEN )
memcpy ( data , ibmvnic_stats [ i ] . name , ETH_GSTRING_LEN ) ;
2015-12-21 11:26:06 -06:00
2018-09-28 18:38:26 -05:00
for ( i = 0 ; i < adapter - > req_tx_queues ; i + + ) {
snprintf ( data , ETH_GSTRING_LEN , " tx%d_packets " , i ) ;
data + = ETH_GSTRING_LEN ;
2017-08-02 16:44:14 -05:00
2018-09-28 18:38:26 -05:00
snprintf ( data , ETH_GSTRING_LEN , " tx%d_bytes " , i ) ;
data + = ETH_GSTRING_LEN ;
2017-08-02 16:44:14 -05:00
2018-09-28 18:38:26 -05:00
snprintf ( data , ETH_GSTRING_LEN ,
" tx%d_dropped_packets " , i ) ;
data + = ETH_GSTRING_LEN ;
}
2017-08-02 16:44:14 -05:00
2018-09-28 18:38:26 -05:00
for ( i = 0 ; i < adapter - > req_rx_queues ; i + + ) {
snprintf ( data , ETH_GSTRING_LEN , " rx%d_packets " , i ) ;
data + = ETH_GSTRING_LEN ;
2017-08-02 16:44:14 -05:00
2018-09-28 18:38:26 -05:00
snprintf ( data , ETH_GSTRING_LEN , " rx%d_bytes " , i ) ;
data + = ETH_GSTRING_LEN ;
2017-08-02 16:44:14 -05:00
2018-09-28 18:38:26 -05:00
snprintf ( data , ETH_GSTRING_LEN , " rx%d_interrupts " , i ) ;
data + = ETH_GSTRING_LEN ;
}
break ;
2017-08-02 16:44:14 -05:00
2018-09-28 18:38:26 -05:00
case ETH_SS_PRIV_FLAGS :
for ( i = 0 ; i < ARRAY_SIZE ( ibmvnic_priv_flags ) ; i + + )
strcpy ( data + i * ETH_GSTRING_LEN ,
ibmvnic_priv_flags [ i ] ) ;
break ;
default :
return ;
2017-08-02 16:44:14 -05:00
}
2015-12-21 11:26:06 -06:00
}
static int ibmvnic_get_sset_count ( struct net_device * dev , int sset )
{
2017-08-02 16:44:14 -05:00
struct ibmvnic_adapter * adapter = netdev_priv ( dev ) ;
2015-12-21 11:26:06 -06:00
switch ( sset ) {
case ETH_SS_STATS :
2017-08-02 16:44:14 -05:00
return ARRAY_SIZE ( ibmvnic_stats ) +
adapter - > req_tx_queues * NUM_TX_STATS +
adapter - > req_rx_queues * NUM_RX_STATS ;
2018-09-28 18:38:26 -05:00
case ETH_SS_PRIV_FLAGS :
return ARRAY_SIZE ( ibmvnic_priv_flags ) ;
2015-12-21 11:26:06 -06:00
default :
return - EOPNOTSUPP ;
}
}
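/* The per-queue counters below are emitted in the same order as the
 * strings produced by ibmvnic_get_strings() for ETH_SS_STATS: the
 * global ibmvnic_stats entries, then packets/bytes/dropped per TX
 * queue, then packets/bytes/interrupts per RX queue.
 */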
static void ibmvnic_get_ethtool_stats ( struct net_device * dev ,
struct ethtool_stats * stats , u64 * data )
{
struct ibmvnic_adapter * adapter = netdev_priv ( dev ) ;
union ibmvnic_crq crq ;
2017-08-02 16:44:14 -05:00
int i , j ;
2018-05-23 13:37:57 -05:00
int rc ;
2015-12-21 11:26:06 -06:00
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . request_statistics . first = IBMVNIC_CRQ_CMD ;
crq . request_statistics . cmd = REQUEST_STATISTICS ;
crq . request_statistics . ioba = cpu_to_be32 ( adapter - > stats_token ) ;
crq . request_statistics . len =
cpu_to_be32 ( sizeof ( struct ibmvnic_statistics ) ) ;
/* Wait for data to be written */
init_completion ( & adapter - > stats_done ) ;
2018-05-23 13:37:57 -05:00
rc = ibmvnic_send_crq ( adapter , & crq ) ;
if ( rc )
return ;
2015-12-21 11:26:06 -06:00
wait_for_completion ( & adapter - > stats_done ) ;
for ( i = 0 ; i < ARRAY_SIZE ( ibmvnic_stats ) ; i + + )
2017-08-02 16:45:28 -05:00
data [ i ] = be64_to_cpu ( IBMVNIC_GET_STAT ( adapter ,
ibmvnic_stats [ i ] . offset ) ) ;
2017-08-02 16:44:14 -05:00
for ( j = 0 ; j < adapter - > req_tx_queues ; j + + ) {
data [ i ] = adapter - > tx_stats_buffers [ j ] . packets ;
i + + ;
data [ i ] = adapter - > tx_stats_buffers [ j ] . bytes ;
i + + ;
data [ i ] = adapter - > tx_stats_buffers [ j ] . dropped_packets ;
i + + ;
}
for ( j = 0 ; j < adapter - > req_rx_queues ; j + + ) {
data [ i ] = adapter - > rx_stats_buffers [ j ] . packets ;
i + + ;
data [ i ] = adapter - > rx_stats_buffers [ j ] . bytes ;
i + + ;
data [ i ] = adapter - > rx_stats_buffers [ j ] . interrupts ;
i + + ;
}
2015-12-21 11:26:06 -06:00
}
2018-09-28 18:38:26 -05:00
static u32 ibmvnic_get_priv_flags ( struct net_device * netdev )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
return adapter - > priv_flags ;
}
static int ibmvnic_set_priv_flags ( struct net_device * netdev , u32 flags )
{
struct ibmvnic_adapter * adapter = netdev_priv ( netdev ) ;
bool which_maxes = ! ! ( flags & IBMVNIC_USE_SERVER_MAXES ) ;
if ( which_maxes )
adapter - > priv_flags | = IBMVNIC_USE_SERVER_MAXES ;
else
adapter - > priv_flags & = ~ IBMVNIC_USE_SERVER_MAXES ;
return 0 ;
}
2015-12-21 11:26:06 -06:00
static const struct ethtool_ops ibmvnic_ethtool_ops = {
. get_drvinfo = ibmvnic_get_drvinfo ,
. get_msglevel = ibmvnic_get_msglevel ,
. set_msglevel = ibmvnic_set_msglevel ,
. get_link = ibmvnic_get_link ,
. get_ringparam = ibmvnic_get_ringparam ,
2017-10-26 16:23:25 -05:00
. set_ringparam = ibmvnic_set_ringparam ,
2017-08-02 16:47:17 -05:00
. get_channels = ibmvnic_get_channels ,
2017-10-26 16:23:25 -05:00
. set_channels = ibmvnic_set_channels ,
2015-12-21 11:26:06 -06:00
. get_strings = ibmvnic_get_strings ,
. get_sset_count = ibmvnic_get_sset_count ,
. get_ethtool_stats = ibmvnic_get_ethtool_stats ,
2017-01-07 22:37:29 +01:00
. get_link_ksettings = ibmvnic_get_link_ksettings ,
2018-09-28 18:38:26 -05:00
. get_priv_flags = ibmvnic_get_priv_flags ,
. set_priv_flags = ibmvnic_set_priv_flags ,
2015-12-21 11:26:06 -06:00
} ;
/* Routines for managing CRQs/sCRQs */
2017-05-26 10:31:12 -04:00
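/* reset_one_sub_crq_queue keeps the existing 4 * PAGE_SIZE message
 * buffer and its DMA mapping; it only zeroes the queue, resets the
 * cursor and used count, and re-registers the queue with the
 * hypervisor via h_reg_sub_crq.
 */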
static int reset_one_sub_crq_queue ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_sub_crq_queue * scrq )
{
int rc ;
if ( scrq - > irq ) {
free_irq ( scrq - > irq , scrq ) ;
irq_dispose_mapping ( scrq - > irq ) ;
scrq - > irq = 0 ;
}
2017-06-14 23:50:07 -05:00
memset ( scrq - > msgs , 0 , 4 * PAGE_SIZE ) ;
2018-04-06 18:37:03 -05:00
atomic_set ( & scrq - > used , 0 ) ;
2017-05-26 10:31:12 -04:00
scrq - > cur = 0 ;
rc = h_reg_sub_crq ( adapter - > vdev - > unit_address , scrq - > msg_token ,
4 * PAGE_SIZE , & scrq - > crq_num , & scrq - > hw_irq ) ;
return rc ;
}
static int reset_sub_crq_queues ( struct ibmvnic_adapter * adapter )
{
int i , rc ;
for ( i = 0 ; i < adapter - > req_tx_queues ; i + + ) {
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Re-setting tx_scrq[%d] \n " , i ) ;
2017-05-26 10:31:12 -04:00
rc = reset_one_sub_crq_queue ( adapter , adapter - > tx_scrq [ i ] ) ;
if ( rc )
return rc ;
}
for ( i = 0 ; i < adapter - > req_rx_queues ; i + + ) {
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Re-setting rx_scrq[%d] \n " , i ) ;
2017-05-26 10:31:12 -04:00
rc = reset_one_sub_crq_queue ( adapter , adapter - > rx_scrq [ i ] ) ;
if ( rc )
return rc ;
}
return rc ;
}
2015-12-21 11:26:06 -06:00
static void release_sub_crq_queue ( struct ibmvnic_adapter * adapter ,
2018-02-19 13:30:31 -06:00
struct ibmvnic_sub_crq_queue * scrq ,
bool do_h_free )
2015-12-21 11:26:06 -06:00
{
struct device * dev = & adapter - > vdev - > dev ;
long rc ;
netdev_dbg ( adapter - > netdev , " Releasing sub-CRQ \n " ) ;
2018-02-19 13:30:31 -06:00
if ( do_h_free ) {
/* Close the sub-crqs */
do {
rc = plpar_hcall_norets ( H_FREE_SUB_CRQ ,
adapter - > vdev - > unit_address ,
scrq - > crq_num ) ;
} while ( rc = = H_BUSY | | H_IS_LONG_BUSY ( rc ) ) ;
2015-12-21 11:26:06 -06:00
2018-02-19 13:30:31 -06:00
if ( rc ) {
netdev_err ( adapter - > netdev ,
" Failed to release sub-CRQ %16lx, rc = %ld \n " ,
scrq - > crq_num , rc ) ;
}
2017-04-19 13:44:29 -04:00
}
2015-12-21 11:26:06 -06:00
dma_unmap_single ( dev , scrq - > msg_token , 4 * PAGE_SIZE ,
DMA_BIDIRECTIONAL ) ;
free_pages ( ( unsigned long ) scrq - > msgs , 2 ) ;
kfree ( scrq ) ;
}
static struct ibmvnic_sub_crq_queue * init_sub_crq_queue ( struct ibmvnic_adapter
* adapter )
{
struct device * dev = & adapter - > vdev - > dev ;
struct ibmvnic_sub_crq_queue * scrq ;
int rc ;
2017-04-25 15:01:10 -04:00
scrq = kzalloc ( sizeof ( * scrq ) , GFP_KERNEL ) ;
2015-12-21 11:26:06 -06:00
if ( ! scrq )
return NULL ;
2017-04-19 13:45:16 -04:00
scrq - > msgs =
2017-04-25 15:01:10 -04:00
( union sub_crq * ) __get_free_pages ( GFP_KERNEL | __GFP_ZERO , 2 ) ;
2015-12-21 11:26:06 -06:00
if ( ! scrq - > msgs ) {
dev_warn ( dev , " Couldn't allocate crq queue messages page \n " ) ;
goto zero_page_failed ;
}
scrq - > msg_token = dma_map_single ( dev , scrq - > msgs , 4 * PAGE_SIZE ,
DMA_BIDIRECTIONAL ) ;
if ( dma_mapping_error ( dev , scrq - > msg_token ) ) {
dev_warn ( dev , " Couldn't map crq queue messages page \n " ) ;
goto map_failed ;
}
rc = h_reg_sub_crq ( adapter - > vdev - > unit_address , scrq - > msg_token ,
4 * PAGE_SIZE , & scrq - > crq_num , & scrq - > hw_irq ) ;
if ( rc = = H_RESOURCE )
rc = ibmvnic_reset_crq ( adapter ) ;
if ( rc = = H_CLOSED ) {
dev_warn ( dev , " Partner adapter not ready, waiting. \n " ) ;
} else if ( rc ) {
dev_warn ( dev , " Error %d registering sub-crq \n " , rc ) ;
goto reg_failed ;
}
scrq - > adapter = adapter ;
scrq - > size = 4 * PAGE_SIZE / sizeof ( * scrq - > msgs ) ;
spin_lock_init ( & scrq - > lock ) ;
netdev_dbg ( adapter - > netdev ,
" sub-crq initialized, num %lx, hw_irq=%lx, irq=%x \n " ,
scrq - > crq_num , scrq - > hw_irq , scrq - > irq ) ;
return scrq ;
reg_failed :
dma_unmap_single ( dev , scrq - > msg_token , 4 * PAGE_SIZE ,
DMA_BIDIRECTIONAL ) ;
map_failed :
free_pages ( ( unsigned long ) scrq - > msgs , 2 ) ;
zero_page_failed :
kfree ( scrq ) ;
return NULL ;
}
2018-02-19 13:30:31 -06:00
static void release_sub_crqs ( struct ibmvnic_adapter * adapter , bool do_h_free )
2015-12-21 11:26:06 -06:00
{
int i ;
if ( adapter - > tx_scrq ) {
2018-02-21 21:33:56 -06:00
for ( i = 0 ; i < adapter - > num_active_tx_scrqs ; i + + ) {
2017-03-30 02:49:18 -04:00
if ( ! adapter - > tx_scrq [ i ] )
continue ;
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Releasing tx_scrq[%d] \n " ,
i ) ;
2017-03-30 02:49:18 -04:00
if ( adapter - > tx_scrq [ i ] - > irq ) {
2015-12-21 11:26:06 -06:00
free_irq ( adapter - > tx_scrq [ i ] - > irq ,
adapter - > tx_scrq [ i ] ) ;
2016-07-06 15:35:16 -05:00
irq_dispose_mapping ( adapter - > tx_scrq [ i ] - > irq ) ;
2017-03-30 02:49:18 -04:00
adapter - > tx_scrq [ i ] - > irq = 0 ;
2015-12-21 11:26:06 -06:00
}
2017-03-30 02:49:18 -04:00
2018-02-19 13:30:31 -06:00
release_sub_crq_queue ( adapter , adapter - > tx_scrq [ i ] ,
do_h_free ) ;
2017-03-30 02:49:18 -04:00
}
2017-03-15 23:38:07 -04:00
kfree ( adapter - > tx_scrq ) ;
2015-12-21 11:26:06 -06:00
adapter - > tx_scrq = NULL ;
2018-02-21 21:33:56 -06:00
adapter - > num_active_tx_scrqs = 0 ;
2015-12-21 11:26:06 -06:00
}
if ( adapter - > rx_scrq ) {
2018-02-21 21:33:56 -06:00
for ( i = 0 ; i < adapter - > num_active_rx_scrqs ; i + + ) {
2017-03-30 02:49:18 -04:00
if ( ! adapter - > rx_scrq [ i ] )
continue ;
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Releasing rx_scrq[%d] \n " ,
i ) ;
2017-03-30 02:49:18 -04:00
if ( adapter - > rx_scrq [ i ] - > irq ) {
2015-12-21 11:26:06 -06:00
free_irq ( adapter - > rx_scrq [ i ] - > irq ,
adapter - > rx_scrq [ i ] ) ;
2016-07-06 15:35:16 -05:00
irq_dispose_mapping ( adapter - > rx_scrq [ i ] - > irq ) ;
2017-03-30 02:49:18 -04:00
adapter - > rx_scrq [ i ] - > irq = 0 ;
2015-12-21 11:26:06 -06:00
}
2018-02-19 13:30:31 -06:00
release_sub_crq_queue ( adapter , adapter - > rx_scrq [ i ] ,
do_h_free ) ;
2017-03-30 02:49:18 -04:00
}
2016-07-06 15:35:17 -05:00
2017-03-30 02:49:18 -04:00
kfree ( adapter - > rx_scrq ) ;
2016-07-06 15:35:17 -05:00
adapter - > rx_scrq = NULL ;
2018-02-21 21:33:56 -06:00
adapter - > num_active_rx_scrqs = 0 ;
2016-07-06 15:35:17 -05:00
}
}
2015-12-21 11:26:06 -06:00
static int disable_scrq_irq ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_sub_crq_queue * scrq )
{
struct device * dev = & adapter - > vdev - > dev ;
unsigned long rc ;
rc = plpar_hcall_norets ( H_VIOCTL , adapter - > vdev - > unit_address ,
H_DISABLE_VIO_INTERRUPT , scrq - > hw_irq , 0 , 0 ) ;
if ( rc )
dev_err ( dev , " Couldn't disable scrq irq 0x%lx. rc=%ld \n " ,
scrq - > hw_irq , rc ) ;
return rc ;
}
static int enable_scrq_irq ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_sub_crq_queue * scrq )
{
struct device * dev = & adapter - > vdev - > dev ;
unsigned long rc ;
if ( scrq - > hw_irq > 0x100000000ULL ) {
dev_err ( dev , " bad hw_irq = %lx \n " , scrq - > hw_irq ) ;
return 1 ;
}
2019-09-20 16:11:23 -04:00
if ( test_bit ( 0 , & adapter - > resetting ) & &
2018-05-22 11:21:10 -05:00
adapter - > reset_reason = = VNIC_RESET_MOBILITY ) {
2019-11-20 10:50:03 -05:00
u64 val = ( 0xff000000 ) | scrq - > hw_irq ;
2018-05-22 11:21:10 -05:00
2019-11-20 10:50:03 -05:00
rc = plpar_hcall_norets ( H_EOI , val ) ;
2019-11-20 10:50:04 -05:00
/* H_EOI fails with rc = H_FUNCTION when running in XIVE mode;
 * this is expected and not an error.
 */
if ( rc & & ( rc ! = H_FUNCTION ) )
2019-11-20 10:50:03 -05:00
dev_err ( dev , " H_EOI FAILED irq 0x%llx. rc=%ld \n " ,
val , rc ) ;
2018-05-22 11:21:10 -05:00
}
2018-04-15 18:53:36 -05:00
2015-12-21 11:26:06 -06:00
rc = plpar_hcall_norets ( H_VIOCTL , adapter - > vdev - > unit_address ,
H_ENABLE_VIO_INTERRUPT , scrq - > hw_irq , 0 , 0 ) ;
if ( rc )
dev_err ( dev , " Couldn't enable scrq irq 0x%lx. rc=%ld \n " ,
scrq - > hw_irq , rc ) ;
return rc ;
}
static int ibmvnic_complete_tx ( struct ibmvnic_adapter * adapter ,
struct ibmvnic_sub_crq_queue * scrq )
{
struct device * dev = & adapter - > vdev - > dev ;
2018-03-16 20:00:28 -05:00
struct ibmvnic_tx_pool * tx_pool ;
2015-12-21 11:26:06 -06:00
struct ibmvnic_tx_buff * txbuff ;
union sub_crq * next ;
int index ;
int i , j ;
restart_loop :
while ( pending_scrq ( adapter , scrq ) ) {
unsigned int pool = scrq - > pool_index ;
2018-02-18 10:08:41 -06:00
int num_entries = 0 ;
2015-12-21 11:26:06 -06:00
next = ibmvnic_next_scrq ( adapter , scrq ) ;
for ( i = 0 ; i < next - > tx_comp . num_comps ; i + + ) {
if ( next - > tx_comp . rcs [ i ] ) {
dev_err ( dev , " tx error %x \n " ,
next - > tx_comp . rcs [ i ] ) ;
continue ;
}
index = be32_to_cpu ( next - > tx_comp . correlators [ i ] ) ;
2018-03-16 20:00:28 -05:00
if ( index & IBMVNIC_TSO_POOL_MASK ) {
tx_pool = & adapter - > tso_pool [ pool ] ;
index & = ~ IBMVNIC_TSO_POOL_MASK ;
} else {
tx_pool = & adapter - > tx_pool [ pool ] ;
}
txbuff = & tx_pool - > tx_buff [ index ] ;
2015-12-21 11:26:06 -06:00
for ( j = 0 ; j < IBMVNIC_MAX_FRAGS_PER_CRQ ; j + + ) {
if ( ! txbuff - > data_dma [ j ] )
continue ;
txbuff - > data_dma [ j ] = 0 ;
}
2017-03-05 12:18:41 -06:00
if ( txbuff - > last_frag ) {
2015-12-21 11:26:06 -06:00
dev_kfree_skb_any ( txbuff - > skb ) ;
2017-05-03 14:05:25 -04:00
txbuff - > skb = NULL ;
2017-03-05 12:18:41 -06:00
}
2015-12-21 11:26:06 -06:00
2018-02-18 10:08:41 -06:00
num_entries + = txbuff - > num_entries ;
2018-03-16 20:00:28 -05:00
tx_pool - > free_map [ tx_pool - > producer_index ] = index ;
tx_pool - > producer_index =
( tx_pool - > producer_index + 1 ) %
tx_pool - > num_buffers ;
2015-12-21 11:26:06 -06:00
}
/* remove tx_comp scrq*/
next - > tx_comp . first = 0 ;
2017-05-03 14:05:25 -04:00
2018-02-18 10:08:41 -06:00
if ( atomic_sub_return ( num_entries , & scrq - > used ) < =
2017-05-03 14:05:25 -04:00
( adapter - > req_tx_entries_per_subcrq / 2 ) & &
__netif_subqueue_stopped ( adapter - > netdev ,
scrq - > pool_index ) ) {
netif_wake_subqueue ( adapter - > netdev , scrq - > pool_index ) ;
2018-02-26 18:10:58 -06:00
netdev_dbg ( adapter - > netdev , " Started queue %d \n " ,
scrq - > pool_index ) ;
2017-05-03 14:05:25 -04:00
}
2015-12-21 11:26:06 -06:00
}
enable_scrq_irq ( adapter , scrq ) ;
if ( pending_scrq ( adapter , scrq ) ) {
disable_scrq_irq ( adapter , scrq ) ;
goto restart_loop ;
}
return 0 ;
}
static irqreturn_t ibmvnic_interrupt_tx ( int irq , void * instance )
{
struct ibmvnic_sub_crq_queue * scrq = instance ;
struct ibmvnic_adapter * adapter = scrq - > adapter ;
disable_scrq_irq ( adapter , scrq ) ;
ibmvnic_complete_tx ( adapter , scrq ) ;
return IRQ_HANDLED ;
}
static irqreturn_t ibmvnic_interrupt_rx ( int irq , void * instance )
{
struct ibmvnic_sub_crq_queue * scrq = instance ;
struct ibmvnic_adapter * adapter = scrq - > adapter ;
2018-01-10 10:40:09 -06:00
/* When booting a kdump kernel we can hit pending interrupts
* prior to completing driver initialization .
*/
if ( unlikely ( adapter - > state ! = VNIC_OPEN ) )
return IRQ_NONE ;
2017-08-02 16:44:14 -05:00
adapter - > rx_stats_buffers [ scrq - > scrq_num ] . interrupts + + ;
2015-12-21 11:26:06 -06:00
if ( napi_schedule_prep ( & adapter - > napi [ scrq - > scrq_num ] ) ) {
disable_scrq_irq ( adapter , scrq ) ;
__napi_schedule ( & adapter - > napi [ scrq - > scrq_num ] ) ;
}
return IRQ_HANDLED ;
}
2016-07-06 15:35:17 -05:00
static int init_sub_crq_irqs ( struct ibmvnic_adapter * adapter )
{
struct device * dev = & adapter - > vdev - > dev ;
struct ibmvnic_sub_crq_queue * scrq ;
int i = 0 , j = 0 ;
int rc = 0 ;
for ( i = 0 ; i < adapter - > req_tx_queues ; i + + ) {
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Initializing tx_scrq[%d] irq \n " ,
i ) ;
2016-07-06 15:35:17 -05:00
scrq = adapter - > tx_scrq [ i ] ;
scrq - > irq = irq_create_mapping ( NULL , scrq - > hw_irq ) ;
2016-09-10 19:59:05 +10:00
if ( ! scrq - > irq ) {
2016-07-06 15:35:17 -05:00
rc = - EINVAL ;
dev_err ( dev , " Error mapping irq \n " ) ;
goto req_tx_irq_failed ;
}
2019-04-25 11:02:33 -03:00
snprintf ( scrq - > name , sizeof ( scrq - > name ) , " ibmvnic-%x-tx%d " ,
adapter - > vdev - > unit_address , i ) ;
2016-07-06 15:35:17 -05:00
rc = request_irq ( scrq - > irq , ibmvnic_interrupt_tx ,
2019-04-25 11:02:33 -03:00
0 , scrq - > name , scrq ) ;
2016-07-06 15:35:17 -05:00
if ( rc ) {
dev_err ( dev , " Couldn't register tx irq 0x%x. rc=%d \n " ,
scrq - > irq , rc ) ;
irq_dispose_mapping ( scrq - > irq ) ;
2018-02-20 11:04:18 -06:00
goto req_tx_irq_failed ;
2016-07-06 15:35:17 -05:00
}
}
for ( i = 0 ; i < adapter - > req_rx_queues ; i + + ) {
2017-08-08 15:24:05 -05:00
netdev_dbg ( adapter - > netdev , " Initializing rx_scrq[%d] irq \n " ,
i ) ;
2016-07-06 15:35:17 -05:00
scrq = adapter - > rx_scrq [ i ] ;
scrq - > irq = irq_create_mapping ( NULL , scrq - > hw_irq ) ;
2016-09-10 19:59:05 +10:00
if ( ! scrq - > irq ) {
2016-07-06 15:35:17 -05:00
rc = - EINVAL ;
dev_err ( dev , " Error mapping irq \n " ) ;
goto req_rx_irq_failed ;
}
2019-04-25 11:02:33 -03:00
snprintf ( scrq - > name , sizeof ( scrq - > name ) , " ibmvnic-%x-rx%d " ,
adapter - > vdev - > unit_address , i ) ;
2016-07-06 15:35:17 -05:00
rc = request_irq ( scrq - > irq , ibmvnic_interrupt_rx ,
2019-04-25 11:02:33 -03:00
0 , scrq - > name , scrq ) ;
2016-07-06 15:35:17 -05:00
if ( rc ) {
dev_err ( dev , " Couldn't register rx irq 0x%x. rc=%d \n " ,
scrq - > irq , rc ) ;
irq_dispose_mapping ( scrq - > irq ) ;
goto req_rx_irq_failed ;
}
}
return rc ;
req_rx_irq_failed :
2016-10-27 12:28:52 -05:00
for ( j = 0 ; j < i ; j + + ) {
2016-07-06 15:35:17 -05:00
free_irq ( adapter - > rx_scrq [ j ] - > irq , adapter - > rx_scrq [ j ] ) ;
irq_dispose_mapping ( adapter - > rx_scrq [ j ] - > irq ) ;
2016-10-27 12:28:52 -05:00
}
2016-07-06 15:35:17 -05:00
i = adapter - > req_tx_queues ;
req_tx_irq_failed :
2016-10-27 12:28:52 -05:00
for ( j = 0 ; j < i ; j + + ) {
2016-07-06 15:35:17 -05:00
free_irq ( adapter - > tx_scrq [ j ] - > irq , adapter - > tx_scrq [ j ] ) ;
irq_dispose_mapping ( adapter - > rx_scrq [ j ] - > irq ) ;
2016-10-27 12:28:52 -05:00
}
2018-02-19 13:30:31 -06:00
release_sub_crqs ( adapter , 1 ) ;
2016-07-06 15:35:17 -05:00
return rc ;
}
2017-04-25 15:01:04 -04:00
static int init_sub_crqs ( struct ibmvnic_adapter * adapter )
2015-12-21 11:26:06 -06:00
{
struct device * dev = & adapter - > vdev - > dev ;
struct ibmvnic_sub_crq_queue * * allqueues ;
int registered_queues = 0 ;
int total_queues ;
int more = 0 ;
2016-07-06 15:35:17 -05:00
int i ;
2015-12-21 11:26:06 -06:00
total_queues = adapter - > req_tx_queues + adapter - > req_rx_queues ;
2017-04-25 15:01:10 -04:00
allqueues = kcalloc ( total_queues , sizeof ( * allqueues ) , GFP_KERNEL ) ;
2015-12-21 11:26:06 -06:00
if ( ! allqueues )
2017-04-25 15:01:04 -04:00
return - 1 ;
2015-12-21 11:26:06 -06:00
for ( i = 0 ; i < total_queues ; i + + ) {
allqueues [ i ] = init_sub_crq_queue ( adapter ) ;
if ( ! allqueues [ i ] ) {
dev_warn ( dev , " Couldn't allocate all sub-crqs \n " ) ;
break ;
}
registered_queues + + ;
}
/* Make sure we were able to register the minimum number of queues */
if ( registered_queues <
adapter - > min_tx_queues + adapter - > min_rx_queues ) {
dev_err ( dev , " Fatal: Couldn't init min number of sub-crqs \n " ) ;
goto tx_failed ;
}
/* Distribute the failed allocated queues*/
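/* Spread the shortfall by trimming the requested RX and TX queue counts
 * in turn, never going below the negotiated minimums; "more" grows when
 * a request is already at its minimum so the loop keeps iterating until
 * the deficit is absorbed.
 */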
for ( i = 0 ; i < total_queues - registered_queues + more ; i + + ) {
netdev_dbg ( adapter - > netdev , " Reducing number of queues \n " ) ;
switch ( i % 3 ) {
case 0 :
if ( adapter - > req_rx_queues > adapter - > min_rx_queues )
adapter - > req_rx_queues - - ;
else
more + + ;
break ;
case 1 :
if ( adapter - > req_tx_queues > adapter - > min_tx_queues )
adapter - > req_tx_queues - - ;
else
more + + ;
break ;
}
}
adapter - > tx_scrq = kcalloc ( adapter - > req_tx_queues ,
2017-04-25 15:01:10 -04:00
sizeof ( * adapter - > tx_scrq ) , GFP_KERNEL ) ;
2015-12-21 11:26:06 -06:00
if ( ! adapter - > tx_scrq )
goto tx_failed ;
for ( i = 0 ; i < adapter - > req_tx_queues ; i + + ) {
adapter - > tx_scrq [ i ] = allqueues [ i ] ;
adapter - > tx_scrq [ i ] - > pool_index = i ;
2018-02-21 21:33:56 -06:00
adapter - > num_active_tx_scrqs + + ;
2015-12-21 11:26:06 -06:00
}
adapter - > rx_scrq = kcalloc ( adapter - > req_rx_queues ,
2017-04-25 15:01:10 -04:00
sizeof ( * adapter - > rx_scrq ) , GFP_KERNEL ) ;
2015-12-21 11:26:06 -06:00
if ( ! adapter - > rx_scrq )
goto rx_failed ;
for ( i = 0 ; i < adapter - > req_rx_queues ; i + + ) {
adapter - > rx_scrq [ i ] = allqueues [ i + adapter - > req_tx_queues ] ;
adapter - > rx_scrq [ i ] - > scrq_num = i ;
2018-02-21 21:33:56 -06:00
adapter - > num_active_rx_scrqs + + ;
2015-12-21 11:26:06 -06:00
}
2017-04-25 15:01:04 -04:00
kfree ( allqueues ) ;
return 0 ;
rx_failed :
kfree ( adapter - > tx_scrq ) ;
adapter - > tx_scrq = NULL ;
tx_failed :
for ( i = 0 ; i < registered_queues ; i + + )
2018-02-19 13:30:31 -06:00
release_sub_crq_queue ( adapter , allqueues [ i ] , 1 ) ;
2017-04-25 15:01:04 -04:00
kfree ( allqueues ) ;
return - 1 ;
}
static void ibmvnic_send_req_caps ( struct ibmvnic_adapter * adapter , int retry )
{
struct device * dev = & adapter - > vdev - > dev ;
union ibmvnic_crq crq ;
2017-10-26 16:23:25 -05:00
int max_entries ;
2017-04-25 15:01:04 -04:00
if ( ! retry ) {
/* Sub-CRQ entries are 32 bytes long */
int entries_page = 4 * PAGE_SIZE / ( sizeof ( u64 ) * 4 ) ;
if ( adapter - > min_tx_entries_per_subcrq > entries_page | |
adapter - > min_rx_add_entries_per_subcrq > entries_page ) {
dev_err ( dev , " Fatal, invalid entries per sub-crq \n " ) ;
return ;
}
2017-10-26 16:23:25 -05:00
if ( adapter - > desired . mtu )
adapter - > req_mtu = adapter - > desired . mtu ;
else
adapter - > req_mtu = adapter - > netdev - > mtu + ETH_HLEN ;
if ( ! adapter - > desired . tx_entries )
adapter - > desired . tx_entries =
adapter - > max_tx_entries_per_subcrq ;
if ( ! adapter - > desired . rx_entries )
adapter - > desired . rx_entries =
adapter - > max_rx_add_entries_per_subcrq ;
max_entries = IBMVNIC_MAX_LTB_SIZE /
( adapter - > req_mtu + IBMVNIC_BUFFER_HLEN ) ;
if ( ( adapter - > req_mtu + IBMVNIC_BUFFER_HLEN ) *
adapter - > desired . tx_entries > IBMVNIC_MAX_LTB_SIZE ) {
adapter - > desired . tx_entries = max_entries ;
}
2017-04-25 15:01:04 -04:00
2017-10-26 16:23:25 -05:00
if ( ( adapter - > req_mtu + IBMVNIC_BUFFER_HLEN ) *
adapter - > desired . rx_entries > IBMVNIC_MAX_LTB_SIZE ) {
adapter - > desired . rx_entries = max_entries ;
}
if ( adapter - > desired . tx_entries )
adapter - > req_tx_entries_per_subcrq =
adapter - > desired . tx_entries ;
else
adapter - > req_tx_entries_per_subcrq =
adapter - > max_tx_entries_per_subcrq ;
if ( adapter - > desired . rx_entries )
adapter - > req_rx_add_entries_per_subcrq =
adapter - > desired . rx_entries ;
else
adapter - > req_rx_add_entries_per_subcrq =
adapter - > max_rx_add_entries_per_subcrq ;
if ( adapter - > desired . tx_queues )
adapter - > req_tx_queues =
adapter - > desired . tx_queues ;
else
adapter - > req_tx_queues =
adapter - > opt_tx_comp_sub_queues ;
if ( adapter - > desired . rx_queues )
adapter - > req_rx_queues =
adapter - > desired . rx_queues ;
else
adapter - > req_rx_queues =
adapter - > opt_rx_comp_queues ;
adapter - > req_rx_add_queues = adapter - > max_rx_add_queues ;
2017-04-25 15:01:04 -04:00
}
2015-12-21 11:26:06 -06:00
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . request_capability . first = IBMVNIC_CRQ_CMD ;
crq . request_capability . cmd = REQUEST_CAPABILITY ;
crq . request_capability . capability = cpu_to_be16 ( REQ_TX_QUEUES ) ;
2016-03-01 10:20:09 -06:00
crq . request_capability . number = cpu_to_be64 ( adapter - > req_tx_queues ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . request_capability . capability = cpu_to_be16 ( REQ_RX_QUEUES ) ;
2016-03-01 10:20:09 -06:00
crq . request_capability . number = cpu_to_be64 ( adapter - > req_rx_queues ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . request_capability . capability = cpu_to_be16 ( REQ_RX_ADD_QUEUES ) ;
2016-03-01 10:20:09 -06:00
crq . request_capability . number = cpu_to_be64 ( adapter - > req_rx_add_queues ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . request_capability . capability =
cpu_to_be16 ( REQ_TX_ENTRIES_PER_SUBCRQ ) ;
crq . request_capability . number =
2016-03-01 10:20:09 -06:00
cpu_to_be64 ( adapter - > req_tx_entries_per_subcrq ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . request_capability . capability =
cpu_to_be16 ( REQ_RX_ADD_ENTRIES_PER_SUBCRQ ) ;
crq . request_capability . number =
2016-03-01 10:20:09 -06:00
cpu_to_be64 ( adapter - > req_rx_add_entries_per_subcrq ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . request_capability . capability = cpu_to_be16 ( REQ_MTU ) ;
2016-03-01 10:20:09 -06:00
crq . request_capability . number = cpu_to_be64 ( adapter - > req_mtu ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
if ( adapter - > netdev - > flags & IFF_PROMISC ) {
if ( adapter - > promisc_supported ) {
crq . request_capability . capability =
cpu_to_be16 ( PROMISC_REQUESTED ) ;
2016-03-01 10:20:09 -06:00
crq . request_capability . number = cpu_to_be64 ( 1 ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
}
} else {
crq . request_capability . capability =
cpu_to_be16 ( PROMISC_REQUESTED ) ;
2016-03-01 10:20:09 -06:00
crq . request_capability . number = cpu_to_be64 ( 0 ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
}
}
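/* A sub-CRQ entry is valid once the server sets the IBMVNIC_CRQ_CMD_RSP
 * bit in its first byte.  pending_scrq() peeks at the current slot;
 * ibmvnic_next_scrq() consumes it under the queue lock and advances the
 * cursor, wrapping at the end of the ring.  ibmvnic_next_crq() does the
 * same for the main CRQ.
 */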
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static void print_subcrq_error(struct device *dev, int rc, const char *func)
{
	switch (rc) {
	case H_PARAMETER:
		dev_warn_ratelimited(dev,
				     "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
				     func, rc);
		break;
	case H_CLOSED:
		dev_warn_ratelimited(dev,
				     "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
				     func, rc);
		break;
	default:
		dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
		break;
	}
}
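/* Hand a descriptor (or, for the indirect variant, a table of descriptors
 * already mapped at ioba) to the hypervisor via H_SEND_SUB_CRQ.  The
 * memory barrier before the hcall ensures the hypervisor sees a fully
 * written request.
 */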
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc)
		print_subcrq_error(dev, rc, __func__);

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	if (!adapter->crq.active &&
	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
		return -EINVAL;
	}

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED) {
			dev_warn(dev, "CRQ Queue closed\n");
			if (test_bit(0, &adapter->resetting))
				ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}

		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
struct vnic_login_client_data {
	u8	type;
	__be16	len;
	char	name[];
} __packed;

static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
{
	int len;

	/* Calculate the amount of buffer space needed for the
	 * vnic client data in the login buffer.  There are four entries,
	 * OS name, LPAR name, device name, and a null last entry.
	 */
	len = 4 * sizeof(struct vnic_login_client_data);
	len += 6; /* "Linux" plus NULL */
	len += strlen(utsname()->nodename) + 1;
	len += strlen(adapter->netdev->name) + 1;

	return len;
}

static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
				 struct vnic_login_client_data *vlcd)
{
	const char *os_name = "Linux";
	int len;

	/* Type 1 - LPAR OS */
	vlcd->type = 1;
	len = strlen(os_name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, os_name, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 2 - LPAR name */
	vlcd->type = 2;
	len = strlen(utsname()->nodename) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, utsname()->nodename, len);
	vlcd = (struct vnic_login_client_data *)(vlcd->name + len);

	/* Type 3 - device name */
	vlcd->type = 3;
	len = strlen(adapter->netdev->name) + 1;
	vlcd->len = cpu_to_be16(len);
	strncpy(vlcd->name, adapter->netdev->name, len);
}
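/* Build and DMA-map the login buffer: it carries the sub-CRQ handles for
 * every TX and RX queue, the ioba/length of the pre-mapped response
 * buffer, and the client data records above.  The LOGIN CRQ then points
 * the server at this buffer.
 */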
2018-02-26 18:10:59 -06:00
static int send_login ( struct ibmvnic_adapter * adapter )
2015-12-21 11:26:06 -06:00
{
struct ibmvnic_login_rsp_buffer * login_rsp_buffer ;
struct ibmvnic_login_buffer * login_buffer ;
struct device * dev = & adapter - > vdev - > dev ;
dma_addr_t rsp_buffer_token ;
dma_addr_t buffer_token ;
size_t rsp_buffer_size ;
union ibmvnic_crq crq ;
size_t buffer_size ;
__be64 * tx_list_p ;
__be64 * rx_list_p ;
2017-11-08 11:23:56 -06:00
int client_data_len ;
struct vnic_login_client_data * vlcd ;
2015-12-21 11:26:06 -06:00
int i ;
2018-02-26 18:10:59 -06:00
if ( ! adapter - > tx_scrq | | ! adapter - > rx_scrq ) {
netdev_err ( adapter - > netdev ,
" RX or TX queues are not allocated, device login failed \n " ) ;
return - 1 ;
}
2018-02-13 18:23:40 -06:00
release_login_rsp_buffer ( adapter ) ;
2017-11-08 11:23:56 -06:00
client_data_len = vnic_client_data_len ( adapter ) ;
2015-12-21 11:26:06 -06:00
buffer_size =
sizeof ( struct ibmvnic_login_buffer ) +
2017-11-08 11:23:56 -06:00
sizeof ( u64 ) * ( adapter - > req_tx_queues + adapter - > req_rx_queues ) +
client_data_len ;
2015-12-21 11:26:06 -06:00
2017-11-08 11:23:56 -06:00
login_buffer = kzalloc ( buffer_size , GFP_ATOMIC ) ;
2015-12-21 11:26:06 -06:00
if ( ! login_buffer )
goto buf_alloc_failed ;
buffer_token = dma_map_single ( dev , login_buffer , buffer_size ,
DMA_TO_DEVICE ) ;
if ( dma_mapping_error ( dev , buffer_token ) ) {
dev_err ( dev , " Couldn't map login buffer \n " ) ;
goto buf_map_failed ;
}
2016-04-06 11:49:55 -05:00
rsp_buffer_size = sizeof ( struct ibmvnic_login_rsp_buffer ) +
sizeof ( u64 ) * adapter - > req_tx_queues +
sizeof ( u64 ) * adapter - > req_rx_queues +
sizeof ( u64 ) * adapter - > req_rx_queues +
sizeof ( u8 ) * IBMVNIC_TX_DESC_VERSIONS ;
2015-12-21 11:26:06 -06:00
login_rsp_buffer = kmalloc ( rsp_buffer_size , GFP_ATOMIC ) ;
if ( ! login_rsp_buffer )
goto buf_rsp_alloc_failed ;
rsp_buffer_token = dma_map_single ( dev , login_rsp_buffer ,
rsp_buffer_size , DMA_FROM_DEVICE ) ;
if ( dma_mapping_error ( dev , rsp_buffer_token ) ) {
dev_err ( dev , " Couldn't map login rsp buffer \n " ) ;
goto buf_rsp_map_failed ;
}
2017-04-19 13:44:58 -04:00
2015-12-21 11:26:06 -06:00
adapter - > login_buf = login_buffer ;
adapter - > login_buf_token = buffer_token ;
adapter - > login_buf_sz = buffer_size ;
adapter - > login_rsp_buf = login_rsp_buffer ;
adapter - > login_rsp_buf_token = rsp_buffer_token ;
adapter - > login_rsp_buf_sz = rsp_buffer_size ;
login_buffer - > len = cpu_to_be32 ( buffer_size ) ;
login_buffer - > version = cpu_to_be32 ( INITIAL_VERSION_LB ) ;
login_buffer - > num_txcomp_subcrqs = cpu_to_be32 ( adapter - > req_tx_queues ) ;
login_buffer - > off_txcomp_subcrqs =
cpu_to_be32 ( sizeof ( struct ibmvnic_login_buffer ) ) ;
login_buffer - > num_rxcomp_subcrqs = cpu_to_be32 ( adapter - > req_rx_queues ) ;
login_buffer - > off_rxcomp_subcrqs =
cpu_to_be32 ( sizeof ( struct ibmvnic_login_buffer ) +
sizeof ( u64 ) * adapter - > req_tx_queues ) ;
login_buffer - > login_rsp_ioba = cpu_to_be32 ( rsp_buffer_token ) ;
login_buffer - > login_rsp_len = cpu_to_be32 ( rsp_buffer_size ) ;
tx_list_p = ( __be64 * ) ( ( char * ) login_buffer +
sizeof ( struct ibmvnic_login_buffer ) ) ;
rx_list_p = ( __be64 * ) ( ( char * ) login_buffer +
sizeof ( struct ibmvnic_login_buffer ) +
sizeof ( u64 ) * adapter - > req_tx_queues ) ;
for ( i = 0 ; i < adapter - > req_tx_queues ; i + + ) {
if ( adapter - > tx_scrq [ i ] ) {
tx_list_p [ i ] = cpu_to_be64 ( adapter - > tx_scrq [ i ] - >
crq_num ) ;
}
}
for ( i = 0 ; i < adapter - > req_rx_queues ; i + + ) {
if ( adapter - > rx_scrq [ i ] ) {
rx_list_p [ i ] = cpu_to_be64 ( adapter - > rx_scrq [ i ] - >
crq_num ) ;
}
}
2017-11-08 11:23:56 -06:00
/* Insert vNIC login client data */
vlcd = ( struct vnic_login_client_data * )
( ( char * ) rx_list_p + ( sizeof ( u64 ) * adapter - > req_rx_queues ) ) ;
login_buffer - > client_data_offset =
cpu_to_be32 ( ( char * ) vlcd - ( char * ) login_buffer ) ;
login_buffer - > client_data_len = cpu_to_be32 ( client_data_len ) ;
vnic_add_client_data ( adapter , vlcd ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( adapter - > netdev , " Login Buffer: \n " ) ;
for ( i = 0 ; i < ( adapter - > login_buf_sz - 1 ) / 8 + 1 ; i + + ) {
netdev_dbg ( adapter - > netdev , " %016lx \n " ,
( ( unsigned long int * ) ( adapter - > login_buf ) ) [ i ] ) ;
}
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . login . first = IBMVNIC_CRQ_CMD ;
crq . login . cmd = LOGIN ;
crq . login . ioba = cpu_to_be32 ( buffer_token ) ;
crq . login . len = cpu_to_be32 ( buffer_size ) ;
ibmvnic_send_crq ( adapter , & crq ) ;
2018-02-26 18:10:59 -06:00
return 0 ;
2015-12-21 11:26:06 -06:00
buf_rsp_map_failed :
kfree ( login_rsp_buffer ) ;
buf_rsp_alloc_failed :
dma_unmap_single ( dev , buffer_token , buffer_size , DMA_TO_DEVICE ) ;
buf_map_failed :
kfree ( login_buffer ) ;
buf_alloc_failed :
2018-02-26 18:10:59 -06:00
return - 1 ;
2015-12-21 11:26:06 -06:00
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			    u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;

	return ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries ( struct ibmvnic_adapter * adapter )
{
union ibmvnic_crq crq ;
2017-02-15 12:17:59 -06:00
atomic_set ( & adapter - > running_cap_crqs , 0 ) ;
2015-12-21 11:26:06 -06:00
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . query_capability . first = IBMVNIC_CRQ_CMD ;
crq . query_capability . cmd = QUERY_CAPABILITY ;
crq . query_capability . capability = cpu_to_be16 ( MIN_TX_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MIN_RX_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MIN_RX_ADD_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MAX_TX_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MAX_RX_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MAX_RX_ADD_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability =
cpu_to_be16 ( MIN_TX_ENTRIES_PER_SUBCRQ ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability =
cpu_to_be16 ( MIN_RX_ADD_ENTRIES_PER_SUBCRQ ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability =
cpu_to_be16 ( MAX_TX_ENTRIES_PER_SUBCRQ ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability =
cpu_to_be16 ( MAX_RX_ADD_ENTRIES_PER_SUBCRQ ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( TCP_IP_OFFLOAD ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( PROMISC_SUPPORTED ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MIN_MTU ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MAX_MTU ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( MAX_MULTICAST_FILTERS ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( VLAN_HEADER_INSERTION ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
2017-04-21 15:38:46 -04:00
crq . query_capability . capability = cpu_to_be16 ( RX_VLAN_HEADER_INSERTION ) ;
atomic_inc ( & adapter - > running_cap_crqs ) ;
ibmvnic_send_crq ( adapter , & crq ) ;
2015-12-21 11:26:06 -06:00
crq . query_capability . capability = cpu_to_be16 ( MAX_TX_SG_ENTRIES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( RX_SG_SUPPORTED ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( OPT_TX_COMP_SUB_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( OPT_RX_COMP_QUEUES ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability =
cpu_to_be16 ( OPT_RX_BUFADD_Q_PER_RX_COMP_Q ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability =
cpu_to_be16 ( OPT_TX_ENTRIES_PER_SUBCRQ ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability =
cpu_to_be16 ( OPT_RXBA_ENTRIES_PER_SUBCRQ ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
crq . query_capability . capability = cpu_to_be16 ( TX_RX_DESC_REQ ) ;
2017-02-15 12:17:59 -06:00
atomic_inc ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
ibmvnic_send_crq ( adapter , & crq ) ;
}
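/* VPD responses arrive in two steps: GET_VPD_SIZE_RSP supplies the buffer
 * length, then GET_VPD_RSP fills the buffer.  The firmware level string
 * sits after the ASCII "RM" keyword, and the byte following "RM" gives
 * its length.
 */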
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
	complete(&adapter->fw_done);
}
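/* The IP offload query response below is used both to populate the
 * control buffer returned via CONTROL_IP_OFFLOAD and to derive the netdev
 * hw_features (IPv4/IPv6 checksum offload, TSO); large receive is left
 * disabled.
 */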
2015-12-21 11:26:06 -06:00
static void handle_query_ip_offload_rsp ( struct ibmvnic_adapter * adapter )
{
struct device * dev = & adapter - > vdev - > dev ;
struct ibmvnic_query_ip_offload_buffer * buf = & adapter - > ip_offload_buf ;
2019-04-10 11:07:00 -05:00
netdev_features_t old_hw_features = 0 ;
2015-12-21 11:26:06 -06:00
union ibmvnic_crq crq ;
int i ;
dma_unmap_single ( dev , adapter - > ip_offload_tok ,
sizeof ( adapter - > ip_offload_buf ) , DMA_FROM_DEVICE ) ;
netdev_dbg ( adapter - > netdev , " Query IP Offload Buffer: \n " ) ;
for ( i = 0 ; i < ( sizeof ( adapter - > ip_offload_buf ) - 1 ) / 8 + 1 ; i + + )
netdev_dbg ( adapter - > netdev , " %016lx \n " ,
( ( unsigned long int * ) ( buf ) ) [ i ] ) ;
netdev_dbg ( adapter - > netdev , " ipv4_chksum = %d \n " , buf - > ipv4_chksum ) ;
netdev_dbg ( adapter - > netdev , " ipv6_chksum = %d \n " , buf - > ipv6_chksum ) ;
netdev_dbg ( adapter - > netdev , " tcp_ipv4_chksum = %d \n " ,
buf - > tcp_ipv4_chksum ) ;
netdev_dbg ( adapter - > netdev , " tcp_ipv6_chksum = %d \n " ,
buf - > tcp_ipv6_chksum ) ;
netdev_dbg ( adapter - > netdev , " udp_ipv4_chksum = %d \n " ,
buf - > udp_ipv4_chksum ) ;
netdev_dbg ( adapter - > netdev , " udp_ipv6_chksum = %d \n " ,
buf - > udp_ipv6_chksum ) ;
netdev_dbg ( adapter - > netdev , " large_tx_ipv4 = %d \n " ,
buf - > large_tx_ipv4 ) ;
netdev_dbg ( adapter - > netdev , " large_tx_ipv6 = %d \n " ,
buf - > large_tx_ipv6 ) ;
netdev_dbg ( adapter - > netdev , " large_rx_ipv4 = %d \n " ,
buf - > large_rx_ipv4 ) ;
netdev_dbg ( adapter - > netdev , " large_rx_ipv6 = %d \n " ,
buf - > large_rx_ipv6 ) ;
netdev_dbg ( adapter - > netdev , " max_ipv4_hdr_sz = %d \n " ,
buf - > max_ipv4_header_size ) ;
netdev_dbg ( adapter - > netdev , " max_ipv6_hdr_sz = %d \n " ,
buf - > max_ipv6_header_size ) ;
netdev_dbg ( adapter - > netdev , " max_tcp_hdr_size = %d \n " ,
buf - > max_tcp_header_size ) ;
netdev_dbg ( adapter - > netdev , " max_udp_hdr_size = %d \n " ,
buf - > max_udp_header_size ) ;
netdev_dbg ( adapter - > netdev , " max_large_tx_size = %d \n " ,
buf - > max_large_tx_size ) ;
netdev_dbg ( adapter - > netdev , " max_large_rx_size = %d \n " ,
buf - > max_large_rx_size ) ;
netdev_dbg ( adapter - > netdev , " ipv6_ext_hdr = %d \n " ,
buf - > ipv6_extension_header ) ;
netdev_dbg ( adapter - > netdev , " tcp_pseudosum_req = %d \n " ,
buf - > tcp_pseudosum_req ) ;
netdev_dbg ( adapter - > netdev , " num_ipv6_ext_hd = %d \n " ,
buf - > num_ipv6_ext_headers ) ;
netdev_dbg ( adapter - > netdev , " off_ipv6_ext_hd = %d \n " ,
buf - > off_ipv6_ext_headers ) ;
adapter - > ip_offload_ctrl_tok =
dma_map_single ( dev , & adapter - > ip_offload_ctrl ,
sizeof ( adapter - > ip_offload_ctrl ) , DMA_TO_DEVICE ) ;
if ( dma_mapping_error ( dev , adapter - > ip_offload_ctrl_tok ) ) {
dev_err ( dev , " Couldn't map ip offload control buffer \n " ) ;
return ;
}
2018-01-18 19:05:01 -06:00
adapter - > ip_offload_ctrl . len =
cpu_to_be32 ( sizeof ( adapter - > ip_offload_ctrl ) ) ;
2015-12-21 11:26:06 -06:00
adapter - > ip_offload_ctrl . version = cpu_to_be32 ( INITIAL_VERSION_IOB ) ;
2018-01-18 19:05:01 -06:00
adapter - > ip_offload_ctrl . ipv4_chksum = buf - > ipv4_chksum ;
adapter - > ip_offload_ctrl . ipv6_chksum = buf - > ipv6_chksum ;
2015-12-21 11:26:06 -06:00
adapter - > ip_offload_ctrl . tcp_ipv4_chksum = buf - > tcp_ipv4_chksum ;
adapter - > ip_offload_ctrl . udp_ipv4_chksum = buf - > udp_ipv4_chksum ;
adapter - > ip_offload_ctrl . tcp_ipv6_chksum = buf - > tcp_ipv6_chksum ;
adapter - > ip_offload_ctrl . udp_ipv6_chksum = buf - > udp_ipv6_chksum ;
2017-10-17 12:36:55 -05:00
adapter - > ip_offload_ctrl . large_tx_ipv4 = buf - > large_tx_ipv4 ;
adapter - > ip_offload_ctrl . large_tx_ipv6 = buf - > large_tx_ipv6 ;
2015-12-21 11:26:06 -06:00
2017-10-17 12:36:55 -05:00
/* large_rx disabled for now, additional features needed */
2015-12-21 11:26:06 -06:00
adapter - > ip_offload_ctrl . large_rx_ipv4 = 0 ;
adapter - > ip_offload_ctrl . large_rx_ipv6 = 0 ;
2019-04-10 11:07:00 -05:00
if ( adapter - > state ! = VNIC_PROBING ) {
old_hw_features = adapter - > netdev - > hw_features ;
adapter - > netdev - > hw_features = 0 ;
}
2019-04-10 11:06:59 -05:00
adapter - > netdev - > hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO ;
2015-12-21 11:26:06 -06:00
if ( buf - > tcp_ipv4_chksum | | buf - > udp_ipv4_chksum )
2019-04-10 11:07:00 -05:00
adapter - > netdev - > hw_features | = NETIF_F_IP_CSUM ;
2015-12-21 11:26:06 -06:00
if ( buf - > tcp_ipv6_chksum | | buf - > udp_ipv6_chksum )
2019-04-10 11:07:00 -05:00
adapter - > netdev - > hw_features | = NETIF_F_IPV6_CSUM ;
2015-12-21 11:26:06 -06:00
2016-04-01 17:20:35 -05:00
if ( ( adapter - > netdev - > features &
( NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM ) ) )
2019-04-10 11:07:00 -05:00
adapter - > netdev - > hw_features | = NETIF_F_RXCSUM ;
2016-04-01 17:20:35 -05:00
2017-10-17 12:36:55 -05:00
if ( buf - > large_tx_ipv4 )
2019-04-10 11:07:00 -05:00
adapter - > netdev - > hw_features | = NETIF_F_TSO ;
2017-10-17 12:36:55 -05:00
if ( buf - > large_tx_ipv6 )
2019-04-10 11:07:00 -05:00
adapter - > netdev - > hw_features | = NETIF_F_TSO6 ;
2017-10-17 12:36:55 -05:00
2019-04-10 11:07:00 -05:00
if ( adapter - > state = = VNIC_PROBING ) {
adapter - > netdev - > features | = adapter - > netdev - > hw_features ;
} else if ( old_hw_features ! = adapter - > netdev - > hw_features ) {
netdev_features_t tmp = 0 ;
/* disable features no longer supported */
adapter - > netdev - > features & = adapter - > netdev - > hw_features ;
/* turn on features now supported if previously enabled */
tmp = ( old_hw_features ^ adapter - > netdev - > hw_features ) &
adapter - > netdev - > hw_features ;
adapter - > netdev - > features | =
tmp & adapter - > netdev - > wanted_features ;
}
2017-10-17 12:36:56 -05:00
2015-12-21 11:26:06 -06:00
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . control_ip_offload . first = IBMVNIC_CRQ_CMD ;
crq . control_ip_offload . cmd = CONTROL_IP_OFFLOAD ;
crq . control_ip_offload . len =
cpu_to_be32 ( sizeof ( adapter - > ip_offload_ctrl ) ) ;
crq . control_ip_offload . ioba = cpu_to_be32 ( adapter - > ip_offload_ctrl_tok ) ;
ibmvnic_send_crq ( adapter , & crq ) ;
}
static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low Memory";
	default:
		return "unknown";
	}
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}
static void handle_request_cap_rsp ( union ibmvnic_crq * crq ,
struct ibmvnic_adapter * adapter )
{
struct device * dev = & adapter - > vdev - > dev ;
u64 * req_value ;
char * name ;
2017-02-15 12:17:59 -06:00
atomic_dec ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
switch ( be16_to_cpu ( crq - > request_capability_rsp . capability ) ) {
case REQ_TX_QUEUES :
req_value = & adapter - > req_tx_queues ;
name = " tx " ;
break ;
case REQ_RX_QUEUES :
req_value = & adapter - > req_rx_queues ;
name = " rx " ;
break ;
case REQ_RX_ADD_QUEUES :
req_value = & adapter - > req_rx_add_queues ;
name = " rx_add " ;
break ;
case REQ_TX_ENTRIES_PER_SUBCRQ :
req_value = & adapter - > req_tx_entries_per_subcrq ;
name = " tx_entries_per_subcrq " ;
break ;
case REQ_RX_ADD_ENTRIES_PER_SUBCRQ :
req_value = & adapter - > req_rx_add_entries_per_subcrq ;
name = " rx_add_entries_per_subcrq " ;
break ;
case REQ_MTU :
req_value = & adapter - > req_mtu ;
name = " mtu " ;
break ;
case PROMISC_REQUESTED :
req_value = & adapter - > promisc ;
name = " promisc " ;
break ;
default :
dev_err ( dev , " Got invalid cap request rsp %d \n " ,
crq - > request_capability . capability ) ;
return ;
}
switch ( crq - > request_capability_rsp . rc . code ) {
case SUCCESS :
break ;
case PARTIALSUCCESS :
dev_info ( dev , " req=%lld, rsp=%ld in %s queue, retrying. \n " ,
* req_value ,
2017-02-15 10:32:11 -06:00
( long int ) be64_to_cpu ( crq - > request_capability_rsp .
2015-12-21 11:26:06 -06:00
number ) , name ) ;
2018-01-18 16:27:12 -06:00
if ( be16_to_cpu ( crq - > request_capability_rsp . capability ) = =
REQ_MTU ) {
pr_err ( " mtu of %llu is not supported. Reverting. \n " ,
* req_value ) ;
* req_value = adapter - > fallback . mtu ;
} else {
* req_value =
be64_to_cpu ( crq - > request_capability_rsp . number ) ;
}
2017-04-25 15:01:04 -04:00
ibmvnic_send_req_caps ( adapter , 1 ) ;
2015-12-21 11:26:06 -06:00
return ;
default :
dev_err ( dev , " Error %d in request cap rsp \n " ,
crq - > request_capability_rsp . rc . code ) ;
return ;
}
/* Done receiving requested capabilities, query IP offload support */
2017-02-15 12:17:59 -06:00
if ( atomic_read ( & adapter - > running_cap_crqs ) = = 0 ) {
2015-12-21 11:26:06 -06:00
union ibmvnic_crq newcrq ;
int buf_sz = sizeof ( struct ibmvnic_query_ip_offload_buffer ) ;
struct ibmvnic_query_ip_offload_buffer * ip_offload_buf =
& adapter - > ip_offload_buf ;
2017-02-15 12:18:00 -06:00
adapter - > wait_capability = false ;
2015-12-21 11:26:06 -06:00
adapter - > ip_offload_tok = dma_map_single ( dev , ip_offload_buf ,
buf_sz ,
DMA_FROM_DEVICE ) ;
if ( dma_mapping_error ( dev , adapter - > ip_offload_tok ) ) {
if ( ! firmware_has_feature ( FW_FEATURE_CMO ) )
dev_err ( dev , " Couldn't map offload buffer \n " ) ;
return ;
}
memset ( & newcrq , 0 , sizeof ( newcrq ) ) ;
newcrq . query_ip_offload . first = IBMVNIC_CRQ_CMD ;
newcrq . query_ip_offload . cmd = QUERY_IP_OFFLOAD ;
newcrq . query_ip_offload . len = cpu_to_be32 ( buf_sz ) ;
newcrq . query_ip_offload . ioba =
cpu_to_be32 ( adapter - > ip_offload_tok ) ;
ibmvnic_send_crq ( adapter , & newcrq ) ;
}
}
static int handle_login_rsp ( union ibmvnic_crq * login_rsp_crq ,
struct ibmvnic_adapter * adapter )
{
struct device * dev = & adapter - > vdev - > dev ;
2017-10-26 16:23:25 -05:00
struct net_device * netdev = adapter - > netdev ;
2015-12-21 11:26:06 -06:00
struct ibmvnic_login_rsp_buffer * login_rsp = adapter - > login_rsp_buf ;
struct ibmvnic_login_buffer * login = adapter - > login_buf ;
int i ;
dma_unmap_single ( dev , adapter - > login_buf_token , adapter - > login_buf_sz ,
2018-04-06 18:37:02 -05:00
DMA_TO_DEVICE ) ;
2015-12-21 11:26:06 -06:00
dma_unmap_single ( dev , adapter - > login_rsp_buf_token ,
2018-04-06 18:37:02 -05:00
adapter - > login_rsp_buf_sz , DMA_FROM_DEVICE ) ;
2015-12-21 11:26:06 -06:00
2016-04-06 11:49:55 -05:00
/* If the number of queues requested can't be allocated by the
* server , the login response will return with code 1. We will need
* to resend the login buffer with fewer queues requested .
*/
if ( login_rsp_crq - > generic . rc . code ) {
2018-04-11 10:09:32 -05:00
adapter - > init_done_rc = login_rsp_crq - > generic . rc . code ;
2016-04-06 11:49:55 -05:00
complete ( & adapter - > init_done ) ;
return 0 ;
}
2017-10-26 16:23:25 -05:00
netdev - > mtu = adapter - > req_mtu - ETH_HLEN ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( adapter - > netdev , " Login Response Buffer: \n " ) ;
for ( i = 0 ; i < ( adapter - > login_rsp_buf_sz - 1 ) / 8 + 1 ; i + + ) {
netdev_dbg ( adapter - > netdev , " %016lx \n " ,
( ( unsigned long int * ) ( adapter - > login_rsp_buf ) ) [ i ] ) ;
}
/* Sanity checks */
if ( login - > num_txcomp_subcrqs ! = login_rsp - > num_txsubm_subcrqs | |
( be32_to_cpu ( login - > num_rxcomp_subcrqs ) *
adapter - > req_rx_add_queues ! =
be32_to_cpu ( login_rsp - > num_rxadd_subcrqs ) ) ) {
dev_err ( dev , " FATAL: Inconsistent login and login rsp \n " ) ;
ibmvnic_remove ( adapter - > vdev ) ;
return - EIO ;
}
2018-02-21 18:18:30 -06:00
release_login_buffer ( adapter ) ;
2015-12-21 11:26:06 -06:00
complete ( & adapter - > init_done ) ;
return 0 ;
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
static void handle_query_cap_rsp ( union ibmvnic_crq * crq ,
struct ibmvnic_adapter * adapter )
{
struct net_device * netdev = adapter - > netdev ;
struct device * dev = & adapter - > vdev - > dev ;
long rc ;
2017-02-15 12:17:59 -06:00
atomic_dec ( & adapter - > running_cap_crqs ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " Outstanding queries: %d \n " ,
2017-02-15 12:17:59 -06:00
atomic_read ( & adapter - > running_cap_crqs ) ) ;
2015-12-21 11:26:06 -06:00
rc = crq - > query_capability . rc . code ;
if ( rc ) {
dev_err ( dev , " Error %ld in QUERY_CAP_RSP \n " , rc ) ;
goto out ;
}
switch ( be16_to_cpu ( crq - > query_capability . capability ) ) {
case MIN_TX_QUEUES :
adapter - > min_tx_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " min_tx_queues = %lld \n " ,
adapter - > min_tx_queues ) ;
break ;
case MIN_RX_QUEUES :
adapter - > min_rx_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " min_rx_queues = %lld \n " ,
adapter - > min_rx_queues ) ;
break ;
case MIN_RX_ADD_QUEUES :
adapter - > min_rx_add_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " min_rx_add_queues = %lld \n " ,
adapter - > min_rx_add_queues ) ;
break ;
case MAX_TX_QUEUES :
adapter - > max_tx_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_tx_queues = %lld \n " ,
adapter - > max_tx_queues ) ;
break ;
case MAX_RX_QUEUES :
adapter - > max_rx_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_rx_queues = %lld \n " ,
adapter - > max_rx_queues ) ;
break ;
case MAX_RX_ADD_QUEUES :
adapter - > max_rx_add_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_rx_add_queues = %lld \n " ,
adapter - > max_rx_add_queues ) ;
break ;
case MIN_TX_ENTRIES_PER_SUBCRQ :
adapter - > min_tx_entries_per_subcrq =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " min_tx_entries_per_subcrq = %lld \n " ,
adapter - > min_tx_entries_per_subcrq ) ;
break ;
case MIN_RX_ADD_ENTRIES_PER_SUBCRQ :
adapter - > min_rx_add_entries_per_subcrq =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " min_rx_add_entrs_per_subcrq = %lld \n " ,
adapter - > min_rx_add_entries_per_subcrq ) ;
break ;
case MAX_TX_ENTRIES_PER_SUBCRQ :
adapter - > max_tx_entries_per_subcrq =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_tx_entries_per_subcrq = %lld \n " ,
adapter - > max_tx_entries_per_subcrq ) ;
break ;
case MAX_RX_ADD_ENTRIES_PER_SUBCRQ :
adapter - > max_rx_add_entries_per_subcrq =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_rx_add_entrs_per_subcrq = %lld \n " ,
adapter - > max_rx_add_entries_per_subcrq ) ;
break ;
case TCP_IP_OFFLOAD :
adapter - > tcp_ip_offload =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " tcp_ip_offload = %lld \n " ,
adapter - > tcp_ip_offload ) ;
break ;
case PROMISC_SUPPORTED :
adapter - > promisc_supported =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " promisc_supported = %lld \n " ,
adapter - > promisc_supported ) ;
break ;
case MIN_MTU :
2016-03-01 10:20:09 -06:00
adapter - > min_mtu = be64_to_cpu ( crq - > query_capability . number ) ;
2017-02-14 10:22:59 -06:00
netdev - > min_mtu = adapter - > min_mtu - ETH_HLEN ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " min_mtu = %lld \n " , adapter - > min_mtu ) ;
break ;
case MAX_MTU :
2016-03-01 10:20:09 -06:00
adapter - > max_mtu = be64_to_cpu ( crq - > query_capability . number ) ;
2017-02-14 10:22:59 -06:00
netdev - > max_mtu = adapter - > max_mtu - ETH_HLEN ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_mtu = %lld \n " , adapter - > max_mtu ) ;
break ;
case MAX_MULTICAST_FILTERS :
adapter - > max_multicast_filters =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_multicast_filters = %lld \n " ,
adapter - > max_multicast_filters ) ;
break ;
case VLAN_HEADER_INSERTION :
adapter - > vlan_header_insertion =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
if ( adapter - > vlan_header_insertion )
netdev - > features | = NETIF_F_HW_VLAN_STAG_TX ;
netdev_dbg ( netdev , " vlan_header_insertion = %lld \n " ,
adapter - > vlan_header_insertion ) ;
break ;
2017-04-21 15:38:46 -04:00
case RX_VLAN_HEADER_INSERTION :
adapter - > rx_vlan_header_insertion =
be64_to_cpu ( crq - > query_capability . number ) ;
netdev_dbg ( netdev , " rx_vlan_header_insertion = %lld \n " ,
adapter - > rx_vlan_header_insertion ) ;
break ;
2015-12-21 11:26:06 -06:00
case MAX_TX_SG_ENTRIES :
adapter - > max_tx_sg_entries =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " max_tx_sg_entries = %lld \n " ,
adapter - > max_tx_sg_entries ) ;
break ;
case RX_SG_SUPPORTED :
adapter - > rx_sg_supported =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " rx_sg_supported = %lld \n " ,
adapter - > rx_sg_supported ) ;
break ;
case OPT_TX_COMP_SUB_QUEUES :
adapter - > opt_tx_comp_sub_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " opt_tx_comp_sub_queues = %lld \n " ,
adapter - > opt_tx_comp_sub_queues ) ;
break ;
case OPT_RX_COMP_QUEUES :
adapter - > opt_rx_comp_queues =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " opt_rx_comp_queues = %lld \n " ,
adapter - > opt_rx_comp_queues ) ;
break ;
case OPT_RX_BUFADD_Q_PER_RX_COMP_Q :
adapter - > opt_rx_bufadd_q_per_rx_comp_q =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " opt_rx_bufadd_q_per_rx_comp_q = %lld \n " ,
adapter - > opt_rx_bufadd_q_per_rx_comp_q ) ;
break ;
case OPT_TX_ENTRIES_PER_SUBCRQ :
adapter - > opt_tx_entries_per_subcrq =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " opt_tx_entries_per_subcrq = %lld \n " ,
adapter - > opt_tx_entries_per_subcrq ) ;
break ;
case OPT_RXBA_ENTRIES_PER_SUBCRQ :
adapter - > opt_rxba_entries_per_subcrq =
2016-03-01 10:20:09 -06:00
be64_to_cpu ( crq - > query_capability . number ) ;
2015-12-21 11:26:06 -06:00
netdev_dbg ( netdev , " opt_rxba_entries_per_subcrq = %lld \n " ,
adapter - > opt_rxba_entries_per_subcrq ) ;
break ;
case TX_RX_DESC_REQ :
adapter - > tx_rx_desc_req = crq - > query_capability . number ;
netdev_dbg ( netdev , " tx_rx_desc_req = %llx \n " ,
adapter - > tx_rx_desc_req ) ;
break ;
default :
netdev_err ( netdev , " Got invalid cap rsp %d \n " ,
crq - > query_capability . capability ) ;
}
out :
2017-02-15 12:18:00 -06:00
if ( atomic_read ( & adapter - > running_cap_crqs ) = = 0 ) {
adapter - > wait_capability = false ;
2017-04-25 15:01:04 -04:00
ibmvnic_send_req_caps ( adapter , 0 ) ;
2017-02-15 12:18:00 -06:00
}
2015-12-21 11:26:06 -06:00
}
2019-03-19 10:28:51 -03:00
static int send_query_phys_parms ( struct ibmvnic_adapter * adapter )
{
union ibmvnic_crq crq ;
int rc ;
memset ( & crq , 0 , sizeof ( crq ) ) ;
crq . query_phys_parms . first = IBMVNIC_CRQ_CMD ;
crq . query_phys_parms . cmd = QUERY_PHYS_PARMS ;
init_completion ( & adapter - > fw_done ) ;
rc = ibmvnic_send_crq ( adapter , & crq ) ;
if ( rc )
return rc ;
wait_for_completion ( & adapter - > fw_done ) ;
return adapter - > fw_done_rc ? - EIO : 0 ;
}
static int handle_query_phys_parms_rsp ( union ibmvnic_crq * crq ,
struct ibmvnic_adapter * adapter )
{
struct net_device * netdev = adapter - > netdev ;
int rc ;
2019-09-16 11:50:37 -03:00
__be32 rspeed = cpu_to_be32 ( crq - > query_phys_parms_rsp . speed ) ;
2019-03-19 10:28:51 -03:00
rc = crq - > query_phys_parms_rsp . rc . code ;
if ( rc ) {
netdev_err ( netdev , " Error %d in QUERY_PHYS_PARMS \n " , rc ) ;
return rc ;
}
2019-09-16 11:50:37 -03:00
switch ( rspeed ) {
2019-03-19 10:28:51 -03:00
case IBMVNIC_10MBPS :
adapter - > speed = SPEED_10 ;
break ;
case IBMVNIC_100MBPS :
adapter - > speed = SPEED_100 ;
break ;
case IBMVNIC_1GBPS :
adapter - > speed = SPEED_1000 ;
break ;
case IBMVNIC_10GBP :
adapter - > speed = SPEED_10000 ;
break ;
case IBMVNIC_25GBPS :
adapter - > speed = SPEED_25000 ;
break ;
case IBMVNIC_40GBPS :
adapter - > speed = SPEED_40000 ;
break ;
case IBMVNIC_50GBPS :
adapter - > speed = SPEED_50000 ;
break ;
case IBMVNIC_100GBPS :
adapter - > speed = SPEED_100000 ;
break ;
default :
2019-09-16 11:50:37 -03:00
if ( netif_carrier_ok ( netdev ) )
netdev_warn ( netdev , " Unknown speed 0x%08x \n " , rspeed ) ;
2019-03-19 10:28:51 -03:00
adapter - > speed = SPEED_UNKNOWN ;
}
if ( crq - > query_phys_parms_rsp . flags1 & IBMVNIC_FULL_DUPLEX )
adapter - > duplex = DUPLEX_FULL ;
else if ( crq - > query_phys_parms_rsp . flags1 & IBMVNIC_HALF_DUPLEX )
adapter - > duplex = DUPLEX_HALF ;
else
adapter - > duplex = DUPLEX_UNKNOWN ;
return rc ;
}
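/* Top-level CRQ dispatcher.  Transport events (partner INIT and
 * INIT_COMPLETE, migration, failover, link loss) are handled first;
 * everything else is a command response that is routed to the handler
 * for its command code.
 */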
2015-12-21 11:26:06 -06:00
static void ibmvnic_handle_crq ( union ibmvnic_crq * crq ,
struct ibmvnic_adapter * adapter )
{
struct ibmvnic_generic_crq * gen_crq = & crq - > generic ;
struct net_device * netdev = adapter - > netdev ;
struct device * dev = & adapter - > vdev - > dev ;
2017-04-19 13:44:35 -04:00
u64 * u64_crq = ( u64 * ) crq ;
2015-12-21 11:26:06 -06:00
long rc ;
netdev_dbg ( netdev , " Handling CRQ: %016lx %016lx \n " ,
2017-04-19 13:44:35 -04:00
( unsigned long int ) cpu_to_be64 ( u64_crq [ 0 ] ) ,
( unsigned long int ) cpu_to_be64 ( u64_crq [ 1 ] ) ) ;
2015-12-21 11:26:06 -06:00
switch ( gen_crq - > first ) {
case IBMVNIC_CRQ_INIT_RSP :
switch ( gen_crq - > cmd ) {
case IBMVNIC_CRQ_INIT :
dev_info ( dev , " Partner initialized \n " ) ;
2017-05-26 10:30:19 -04:00
adapter - > from_passive_init = true ;
2018-04-06 18:37:05 -05:00
adapter - > failover_pending = false ;
2018-05-23 13:37:58 -05:00
if ( ! completion_done ( & adapter - > init_done ) ) {
complete ( & adapter - > init_done ) ;
adapter - > init_done_rc = - EIO ;
}
2018-04-06 18:37:05 -05:00
ibmvnic_reset ( adapter , VNIC_RESET_FAILOVER ) ;
2015-12-21 11:26:06 -06:00
break ;
case IBMVNIC_CRQ_INIT_COMPLETE :
dev_info ( dev , " Partner initialization complete \n " ) ;
2018-05-23 13:37:56 -05:00
adapter - > crq . active = true ;
2015-12-21 11:26:06 -06:00
send_version_xchg ( adapter ) ;
break ;
default :
dev_err ( dev , " Unknown crq cmd: %d \n " , gen_crq - > cmd ) ;
}
return ;
case IBMVNIC_CRQ_XPORT_EVENT :
2017-05-03 14:04:38 -04:00
netif_carrier_off ( netdev ) ;
2018-05-23 13:37:56 -05:00
adapter - > crq . active = false ;
2019-09-20 16:11:23 -04:00
if ( test_bit ( 0 , & adapter - > resetting ) )
2018-05-23 13:38:02 -05:00
adapter - > force_reset_recovery = true ;
2015-12-21 11:26:06 -06:00
if ( gen_crq - > cmd = = IBMVNIC_PARTITION_MIGRATED ) {
2017-05-03 14:04:38 -04:00
dev_info ( dev , " Migrated, re-enabling adapter \n " ) ;
ibmvnic_reset ( adapter , VNIC_RESET_MOBILITY ) ;
2016-08-18 11:37:51 -05:00
} else if ( gen_crq - > cmd = = IBMVNIC_DEVICE_FAILOVER ) {
dev_info ( dev , " Backing device failover detected \n " ) ;
2018-04-06 18:37:05 -05:00
adapter - > failover_pending = true ;
2015-12-21 11:26:06 -06:00
} else {
/* The adapter lost the connection */
dev_err ( dev , " Virtual Adapter failed (rc=%d) \n " ,
gen_crq - > cmd ) ;
2017-05-03 14:04:38 -04:00
ibmvnic_reset ( adapter , VNIC_RESET_FATAL ) ;
2015-12-21 11:26:06 -06:00
}
return ;
case IBMVNIC_CRQ_CMD_RSP :
break ;
default :
dev_err ( dev , " Got an invalid msg type 0x%02x \n " ,
gen_crq - > first ) ;
return ;
}
switch ( gen_crq - > cmd ) {
case VERSION_EXCHANGE_RSP :
rc = crq - > version_exchange_rsp . rc . code ;
if ( rc ) {
dev_err ( dev , " Error %ld in VERSION_EXCHG_RSP \n " , rc ) ;
break ;
}
dev_info ( dev , " Partner protocol version is %d \n " ,
crq - > version_exchange_rsp . version ) ;
if ( be16_to_cpu ( crq - > version_exchange_rsp . version ) <
ibmvnic_version )
ibmvnic_version =
be16_to_cpu ( crq - > version_exchange_rsp . version ) ;
send_cap_queries ( adapter ) ;
break ;
case QUERY_CAPABILITY_RSP :
handle_query_cap_rsp ( crq , adapter ) ;
break ;
case QUERY_MAP_RSP :
handle_query_map_rsp ( crq , adapter ) ;
break ;
case REQUEST_MAP_RSP :
2017-06-21 14:53:01 -05:00
adapter - > fw_done_rc = crq - > request_map_rsp . rc . code ;
complete ( & adapter - > fw_done ) ;
2015-12-21 11:26:06 -06:00
break ;
case REQUEST_UNMAP_RSP :
handle_request_unmap_rsp ( crq , adapter ) ;
break ;
case REQUEST_CAPABILITY_RSP :
handle_request_cap_rsp ( crq , adapter ) ;
break ;
case LOGIN_RSP :
netdev_dbg ( netdev , " Got Login Response \n " ) ;
handle_login_rsp ( crq , adapter ) ;
break ;
case LOGICAL_LINK_STATE_RSP :
2017-04-21 15:39:04 -04:00
netdev_dbg ( netdev ,
" Got Logical Link State Response, state: %d rc: %d \n " ,
crq - > logical_link_state_rsp . link_state ,
crq - > logical_link_state_rsp . rc . code ) ;
2015-12-21 11:26:06 -06:00
adapter - > logical_link_state =
crq - > logical_link_state_rsp . link_state ;
2017-04-21 15:39:04 -04:00
adapter - > init_done_rc = crq - > logical_link_state_rsp . rc . code ;
complete ( & adapter - > init_done ) ;
2015-12-21 11:26:06 -06:00
break ;
case LINK_STATE_INDICATION :
netdev_dbg ( netdev , " Got Logical Link State Indication \n " ) ;
adapter - > phys_link_state =
crq - > link_state_indication . phys_link_state ;
adapter - > logical_link_state =
crq - > link_state_indication . logical_link_state ;
2019-05-09 23:13:44 -05:00
if ( adapter - > phys_link_state & & adapter - > logical_link_state )
netif_carrier_on ( netdev ) ;
else
netif_carrier_off ( netdev ) ;
2015-12-21 11:26:06 -06:00
break ;
case CHANGE_MAC_ADDR_RSP :
netdev_dbg ( netdev , " Got MAC address change Response \n " ) ;
2018-01-29 13:45:05 -06:00
adapter - > fw_done_rc = handle_change_mac_rsp ( crq , adapter ) ;
2015-12-21 11:26:06 -06:00
break ;
case ERROR_INDICATION :
netdev_dbg ( netdev , " Got Error Indication \n " ) ;
handle_error_indication ( crq , adapter ) ;
break ;
case REQUEST_STATISTICS_RSP :
netdev_dbg ( netdev , " Got Statistics Response \n " ) ;
complete ( & adapter - > stats_done ) ;
break ;
case QUERY_IP_OFFLOAD_RSP :
netdev_dbg ( netdev , " Got Query IP offload Response \n " ) ;
handle_query_ip_offload_rsp ( adapter ) ;
break ;
case MULTICAST_CTRL_RSP :
netdev_dbg ( netdev , " Got multicast control Response \n " ) ;
break ;
case CONTROL_IP_OFFLOAD_RSP :
netdev_dbg ( netdev , " Got Control IP offload Response \n " ) ;
dma_unmap_single ( dev , adapter - > ip_offload_ctrl_tok ,
sizeof ( adapter - > ip_offload_ctrl ) ,
DMA_TO_DEVICE ) ;
2017-03-17 17:13:40 -05:00
complete ( & adapter - > init_done ) ;
2015-12-21 11:26:06 -06:00
break ;
case COLLECT_FW_TRACE_RSP :
netdev_dbg ( netdev , " Got Collect firmware trace Response \n " ) ;
complete ( & adapter - > fw_done ) ;
break ;
2017-11-13 15:59:19 -02:00
case GET_VPD_SIZE_RSP :
handle_vpd_size_rsp ( crq , adapter ) ;
break ;
case GET_VPD_RSP :
handle_vpd_rsp ( crq , adapter ) ;
break ;
2019-03-19 10:28:51 -03:00
case QUERY_PHYS_PARMS_RSP :
adapter - > fw_done_rc = handle_query_phys_parms_rsp ( crq , adapter ) ;
complete ( & adapter - > fw_done ) ;
break ;
2015-12-21 11:26:06 -06:00
default :
netdev_err ( netdev , " Got an invalid cmd type 0x%02x \n " ,
gen_crq - > cmd ) ;
}
}
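/* The main CRQ interrupt only schedules the tasklet.  The tasklet drains
 * every valid CRQ message under the queue lock and keeps looping while
 * capability responses are still outstanding.
 */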
static irqreturn_t ibmvnic_interrupt ( int irq , void * instance )
{
struct ibmvnic_adapter * adapter = instance ;
2017-02-15 12:17:58 -06:00
tasklet_schedule ( & adapter - > tasklet ) ;
return IRQ_HANDLED ;
}
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
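
/* Re-enable the CRQ with H_ENABLE_CRQ, retrying while the hypervisor
 * reports the operation as busy or still in progress.
 */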
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
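
/* Close the CRQ, clear the message page, and register it with the
 * hypervisor again so the queue restarts from a clean state.
 */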
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
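
/* Tear down the main CRQ: release the IRQ and tasklet, free the queue with
 * the hypervisor, and unmap and free the long-term mapped message page.
 */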
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
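
/* Allocate and DMA-map one page of CRQ messages, register the queue with
 * the hypervisor, and wire up the tasklet and interrupt handler.
 */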
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
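
/* Reinitialize an adapter during a reset: reinitialize the init_done
 * completion, redo the CRQ init handshake with the server, and rebuild or
 * reset the sub-CRQs depending on whether the requested queue counts changed.
 */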
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	reinit_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
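
/* First-time initialization at probe: perform the CRQ init handshake with
 * the VNIC server, then allocate the sub-CRQs and their interrupts.
 */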
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	adapter->from_passive_init = false;

	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static struct device_attribute dev_attr_failover;
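
/* Probe a vio device: allocate the netdev, set up reset work and locks,
 * run the CRQ/login handshake, and register the net device.
 */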
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	init_completion(&adapter->init_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}
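
/* Remove callback: unregister the netdev under rtnl and release all
 * long-term mapped resources, CRQs, and statistics buffers.
 */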
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
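
/* Writing "1" to the "failover" sysfs attribute retrieves the current
 * session token from the hypervisor and signals a session error to
 * initiate a client failover.
 */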
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
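
/* Report the IOMMU entitlement this device expects: the CRQ page, the
 * statistics buffer, the sub-CRQ message queues, and the long-term mapped
 * rx pool buffers.
 */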
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
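
/* PM resume hook: if the interface was open, kick the tasklet so any CRQ
 * messages that arrived while suspended are processed.
 */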
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);