/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);
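
/* True if the underlying qed device advertised RDMA support at probe time */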
bool qede_rdma_supported(struct qede_dev *dev)
{
	return dev->dev_info.common.rdma_supported;
}
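
/* Bind this device to the registered qedr driver, if one is loaded.
 * Caller must hold qedr_dev_list_lock.
 */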
static void _qede_rdma_dev_add(struct qede_dev *edev)
{
	if (!qedr_drv)
		return;

	/* Leftovers from previous error recovery */
	edev->rdma_info.exp_recovery = false;

	edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
						 edev->ndev);
}
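
/* Set up the event list, refcount and the single-threaded workqueue used
 * to deliver rdma events to qedr from process context.
 */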
static int qede_rdma_create_wq(struct qede_dev *edev)
{
	INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
	kref_init(&edev->rdma_info.refcnt);
	init_completion(&edev->rdma_info.event_comp);

	edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
	if (!edev->rdma_info.rdma_wq) {
		DP_NOTICE(edev, "qedr: Could not create workqueue\n");
		return -ENOMEM;
	}

	return 0;
}
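
/* Drain the workqueue and free every event node still on the list */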
static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
	struct list_head *head = &edev->rdma_info.rdma_event_list;
	struct qede_rdma_event_work *event_node;

	flush_workqueue(edev->rdma_info.rdma_wq);

	while (!list_empty(head)) {
		event_node = list_entry(head->next, struct qede_rdma_event_work,
					list);
		cancel_work_sync(&event_node->work);
		list_del(&event_node->list);
		kfree(event_node);
	}
}
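
/* kref release callback: the last add_event user is gone, so the destroy
 * flow may proceed.
 */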
static void qede_rdma_complete_event(struct kref *ref)
{
	struct qede_rdma_dev *rdma_dev =
		container_of(ref, struct qede_rdma_dev, refcnt);

	/* no more events will be added after this */
	complete(&rdma_dev->event_comp);
}
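
/* Tear down the event workqueue once in-flight add_event calls finish */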
static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
	/* Avoid race with add_event flow, make sure it finishes before
	 * we start accessing the list and cleaning up the work
	 */
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
	wait_for_completion(&edev->rdma_info.event_comp);

	qede_rdma_cleanup_event(edev);
	destroy_workqueue(edev->rdma_info.rdma_wq);
}
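
/* Called when a qede device is probed: track it and expose it to qedr */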
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
	int rc;

	if (!qede_rdma_supported(edev))
		return 0;

	/* Cannot start qedr while recovering since it wasn't fully stopped */
	if (recovery)
		return 0;

	rc = qede_rdma_create_wq(edev);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&edev->rdma_info.entry);
	mutex_lock(&qedr_dev_list_lock);
	list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
	_qede_rdma_dev_add(edev);
	mutex_unlock(&qedr_dev_list_lock);

	return rc;
}
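
/* Unbind this device from qedr, if it was ever bound. Caller must hold
 * qedr_dev_list_lock.
 */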
static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
	if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
		qedr_drv->remove(edev->rdma_info.qedr_dev);
}
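
/* Called on qede device removal and on error recovery. In the recovery
 * case qedr is only detached; the workqueue and list entry are kept so the
 * device can be re-added once recovery completes.
 */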
void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
	if (!qede_rdma_supported(edev))
		return;

	/* Cannot remove qedr while recovering since it wasn't fully stopped */
	if (!recovery) {
		qede_rdma_destroy_wq(edev);
		mutex_lock(&qedr_dev_list_lock);
		if (!edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
		edev->rdma_info.qedr_dev = NULL;
		list_del(&edev->rdma_info.entry);
		mutex_unlock(&qedr_dev_list_lock);
	} else {
		if (!edev->rdma_info.exp_recovery) {
			mutex_lock(&qedr_dev_list_lock);
			_qede_rdma_dev_remove(edev);
			mutex_unlock(&qedr_dev_list_lock);
		}
		edev->rdma_info.exp_recovery = true;
	}
}
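
/* Notify qedr that the interface is up. Caller must hold
 * qedr_dev_list_lock; qede_rdma_dev_open() below is the locked wrapper.
 */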
static void _qede_rdma_dev_open(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_open(edev);
	mutex_unlock(&qedr_dev_list_lock);
}
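
/* Notify qedr that the interface is down. Caller must hold
 * qedr_dev_list_lock.
 */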
static void _qede_rdma_dev_close(struct qede_dev *edev)
{
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	_qede_rdma_dev_close(edev);
	mutex_unlock(&qedr_dev_list_lock);
}
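
/* Send the QEDE_CLOSE notification to qedr under the device-list lock */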
static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
	mutex_unlock(&qedr_dev_list_lock);
}
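
/* Called by qedr when it loads: attach every tracked qede device and
 * report link-up for interfaces that are already running.
 */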
int qede_rdma_register_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;
	u8 qedr_counter = 0;

	mutex_lock(&qedr_dev_list_lock);
	if (qedr_drv) {
		mutex_unlock(&qedr_dev_list_lock);
		return -EINVAL;
	}
	qedr_drv = drv;

	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		struct net_device *ndev;

		qedr_counter++;
		_qede_rdma_dev_add(edev);
		ndev = edev->ndev;
		if (netif_running(ndev) && netif_oper_up(ndev))
			_qede_rdma_dev_open(edev);
	}
	mutex_unlock(&qedr_dev_list_lock);

	pr_notice("qedr: discovered and registered %d RDMA funcs\n",
		  qedr_counter);

	return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);
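
/* Called by qedr when it unloads: detach every device still bound to it */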
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
	struct qede_dev *edev;

	mutex_lock(&qedr_dev_list_lock);
	list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
		/* If device has experienced recovery it was already removed */
		if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
			_qede_rdma_dev_remove(edev);
	}
	qedr_drv = NULL;
	mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);
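
/* Propagate a MAC address change to qedr */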
static void qede_rdma_changeaddr(struct qede_dev *edev)
{
	if (!qede_rdma_supported(edev))
		return;

	if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}
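
/* Reuse an idle event node from the list, or allocate a new one.
 * GFP_ATOMIC is used since callers may not be able to sleep.
 */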
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
	struct qede_rdma_event_work *event_node = NULL;
	struct list_head *list_node = NULL;
	bool found = false;

	list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
		event_node = list_entry(list_node, struct qede_rdma_event_work,
					list);
		if (!work_pending(&event_node->work)) {
			found = true;
			break;
		}
	}

	if (!found) {
		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
		if (!event_node) {
			DP_NOTICE(edev,
				  "qedr: Could not allocate memory for rdma work\n");
			return NULL;
		}
		list_add_tail(&event_node->list,
			      &edev->rdma_info.rdma_event_list);
	}

	return event_node;
}
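
/* Work handler: dispatch a deferred event to the matching notifier */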
static void qede_rdma_handle_event(struct work_struct *work)
{
	struct qede_rdma_event_work *event_node;
	enum qede_rdma_event event;
	struct qede_dev *edev;

	event_node = container_of(work, struct qede_rdma_event_work, work);
	event = event_node->event;
	edev = event_node->ptr;

	switch (event) {
	case QEDE_UP:
		qede_rdma_dev_open(edev);
		break;
	case QEDE_DOWN:
		qede_rdma_dev_close(edev);
		break;
	case QEDE_CLOSE:
		qede_rdma_dev_shutdown(edev);
		break;
	case QEDE_CHANGE_ADDR:
		qede_rdma_changeaddr(edev);
		break;
	default:
		DP_NOTICE(edev, "Invalid rdma event %d", event);
	}
}
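
/* Queue an event towards qedr. The kref get/put pair keeps the destroy
 * flow from racing with the allocation and queuing of the work item.
 */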
static void qede_rdma_add_event(struct qede_dev *edev,
				enum qede_rdma_event event)
{
	struct qede_rdma_event_work *event_node;

	/* If a recovery was experienced avoid adding the event */
	if (edev->rdma_info.exp_recovery)
		return;

	if (!edev->rdma_info.qedr_dev)
		return;

	/* We don't want the cleanup flow to start while we're allocating and
	 * scheduling the work
	 */
	if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
		return; /* already being destroyed */

	event_node = qede_rdma_get_free_event_node(edev);
	if (!event_node)
		goto out;

	event_node->event = event;
	event_node->ptr = edev;

	INIT_WORK(&event_node->work, qede_rdma_handle_event);
	queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
	kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}
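
/* Public entry points called from the qede core on netdev state changes */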
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
	qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}