/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"July 1, 2013"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	if (strncmp(val, "off", 3) != 0) {
		res = kstrtoint(val, 0, &tmo);
		if (res)
			goto out;
	} else {
		tmo = -1;
	}
	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
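
/*
 * Illustrative sketch (assuming the srp_tmo_valid() semantics provided by
 * the SRP transport class): the three timeouts are only accepted as a
 * consistent set, e.g.:
 *
 *	srp_tmo_valid(10, 15, 600)	- the defaults: accepted
 *	srp_tmo_valid(10, -1, 600)	- fast_io_fail "off": accepted
 *	srp_tmo_valid(10, 700, 600)	- fast_io_fail >= dev_loss: rejected
 */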

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
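
/*
 * Example (illustrative): an IOC GUID such as 0x0005ad0000001234 carries
 * the Topspin OUI 00:05:ad in its top three bytes, so the memcmp() against
 * the start of the big-endian ioc_guid above matches and the workarounds
 * are applied.
 */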

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_recv_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
			       srp_send_completion, NULL, target,
			       target->queue_size, target->comp_vector);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (target->qp)
		ib_destroy_qp(target->qp);
	if (target->recv_cq)
		ib_destroy_cq(target->recv_cq);
	if (target->send_cq)
		ib_destroy_cq(target->send_cq);

	target->qp = qp;
	target->recv_cq = recv_cq;
	target->send_cq = send_cq;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
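
/*
 * Note: the new CQs and QP are created above before the old ones are
 * destroyed, so a failed allocation during reconnect leaves the previous
 * target->qp and target->*_cq resources in place.
 */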

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the target->[rt]x_ring checks.
 */
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	target->qp = NULL;
	target->send_cq = target->recv_cq = NULL;

	if (target->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->rx_ring[i]);
		kfree(target->rx_ring);
		target->rx_ring = NULL;
	}
	if (target->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, target->tx_ring[i]);
		kfree(target->tx_ring);
		target->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	int ret;

	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID |
						   IB_SA_PATH_REC_DGID	     |
						   IB_SA_PATH_REC_SGID	     |
						   IB_SA_PATH_REC_NUMB_PATH  |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	ret = wait_for_completion_interruptible(&target->done);
	if (ret < 0)
		return ret;

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &target->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = target->qp->qp_num;
	req->param.qp_type		      = target->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
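
/*
 * Illustrative layout of the 16-byte port identifiers built above for the
 * rev. 16a I/O class (targets reporting SRP_REV10_IB_IO_CLASS expect the
 * two halves swapped):
 *
 *	initiator_port_id = initiator_ext (8 bytes) + SGID interface_id (8 bytes)
 *	target_port_id    = id_ext (8 bytes)        + ioc_guid (8 bytes)
 */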

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(system_long_wq, &target->remove_work);

	return changed;
}

static bool srp_change_conn_state(struct srp_target_port *target,
				  bool connected)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->connected != connected) {
		target->connected = connected;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	if (srp_change_conn_state(target, false)) {
		/* XXX should send SRP_I_LOGOUT request */

		if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct srp_request *req;
	int i;

	if (!target->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(target->req_ring);
	target->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_target_port *target)
{
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	INIT_LIST_HEAD(&target->free_reqs);

	target->req_ring = kzalloc(target->req_ring_size *
				   sizeof(*target->req_ring), GFP_KERNEL);
	if (!target->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &target->req_ring[i];
		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
					GFP_KERNEL);
		req->map_page = kmalloc(SRP_FMR_SIZE * sizeof(void *),
					GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
		req->index = i;
		list_add_tail(&req->list, &target->free_reqs);
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	srp_free_req_data(target);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	WARN_ON_ONCE(target->connected);

	target->qp_in_error = false;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&target->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			srp_change_conn_state(target, true);
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct ib_device *ibdev = target->srp_host->srp_dev->dev;
	struct ib_pool_fmr **pfmr;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	pfmr = req->fmr_list;
	while (req->nfmr--)
		ib_fmr_pool_unmap(*pfmr++);

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @target: SRP target port.
 * @req: SRP request.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
				       struct srp_request *req,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	if (!scmnd) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else if (req->scmnd == scmnd) {
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&target->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 */
static void srp_free_req(struct srp_target_port *target,
			 struct srp_request *req, struct scsi_cmnd *scmnd,
			 s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, target, req);

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_finish_req(struct srp_target_port *target,
			   struct srp_request *req, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

	if (scmnd) {
		srp_free_req(target, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i;

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16);
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	int i, ret;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	ret = srp_new_cm_id(target);
	/*
	 * Whether or not creating a new CM ID succeeded, create a new
	 * QP. This guarantees that all completion callback function
	 * invocations have finished before request resetting starts.
	 */
	if (ret == 0)
		ret = srp_create_target_ib(target);
	else
		srp_create_target_ib(target);

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		srp_finish_req(target, req, DID_RESET << 16);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < target->queue_size; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	if (ret == 0)
		ret = srp_connect_target(target);

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (!state->npages)
		return 0;

	if (state->npages == 1) {
		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
			     target->rkey);
		state->npages = state->fmr_len = 0;
		return 0;
	}

	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->next_fmr++ = fmr;
	state->nfmr++;

	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
	state->npages = state->fmr_len = 0;
	return 0;
}

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)
{
	state->unmapped_sg = sg;
	state->unmapped_index = sg_index;
	state->unmapped_addr = dma_addr;
}

static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_target_port *target,
			    struct scatterlist *sg, int sg_index,
			    int use_fmr)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len;
	int ret;

	if (!dma_len)
		return 0;

	if (use_fmr == SRP_MAP_NO_FMR) {
		/* Once we're in direct map mode for a request, we don't
		 * go back to FMR mode, so no need to update anything
		 * other than the descriptor.
		 */
		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		return 0;
	}

	/* If we start at an offset into the FMR page, don't merge into
	 * the current FMR. Finish it out, and use the kernel's MR for this
	 * sg entry. This is to avoid potential bugs on some SRP targets
	 * that were never quite defined, but went away when the initiator
	 * avoided using FMR on such page fragments.
	 */
	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
		ret = srp_map_finish_fmr(state, target);
		if (ret)
			return ret;

		srp_map_desc(state, dma_addr, dma_len, target->rkey);
		srp_map_update_start(state, NULL, 0, 0);
		return 0;
	}

	/* If this is the first sg to go into the FMR, save our position.
	 * We need to know the first unmapped entry, its index, and the
	 * first unmapped address within that entry to be able to restart
	 * mapping after an error.
	 */
	if (!state->unmapped_sg)
		srp_map_update_start(state, sg, sg_index, dma_addr);

	while (dma_len) {
		if (state->npages == SRP_FMR_SIZE) {
			ret = srp_map_finish_fmr(state, target);
			if (ret)
				return ret;

			srp_map_update_start(state, sg, sg_index, dma_addr);
		}

		len = min_t(unsigned int, dma_len, dev->fmr_page_size);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr;
		state->fmr_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/* If the last entry of the FMR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->fmr_page_size) {
		ret = srp_map_finish_fmr(state, target);
		if (!ret)
			srp_map_update_start(state, NULL, 0, 0);
	}
	return ret;
}
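
/*
 * Worked example (illustrative): with a 4 KB fmr_page_size, a page-aligned
 * 12 KB S/G entry contributes three addresses to state->pages[] and remains
 * mergeable with its neighbours, while an entry starting at offset 0x200
 * into a page takes the direct-descriptor branch above, closing out any
 * FMR under construction first.
 */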

static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, use_fmr;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 table_len;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nfmr = 0;
		goto map_complete;
	}

	/* We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries with FMR as we
	 * can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc	= req->indirect_desc;
	state.pages	= req->map_page;
	state.next_fmr	= req->fmr_list;

	use_fmr = dev->fmr_pool ? SRP_MAP_ALLOW_FMR : SRP_MAP_NO_FMR;

	for_each_sg(scat, sg, count, i) {
		if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {
			/* FMR mapping failed, so backtrack to the first
			 * unmapped entry and continue on without using FMR.
			 */
			dma_addr_t dma_addr;
			unsigned int dma_len;

backtrack:
			sg = state.unmapped_sg;
			i = state.unmapped_index;

			dma_addr = ib_sg_dma_address(ibdev, sg);
			dma_len = ib_sg_dma_len(ibdev, sg);
			dma_len -= (state.unmapped_addr - dma_addr);
			dma_addr = state.unmapped_addr;
			use_fmr = SRP_MAP_NO_FMR;
			srp_map_desc(&state, dma_addr, dma_len, target->rkey);
		}
	}

	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(&state, target))
		goto backtrack;

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	req->nfmr = state.nfmr;
	if (state.ndesc == 1) {
		/* FMR mapping was able to collapse this to one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = srp_claim_req(target, req, NULL);
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_free_req(target, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(enum ib_wc_status wc_status, bool send_err,
			      struct srp_target_port *target)
{
	if (target->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %d\n",
			     send_err ? "send" : "receive",
			     wc_status);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(target, &wc);
		} else {
			srp_handle_qp_err(wc.status, false, target);
		}
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &target->free_tx);
		} else {
			srp_handle_qp_err(wc.status, true, target);
		}
	}
}
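
/*
 * Note: only the receive CQ is (re)armed with ib_req_notify_cq(); the send
 * CQ is never armed for completion interrupts because __srp_get_tx_iu()
 * polls it via srp_send_completion() before allocating a transmit IU.
 */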
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len, result;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	result = srp_chkready(target->rport);
	if (unlikely(result)) {
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
		goto unlock_rport;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request, list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return SCSI_MLQUEUE_HOST_BUSY;
}
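
/*
 * Return value note (descriptive comment added for clarity): returning 0
 * means the command was accepted, while SCSI_MLQUEUE_HOST_BUSY asks the
 * SCSI mid-layer to retry the command later instead of failing it. That
 * is why every error path first returns the tx IU and the request slot to
 * their free lists: the same resources must be available again when the
 * retry arrives.
 */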
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_target_ib().
 */
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	target->rx_ring = kzalloc(target->queue_size * sizeof(*target->rx_ring),
				  GFP_KERNEL);
	if (!target->rx_ring)
		goto err_no_ring;
	target->tx_ring = kzalloc(target->queue_size * sizeof(*target->tx_ring),
				  GFP_KERNEL);
	if (!target->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		srp_free_iu(target->srp_host, target->tx_ring[i]);
	}

err_no_ring:
	kfree(target->tx_ring);
	target->tx_ring = NULL;
	kfree(target->rx_ring);
	target->rx_ring = NULL;

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
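
/*
 * Worked example (illustrative inputs, not defaults taken from this
 * driver): for qp_attr->timeout == 18 and qp_attr->retry_cnt == 7,
 *
 *   T_tr_ns           = 4096 * 2^18            = 1,073,741,824 ns
 *   max_compl_time_ms = 7 * 4 * T_tr_ns / 10^6 = 30,064 ms
 *   rq_tmo_jiffies    = msecs_to_jiffies(30,064 + 1,000)
 *
 * i.e. roughly 31 seconds before a blocked request is timed out.
 */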
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       struct srp_login_rsp *lrsp,
			       struct srp_target_port *target)
{
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		target->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		target->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!target->rx_ring) {
		ret = srp_alloc_iu_bufs(target);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = target->rx_ring[i];
		ret = srp_post_recv(target, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(target->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	target->status = ret;
}
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->path.sgid.raw,
					     target->orig_dgid, reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, target);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		srp_change_conn_state(target, false);
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	return 0;
}
/**
 * srp_change_queue_type - changing device queue tag type
 * @sdev: scsi device struct
 * @tag_type: requested tag type
 *
 * Returns queue tag type.
 */
static int
srp_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
 * (see include/scsi/scsi_host.h for definition)
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth;

	if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
		max_depth = shost->can_queue;
		if (!sdev->tagged_supported)
			max_depth = 1;
		if (qdepth > max_depth)
			qdepth = max_depth;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	} else if (reason == SCSI_QDEPTH_QFULL)
		scsi_track_queue_full(sdev, qdepth);
	else
		return -EOPNOTSUPP;

	return sdev->queue_depth;
}
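
/*
 * Usage sketch (the block device name below is hypothetical): a request
 * such as
 *
 *   echo 64 > /sys/block/sdb/device/queue_depth
 *
 * typically reaches srp_change_queue_depth() with reason ==
 * SCSI_QDEPTH_DEFAULT; the requested depth is then clamped to
 * shost->can_queue, or to 1 when the device does not support tagged
 * queuing.
 */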
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!target->connected || target->qp_in_error)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_target_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
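
/*
 * Encoding note (descriptive comment added for clarity): SRP carries LUNs
 * in SAM-2 format, so for a single-level LUN below 256 the (u64)lun << 48
 * shift followed by cpu_to_be64() places the LUN in the second byte on
 * the wire; e.g. LUN 5 becomes the eight bytes 00 05 00 00 00 00 00 00.
 * Setting the SRP_TAG_TSK_MGMT bit in the tag lets the RSP handler tell
 * task-management responses apart from ordinary command responses.
 */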
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || !srp_claim_req(target, req, scmnd))
		return SUCCESS;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(target, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->req_ring_size; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_finish_req(target, req, DID_RESET << 16);
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(sgid,	    S_IRUGO, show_sgid,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(comp_vector,	    S_IRUGO, show_comp_vector,	   NULL);
static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries,   NULL);
static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.change_queue_type		= srp_change_queue_type,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}
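
/*
 * In other words (descriptive comment added for clarity): two logins that
 * carry the same (id_ext, ioc_guid, initiator_ext) triple are considered
 * duplicates, and srp_create_target() rejects the second one with -EEXIST.
 */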
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
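/*
 * A concrete example (all identifiers below are made up, and the sysfs
 * path assumes an HCA named mlx4_0 and port 1):
 *
 *   echo "id_ext=0002c90300a0b0c0,ioc_guid=0002c90300a0b0c0,\
 *   dgid=fe800000000000000002c90300a0b0c1,pkey=ffff,\
 *   service_id=0002c90300a0b0c0" > \
 *     /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */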
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL 			}
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
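
/*
 * Parsing example (continuing the made-up values from the comment above
 * the option enum): the dgid string must be exactly 32 hex digits, and the
 * SRP_OPT_DGID case converts two characters at a time, so "fe80..." yields
 * path.dgid.raw[0] == 0xfe, raw[1] == 0x80, and so on for all 16 bytes.
 * The trailing cmd_per_lun > can_queue check only warns; both values are
 * capped later, in srp_cm_rep_handler().
 */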
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct ib_device *ibdev = host->srp_dev->dev;
	int ret;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->mr->lkey;
	target->rkey		= host->srp_dev->mr->rkey;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto err;
	}

	if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	ret = srp_alloc_req_data(target);
	if (ret)
		goto err_free_mem;

	ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid);
	if (ret)
		goto err_free_mem;

	ret = srp_create_target_ib(target);
	if (ret)
		goto err_free_mem;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free_ib;

	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
		     be64_to_cpu(target->id_ext),
		     be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     be64_to_cpu(target->service_id),
		     target->path.sgid.raw, target->path.dgid.raw);

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);
	return ret;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free_ib:
	srp_free_target_ib(target);

err_free_mem:
	srp_free_req_data(target);

err:
	scsi_host_put(target_host);
	goto out;
}
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int max_pages_per_fmr, fmr_page_shift, s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	fmr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
	srp_dev->fmr_max_size	= srp_dev->fmr_page_size * SRP_FMR_SIZE;

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	for (max_pages_per_fmr = SRP_FMR_SIZE;
			max_pages_per_fmr >= SRP_FMR_MIN_SIZE;
			max_pages_per_fmr /= 2, srp_dev->fmr_max_size /= 2) {
		memset(&fmr_param, 0, sizeof fmr_param);
		fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
		fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
		fmr_param.cache		    = 1;
		fmr_param.max_pages_per_fmr = max_pages_per_fmr;
		fmr_param.page_shift	    = fmr_page_shift;
		fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

		srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
		if (!IS_ERR(srp_dev->fmr_pool))
			break;
	}

	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
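
/*
 * Fallback note (descriptive comment added for clarity): the FMR pool
 * loop above starts at SRP_FMR_SIZE pages per mapping and halves
 * max_pages_per_fmr (and the corresponding fmr_max_size) until pool
 * creation succeeds or SRP_FMR_MIN_SIZE is reached. If no pool can be
 * created at all the driver runs with fmr_pool == NULL and relies on
 * indirect data buffer descriptors instead, which is why
 * srp_create_target() limits sg_tablesize to cmd_sg_cnt in that case.
 */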
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = ib_get_client_data(device, &srp_client);
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for target port removal tasks.
		 */
		flush_workqueue(system_long_wq);

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);