2006-05-11 10:02:19 +03:00
/*
* Copyright ( c ) 2004 , 2005 , 2006 Voltaire , Inc . All rights reserved .
2014-04-01 16:28:41 +03:00
* Copyright ( c ) 2013 - 2014 Mellanox Technologies . All rights reserved .
2006-05-11 10:02:19 +03:00
*
* This software is available to you under a choice of one of two
* licenses . You may choose to be licensed under the terms of the GNU
* General Public License ( GPL ) Version 2 , available from the file
* COPYING in the main directory of this source tree , or the
* OpenIB . org BSD license below :
*
* Redistribution and use in source and binary forms , with or
* without modification , are permitted provided that the following
* conditions are met :
*
* - Redistributions of source code must retain the above
* copyright notice , this list of conditions and the following
* disclaimer .
*
* - Redistributions in binary form must reproduce the above
* copyright notice , this list of conditions and the following
* disclaimer in the documentation and / or other materials
* provided with the distribution .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND ,
* EXPRESS OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY , FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT . IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM , DAMAGES OR OTHER LIABILITY , WHETHER IN AN
* ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING FROM , OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE .
*/
# include <linux/kernel.h>
# include <linux/slab.h>
# include <linux/mm.h>
# include <linux/scatterlist.h>
# include <linux/kfifo.h>
# include <scsi/scsi_cmnd.h>
# include <scsi/scsi_host.h>
# include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_IN].data_len, Protection size
 * is stored in task->prot[ISER_DIR_IN].data_len
 */
static int iser_prepare_read_cmd ( struct iscsi_task * task )
2006-05-11 10:02:19 +03:00
{
2008-05-21 15:54:11 -05:00
struct iscsi_iser_task * iser_task = task - > dd_data ;
2015-04-14 18:08:19 +03:00
struct iser_mem_reg * mem_reg ;
2006-05-11 10:02:19 +03:00
int err ;
2015-12-09 14:12:03 +02:00
struct iser_ctrl * hdr = & iser_task - > desc . iser_header ;
2008-05-21 15:54:11 -05:00
struct iser_data_buf * buf_in = & iser_task - > data [ ISER_DIR_IN ] ;
2006-05-11 10:02:19 +03:00
2008-05-21 15:54:11 -05:00
err = iser_dma_map_task_data ( iser_task ,
2006-05-11 10:02:19 +03:00
buf_in ,
ISER_DIR_IN ,
DMA_FROM_DEVICE ) ;
if ( err )
return err ;
2014-03-05 19:43:48 +02:00
if ( scsi_prot_sg_count ( iser_task - > sc ) ) {
struct iser_data_buf * pbuf_in = & iser_task - > prot [ ISER_DIR_IN ] ;
err = iser_dma_map_task_data ( iser_task ,
pbuf_in ,
ISER_DIR_IN ,
DMA_FROM_DEVICE ) ;
if ( err )
return err ;
}
2015-12-09 14:12:01 +02:00
err = iser_reg_rdma_mem ( iser_task , ISER_DIR_IN , false ) ;
2006-05-11 10:02:19 +03:00
if ( err ) {
iser_err ( " Failed to set up Data-IN RDMA \n " ) ;
return err ;
}
2015-04-14 18:08:19 +03:00
mem_reg = & iser_task - > rdma_reg [ ISER_DIR_IN ] ;
2006-05-11 10:02:19 +03:00
hdr - > flags | = ISER_RSV ;
2015-04-14 18:08:19 +03:00
hdr - > read_stag = cpu_to_be32 ( mem_reg - > rkey ) ;
2015-04-14 18:08:24 +03:00
hdr - > read_va = cpu_to_be64 ( mem_reg - > sge . addr ) ;
2006-05-11 10:02:19 +03:00
iser_dbg ( " Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX \n " ,
2015-04-14 18:08:19 +03:00
task - > itt , mem_reg - > rkey ,
2015-04-14 18:08:24 +03:00
( unsigned long long ) mem_reg - > sge . addr ) ;
2006-05-11 10:02:19 +03:00
return 0 ;
}
/* Register user buffer memory and initialize passive rdma
 * dto descriptor. Data size is stored in
 * task->data[ISER_DIR_OUT].data_len, Protection size
 * is stored at task->prot[ISER_DIR_OUT].data_len
 */
static int
2008-05-21 15:54:11 -05:00
iser_prepare_write_cmd ( struct iscsi_task * task ,
2006-05-11 10:02:19 +03:00
unsigned int imm_sz ,
unsigned int unsol_sz ,
unsigned int edtl )
{
2008-05-21 15:54:11 -05:00
struct iscsi_iser_task * iser_task = task - > dd_data ;
2015-04-14 18:08:19 +03:00
struct iser_mem_reg * mem_reg ;
2006-05-11 10:02:19 +03:00
int err ;
2015-12-09 14:12:03 +02:00
struct iser_ctrl * hdr = & iser_task - > desc . iser_header ;
2008-05-21 15:54:11 -05:00
struct iser_data_buf * buf_out = & iser_task - > data [ ISER_DIR_OUT ] ;
2010-02-08 13:19:56 +00:00
struct ib_sge * tx_dsg = & iser_task - > desc . tx_sg [ 1 ] ;
2006-05-11 10:02:19 +03:00
2008-05-21 15:54:11 -05:00
err = iser_dma_map_task_data ( iser_task ,
2006-05-11 10:02:19 +03:00
buf_out ,
ISER_DIR_OUT ,
DMA_TO_DEVICE ) ;
if ( err )
return err ;
2014-03-05 19:43:48 +02:00
if ( scsi_prot_sg_count ( iser_task - > sc ) ) {
struct iser_data_buf * pbuf_out = & iser_task - > prot [ ISER_DIR_OUT ] ;
err = iser_dma_map_task_data ( iser_task ,
pbuf_out ,
ISER_DIR_OUT ,
DMA_TO_DEVICE ) ;
if ( err )
return err ;
}
2015-12-09 14:12:01 +02:00
err = iser_reg_rdma_mem ( iser_task , ISER_DIR_OUT ,
buf_out - > data_len = = imm_sz ) ;
2006-05-11 10:02:19 +03:00
if ( err ! = 0 ) {
iser_err ( " Failed to register write cmd RDMA mem \n " ) ;
return err ;
}
2015-04-14 18:08:19 +03:00
mem_reg = & iser_task - > rdma_reg [ ISER_DIR_OUT ] ;
2006-05-11 10:02:19 +03:00
if ( unsol_sz < edtl ) {
hdr - > flags | = ISER_WSV ;
2015-04-14 18:08:19 +03:00
hdr - > write_stag = cpu_to_be32 ( mem_reg - > rkey ) ;
2015-04-14 18:08:24 +03:00
hdr - > write_va = cpu_to_be64 ( mem_reg - > sge . addr + unsol_sz ) ;
2006-05-11 10:02:19 +03:00
iser_dbg ( " Cmd itt:%d, WRITE tags, RKEY:%#.4X "
" VA:%#llX + unsol:%d \n " ,
2015-04-14 18:08:19 +03:00
task - > itt , mem_reg - > rkey ,
2015-04-14 18:08:24 +03:00
( unsigned long long ) mem_reg - > sge . addr , unsol_sz ) ;
2006-05-11 10:02:19 +03:00
}
if ( imm_sz > 0 ) {
iser_dbg ( " Cmd itt:%d, WRITE, adding imm.data sz: %d \n " ,
2008-05-21 15:54:11 -05:00
task - > itt , imm_sz ) ;
2015-04-14 18:08:24 +03:00
tx_dsg - > addr = mem_reg - > sge . addr ;
2010-02-08 13:19:56 +00:00
tx_dsg - > length = imm_sz ;
2015-04-14 18:08:24 +03:00
tx_dsg - > lkey = mem_reg - > sge . lkey ;
2010-02-08 13:19:56 +00:00
iser_task - > desc . num_sge = 2 ;
2006-05-11 10:02:19 +03:00
}
return 0 ;
}
/* creates a new tx descriptor and adds header regd buffer */
2014-10-01 14:01:57 +03:00
static void iser_create_send_desc ( struct iser_conn * iser_conn ,
2010-02-08 13:19:56 +00:00
struct iser_tx_desc * tx_desc )
2006-05-11 10:02:19 +03:00
{
2014-10-01 14:01:58 +03:00
struct iser_device * device = iser_conn - > ib_conn . device ;
2006-05-11 10:02:19 +03:00
2010-02-08 13:19:56 +00:00
ib_dma_sync_single_for_cpu ( device - > ib_device ,
tx_desc - > dma_addr , ISER_HEADERS_LEN , DMA_TO_DEVICE ) ;
2006-05-11 10:02:19 +03:00
2015-12-09 14:12:03 +02:00
memset ( & tx_desc - > iser_header , 0 , sizeof ( struct iser_ctrl ) ) ;
2006-05-11 10:02:19 +03:00
tx_desc - > iser_header . flags = ISER_VER ;
2010-02-08 13:19:56 +00:00
tx_desc - > num_sge = 1 ;
2006-05-11 10:02:19 +03:00
}
2014-10-01 14:01:57 +03:00
static void iser_free_login_buf ( struct iser_conn * iser_conn )
2013-07-28 12:35:37 +03:00
{
2014-10-01 14:01:58 +03:00
struct iser_device * device = iser_conn - > ib_conn . device ;
2015-11-04 10:50:31 +02:00
struct iser_login_desc * desc = & iser_conn - > login_desc ;
2014-10-01 14:01:58 +03:00
2015-11-04 10:50:31 +02:00
if ( ! desc - > req )
2013-07-28 12:35:37 +03:00
return ;
2015-11-04 10:50:31 +02:00
ib_dma_unmap_single ( device - > ib_device , desc - > req_dma ,
ISCSI_DEF_MAX_RECV_SEG_LEN , DMA_TO_DEVICE ) ;
2013-07-28 12:35:37 +03:00
2015-11-04 10:50:31 +02:00
ib_dma_unmap_single ( device - > ib_device , desc - > rsp_dma ,
ISER_RX_LOGIN_SIZE , DMA_FROM_DEVICE ) ;
2013-07-28 12:35:37 +03:00
2015-11-04 10:50:31 +02:00
kfree ( desc - > req ) ;
kfree ( desc - > rsp ) ;
2013-07-28 12:35:37 +03:00
/* make sure we never redo any unmapping */
2015-11-04 10:50:31 +02:00
desc - > req = NULL ;
desc - > rsp = NULL ;
2013-07-28 12:35:37 +03:00
}
2014-10-01 14:01:57 +03:00
static int iser_alloc_login_buf ( struct iser_conn * iser_conn )
2013-07-28 12:35:37 +03:00
{
2014-10-01 14:01:58 +03:00
struct iser_device * device = iser_conn - > ib_conn . device ;
2015-11-04 10:50:31 +02:00
struct iser_login_desc * desc = & iser_conn - > login_desc ;
desc - > req = kmalloc ( ISCSI_DEF_MAX_RECV_SEG_LEN , GFP_KERNEL ) ;
if ( ! desc - > req )
return - ENOMEM ;
desc - > req_dma = ib_dma_map_single ( device - > ib_device , desc - > req ,
ISCSI_DEF_MAX_RECV_SEG_LEN ,
DMA_TO_DEVICE ) ;
if ( ib_dma_mapping_error ( device - > ib_device ,
desc - > req_dma ) )
goto free_req ;
desc - > rsp = kmalloc ( ISER_RX_LOGIN_SIZE , GFP_KERNEL ) ;
if ( ! desc - > rsp )
goto unmap_req ;
desc - > rsp_dma = ib_dma_map_single ( device - > ib_device , desc - > rsp ,
ISER_RX_LOGIN_SIZE ,
DMA_FROM_DEVICE ) ;
if ( ib_dma_mapping_error ( device - > ib_device ,
desc - > rsp_dma ) )
goto free_rsp ;
2013-07-28 12:35:37 +03:00
return 0 ;
2015-11-04 10:50:31 +02:00
free_rsp :
kfree ( desc - > rsp ) ;
unmap_req :
ib_dma_unmap_single ( device - > ib_device , desc - > req_dma ,
ISCSI_DEF_MAX_RECV_SEG_LEN ,
DMA_TO_DEVICE ) ;
free_req :
kfree ( desc - > req ) ;
2013-07-28 12:35:37 +03:00
return - ENOMEM ;
}
2010-02-08 13:19:56 +00:00
2014-10-01 14:01:57 +03:00
int iser_alloc_rx_descriptors ( struct iser_conn * iser_conn ,
struct iscsi_session * session )
2010-02-08 13:17:42 +00:00
{
int i , j ;
u64 dma_addr ;
struct iser_rx_desc * rx_desc ;
struct ib_sge * rx_sg ;
2014-10-01 14:01:58 +03:00
struct ib_conn * ib_conn = & iser_conn - > ib_conn ;
struct iser_device * device = ib_conn - > device ;
2010-02-08 13:17:42 +00:00
2014-10-01 14:01:57 +03:00
iser_conn - > qp_max_recv_dtos = session - > cmds_max ;
iser_conn - > qp_max_recv_dtos_mask = session - > cmds_max - 1 ; /* cmds_max is 2^N */
iser_conn - > min_posted_rx = iser_conn - > qp_max_recv_dtos > > 2 ;
2013-07-28 12:35:38 +03:00
2015-08-06 18:33:03 +03:00
if ( device - > reg_ops - > alloc_reg_res ( ib_conn , session - > scsi_cmds_max ,
2015-08-06 18:33:04 +03:00
iser_conn - > scsi_sg_tablesize ) )
2013-07-28 12:35:39 +03:00
goto create_rdma_reg_res_failed ;
2013-07-28 12:35:37 +03:00
2014-10-01 14:01:57 +03:00
if ( iser_alloc_login_buf ( iser_conn ) )
2013-07-28 12:35:37 +03:00
goto alloc_login_buf_fail ;
2014-10-01 14:02:09 +03:00
iser_conn - > num_rx_descs = session - > cmds_max ;
iser_conn - > rx_descs = kmalloc ( iser_conn - > num_rx_descs *
2010-02-08 13:17:42 +00:00
sizeof ( struct iser_rx_desc ) , GFP_KERNEL ) ;
2014-10-01 14:01:57 +03:00
if ( ! iser_conn - > rx_descs )
2010-02-08 13:17:42 +00:00
goto rx_desc_alloc_fail ;
2014-10-01 14:01:57 +03:00
rx_desc = iser_conn - > rx_descs ;
2010-02-08 13:17:42 +00:00
2014-10-01 14:01:57 +03:00
for ( i = 0 ; i < iser_conn - > qp_max_recv_dtos ; i + + , rx_desc + + ) {
2010-02-08 13:17:42 +00:00
dma_addr = ib_dma_map_single ( device - > ib_device , ( void * ) rx_desc ,
ISER_RX_PAYLOAD_SIZE , DMA_FROM_DEVICE ) ;
if ( ib_dma_mapping_error ( device - > ib_device , dma_addr ) )
goto rx_desc_dma_map_failed ;
rx_desc - > dma_addr = dma_addr ;
2015-12-11 11:54:28 -08:00
rx_desc - > cqe . done = iser_task_rsp ;
2010-02-08 13:17:42 +00:00
rx_sg = & rx_desc - > rx_sg ;
2015-12-11 11:54:28 -08:00
rx_sg - > addr = rx_desc - > dma_addr ;
2010-02-08 13:17:42 +00:00
rx_sg - > length = ISER_RX_PAYLOAD_SIZE ;
2015-12-11 11:54:28 -08:00
rx_sg - > lkey = device - > pd - > local_dma_lkey ;
2010-02-08 13:17:42 +00:00
}
2014-10-01 14:01:57 +03:00
iser_conn - > rx_desc_head = 0 ;
2010-02-08 13:17:42 +00:00
return 0 ;
rx_desc_dma_map_failed :
2014-10-01 14:01:57 +03:00
rx_desc = iser_conn - > rx_descs ;
2010-02-08 13:17:42 +00:00
for ( j = 0 ; j < i ; j + + , rx_desc + + )
ib_dma_unmap_single ( device - > ib_device , rx_desc - > dma_addr ,
2013-07-28 12:35:37 +03:00
ISER_RX_PAYLOAD_SIZE , DMA_FROM_DEVICE ) ;
2014-10-01 14:01:57 +03:00
kfree ( iser_conn - > rx_descs ) ;
iser_conn - > rx_descs = NULL ;
2010-02-08 13:17:42 +00:00
rx_desc_alloc_fail :
2014-10-01 14:01:57 +03:00
iser_free_login_buf ( iser_conn ) ;
2013-07-28 12:35:37 +03:00
alloc_login_buf_fail :
2015-08-06 18:32:56 +03:00
device - > reg_ops - > free_reg_res ( ib_conn ) ;
2013-07-28 12:35:39 +03:00
create_rdma_reg_res_failed :
2010-02-08 13:17:42 +00:00
iser_err ( " failed allocating rx descriptors / data buffers \n " ) ;
return - ENOMEM ;
}
2014-10-01 14:01:57 +03:00
void iser_free_rx_descriptors ( struct iser_conn * iser_conn )
2010-02-08 13:17:42 +00:00
{
int i ;
struct iser_rx_desc * rx_desc ;
2014-10-01 14:01:58 +03:00
struct ib_conn * ib_conn = & iser_conn - > ib_conn ;
struct iser_device * device = ib_conn - > device ;
2010-02-08 13:17:42 +00:00
2015-08-06 18:32:56 +03:00
if ( device - > reg_ops - > free_reg_res )
device - > reg_ops - > free_reg_res ( ib_conn ) ;
2010-02-08 13:17:42 +00:00
2014-10-01 14:01:57 +03:00
rx_desc = iser_conn - > rx_descs ;
for ( i = 0 ; i < iser_conn - > qp_max_recv_dtos ; i + + , rx_desc + + )
2010-02-08 13:17:42 +00:00
ib_dma_unmap_single ( device - > ib_device , rx_desc - > dma_addr ,
2013-07-28 12:35:37 +03:00
ISER_RX_PAYLOAD_SIZE , DMA_FROM_DEVICE ) ;
2014-10-01 14:01:57 +03:00
kfree ( iser_conn - > rx_descs ) ;
2013-07-28 12:35:37 +03:00
/* make sure we never redo any unmapping */
2014-10-01 14:01:57 +03:00
iser_conn - > rx_descs = NULL ;
2013-07-28 12:35:37 +03:00
2014-10-01 14:01:57 +03:00
iser_free_login_buf ( iser_conn ) ;
2010-02-08 13:17:42 +00:00
}
2012-03-05 18:21:44 +02:00
static int iser_post_rx_bufs ( struct iscsi_conn * conn , struct iscsi_hdr * req )
2006-05-11 10:02:19 +03:00
{
2014-10-01 14:01:57 +03:00
struct iser_conn * iser_conn = conn - > dd_data ;
2014-10-01 14:01:58 +03:00
struct ib_conn * ib_conn = & iser_conn - > ib_conn ;
2013-08-08 13:44:29 +03:00
struct iscsi_session * session = conn - > session ;
2006-05-11 10:02:19 +03:00
2012-03-05 18:21:44 +02:00
iser_dbg ( " req op %x flags %x \n " , req - > opcode , req - > flags ) ;
/* check if this is the last login - going to full feature phase */
if ( ( req - > flags & ISCSI_FULL_FEATURE_PHASE ) ! = ISCSI_FULL_FEATURE_PHASE )
return 0 ;
2006-05-11 10:02:19 +03:00
2012-03-05 18:21:44 +02:00
/*
2014-10-01 14:02:10 +03:00
* Check that there is one posted recv buffer
* ( for the last login response ) .
2012-03-05 18:21:44 +02:00
*/
2014-10-01 14:01:58 +03:00
WARN_ON ( ib_conn - > post_recv_buf_count ! = 1 ) ;
2010-02-08 13:17:42 +00:00
2013-08-08 13:44:29 +03:00
if ( session - > discovery_sess ) {
iser_info ( " Discovery session, re-using login RX buffer \n " ) ;
return 0 ;
} else
iser_info ( " Normal session, posting batch of RX %d buffers \n " ,
2014-10-01 14:01:57 +03:00
iser_conn - > min_posted_rx ) ;
2013-08-08 13:44:29 +03:00
2006-05-11 10:02:19 +03:00
/* Initial post receive buffers */
2014-10-01 14:01:57 +03:00
if ( iser_post_recvm ( iser_conn , iser_conn - > min_posted_rx ) )
2010-02-08 13:17:42 +00:00
return - ENOMEM ;
2006-05-11 10:02:19 +03:00
return 0 ;
}
2014-12-07 16:09:56 +02:00
/* True for every ISER_SIGNAL_CMD_COUNT-th send, which is then posted signaled. */
static inline bool iser_signal_comp(u8 sig_count)
{
	return sig_count % ISER_SIGNAL_CMD_COUNT == 0;
}
2006-05-11 10:02:19 +03:00
/**
* iser_send_command - send command PDU
*/
2008-05-21 15:54:08 -05:00
int iser_send_command ( struct iscsi_conn * conn ,
2008-05-21 15:54:11 -05:00
struct iscsi_task * task )
2006-05-11 10:02:19 +03:00
{
2014-10-01 14:01:57 +03:00
struct iser_conn * iser_conn = conn - > dd_data ;
2008-05-21 15:54:11 -05:00
struct iscsi_iser_task * iser_task = task - > dd_data ;
2006-05-11 10:02:19 +03:00
unsigned long edtl ;
2010-02-08 13:17:42 +00:00
int err ;
2014-03-05 19:43:48 +02:00
struct iser_data_buf * data_buf , * prot_buf ;
2011-05-27 11:16:33 +00:00
struct iscsi_scsi_req * hdr = ( struct iscsi_scsi_req * ) task - > hdr ;
2008-05-21 15:54:11 -05:00
struct scsi_cmnd * sc = task - > sc ;
2010-02-08 13:19:56 +00:00
struct iser_tx_desc * tx_desc = & iser_task - > desc ;
2014-12-07 16:09:56 +02:00
u8 sig_count = + + iser_conn - > ib_conn . sig_count ;
2006-05-11 10:02:19 +03:00
edtl = ntohl ( hdr - > data_length ) ;
/* build the tx desc regd header and add it to the tx desc dto */
2010-02-08 13:19:56 +00:00
tx_desc - > type = ISCSI_TX_SCSI_COMMAND ;
2015-12-11 11:54:28 -08:00
tx_desc - > cqe . done = iser_cmd_comp ;
2014-10-01 14:01:57 +03:00
iser_create_send_desc ( iser_conn , tx_desc ) ;
2006-05-11 10:02:19 +03:00
2014-03-05 19:43:48 +02:00
if ( hdr - > flags & ISCSI_FLAG_CMD_READ ) {
2008-05-21 15:54:11 -05:00
data_buf = & iser_task - > data [ ISER_DIR_IN ] ;
2014-03-05 19:43:48 +02:00
prot_buf = & iser_task - > prot [ ISER_DIR_IN ] ;
} else {
2008-05-21 15:54:11 -05:00
data_buf = & iser_task - > data [ ISER_DIR_OUT ] ;
2014-03-05 19:43:48 +02:00
prot_buf = & iser_task - > prot [ ISER_DIR_OUT ] ;
}
2006-05-11 10:02:19 +03:00
2007-06-01 18:56:21 +09:00
if ( scsi_sg_count ( sc ) ) { /* using a scatter list */
2015-04-14 18:08:15 +03:00
data_buf - > sg = scsi_sglist ( sc ) ;
2007-06-01 18:56:21 +09:00
data_buf - > size = scsi_sg_count ( sc ) ;
2006-05-11 10:02:19 +03:00
}
2007-06-01 18:56:21 +09:00
data_buf - > data_len = scsi_bufflen ( sc ) ;
2006-05-11 10:02:19 +03:00
2014-03-05 19:43:48 +02:00
if ( scsi_prot_sg_count ( sc ) ) {
2015-04-14 18:08:15 +03:00
prot_buf - > sg = scsi_prot_sglist ( sc ) ;
2014-03-05 19:43:48 +02:00
prot_buf - > size = scsi_prot_sg_count ( sc ) ;
2015-04-14 18:08:13 +03:00
prot_buf - > data_len = ( data_buf - > data_len > >
ilog2 ( sc - > device - > sector_size ) ) * 8 ;
2014-03-05 19:43:48 +02:00
}
2006-05-11 10:02:19 +03:00
if ( hdr - > flags & ISCSI_FLAG_CMD_READ ) {
2014-06-11 12:09:58 +03:00
err = iser_prepare_read_cmd ( task ) ;
2006-05-11 10:02:19 +03:00
if ( err )
goto send_command_error ;
}
if ( hdr - > flags & ISCSI_FLAG_CMD_WRITE ) {
2008-05-21 15:54:11 -05:00
err = iser_prepare_write_cmd ( task ,
task - > imm_count ,
task - > imm_count +
2008-12-02 00:32:06 -06:00
task - > unsol_r2t . data_length ,
2006-05-11 10:02:19 +03:00
edtl ) ;
if ( err )
goto send_command_error ;
}
2008-05-21 15:54:11 -05:00
iser_task - > status = ISER_TASK_STATUS_STARTED ;
2006-05-11 10:02:19 +03:00
2014-10-01 14:02:12 +03:00
err = iser_post_send ( & iser_conn - > ib_conn , tx_desc ,
2014-12-07 16:09:56 +02:00
iser_signal_comp ( sig_count ) ) ;
2006-05-11 10:02:19 +03:00
if ( ! err )
return 0 ;
send_command_error :
2008-05-21 15:54:11 -05:00
iser_err ( " conn %p failed task->itt %d err %d \n " , conn , task - > itt , err ) ;
2006-05-11 10:02:19 +03:00
return err ;
}
/**
* iser_send_data_out - send data out PDU
*/
2008-05-21 15:54:08 -05:00
int iser_send_data_out ( struct iscsi_conn * conn ,
2008-05-21 15:54:11 -05:00
struct iscsi_task * task ,
2006-05-11 10:02:19 +03:00
struct iscsi_data * hdr )
{
2014-10-01 14:01:57 +03:00
struct iser_conn * iser_conn = conn - > dd_data ;
2008-05-21 15:54:11 -05:00
struct iscsi_iser_task * iser_task = task - > dd_data ;
2010-02-08 13:19:56 +00:00
struct iser_tx_desc * tx_desc = NULL ;
2015-04-14 18:08:19 +03:00
struct iser_mem_reg * mem_reg ;
2006-05-11 10:02:19 +03:00
unsigned long buf_offset ;
unsigned long data_seg_len ;
2008-04-16 21:09:35 -07:00
uint32_t itt ;
2015-08-06 18:32:48 +03:00
int err ;
2010-02-08 13:19:56 +00:00
struct ib_sge * tx_dsg ;
2008-04-16 21:09:35 -07:00
itt = ( __force uint32_t ) hdr - > itt ;
2006-05-11 10:02:19 +03:00
data_seg_len = ntoh24 ( hdr - > dlength ) ;
buf_offset = ntohl ( hdr - > offset ) ;
iser_dbg ( " %s itt %d dseg_len %d offset %d \n " ,
__func__ , ( int ) itt , ( int ) data_seg_len , ( int ) buf_offset ) ;
2010-02-08 13:20:43 +00:00
tx_desc = kmem_cache_zalloc ( ig . desc_cache , GFP_ATOMIC ) ;
2006-05-11 10:02:19 +03:00
if ( tx_desc = = NULL ) {
iser_err ( " Failed to alloc desc for post dataout \n " ) ;
return - ENOMEM ;
}
tx_desc - > type = ISCSI_TX_DATAOUT ;
2015-12-11 11:54:28 -08:00
tx_desc - > cqe . done = iser_dataout_comp ;
2010-02-08 13:19:56 +00:00
tx_desc - > iser_header . flags = ISER_VER ;
2006-05-11 10:02:19 +03:00
memcpy ( & tx_desc - > iscsi_header , hdr , sizeof ( struct iscsi_hdr ) ) ;
2010-02-08 13:19:56 +00:00
/* build the tx desc */
2015-08-06 18:32:48 +03:00
err = iser_initialize_task_headers ( task , tx_desc ) ;
if ( err )
goto send_data_out_error ;
2006-05-11 10:02:19 +03:00
2015-04-14 18:08:19 +03:00
mem_reg = & iser_task - > rdma_reg [ ISER_DIR_OUT ] ;
2010-02-08 13:19:56 +00:00
tx_dsg = & tx_desc - > tx_sg [ 1 ] ;
2015-04-14 18:08:24 +03:00
tx_dsg - > addr = mem_reg - > sge . addr + buf_offset ;
tx_dsg - > length = data_seg_len ;
tx_dsg - > lkey = mem_reg - > sge . lkey ;
2010-02-08 13:19:56 +00:00
tx_desc - > num_sge = 2 ;
2006-05-11 10:02:19 +03:00
2008-05-21 15:54:11 -05:00
if ( buf_offset + data_seg_len > iser_task - > data [ ISER_DIR_OUT ] . data_len ) {
2006-05-11 10:02:19 +03:00
iser_err ( " Offset:%ld & DSL:%ld in Data-Out "
" inconsistent with total len:%ld, itt:%d \n " ,
buf_offset , data_seg_len ,
2008-05-21 15:54:11 -05:00
iser_task - > data [ ISER_DIR_OUT ] . data_len , itt ) ;
2006-05-11 10:02:19 +03:00
err = - EINVAL ;
goto send_data_out_error ;
}
iser_dbg ( " data-out itt: %d, offset: %ld, sz: %ld \n " ,
itt , buf_offset , data_seg_len ) ;
2014-10-01 14:02:12 +03:00
err = iser_post_send ( & iser_conn - > ib_conn , tx_desc , true ) ;
2006-05-11 10:02:19 +03:00
if ( ! err )
return 0 ;
send_data_out_error :
kmem_cache_free ( ig . desc_cache , tx_desc ) ;
2015-08-06 18:32:48 +03:00
iser_err ( " conn %p failed err %d \n " , conn , err ) ;
2006-05-11 10:02:19 +03:00
return err ;
}
int iser_send_control ( struct iscsi_conn * conn ,
2008-05-21 15:54:11 -05:00
struct iscsi_task * task )
2006-05-11 10:02:19 +03:00
{
2014-10-01 14:01:57 +03:00
struct iser_conn * iser_conn = conn - > dd_data ;
2008-05-21 15:54:11 -05:00
struct iscsi_iser_task * iser_task = task - > dd_data ;
2010-02-08 13:19:56 +00:00
struct iser_tx_desc * mdesc = & iser_task - > desc ;
2006-05-11 10:02:19 +03:00
unsigned long data_seg_len ;
2010-02-08 13:19:56 +00:00
int err = 0 ;
2006-05-11 10:02:19 +03:00
struct iser_device * device ;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc - > type = ISCSI_TX_CONTROL ;
2015-12-11 11:54:28 -08:00
mdesc - > cqe . done = iser_ctrl_comp ;
2014-10-01 14:01:57 +03:00
iser_create_send_desc ( iser_conn , mdesc ) ;
2006-05-11 10:02:19 +03:00
2014-10-01 14:01:58 +03:00
device = iser_conn - > ib_conn . device ;
2006-05-11 10:02:19 +03:00
2008-05-21 15:54:11 -05:00
data_seg_len = ntoh24 ( task - > hdr - > dlength ) ;
2006-05-11 10:02:19 +03:00
if ( data_seg_len > 0 ) {
2015-11-04 10:50:31 +02:00
struct iser_login_desc * desc = & iser_conn - > login_desc ;
2010-02-08 13:19:56 +00:00
struct ib_sge * tx_dsg = & mdesc - > tx_sg [ 1 ] ;
2015-11-04 10:50:31 +02:00
2010-02-08 13:19:56 +00:00
if ( task ! = conn - > login_task ) {
iser_err ( " data present on non login task!!! \n " ) ;
goto send_control_error ;
}
2011-11-04 00:19:46 +02:00
2015-11-04 10:50:31 +02:00
ib_dma_sync_single_for_cpu ( device - > ib_device , desc - > req_dma ,
task - > data_count , DMA_TO_DEVICE ) ;
2011-11-04 00:19:46 +02:00
2015-11-04 10:50:31 +02:00
memcpy ( desc - > req , task - > data , task - > data_count ) ;
2011-11-04 00:19:46 +02:00
2015-11-04 10:50:31 +02:00
ib_dma_sync_single_for_device ( device - > ib_device , desc - > req_dma ,
task - > data_count , DMA_TO_DEVICE ) ;
2011-11-04 00:19:46 +02:00
2015-11-04 10:50:31 +02:00
tx_dsg - > addr = desc - > req_dma ;
tx_dsg - > length = task - > data_count ;
tx_dsg - > lkey = device - > pd - > local_dma_lkey ;
2010-02-08 13:19:56 +00:00
mdesc - > num_sge = 2 ;
2006-05-11 10:02:19 +03:00
}
2010-02-08 13:17:42 +00:00
if ( task = = conn - > login_task ) {
2013-08-08 13:44:29 +03:00
iser_dbg ( " op %x dsl %lx, posting login rx buffer \n " ,
task - > hdr - > opcode , data_seg_len ) ;
2014-10-01 14:01:57 +03:00
err = iser_post_recvl ( iser_conn ) ;
2010-02-08 13:17:42 +00:00
if ( err )
goto send_control_error ;
2012-03-05 18:21:44 +02:00
err = iser_post_rx_bufs ( conn , task - > hdr ) ;
if ( err )
goto send_control_error ;
2006-05-11 10:02:19 +03:00
}
2014-10-01 14:02:12 +03:00
err = iser_post_send ( & iser_conn - > ib_conn , mdesc , true ) ;
2006-05-11 10:02:19 +03:00
if ( ! err )
return 0 ;
send_control_error :
iser_err ( " conn %p failed err %d \n " , conn , err ) ;
return err ;
}
2015-12-11 11:54:28 -08:00
void iser_login_rsp ( struct ib_cq * cq , struct ib_wc * wc )
2006-05-11 10:02:19 +03:00
{
2015-12-11 11:54:28 -08:00
struct ib_conn * ib_conn = wc - > qp - > qp_context ;
2015-11-04 10:50:32 +02:00
struct iser_conn * iser_conn = to_iser_conn ( ib_conn ) ;
2015-12-11 11:54:28 -08:00
struct iser_login_desc * desc = iser_login ( wc - > wr_cqe ) ;
2006-05-11 10:02:19 +03:00
struct iscsi_hdr * hdr ;
2015-11-04 10:50:31 +02:00
char * data ;
2015-12-11 11:54:28 -08:00
int length ;
if ( unlikely ( wc - > status ! = IB_WC_SUCCESS ) ) {
iser_err_comp ( wc , " login_rsp " ) ;
return ;
}
ib_dma_sync_single_for_cpu ( ib_conn - > device - > ib_device ,
desc - > rsp_dma , ISER_RX_LOGIN_SIZE ,
DMA_FROM_DEVICE ) ;
2015-12-09 14:12:03 +02:00
hdr = desc - > rsp + sizeof ( struct iser_ctrl ) ;
2015-12-11 11:54:28 -08:00
data = desc - > rsp + ISER_HEADERS_LEN ;
length = wc - > byte_len - ISER_HEADERS_LEN ;
iser_dbg ( " op 0x%x itt 0x%x dlen %d \n " , hdr - > opcode ,
hdr - > itt , length ) ;
iscsi_iser_recv ( iser_conn - > iscsi_conn , hdr , data , length ) ;
ib_dma_sync_single_for_device ( ib_conn - > device - > ib_device ,
desc - > rsp_dma , ISER_RX_LOGIN_SIZE ,
DMA_FROM_DEVICE ) ;
ib_conn - > post_recv_buf_count - - ;
}
2015-12-24 12:20:48 +02:00
/* Mark the descriptor's MR (or its signature MR) invalid after the
 * target performed a remote invalidation with @rkey.
 * BUG FIX: guard the pi_ctx dereference — a task without a protection
 * context has pi_ctx == NULL, and an rkey that misses rsc.mr would
 * otherwise NULL-deref on pi_ctx->sig_mr.
 */
static inline void
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
	if (likely(rkey == desc->rsc.mr->rkey))
		desc->rsc.mr_valid = 0;
	else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey))
		desc->pi_ctx->sig_mr_valid = 0;
}
static int
iser_check_remote_inv ( struct iser_conn * iser_conn ,
struct ib_wc * wc ,
struct iscsi_hdr * hdr )
{
if ( wc - > wc_flags & IB_WC_WITH_INVALIDATE ) {
struct iscsi_task * task ;
u32 rkey = wc - > ex . invalidate_rkey ;
iser_dbg ( " conn %p: remote invalidation for rkey %#x \n " ,
iser_conn , rkey ) ;
if ( unlikely ( ! iser_conn - > snd_w_inv ) ) {
iser_err ( " conn %p: unexepected remote invalidation, "
" terminating connection \n " , iser_conn ) ;
return - EPROTO ;
}
task = iscsi_itt_to_ctask ( iser_conn - > iscsi_conn , hdr - > itt ) ;
if ( likely ( task ) ) {
struct iscsi_iser_task * iser_task = task - > dd_data ;
struct iser_fr_desc * desc ;
if ( iser_task - > dir [ ISER_DIR_IN ] ) {
desc = iser_task - > rdma_reg [ ISER_DIR_IN ] . mem_h ;
iser_inv_desc ( desc , rkey ) ;
}
if ( iser_task - > dir [ ISER_DIR_OUT ] ) {
desc = iser_task - > rdma_reg [ ISER_DIR_OUT ] . mem_h ;
iser_inv_desc ( desc , rkey ) ;
}
} else {
iser_err ( " failed to get task for itt=%d \n " , hdr - > itt ) ;
return - EINVAL ;
}
}
return 0 ;
}
2015-12-11 11:54:28 -08:00
void iser_task_rsp ( struct ib_cq * cq , struct ib_wc * wc )
{
struct ib_conn * ib_conn = wc - > qp - > qp_context ;
struct iser_conn * iser_conn = to_iser_conn ( ib_conn ) ;
struct iser_rx_desc * desc = iser_rx ( wc - > wr_cqe ) ;
struct iscsi_hdr * hdr ;
int length ;
int outstanding , count , err ;
if ( unlikely ( wc - > status ! = IB_WC_SUCCESS ) ) {
iser_err_comp ( wc , " task_rsp " ) ;
return ;
2010-02-08 13:17:42 +00:00
}
2006-05-11 10:02:19 +03:00
2015-12-11 11:54:28 -08:00
ib_dma_sync_single_for_cpu ( ib_conn - > device - > ib_device ,
desc - > dma_addr , ISER_RX_PAYLOAD_SIZE ,
DMA_FROM_DEVICE ) ;
2006-05-11 10:02:19 +03:00
2015-12-11 11:54:28 -08:00
hdr = & desc - > iscsi_header ;
length = wc - > byte_len - ISER_HEADERS_LEN ;
2006-05-11 10:02:19 +03:00
2010-02-08 13:17:42 +00:00
iser_dbg ( " op 0x%x itt 0x%x dlen %d \n " , hdr - > opcode ,
2015-12-11 11:54:28 -08:00
hdr - > itt , length ) ;
2006-05-11 10:02:19 +03:00
2015-12-24 12:20:48 +02:00
if ( iser_check_remote_inv ( iser_conn , wc , hdr ) ) {
iscsi_conn_failure ( iser_conn - > iscsi_conn ,
ISCSI_ERR_CONN_FAILED ) ;
return ;
}
2015-12-11 11:54:28 -08:00
iscsi_iser_recv ( iser_conn - > iscsi_conn , hdr , desc - > data , length ) ;
2006-05-11 10:02:19 +03:00
2015-12-11 11:54:28 -08:00
ib_dma_sync_single_for_device ( ib_conn - > device - > ib_device ,
desc - > dma_addr , ISER_RX_PAYLOAD_SIZE ,
DMA_FROM_DEVICE ) ;
2006-05-11 10:02:19 +03:00
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term . So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
2014-10-01 14:01:58 +03:00
ib_conn - > post_recv_buf_count - - ;
2010-02-08 13:17:42 +00:00
2014-10-01 14:01:58 +03:00
outstanding = ib_conn - > post_recv_buf_count ;
2014-10-01 14:01:57 +03:00
if ( outstanding + iser_conn - > min_posted_rx < = iser_conn - > qp_max_recv_dtos ) {
count = min ( iser_conn - > qp_max_recv_dtos - outstanding ,
iser_conn - > min_posted_rx ) ;
err = iser_post_recvm ( iser_conn , count ) ;
2010-02-08 13:17:42 +00:00
if ( err )
iser_err ( " posting %d rx bufs err %d \n " , count , err ) ;
}
2006-05-11 10:02:19 +03:00
}
2015-12-11 11:54:28 -08:00
void iser_cmd_comp ( struct ib_cq * cq , struct ib_wc * wc )
{
if ( unlikely ( wc - > status ! = IB_WC_SUCCESS ) )
iser_err_comp ( wc , " command " ) ;
}
void iser_ctrl_comp ( struct ib_cq * cq , struct ib_wc * wc )
2006-05-11 10:02:19 +03:00
{
2015-12-11 11:54:28 -08:00
struct iser_tx_desc * desc = iser_tx ( wc - > wr_cqe ) ;
2008-05-21 15:54:11 -05:00
struct iscsi_task * task ;
2006-05-11 10:02:19 +03:00
2015-12-11 11:54:28 -08:00
if ( unlikely ( wc - > status ! = IB_WC_SUCCESS ) ) {
iser_err_comp ( wc , " control " ) ;
return ;
2010-02-08 13:19:56 +00:00
}
2006-05-11 10:02:19 +03:00
2015-12-11 11:54:28 -08:00
/* this arithmetic is legal by libiscsi dd_data allocation */
task = ( void * ) desc - sizeof ( struct iscsi_task ) ;
if ( task - > hdr - > itt = = RESERVED_ITT )
iscsi_put_task ( task ) ;
}
void iser_dataout_comp ( struct ib_cq * cq , struct ib_wc * wc )
{
struct iser_tx_desc * desc = iser_tx ( wc - > wr_cqe ) ;
struct ib_conn * ib_conn = wc - > qp - > qp_context ;
struct iser_device * device = ib_conn - > device ;
if ( unlikely ( wc - > status ! = IB_WC_SUCCESS ) )
iser_err_comp ( wc , " dataout " ) ;
ib_dma_unmap_single ( device - > ib_device , desc - > dma_addr ,
ISER_HEADERS_LEN , DMA_TO_DEVICE ) ;
kmem_cache_free ( ig . desc_cache , desc ) ;
}
2008-05-21 15:54:11 -05:00
void iser_task_rdma_init ( struct iscsi_iser_task * iser_task )
2006-05-11 10:02:19 +03:00
{
2008-05-21 15:54:11 -05:00
iser_task - > status = ISER_TASK_STATUS_INIT ;
2006-05-11 10:02:19 +03:00
2008-05-21 15:54:11 -05:00
iser_task - > dir [ ISER_DIR_IN ] = 0 ;
iser_task - > dir [ ISER_DIR_OUT ] = 0 ;
2006-05-11 10:02:19 +03:00
2008-05-21 15:54:11 -05:00
iser_task - > data [ ISER_DIR_IN ] . data_len = 0 ;
iser_task - > data [ ISER_DIR_OUT ] . data_len = 0 ;
2006-05-11 10:02:19 +03:00
2014-03-05 19:43:48 +02:00
iser_task - > prot [ ISER_DIR_IN ] . data_len = 0 ;
iser_task - > prot [ ISER_DIR_OUT ] . data_len = 0 ;
2015-04-14 18:08:19 +03:00
memset ( & iser_task - > rdma_reg [ ISER_DIR_IN ] , 0 ,
sizeof ( struct iser_mem_reg ) ) ;
memset ( & iser_task - > rdma_reg [ ISER_DIR_OUT ] , 0 ,
sizeof ( struct iser_mem_reg ) ) ;
2006-05-11 10:02:19 +03:00
}
2008-05-21 15:54:11 -05:00
void iser_task_rdma_finalize ( struct iscsi_iser_task * iser_task )
2006-05-11 10:02:19 +03:00
{
2014-03-05 19:43:48 +02:00
int prot_count = scsi_prot_sg_count ( iser_task - > sc ) ;
2006-05-11 10:02:19 +03:00
2014-03-05 19:43:44 +02:00
if ( iser_task - > dir [ ISER_DIR_IN ] ) {
2015-08-06 18:33:02 +03:00
iser_unreg_rdma_mem ( iser_task , ISER_DIR_IN ) ;
2015-10-13 19:12:58 +03:00
iser_dma_unmap_task_data ( iser_task ,
& iser_task - > data [ ISER_DIR_IN ] ,
DMA_FROM_DEVICE ) ;
if ( prot_count )
2014-03-05 19:43:48 +02:00
iser_dma_unmap_task_data ( iser_task ,
2014-12-28 14:26:11 +02:00
& iser_task - > prot [ ISER_DIR_IN ] ,
DMA_FROM_DEVICE ) ;
2014-03-05 19:43:44 +02:00
}
2006-05-11 10:02:19 +03:00
2014-03-05 19:43:44 +02:00
if ( iser_task - > dir [ ISER_DIR_OUT ] ) {
2015-08-06 18:33:02 +03:00
iser_unreg_rdma_mem ( iser_task , ISER_DIR_OUT ) ;
2015-10-13 19:12:58 +03:00
iser_dma_unmap_task_data ( iser_task ,
& iser_task - > data [ ISER_DIR_OUT ] ,
DMA_TO_DEVICE ) ;
if ( prot_count )
2014-03-05 19:43:44 +02:00
iser_dma_unmap_task_data ( iser_task ,
2014-12-28 14:26:11 +02:00
& iser_task - > prot [ ISER_DIR_OUT ] ,
DMA_TO_DEVICE ) ;
2014-03-05 19:43:44 +02:00
}
2006-05-11 10:02:19 +03:00
}