/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *mem_reg);

static
int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
		     struct iser_data_buf *mem,
		     struct iser_reg_resources *rsc,
		     struct iser_mem_reg *mem_reg);

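/*
 * Registration op tables: one for devices doing fast registration work
 * requests, one for devices that only expose an FMR pool.
 * iser_assign_reg_ops() selects one of these per device.
 */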
static struct iser_reg_ops fastreg_ops = {
	.alloc_reg_res	= iser_alloc_fastreg_pool,
	.free_reg_res	= iser_free_fastreg_pool,
	.reg_mem	= iser_fast_reg_mr,
	.unreg_mem	= iser_unreg_mem_fastreg,
	.reg_desc_get	= iser_reg_desc_get_fr,
	.reg_desc_put	= iser_reg_desc_put_fr,
};

static struct iser_reg_ops fmr_ops = {
	.alloc_reg_res	= iser_alloc_fmr_pool,
	.free_reg_res	= iser_free_fmr_pool,
	.reg_mem	= iser_fast_reg_fmr,
	.unreg_mem	= iser_unreg_mem_fmr,
	.reg_desc_get	= iser_reg_desc_get_fmr,
	.reg_desc_put	= iser_reg_desc_put_fmr,
};

int iser_assign_reg_ops(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;

	/* Assign function handles - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->reg_ops = &fmr_ops;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->reg_ops = &fastreg_ops;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}

	return 0;
}

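/*
 * iser_reg_desc_get_fr - pop a registration descriptor off the
 * connection's fastreg pool, under the pool lock.
 */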
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);

	return desc;
}

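/*
 * iser_reg_desc_put_fr - return a registration descriptor to the
 * connection's fastreg pool.
 */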
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	list_add(&desc->list, &fr_pool->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);
}

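/*
 * In FMR mode descriptors are not consumed: get just peeks at the first
 * entry on the pool list and put is a no-op.
 */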
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

	return list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
}

void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc)
{
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the odd case
 * where a few fragments of the same page are present in the SG as
 * consecutive elements, and it handles a single-entry SG as well.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

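/*
 * iser_dma_map_task_data - DMA map the task data scatterlist for @dma_dir
 * and mark the task as mapped in @iser_dir.
 */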
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

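/*
 * Single contiguous DMA entry: no registration needed, use the PD's
 * local_dma_lkey and the device MR rkey directly.
 */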
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->pd->local_dma_lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

/**
 * iser_fast_reg_fmr - Registers physical memory using the FMR pool
 *
 * returns: 0 on success, errno code on failure
 */
static
int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
		      struct iser_data_buf *mem,
		      struct iser_reg_resources *rsc,
		      struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_page_vec *page_vec = rsc->page_vec;
	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(fmr_pool,
				   page_vec->pages,
				   page_vec->length,
				   page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	reg->sge.lkey = fmr->fmr->lkey;
	reg->rkey = fmr->fmr->rkey;
	reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	reg->sge.length = page_vec->data_size;
	reg->mem_h = fmr;

	iser_dbg("fmr reg: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
				      reg->mem_h);
	reg->mem_h = NULL;
}

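/*
 * iser_set_dif_domain - fill a T10-DIF signature domain (interval, ref tag,
 * escape and remap settings) from the scsi command's protection parameters.
 */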
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);

	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}

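/*
 * iser_inv_rkey - prepare a local-invalidate work request for @mr and
 * advance its rkey so the next registration uses a fresh key.
 */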
static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;
	inv_wr->send_flags = 0;
	inv_wr->num_sge = 0;

	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

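/*
 * iser_reg_sig_mr - build the signature attributes for the command and
 * queue an IB_WR_REG_SIG_MR work request binding the data (and optional
 * protection) registrations under the signature MR.
 */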
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_sig_attrs *sig_attrs = &tx_desc->sig_attrs;
	struct ib_sig_handover_wr *wr;
	int ret;

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

	if (!pi_ctx->sig_mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), pi_ctx->sig_mr);

	wr = sig_handover_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_SIG_MR;
	wr->wr.wr_id = ISER_FASTREG_LI_WRID;
	wr->wr.sg_list = &data_reg->sge;
	wr->wr.num_sge = 1;
	wr->wr.send_flags = 0;
	wr->sig_attrs = sig_attrs;
	wr->sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		wr->prot = &prot_reg->sge;
	else
		wr->prot = NULL;
	wr->access_flags = IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_READ |
			   IB_ACCESS_REMOTE_WRITE;
	pi_ctx->sig_mr_valid = 0;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}

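/*
 * iser_fast_reg_mr - map the SG list onto the descriptor's MR with
 * ib_map_mr_sg() and queue an IB_WR_REG_MR work request on the task's
 * tx descriptor, invalidating the previous mapping first if needed.
 */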
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_mr *mr = rsc->mr;
	struct ib_reg_wr *wr;
	int n;

	if (!rsc->mr_valid)
		iser_inv_rkey(iser_tx_next_wr(tx_desc), mr);

	n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
	if (unlikely(n != mem->size)) {
		iser_err("failed to map sg (%d/%d)\n",
			 n, mem->size);
		return n < 0 ? n : -EINVAL;
	}

	wr = reg_wr(iser_tx_next_wr(tx_desc));
	wr->wr.opcode = IB_WR_REG_MR;
	wr->wr.wr_id = ISER_FASTREG_LI_WRID;
	wr->wr.send_flags = 0;
	wr->wr.num_sge = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE  |
		     IB_ACCESS_REMOTE_WRITE |
		     IB_ACCESS_REMOTE_READ;

	rsc->mr_valid = 0;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = mr->iova;
	reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);

	return 0;
}

static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
}

static int
iser_reg_data_sg(struct iscsi_iser_task *task,
		 struct iser_data_buf *mem,
		 struct iser_fr_desc *desc,
		 bool use_dma_key,
		 struct iser_mem_reg *reg)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
}

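/*
 * iser_reg_rdma_mem - top-level registration entry per task and direction.
 * A single DMA entry with no protection (and iser_always_reg unset) uses
 * the local DMA lkey; otherwise a descriptor is taken from the pool and
 * the data (and, for protected commands, protection and signature) SGs
 * are registered through the device's reg_ops.
 */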
int iser_reg_rdma_mem(struct iscsi_iser_task *task,
		      enum iser_data_dir dir)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_data_buf *mem = &task->data[dir];
	struct iser_mem_reg *reg = &task->rdma_reg[dir];
	struct iser_mem_reg *data_reg;
	struct iser_fr_desc *desc = NULL;
	bool use_dma_key;
	int err;

	use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
		       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);

	if (!use_dma_key) {
		desc = device->reg_ops->reg_desc_get(ib_conn);
		reg->mem_h = desc;
	}

	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL)
		data_reg = reg;
	else
		data_reg = &task->desc.data_reg;

	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
	if (unlikely(err))
		goto err_reg;

	if (scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg *prot_reg = &task->desc.prot_reg;

		if (scsi_prot_sg_count(task->sc)) {
			mem = &task->prot[dir];
			err = iser_reg_prot_sg(task, mem, desc,
					       use_dma_key, prot_reg);
			if (unlikely(err))
				goto err_reg;
		}

		err = iser_reg_sig_mr(task, desc->pi_ctx, data_reg,
				      prot_reg, reg);
		if (unlikely(err))
			goto err_reg;

		desc->pi_ctx->sig_protected = 1;
	}

	return 0;

err_reg:
	if (desc)
		device->reg_ops->reg_desc_put(ib_conn, desc);

	return err;
}

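/*
 * iser_unreg_rdma_mem - undo iser_reg_rdma_mem() via the device's
 * registration ops.
 */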
void iser_unreg_rdma_mem(struct iscsi_iser_task *task,
			 enum iser_data_dir dir)
{
	struct iser_device *device = task->iser_conn->ib_conn.device;

	device->reg_ops->unreg_mem(task, dir);
}