2017-02-15 06:28:23 -08:00
/*
* QLogic FCoE Offload Driver
2017-05-31 06:33:49 -07:00
* Copyright ( c ) 2016 - 2017 Cavium Inc .
2017-02-15 06:28:23 -08:00
*
* This software is available under the terms of the GNU General Public License
* ( GPL ) Version 2 , available from the file COPYING in the main directory of
* this source tree .
*/
# include <linux/spinlock.h>
# include <linux/vmalloc.h>
# include "qedf.h"
# include <scsi/scsi_tcq.h>
void qedf_cmd_timer_set ( struct qedf_ctx * qedf , struct qedf_ioreq * io_req ,
unsigned int timer_msec )
{
queue_delayed_work ( qedf - > timer_work_queue , & io_req - > timeout_work ,
msecs_to_jiffies ( timer_msec ) ) ;
}
static void qedf_cmd_timeout ( struct work_struct * work )
{
struct qedf_ioreq * io_req =
container_of ( work , struct qedf_ioreq , timeout_work . work ) ;
struct qedf_ctx * qedf = io_req - > fcport - > qedf ;
struct qedf_rport * fcport = io_req - > fcport ;
u8 op = 0 ;
switch ( io_req - > cmd_type ) {
case QEDF_ABTS :
QEDF_ERR ( ( & qedf - > dbg_ctx ) , " ABTS timeout, xid=0x%x. \n " ,
io_req - > xid ) ;
/* Cleanup timed out ABTS */
qedf_initiate_cleanup ( io_req , true ) ;
complete ( & io_req - > abts_done ) ;
/*
* Need to call kref_put for reference taken when initiate_abts
* was called since abts_compl won ' t be called now that we ' ve
* cleaned up the task .
*/
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
/*
* Now that the original I / O and the ABTS are complete see
* if we need to reconnect to the target .
*/
qedf_restart_rport ( fcport ) ;
break ;
case QEDF_ELS :
kref_get ( & io_req - > refcount ) ;
/*
* Don ' t attempt to clean an ELS timeout as any subseqeunt
* ABTS or cleanup requests just hang . For now just free
* the resources of the original I / O and the RRQ
*/
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " ELS timeout, xid=0x%x. \n " ,
io_req - > xid ) ;
io_req - > event = QEDF_IOREQ_EV_ELS_TMO ;
/* Call callback function to complete command */
if ( io_req - > cb_func & & io_req - > cb_arg ) {
op = io_req - > cb_arg - > op ;
io_req - > cb_func ( io_req - > cb_arg ) ;
io_req - > cb_arg = NULL ;
}
qedf_initiate_cleanup ( io_req , true ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
break ;
case QEDF_SEQ_CLEANUP :
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Sequence cleanup timeout, "
" xid=0x%x. \n " , io_req - > xid ) ;
qedf_initiate_cleanup ( io_req , true ) ;
io_req - > event = QEDF_IOREQ_EV_ELS_TMO ;
qedf_process_seq_cleanup_compl ( qedf , NULL , io_req ) ;
break ;
default :
break ;
}
}
void qedf_cmd_mgr_free ( struct qedf_cmd_mgr * cmgr )
{
struct io_bdt * bdt_info ;
struct qedf_ctx * qedf = cmgr - > qedf ;
size_t bd_tbl_sz ;
u16 min_xid = QEDF_MIN_XID ;
u16 max_xid = ( FCOE_PARAMS_NUM_TASKS - 1 ) ;
int num_ios ;
int i ;
struct qedf_ioreq * io_req ;
num_ios = max_xid - min_xid + 1 ;
/* Free fcoe_bdt_ctx structures */
if ( ! cmgr - > io_bdt_pool )
goto free_cmd_pool ;
2017-03-11 18:39:18 +02:00
bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof ( struct scsi_sge ) ;
2017-02-15 06:28:23 -08:00
for ( i = 0 ; i < num_ios ; i + + ) {
bdt_info = cmgr - > io_bdt_pool [ i ] ;
if ( bdt_info - > bd_tbl ) {
dma_free_coherent ( & qedf - > pdev - > dev , bd_tbl_sz ,
bdt_info - > bd_tbl , bdt_info - > bd_tbl_dma ) ;
bdt_info - > bd_tbl = NULL ;
}
}
/* Destroy io_bdt pool */
for ( i = 0 ; i < num_ios ; i + + ) {
kfree ( cmgr - > io_bdt_pool [ i ] ) ;
cmgr - > io_bdt_pool [ i ] = NULL ;
}
kfree ( cmgr - > io_bdt_pool ) ;
cmgr - > io_bdt_pool = NULL ;
free_cmd_pool :
for ( i = 0 ; i < num_ios ; i + + ) {
io_req = & cmgr - > cmds [ i ] ;
2017-03-11 18:39:18 +02:00
kfree ( io_req - > sgl_task_params ) ;
kfree ( io_req - > task_params ) ;
2017-02-15 06:28:23 -08:00
/* Make sure we free per command sense buffer */
if ( io_req - > sense_buffer )
dma_free_coherent ( & qedf - > pdev - > dev ,
QEDF_SCSI_SENSE_BUFFERSIZE , io_req - > sense_buffer ,
io_req - > sense_buffer_dma ) ;
cancel_delayed_work_sync ( & io_req - > rrq_work ) ;
}
/* Free command manager itself */
vfree ( cmgr ) ;
}
static void qedf_handle_rrq ( struct work_struct * work )
{
struct qedf_ioreq * io_req =
container_of ( work , struct qedf_ioreq , rrq_work . work ) ;
qedf_send_rrq ( io_req ) ;
}
struct qedf_cmd_mgr * qedf_cmd_mgr_alloc ( struct qedf_ctx * qedf )
{
struct qedf_cmd_mgr * cmgr ;
struct io_bdt * bdt_info ;
struct qedf_ioreq * io_req ;
u16 xid ;
int i ;
int num_ios ;
u16 min_xid = QEDF_MIN_XID ;
u16 max_xid = ( FCOE_PARAMS_NUM_TASKS - 1 ) ;
/* Make sure num_queues is already set before calling this function */
if ( ! qedf - > num_queues ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " num_queues is not set. \n " ) ;
return NULL ;
}
if ( max_xid < = min_xid | | max_xid = = FC_XID_UNKNOWN ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " Invalid min_xid 0x%x and "
" max_xid 0x%x. \n " , min_xid , max_xid ) ;
return NULL ;
}
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_DISC , " min xid 0x%x, max xid "
" 0x%x. \n " , min_xid , max_xid ) ;
num_ios = max_xid - min_xid + 1 ;
cmgr = vzalloc ( sizeof ( struct qedf_cmd_mgr ) ) ;
if ( ! cmgr ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " Failed to alloc cmd mgr. \n " ) ;
return NULL ;
}
cmgr - > qedf = qedf ;
spin_lock_init ( & cmgr - > lock ) ;
/*
2017-03-11 18:39:18 +02:00
* Initialize I / O request fields .
2017-02-15 06:28:23 -08:00
*/
xid = QEDF_MIN_XID ;
for ( i = 0 ; i < num_ios ; i + + ) {
io_req = & cmgr - > cmds [ i ] ;
INIT_DELAYED_WORK ( & io_req - > timeout_work , qedf_cmd_timeout ) ;
io_req - > xid = xid + + ;
INIT_DELAYED_WORK ( & io_req - > rrq_work , qedf_handle_rrq ) ;
/* Allocate DMA memory to hold sense buffer */
io_req - > sense_buffer = dma_alloc_coherent ( & qedf - > pdev - > dev ,
QEDF_SCSI_SENSE_BUFFERSIZE , & io_req - > sense_buffer_dma ,
GFP_KERNEL ) ;
if ( ! io_req - > sense_buffer )
goto mem_err ;
2017-03-11 18:39:18 +02:00
/* Allocate task parameters to pass to f/w init funcions */
io_req - > task_params = kzalloc ( sizeof ( * io_req - > task_params ) ,
GFP_KERNEL ) ;
if ( ! io_req - > task_params ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) ,
" Failed to allocate task_params for xid=0x%x \n " ,
i ) ;
goto mem_err ;
}
/*
* Allocate scatter / gather list info to pass to f / w init
* functions .
*/
io_req - > sgl_task_params = kzalloc (
sizeof ( struct scsi_sgl_task_params ) , GFP_KERNEL ) ;
if ( ! io_req - > sgl_task_params ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) ,
" Failed to allocate sgl_task_params for xid=0x%x \n " ,
i ) ;
goto mem_err ;
}
2017-02-15 06:28:23 -08:00
}
/* Allocate pool of io_bdts - one for each qedf_ioreq */
cmgr - > io_bdt_pool = kmalloc_array ( num_ios , sizeof ( struct io_bdt * ) ,
GFP_KERNEL ) ;
if ( ! cmgr - > io_bdt_pool ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " Failed to alloc io_bdt_pool. \n " ) ;
goto mem_err ;
}
for ( i = 0 ; i < num_ios ; i + + ) {
cmgr - > io_bdt_pool [ i ] = kmalloc ( sizeof ( struct io_bdt ) ,
GFP_KERNEL ) ;
if ( ! cmgr - > io_bdt_pool [ i ] ) {
2017-03-11 18:39:18 +02:00
QEDF_WARN ( & ( qedf - > dbg_ctx ) ,
" Failed to alloc io_bdt_pool[%d]. \n " , i ) ;
2017-02-15 06:28:23 -08:00
goto mem_err ;
}
}
for ( i = 0 ; i < num_ios ; i + + ) {
bdt_info = cmgr - > io_bdt_pool [ i ] ;
bdt_info - > bd_tbl = dma_alloc_coherent ( & qedf - > pdev - > dev ,
2017-03-11 18:39:18 +02:00
QEDF_MAX_BDS_PER_CMD * sizeof ( struct scsi_sge ) ,
2017-02-15 06:28:23 -08:00
& bdt_info - > bd_tbl_dma , GFP_KERNEL ) ;
if ( ! bdt_info - > bd_tbl ) {
2017-03-11 18:39:18 +02:00
QEDF_WARN ( & ( qedf - > dbg_ctx ) ,
" Failed to alloc bdt_tbl[%d]. \n " , i ) ;
2017-02-15 06:28:23 -08:00
goto mem_err ;
}
}
atomic_set ( & cmgr - > free_list_cnt , num_ios ) ;
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" cmgr->free_list_cnt=%d. \n " ,
atomic_read ( & cmgr - > free_list_cnt ) ) ;
return cmgr ;
mem_err :
qedf_cmd_mgr_free ( cmgr ) ;
return NULL ;
}
struct qedf_ioreq * qedf_alloc_cmd ( struct qedf_rport * fcport , u8 cmd_type )
{
struct qedf_ctx * qedf = fcport - > qedf ;
struct qedf_cmd_mgr * cmd_mgr = qedf - > cmd_mgr ;
struct qedf_ioreq * io_req = NULL ;
struct io_bdt * bd_tbl ;
u16 xid ;
uint32_t free_sqes ;
int i ;
unsigned long flags ;
free_sqes = atomic_read ( & fcport - > free_sqes ) ;
if ( ! free_sqes ) {
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" Returning NULL, free_sqes=%d. \n " ,
free_sqes ) ;
goto out_failed ;
}
/* Limit the number of outstanding R/W tasks */
if ( ( atomic_read ( & fcport - > num_active_ios ) > =
NUM_RW_TASKS_PER_CONNECTION ) ) {
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" Returning NULL, num_active_ios=%d. \n " ,
atomic_read ( & fcport - > num_active_ios ) ) ;
goto out_failed ;
}
/* Limit global TIDs certain tasks */
if ( atomic_read ( & cmd_mgr - > free_list_cnt ) < = GBL_RSVD_TASKS ) {
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" Returning NULL, free_list_cnt=%d. \n " ,
atomic_read ( & cmd_mgr - > free_list_cnt ) ) ;
goto out_failed ;
}
spin_lock_irqsave ( & cmd_mgr - > lock , flags ) ;
for ( i = 0 ; i < FCOE_PARAMS_NUM_TASKS ; i + + ) {
io_req = & cmd_mgr - > cmds [ cmd_mgr - > idx ] ;
cmd_mgr - > idx + + ;
if ( cmd_mgr - > idx = = FCOE_PARAMS_NUM_TASKS )
cmd_mgr - > idx = 0 ;
/* Check to make sure command was previously freed */
if ( ! test_bit ( QEDF_CMD_OUTSTANDING , & io_req - > flags ) )
break ;
}
if ( i = = FCOE_PARAMS_NUM_TASKS ) {
spin_unlock_irqrestore ( & cmd_mgr - > lock , flags ) ;
goto out_failed ;
}
set_bit ( QEDF_CMD_OUTSTANDING , & io_req - > flags ) ;
spin_unlock_irqrestore ( & cmd_mgr - > lock , flags ) ;
atomic_inc ( & fcport - > num_active_ios ) ;
atomic_dec ( & fcport - > free_sqes ) ;
xid = io_req - > xid ;
atomic_dec ( & cmd_mgr - > free_list_cnt ) ;
io_req - > cmd_mgr = cmd_mgr ;
io_req - > fcport = fcport ;
/* Hold the io_req against deletion */
kref_init ( & io_req - > refcount ) ;
/* Bind io_bdt for this io_req */
/* Have a static link between io_req and io_bdt_pool */
bd_tbl = io_req - > bd_tbl = cmd_mgr - > io_bdt_pool [ xid ] ;
if ( bd_tbl = = NULL ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " bd_tbl is NULL, xid=%x. \n " , xid ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
goto out_failed ;
}
bd_tbl - > io_req = io_req ;
io_req - > cmd_type = cmd_type ;
2017-03-11 18:39:18 +02:00
io_req - > tm_flags = 0 ;
2017-02-15 06:28:23 -08:00
/* Reset sequence offset data */
io_req - > rx_buf_off = 0 ;
io_req - > tx_buf_off = 0 ;
io_req - > rx_id = 0xffff ; /* No OX_ID */
return io_req ;
out_failed :
/* Record failure for stats and return NULL to caller */
qedf - > alloc_failures + + ;
return NULL ;
}
static void qedf_free_mp_resc ( struct qedf_ioreq * io_req )
{
struct qedf_mp_req * mp_req = & ( io_req - > mp_req ) ;
struct qedf_ctx * qedf = io_req - > fcport - > qedf ;
2017-03-11 18:39:18 +02:00
uint64_t sz = sizeof ( struct scsi_sge ) ;
2017-02-15 06:28:23 -08:00
/* clear tm flags */
if ( mp_req - > mp_req_bd ) {
dma_free_coherent ( & qedf - > pdev - > dev , sz ,
mp_req - > mp_req_bd , mp_req - > mp_req_bd_dma ) ;
mp_req - > mp_req_bd = NULL ;
}
if ( mp_req - > mp_resp_bd ) {
dma_free_coherent ( & qedf - > pdev - > dev , sz ,
mp_req - > mp_resp_bd , mp_req - > mp_resp_bd_dma ) ;
mp_req - > mp_resp_bd = NULL ;
}
if ( mp_req - > req_buf ) {
dma_free_coherent ( & qedf - > pdev - > dev , QEDF_PAGE_SIZE ,
mp_req - > req_buf , mp_req - > req_buf_dma ) ;
mp_req - > req_buf = NULL ;
}
if ( mp_req - > resp_buf ) {
dma_free_coherent ( & qedf - > pdev - > dev , QEDF_PAGE_SIZE ,
mp_req - > resp_buf , mp_req - > resp_buf_dma ) ;
mp_req - > resp_buf = NULL ;
}
}
void qedf_release_cmd ( struct kref * ref )
{
struct qedf_ioreq * io_req =
container_of ( ref , struct qedf_ioreq , refcount ) ;
struct qedf_cmd_mgr * cmd_mgr = io_req - > cmd_mgr ;
struct qedf_rport * fcport = io_req - > fcport ;
if ( io_req - > cmd_type = = QEDF_ELS | |
io_req - > cmd_type = = QEDF_TASK_MGMT_CMD )
qedf_free_mp_resc ( io_req ) ;
atomic_inc ( & cmd_mgr - > free_list_cnt ) ;
atomic_dec ( & fcport - > num_active_ios ) ;
if ( atomic_read ( & fcport - > num_active_ios ) < 0 )
QEDF_WARN ( & ( fcport - > qedf - > dbg_ctx ) , " active_ios < 0. \n " ) ;
/* Increment task retry identifier now that the request is released */
io_req - > task_retry_identifier + + ;
clear_bit ( QEDF_CMD_OUTSTANDING , & io_req - > flags ) ;
}
static int qedf_split_bd ( struct qedf_ioreq * io_req , u64 addr , int sg_len ,
int bd_index )
{
2017-03-11 18:39:18 +02:00
struct scsi_sge * bd = io_req - > bd_tbl - > bd_tbl ;
2017-02-15 06:28:23 -08:00
int frag_size , sg_frags ;
sg_frags = 0 ;
while ( sg_len ) {
if ( sg_len > QEDF_BD_SPLIT_SZ )
frag_size = QEDF_BD_SPLIT_SZ ;
else
frag_size = sg_len ;
bd [ bd_index + sg_frags ] . sge_addr . lo = U64_LO ( addr ) ;
bd [ bd_index + sg_frags ] . sge_addr . hi = U64_HI ( addr ) ;
2017-03-11 18:39:18 +02:00
bd [ bd_index + sg_frags ] . sge_len = ( uint16_t ) frag_size ;
2017-02-15 06:28:23 -08:00
addr + = ( u64 ) frag_size ;
sg_frags + + ;
sg_len - = frag_size ;
}
return sg_frags ;
}
static int qedf_map_sg ( struct qedf_ioreq * io_req )
{
struct scsi_cmnd * sc = io_req - > sc_cmd ;
struct Scsi_Host * host = sc - > device - > host ;
struct fc_lport * lport = shost_priv ( host ) ;
struct qedf_ctx * qedf = lport_priv ( lport ) ;
2017-03-11 18:39:18 +02:00
struct scsi_sge * bd = io_req - > bd_tbl - > bd_tbl ;
2017-02-15 06:28:23 -08:00
struct scatterlist * sg ;
int byte_count = 0 ;
int sg_count = 0 ;
int bd_count = 0 ;
int sg_frags ;
unsigned int sg_len ;
u64 addr , end_addr ;
int i ;
sg_count = dma_map_sg ( & qedf - > pdev - > dev , scsi_sglist ( sc ) ,
scsi_sg_count ( sc ) , sc - > sc_data_direction ) ;
sg = scsi_sglist ( sc ) ;
/*
* New condition to send single SGE as cached - SGL with length less
* than 64 k .
*/
if ( ( sg_count = = 1 ) & & ( sg_dma_len ( sg ) < =
QEDF_MAX_SGLEN_FOR_CACHESGL ) ) {
sg_len = sg_dma_len ( sg ) ;
addr = ( u64 ) sg_dma_address ( sg ) ;
bd [ bd_count ] . sge_addr . lo = ( addr & 0xffffffff ) ;
bd [ bd_count ] . sge_addr . hi = ( addr > > 32 ) ;
2017-03-11 18:39:18 +02:00
bd [ bd_count ] . sge_len = ( u16 ) sg_len ;
2017-02-15 06:28:23 -08:00
return + + bd_count ;
}
scsi_for_each_sg ( sc , sg , sg_count , i ) {
sg_len = sg_dma_len ( sg ) ;
addr = ( u64 ) sg_dma_address ( sg ) ;
end_addr = ( u64 ) ( addr + sg_len ) ;
/*
* First s / g element in the list so check if the end_addr
* is paged aligned . Also check to make sure the length is
* at least page size .
*/
if ( ( i = = 0 ) & & ( sg_count > 1 ) & &
( ( end_addr % QEDF_PAGE_SIZE ) | |
sg_len < QEDF_PAGE_SIZE ) )
io_req - > use_slowpath = true ;
/*
* Last s / g element so check if the start address is paged
* aligned .
*/
else if ( ( i = = ( sg_count - 1 ) ) & & ( sg_count > 1 ) & &
( addr % QEDF_PAGE_SIZE ) )
io_req - > use_slowpath = true ;
/*
* Intermediate s / g element so check if start and end address
* is page aligned .
*/
else if ( ( i ! = 0 ) & & ( i ! = ( sg_count - 1 ) ) & &
( ( addr % QEDF_PAGE_SIZE ) | | ( end_addr % QEDF_PAGE_SIZE ) ) )
io_req - > use_slowpath = true ;
if ( sg_len > QEDF_MAX_BD_LEN ) {
sg_frags = qedf_split_bd ( io_req , addr , sg_len ,
bd_count ) ;
} else {
sg_frags = 1 ;
bd [ bd_count ] . sge_addr . lo = U64_LO ( addr ) ;
bd [ bd_count ] . sge_addr . hi = U64_HI ( addr ) ;
2017-03-11 18:39:18 +02:00
bd [ bd_count ] . sge_len = ( uint16_t ) sg_len ;
2017-02-15 06:28:23 -08:00
}
bd_count + = sg_frags ;
byte_count + = sg_len ;
}
if ( byte_count ! = scsi_bufflen ( sc ) )
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " byte_count = %d != "
" scsi_bufflen = %d, task_id = 0x%x. \n " , byte_count ,
scsi_bufflen ( sc ) , io_req - > xid ) ;
return bd_count ;
}
static int qedf_build_bd_list_from_sg ( struct qedf_ioreq * io_req )
{
struct scsi_cmnd * sc = io_req - > sc_cmd ;
2017-03-11 18:39:18 +02:00
struct scsi_sge * bd = io_req - > bd_tbl - > bd_tbl ;
2017-02-15 06:28:23 -08:00
int bd_count ;
if ( scsi_sg_count ( sc ) ) {
bd_count = qedf_map_sg ( io_req ) ;
if ( bd_count = = 0 )
return - ENOMEM ;
} else {
bd_count = 0 ;
bd [ 0 ] . sge_addr . lo = bd [ 0 ] . sge_addr . hi = 0 ;
2017-03-11 18:39:18 +02:00
bd [ 0 ] . sge_len = 0 ;
2017-02-15 06:28:23 -08:00
}
io_req - > bd_tbl - > bd_valid = bd_count ;
return 0 ;
}
static void qedf_build_fcp_cmnd ( struct qedf_ioreq * io_req ,
struct fcp_cmnd * fcp_cmnd )
{
struct scsi_cmnd * sc_cmd = io_req - > sc_cmd ;
/* fcp_cmnd is 32 bytes */
memset ( fcp_cmnd , 0 , FCP_CMND_LEN ) ;
/* 8 bytes: SCSI LUN info */
int_to_scsilun ( sc_cmd - > device - > lun ,
( struct scsi_lun * ) & fcp_cmnd - > fc_lun ) ;
/* 4 bytes: flag info */
fcp_cmnd - > fc_pri_ta = 0 ;
2017-03-11 18:39:18 +02:00
fcp_cmnd - > fc_tm_flags = io_req - > tm_flags ;
2017-02-15 06:28:23 -08:00
fcp_cmnd - > fc_flags = io_req - > io_req_flags ;
fcp_cmnd - > fc_cmdref = 0 ;
/* Populate data direction */
2017-03-11 18:39:18 +02:00
if ( io_req - > cmd_type = = QEDF_TASK_MGMT_CMD ) {
2017-02-15 06:28:23 -08:00
fcp_cmnd - > fc_flags | = FCP_CFL_RDDATA ;
2017-03-11 18:39:18 +02:00
} else {
if ( sc_cmd - > sc_data_direction = = DMA_TO_DEVICE )
fcp_cmnd - > fc_flags | = FCP_CFL_WRDATA ;
else if ( sc_cmd - > sc_data_direction = = DMA_FROM_DEVICE )
fcp_cmnd - > fc_flags | = FCP_CFL_RDDATA ;
}
2017-02-15 06:28:23 -08:00
fcp_cmnd - > fc_pri_ta = FCP_PTA_SIMPLE ;
/* 16 bytes: CDB information */
2017-03-11 18:39:18 +02:00
if ( io_req - > cmd_type ! = QEDF_TASK_MGMT_CMD )
memcpy ( fcp_cmnd - > fc_cdb , sc_cmd - > cmnd , sc_cmd - > cmd_len ) ;
2017-02-15 06:28:23 -08:00
/* 4 bytes: FCP data length */
fcp_cmnd - > fc_dl = htonl ( io_req - > data_xfer_len ) ;
}
static void qedf_init_task ( struct qedf_rport * fcport , struct fc_lport * lport ,
2017-03-11 18:39:18 +02:00
struct qedf_ioreq * io_req , struct fcoe_task_context * task_ctx ,
struct fcoe_wqe * sqe )
2017-02-15 06:28:23 -08:00
{
enum fcoe_task_type task_type ;
struct scsi_cmnd * sc_cmd = io_req - > sc_cmd ;
struct io_bdt * bd_tbl = io_req - > bd_tbl ;
2017-03-11 18:39:18 +02:00
u8 fcp_cmnd [ 32 ] ;
2017-02-15 06:28:23 -08:00
u32 tmp_fcp_cmnd [ 8 ] ;
2017-03-11 18:39:18 +02:00
int bd_count = 0 ;
2017-02-15 06:28:23 -08:00
struct qedf_ctx * qedf = fcport - > qedf ;
uint16_t cq_idx = smp_processor_id ( ) % qedf - > num_queues ;
2017-03-11 18:39:18 +02:00
struct regpair sense_data_buffer_phys_addr ;
u32 tx_io_size = 0 ;
u32 rx_io_size = 0 ;
int i , cnt ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
/* Note init_initiator_rw_fcoe_task memsets the task context */
2017-02-15 06:28:23 -08:00
io_req - > task = task_ctx ;
2017-03-11 18:39:18 +02:00
memset ( task_ctx , 0 , sizeof ( struct fcoe_task_context ) ) ;
memset ( io_req - > task_params , 0 , sizeof ( struct fcoe_task_params ) ) ;
memset ( io_req - > sgl_task_params , 0 , sizeof ( struct scsi_sgl_task_params ) ) ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
/* Set task type bassed on DMA directio of command */
if ( io_req - > cmd_type = = QEDF_TASK_MGMT_CMD ) {
2017-02-15 06:28:23 -08:00
task_type = FCOE_TASK_TYPE_READ_INITIATOR ;
} else {
2017-03-11 18:39:18 +02:00
if ( sc_cmd - > sc_data_direction = = DMA_TO_DEVICE ) {
task_type = FCOE_TASK_TYPE_WRITE_INITIATOR ;
tx_io_size = io_req - > data_xfer_len ;
2017-02-15 06:28:23 -08:00
} else {
2017-03-11 18:39:18 +02:00
task_type = FCOE_TASK_TYPE_READ_INITIATOR ;
rx_io_size = io_req - > data_xfer_len ;
2017-02-15 06:28:23 -08:00
}
}
2017-03-11 18:39:18 +02:00
/* Setup the fields for fcoe_task_params */
io_req - > task_params - > context = task_ctx ;
io_req - > task_params - > sqe = sqe ;
io_req - > task_params - > task_type = task_type ;
io_req - > task_params - > tx_io_size = tx_io_size ;
io_req - > task_params - > rx_io_size = rx_io_size ;
io_req - > task_params - > conn_cid = fcport - > fw_cid ;
io_req - > task_params - > itid = io_req - > xid ;
io_req - > task_params - > cq_rss_number = cq_idx ;
io_req - > task_params - > is_tape_device = fcport - > dev_type ;
/* Fill in information for scatter/gather list */
if ( io_req - > cmd_type ! = QEDF_TASK_MGMT_CMD ) {
bd_count = bd_tbl - > bd_valid ;
io_req - > sgl_task_params - > sgl = bd_tbl - > bd_tbl ;
io_req - > sgl_task_params - > sgl_phys_addr . lo =
U64_LO ( bd_tbl - > bd_tbl_dma ) ;
io_req - > sgl_task_params - > sgl_phys_addr . hi =
U64_HI ( bd_tbl - > bd_tbl_dma ) ;
io_req - > sgl_task_params - > num_sges = bd_count ;
io_req - > sgl_task_params - > total_buffer_size =
scsi_bufflen ( io_req - > sc_cmd ) ;
io_req - > sgl_task_params - > small_mid_sge =
io_req - > use_slowpath ;
}
/* Fill in physical address of sense buffer */
sense_data_buffer_phys_addr . lo = U64_LO ( io_req - > sense_buffer_dma ) ;
sense_data_buffer_phys_addr . hi = U64_HI ( io_req - > sense_buffer_dma ) ;
2017-02-15 06:28:23 -08:00
/* fill FCP_CMND IU */
2017-03-11 18:39:18 +02:00
qedf_build_fcp_cmnd ( io_req , ( struct fcp_cmnd * ) tmp_fcp_cmnd ) ;
2017-02-15 06:28:23 -08:00
/* Swap fcp_cmnd since FC is big endian */
cnt = sizeof ( struct fcp_cmnd ) / sizeof ( u32 ) ;
for ( i = 0 ; i < cnt ; i + + ) {
2017-03-11 18:39:18 +02:00
tmp_fcp_cmnd [ i ] = cpu_to_be32 ( tmp_fcp_cmnd [ i ] ) ;
}
memcpy ( fcp_cmnd , tmp_fcp_cmnd , sizeof ( struct fcp_cmnd ) ) ;
init_initiator_rw_fcoe_task ( io_req - > task_params ,
io_req - > sgl_task_params ,
sense_data_buffer_phys_addr ,
io_req - > task_retry_identifier , fcp_cmnd ) ;
/* Increment SGL type counters */
if ( bd_count = = 1 ) {
qedf - > single_sge_ios + + ;
io_req - > sge_type = QEDF_IOREQ_SINGLE_SGE ;
} else if ( io_req - > use_slowpath ) {
qedf - > slow_sge_ios + + ;
io_req - > sge_type = QEDF_IOREQ_SLOW_SGE ;
} else {
qedf - > fast_sge_ios + + ;
io_req - > sge_type = QEDF_IOREQ_FAST_SGE ;
2017-02-15 06:28:23 -08:00
}
}
void qedf_init_mp_task ( struct qedf_ioreq * io_req ,
2017-03-11 18:39:18 +02:00
struct fcoe_task_context * task_ctx , struct fcoe_wqe * sqe )
2017-02-15 06:28:23 -08:00
{
struct qedf_mp_req * mp_req = & ( io_req - > mp_req ) ;
struct qedf_rport * fcport = io_req - > fcport ;
struct qedf_ctx * qedf = io_req - > fcport - > qedf ;
struct fc_frame_header * fc_hdr ;
2017-03-11 18:39:18 +02:00
struct fcoe_tx_mid_path_params task_fc_hdr ;
struct scsi_sgl_task_params tx_sgl_task_params ;
struct scsi_sgl_task_params rx_sgl_task_params ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_DISC ,
" Initializing MP task for cmd_type=%d \n " ,
io_req - > cmd_type ) ;
2017-02-15 06:28:23 -08:00
qedf - > control_requests + + ;
2017-03-11 18:39:18 +02:00
memset ( & tx_sgl_task_params , 0 , sizeof ( struct scsi_sgl_task_params ) ) ;
memset ( & rx_sgl_task_params , 0 , sizeof ( struct scsi_sgl_task_params ) ) ;
2017-02-15 06:28:23 -08:00
memset ( task_ctx , 0 , sizeof ( struct fcoe_task_context ) ) ;
2017-03-11 18:39:18 +02:00
memset ( & task_fc_hdr , 0 , sizeof ( struct fcoe_tx_mid_path_params ) ) ;
2017-02-15 06:28:23 -08:00
/* Setup the task from io_req for easy reference */
io_req - > task = task_ctx ;
2017-03-11 18:39:18 +02:00
/* Setup the fields for fcoe_task_params */
io_req - > task_params - > context = task_ctx ;
io_req - > task_params - > sqe = sqe ;
io_req - > task_params - > task_type = FCOE_TASK_TYPE_MIDPATH ;
io_req - > task_params - > tx_io_size = io_req - > data_xfer_len ;
/* rx_io_size tells the f/w how large a response buffer we have */
io_req - > task_params - > rx_io_size = PAGE_SIZE ;
io_req - > task_params - > conn_cid = fcport - > fw_cid ;
io_req - > task_params - > itid = io_req - > xid ;
/* Return middle path commands on CQ 0 */
io_req - > task_params - > cq_rss_number = 0 ;
io_req - > task_params - > is_tape_device = fcport - > dev_type ;
fc_hdr = & ( mp_req - > req_fc_hdr ) ;
/* Set OX_ID and RX_ID based on driver task id */
fc_hdr - > fh_ox_id = io_req - > xid ;
fc_hdr - > fh_rx_id = htons ( 0xffff ) ;
/* Set up FC header information */
task_fc_hdr . parameter = fc_hdr - > fh_parm_offset ;
task_fc_hdr . r_ctl = fc_hdr - > fh_r_ctl ;
task_fc_hdr . type = fc_hdr - > fh_type ;
task_fc_hdr . cs_ctl = fc_hdr - > fh_cs_ctl ;
task_fc_hdr . df_ctl = fc_hdr - > fh_df_ctl ;
task_fc_hdr . rx_id = fc_hdr - > fh_rx_id ;
task_fc_hdr . ox_id = fc_hdr - > fh_ox_id ;
/* Set up s/g list parameters for request buffer */
tx_sgl_task_params . sgl = mp_req - > mp_req_bd ;
tx_sgl_task_params . sgl_phys_addr . lo = U64_LO ( mp_req - > mp_req_bd_dma ) ;
tx_sgl_task_params . sgl_phys_addr . hi = U64_HI ( mp_req - > mp_req_bd_dma ) ;
tx_sgl_task_params . num_sges = 1 ;
/* Set PAGE_SIZE for now since sg element is that size ??? */
tx_sgl_task_params . total_buffer_size = io_req - > data_xfer_len ;
tx_sgl_task_params . small_mid_sge = 0 ;
/* Set up s/g list parameters for request buffer */
rx_sgl_task_params . sgl = mp_req - > mp_resp_bd ;
rx_sgl_task_params . sgl_phys_addr . lo = U64_LO ( mp_req - > mp_resp_bd_dma ) ;
rx_sgl_task_params . sgl_phys_addr . hi = U64_HI ( mp_req - > mp_resp_bd_dma ) ;
rx_sgl_task_params . num_sges = 1 ;
/* Set PAGE_SIZE for now since sg element is that size ??? */
rx_sgl_task_params . total_buffer_size = PAGE_SIZE ;
rx_sgl_task_params . small_mid_sge = 0 ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
/*
* Last arg is 0 as previous code did not set that we wanted the
* fc header information .
*/
init_initiator_midpath_unsolicited_fcoe_task ( io_req - > task_params ,
& task_fc_hdr ,
& tx_sgl_task_params ,
& rx_sgl_task_params , 0 ) ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
/* Midpath requests always consume 1 SGE */
qedf - > single_sge_ios + + ;
2017-02-15 06:28:23 -08:00
}
2017-03-11 18:39:18 +02:00
/* Presumed that fcport->rport_lock is held */
u16 qedf_get_sqe_idx ( struct qedf_rport * fcport )
2017-02-15 06:28:23 -08:00
{
uint16_t total_sqe = ( fcport - > sq_mem_size ) / ( sizeof ( struct fcoe_wqe ) ) ;
2017-03-11 18:39:18 +02:00
u16 rval ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
rval = fcport - > sq_prod_idx ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
/* Adjust ring index */
2017-02-15 06:28:23 -08:00
fcport - > sq_prod_idx + + ;
fcport - > fw_sq_prod_idx + + ;
if ( fcport - > sq_prod_idx = = total_sqe )
fcport - > sq_prod_idx = 0 ;
2017-03-11 18:39:18 +02:00
return rval ;
2017-02-15 06:28:23 -08:00
}
void qedf_ring_doorbell ( struct qedf_rport * fcport )
{
struct fcoe_db_data dbell = { 0 } ;
dbell . agg_flags = 0 ;
dbell . params | = DB_DEST_XCM < < FCOE_DB_DATA_DEST_SHIFT ;
dbell . params | = DB_AGG_CMD_SET < < FCOE_DB_DATA_AGG_CMD_SHIFT ;
dbell . params | = DQ_XCM_FCOE_SQ_PROD_CMD < <
FCOE_DB_DATA_AGG_VAL_SEL_SHIFT ;
dbell . sq_prod = fcport - > fw_sq_prod_idx ;
writel ( * ( u32 * ) & dbell , fcport - > p_doorbell ) ;
/* Make sure SQ index is updated so f/w prcesses requests in order */
wmb ( ) ;
mmiowb ( ) ;
}
static void qedf_trace_io ( struct qedf_rport * fcport , struct qedf_ioreq * io_req ,
int8_t direction )
{
struct qedf_ctx * qedf = fcport - > qedf ;
struct qedf_io_log * io_log ;
struct scsi_cmnd * sc_cmd = io_req - > sc_cmd ;
unsigned long flags ;
uint8_t op ;
spin_lock_irqsave ( & qedf - > io_trace_lock , flags ) ;
io_log = & qedf - > io_trace_buf [ qedf - > io_trace_idx ] ;
io_log - > direction = direction ;
io_log - > task_id = io_req - > xid ;
io_log - > port_id = fcport - > rdata - > ids . port_id ;
io_log - > lun = sc_cmd - > device - > lun ;
io_log - > op = op = sc_cmd - > cmnd [ 0 ] ;
io_log - > lba [ 0 ] = sc_cmd - > cmnd [ 2 ] ;
io_log - > lba [ 1 ] = sc_cmd - > cmnd [ 3 ] ;
io_log - > lba [ 2 ] = sc_cmd - > cmnd [ 4 ] ;
io_log - > lba [ 3 ] = sc_cmd - > cmnd [ 5 ] ;
io_log - > bufflen = scsi_bufflen ( sc_cmd ) ;
io_log - > sg_count = scsi_sg_count ( sc_cmd ) ;
io_log - > result = sc_cmd - > result ;
io_log - > jiffies = jiffies ;
2017-02-23 07:01:03 -08:00
io_log - > refcount = kref_read ( & io_req - > refcount ) ;
2017-02-15 06:28:23 -08:00
if ( direction = = QEDF_IO_TRACE_REQ ) {
/* For requests we only care abot the submission CPU */
io_log - > req_cpu = io_req - > cpu ;
io_log - > int_cpu = 0 ;
io_log - > rsp_cpu = 0 ;
} else if ( direction = = QEDF_IO_TRACE_RSP ) {
io_log - > req_cpu = io_req - > cpu ;
io_log - > int_cpu = io_req - > int_cpu ;
io_log - > rsp_cpu = smp_processor_id ( ) ;
}
io_log - > sge_type = io_req - > sge_type ;
qedf - > io_trace_idx + + ;
if ( qedf - > io_trace_idx = = QEDF_IO_TRACE_SIZE )
qedf - > io_trace_idx = 0 ;
spin_unlock_irqrestore ( & qedf - > io_trace_lock , flags ) ;
}
int qedf_post_io_req ( struct qedf_rport * fcport , struct qedf_ioreq * io_req )
{
struct scsi_cmnd * sc_cmd = io_req - > sc_cmd ;
struct Scsi_Host * host = sc_cmd - > device - > host ;
struct fc_lport * lport = shost_priv ( host ) ;
struct qedf_ctx * qedf = lport_priv ( lport ) ;
struct fcoe_task_context * task_ctx ;
u16 xid ;
enum fcoe_task_type req_type = 0 ;
2017-03-11 18:39:18 +02:00
struct fcoe_wqe * sqe ;
u16 sqe_idx ;
2017-02-15 06:28:23 -08:00
/* Initialize rest of io_req fileds */
io_req - > data_xfer_len = scsi_bufflen ( sc_cmd ) ;
sc_cmd - > SCp . ptr = ( char * ) io_req ;
io_req - > use_slowpath = false ; /* Assume fast SGL by default */
/* Record which cpu this request is associated with */
io_req - > cpu = smp_processor_id ( ) ;
if ( sc_cmd - > sc_data_direction = = DMA_FROM_DEVICE ) {
req_type = FCOE_TASK_TYPE_READ_INITIATOR ;
io_req - > io_req_flags = QEDF_READ ;
qedf - > input_requests + + ;
} else if ( sc_cmd - > sc_data_direction = = DMA_TO_DEVICE ) {
req_type = FCOE_TASK_TYPE_WRITE_INITIATOR ;
io_req - > io_req_flags = QEDF_WRITE ;
qedf - > output_requests + + ;
} else {
io_req - > io_req_flags = 0 ;
qedf - > control_requests + + ;
}
xid = io_req - > xid ;
/* Build buffer descriptor list for firmware from sg list */
if ( qedf_build_bd_list_from_sg ( io_req ) ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " BD list creation failed. \n " ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
return - EAGAIN ;
}
2017-03-11 18:39:18 +02:00
if ( ! test_bit ( QEDF_RPORT_SESSION_READY , & fcport - > flags ) ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Session not offloaded yet. \n " ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
}
/* Obtain free SQE */
sqe_idx = qedf_get_sqe_idx ( fcport ) ;
sqe = & fcport - > sq [ sqe_idx ] ;
memset ( sqe , 0 , sizeof ( struct fcoe_wqe ) ) ;
2017-02-15 06:28:23 -08:00
/* Get the task context */
task_ctx = qedf_get_task_mem ( & qedf - > tasks , xid ) ;
if ( ! task_ctx ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " task_ctx is NULL, xid=%d. \n " ,
xid ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
return - EINVAL ;
}
2017-03-11 18:39:18 +02:00
qedf_init_task ( fcport , lport , io_req , task_ctx , sqe ) ;
2017-02-15 06:28:23 -08:00
/* Ring doorbell */
qedf_ring_doorbell ( fcport ) ;
if ( qedf_io_tracing & & io_req - > sc_cmd )
qedf_trace_io ( fcport , io_req , QEDF_IO_TRACE_REQ ) ;
return false ;
}
int
qedf_queuecommand ( struct Scsi_Host * host , struct scsi_cmnd * sc_cmd )
{
struct fc_lport * lport = shost_priv ( host ) ;
struct qedf_ctx * qedf = lport_priv ( lport ) ;
struct fc_rport * rport = starget_to_rport ( scsi_target ( sc_cmd - > device ) ) ;
struct fc_rport_libfc_priv * rp = rport - > dd_data ;
struct qedf_rport * fcport = rport - > dd_data ;
struct qedf_ioreq * io_req ;
int rc = 0 ;
int rval ;
unsigned long flags = 0 ;
if ( test_bit ( QEDF_UNLOADING , & qedf - > flags ) | |
test_bit ( QEDF_DBG_STOP_IO , & qedf - > flags ) ) {
sc_cmd - > result = DID_NO_CONNECT < < 16 ;
sc_cmd - > scsi_done ( sc_cmd ) ;
return 0 ;
}
rval = fc_remote_port_chkready ( rport ) ;
if ( rval ) {
sc_cmd - > result = rval ;
sc_cmd - > scsi_done ( sc_cmd ) ;
return 0 ;
}
/* Retry command if we are doing a qed drain operation */
if ( test_bit ( QEDF_DRAIN_ACTIVE , & qedf - > flags ) ) {
rc = SCSI_MLQUEUE_HOST_BUSY ;
goto exit_qcmd ;
}
if ( lport - > state ! = LPORT_ST_READY | |
atomic_read ( & qedf - > link_state ) ! = QEDF_LINK_UP ) {
rc = SCSI_MLQUEUE_HOST_BUSY ;
goto exit_qcmd ;
}
/* rport and tgt are allocated together, so tgt should be non-NULL */
fcport = ( struct qedf_rport * ) & rp [ 1 ] ;
if ( ! test_bit ( QEDF_RPORT_SESSION_READY , & fcport - > flags ) ) {
/*
* Session is not offloaded yet . Let SCSI - ml retry
* the command .
*/
rc = SCSI_MLQUEUE_TARGET_BUSY ;
goto exit_qcmd ;
}
if ( fcport - > retry_delay_timestamp ) {
if ( time_after ( jiffies , fcport - > retry_delay_timestamp ) ) {
fcport - > retry_delay_timestamp = 0 ;
} else {
/* If retry_delay timer is active, flow off the ML */
rc = SCSI_MLQUEUE_TARGET_BUSY ;
goto exit_qcmd ;
}
}
io_req = qedf_alloc_cmd ( fcport , QEDF_SCSI_CMD ) ;
if ( ! io_req ) {
rc = SCSI_MLQUEUE_HOST_BUSY ;
goto exit_qcmd ;
}
io_req - > sc_cmd = sc_cmd ;
/* Take fcport->rport_lock for posting to fcport send queue */
spin_lock_irqsave ( & fcport - > rport_lock , flags ) ;
if ( qedf_post_io_req ( fcport , io_req ) ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " Unable to post io_req \n " ) ;
/* Return SQE to pool */
atomic_inc ( & fcport - > free_sqes ) ;
rc = SCSI_MLQUEUE_HOST_BUSY ;
}
spin_unlock_irqrestore ( & fcport - > rport_lock , flags ) ;
exit_qcmd :
return rc ;
}
static void qedf_parse_fcp_rsp ( struct qedf_ioreq * io_req ,
struct fcoe_cqe_rsp_info * fcp_rsp )
{
struct scsi_cmnd * sc_cmd = io_req - > sc_cmd ;
struct qedf_ctx * qedf = io_req - > fcport - > qedf ;
u8 rsp_flags = fcp_rsp - > rsp_flags . flags ;
int fcp_sns_len = 0 ;
int fcp_rsp_len = 0 ;
uint8_t * rsp_info , * sense_data ;
io_req - > fcp_status = FC_GOOD ;
io_req - > fcp_resid = 0 ;
if ( rsp_flags & ( FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER ) )
io_req - > fcp_resid = fcp_rsp - > fcp_resid ;
io_req - > scsi_comp_flags = rsp_flags ;
CMD_SCSI_STATUS ( sc_cmd ) = io_req - > cdb_status =
fcp_rsp - > scsi_status_code ;
if ( rsp_flags &
FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID )
fcp_rsp_len = fcp_rsp - > fcp_rsp_len ;
if ( rsp_flags &
FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID )
fcp_sns_len = fcp_rsp - > fcp_sns_len ;
io_req - > fcp_rsp_len = fcp_rsp_len ;
io_req - > fcp_sns_len = fcp_sns_len ;
rsp_info = sense_data = io_req - > sense_buffer ;
/* fetch fcp_rsp_code */
if ( ( fcp_rsp_len = = 4 ) | | ( fcp_rsp_len = = 8 ) ) {
/* Only for task management function */
io_req - > fcp_rsp_code = rsp_info [ 3 ] ;
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" fcp_rsp_code = %d \n " , io_req - > fcp_rsp_code ) ;
/* Adjust sense-data location. */
sense_data + = fcp_rsp_len ;
}
if ( fcp_sns_len > SCSI_SENSE_BUFFERSIZE ) {
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" Truncating sense buffer \n " ) ;
fcp_sns_len = SCSI_SENSE_BUFFERSIZE ;
}
memset ( sc_cmd - > sense_buffer , 0 , SCSI_SENSE_BUFFERSIZE ) ;
if ( fcp_sns_len )
memcpy ( sc_cmd - > sense_buffer , sense_data ,
fcp_sns_len ) ;
}
static void qedf_unmap_sg_list ( struct qedf_ctx * qedf , struct qedf_ioreq * io_req )
{
struct scsi_cmnd * sc = io_req - > sc_cmd ;
if ( io_req - > bd_tbl - > bd_valid & & sc & & scsi_sg_count ( sc ) ) {
dma_unmap_sg ( & qedf - > pdev - > dev , scsi_sglist ( sc ) ,
scsi_sg_count ( sc ) , sc - > sc_data_direction ) ;
io_req - > bd_tbl - > bd_valid = 0 ;
}
}
void qedf_scsi_completion ( struct qedf_ctx * qedf , struct fcoe_cqe * cqe ,
struct qedf_ioreq * io_req )
{
u16 xid , rval ;
struct fcoe_task_context * task_ctx ;
struct scsi_cmnd * sc_cmd ;
struct fcoe_cqe_rsp_info * fcp_rsp ;
struct qedf_rport * fcport ;
int refcount ;
u16 scope , qualifier = 0 ;
u8 fw_residual_flag = 0 ;
if ( ! io_req )
return ;
if ( ! cqe )
return ;
xid = io_req - > xid ;
task_ctx = qedf_get_task_mem ( & qedf - > tasks , xid ) ;
sc_cmd = io_req - > sc_cmd ;
fcp_rsp = & cqe - > cqe_info . rsp_info ;
if ( ! sc_cmd ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " sc_cmd is NULL! \n " ) ;
return ;
}
if ( ! sc_cmd - > SCp . ptr ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " SCp.ptr is NULL, returned in "
" another context. \n " ) ;
return ;
}
if ( ! sc_cmd - > request ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " sc_cmd->request is NULL, "
" sc_cmd=%p. \n " , sc_cmd ) ;
return ;
}
if ( ! sc_cmd - > request - > special ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " request->special is NULL so "
" request not valid, sc_cmd=%p. \n " , sc_cmd ) ;
return ;
}
if ( ! sc_cmd - > request - > q ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " request->q is NULL so request "
" is not valid, sc_cmd=%p. \n " , sc_cmd ) ;
return ;
}
fcport = io_req - > fcport ;
qedf_parse_fcp_rsp ( io_req , fcp_rsp ) ;
qedf_unmap_sg_list ( qedf , io_req ) ;
/* Check for FCP transport error */
if ( io_req - > fcp_rsp_len > 3 & & io_req - > fcp_rsp_code ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) ,
" FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
" fcp_rsp_code=%d. \n " , io_req - > xid , io_req - > fcp_rsp_len ,
io_req - > fcp_rsp_code ) ;
sc_cmd - > result = DID_BUS_BUSY < < 16 ;
goto out ;
}
fw_residual_flag = GET_FIELD ( cqe - > cqe_info . rsp_info . fw_error_flags ,
FCOE_CQE_RSP_INFO_FW_UNDERRUN ) ;
if ( fw_residual_flag ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) ,
" Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
" fcp_resid=%d fw_residual=0x%x. \n " , io_req - > xid ,
fcp_rsp - > rsp_flags . flags , io_req - > fcp_resid ,
cqe - > cqe_info . rsp_info . fw_residual ) ;
if ( io_req - > cdb_status = = 0 )
sc_cmd - > result = ( DID_ERROR < < 16 ) | io_req - > cdb_status ;
else
sc_cmd - > result = ( DID_OK < < 16 ) | io_req - > cdb_status ;
/* Abort the command since we did not get all the data */
init_completion ( & io_req - > abts_done ) ;
rval = qedf_initiate_abts ( io_req , true ) ;
if ( rval ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Failed to queue ABTS. \n " ) ;
sc_cmd - > result = ( DID_ERROR < < 16 ) | io_req - > cdb_status ;
}
/*
* Set resid to the whole buffer length so we won ' t try to resue
* any previously data .
*/
scsi_set_resid ( sc_cmd , scsi_bufflen ( sc_cmd ) ) ;
goto out ;
}
switch ( io_req - > fcp_status ) {
case FC_GOOD :
if ( io_req - > cdb_status = = 0 ) {
/* Good I/O completion */
sc_cmd - > result = DID_OK < < 16 ;
} else {
2017-02-23 07:01:03 -08:00
refcount = kref_read ( & io_req - > refcount ) ;
2017-02-15 06:28:23 -08:00
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
2017-03-04 00:07:04 -08:00
" %d:0:%d:%lld xid=0x%0x op=0x%02x "
2017-02-15 06:28:23 -08:00
" lba=%02x%02x%02x%02x cdb_status=%d "
" fcp_resid=0x%x refcount=%d. \n " ,
qedf - > lport - > host - > host_no , sc_cmd - > device - > id ,
sc_cmd - > device - > lun , io_req - > xid ,
sc_cmd - > cmnd [ 0 ] , sc_cmd - > cmnd [ 2 ] , sc_cmd - > cmnd [ 3 ] ,
sc_cmd - > cmnd [ 4 ] , sc_cmd - > cmnd [ 5 ] ,
io_req - > cdb_status , io_req - > fcp_resid ,
refcount ) ;
sc_cmd - > result = ( DID_OK < < 16 ) | io_req - > cdb_status ;
if ( io_req - > cdb_status = = SAM_STAT_TASK_SET_FULL | |
io_req - > cdb_status = = SAM_STAT_BUSY ) {
/*
* Check whether we need to set retry_delay at
* all based on retry_delay module parameter
* and the status qualifier .
*/
/* Upper 2 bits */
scope = fcp_rsp - > retry_delay_timer & 0xC000 ;
/* Lower 14 bits */
qualifier = fcp_rsp - > retry_delay_timer & 0x3FFF ;
if ( qedf_retry_delay & &
scope > 0 & & qualifier > 0 & &
qualifier < = 0x3FEF ) {
/* Check we don't go over the max */
if ( qualifier > QEDF_RETRY_DELAY_MAX )
qualifier =
QEDF_RETRY_DELAY_MAX ;
fcport - > retry_delay_timestamp =
jiffies + ( qualifier * HZ / 10 ) ;
}
}
}
if ( io_req - > fcp_resid )
scsi_set_resid ( sc_cmd , io_req - > fcp_resid ) ;
break ;
default :
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO , " fcp_status=%d. \n " ,
io_req - > fcp_status ) ;
break ;
}
out :
if ( qedf_io_tracing )
qedf_trace_io ( fcport , io_req , QEDF_IO_TRACE_RSP ) ;
io_req - > sc_cmd = NULL ;
sc_cmd - > SCp . ptr = NULL ;
sc_cmd - > scsi_done ( sc_cmd ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
}
/* Return a SCSI command in some other context besides a normal completion */
void qedf_scsi_done ( struct qedf_ctx * qedf , struct qedf_ioreq * io_req ,
int result )
{
u16 xid ;
struct scsi_cmnd * sc_cmd ;
int refcount ;
if ( ! io_req )
return ;
xid = io_req - > xid ;
sc_cmd = io_req - > sc_cmd ;
if ( ! sc_cmd ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " sc_cmd is NULL! \n " ) ;
return ;
}
if ( ! sc_cmd - > SCp . ptr ) {
QEDF_WARN ( & ( qedf - > dbg_ctx ) , " SCp.ptr is NULL, returned in "
" another context. \n " ) ;
return ;
}
qedf_unmap_sg_list ( qedf , io_req ) ;
sc_cmd - > result = result < < 16 ;
2017-02-23 07:01:03 -08:00
refcount = kref_read ( & io_req - > refcount ) ;
2017-03-04 00:07:04 -08:00
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO , " %d:0:%d:%lld: Completing "
2017-02-15 06:28:23 -08:00
" sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
" allowed=%d retries=%d refcount=%d. \n " ,
qedf - > lport - > host - > host_no , sc_cmd - > device - > id ,
sc_cmd - > device - > lun , sc_cmd , sc_cmd - > result , sc_cmd - > cmnd [ 0 ] ,
sc_cmd - > cmnd [ 2 ] , sc_cmd - > cmnd [ 3 ] , sc_cmd - > cmnd [ 4 ] ,
sc_cmd - > cmnd [ 5 ] , sc_cmd - > allowed , sc_cmd - > retries ,
refcount ) ;
/*
* Set resid to the whole buffer length so we won ' t try to resue any
* previously read data
*/
scsi_set_resid ( sc_cmd , scsi_bufflen ( sc_cmd ) ) ;
if ( qedf_io_tracing )
qedf_trace_io ( io_req - > fcport , io_req , QEDF_IO_TRACE_RSP ) ;
io_req - > sc_cmd = NULL ;
sc_cmd - > SCp . ptr = NULL ;
sc_cmd - > scsi_done ( sc_cmd ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
}
/*
* Handle warning type CQE completions . This is mainly used for REC timer
* popping .
*/
void qedf_process_warning_compl ( struct qedf_ctx * qedf , struct fcoe_cqe * cqe ,
struct qedf_ioreq * io_req )
{
int rval , i ;
struct qedf_rport * fcport = io_req - > fcport ;
u64 err_warn_bit_map ;
u8 err_warn = 0xff ;
if ( ! cqe )
return ;
QEDF_ERR ( & ( io_req - > fcport - > qedf - > dbg_ctx ) , " Warning CQE, "
" xid=0x%x \n " , io_req - > xid ) ;
QEDF_ERR ( & ( io_req - > fcport - > qedf - > dbg_ctx ) ,
" err_warn_bitmap=%08x:%08x \n " ,
le32_to_cpu ( cqe - > cqe_info . err_info . err_warn_bitmap_hi ) ,
le32_to_cpu ( cqe - > cqe_info . err_info . err_warn_bitmap_lo ) ) ;
QEDF_ERR ( & ( io_req - > fcport - > qedf - > dbg_ctx ) , " tx_buff_off=%08x, "
" rx_buff_off=%08x, rx_id=%04x \n " ,
le32_to_cpu ( cqe - > cqe_info . err_info . tx_buf_off ) ,
le32_to_cpu ( cqe - > cqe_info . err_info . rx_buf_off ) ,
le32_to_cpu ( cqe - > cqe_info . err_info . rx_id ) ) ;
/* Normalize the error bitmap value to an just an unsigned int */
err_warn_bit_map = ( u64 )
( ( u64 ) cqe - > cqe_info . err_info . err_warn_bitmap_hi < < 32 ) |
( u64 ) cqe - > cqe_info . err_info . err_warn_bitmap_lo ;
for ( i = 0 ; i < 64 ; i + + ) {
if ( err_warn_bit_map & ( u64 ) ( ( u64 ) 1 < < i ) ) {
err_warn = i ;
break ;
}
}
/* Check if REC TOV expired if this is a tape device */
if ( fcport - > dev_type = = QEDF_RPORT_TYPE_TAPE ) {
if ( err_warn = =
FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " REC timer expired. \n " ) ;
if ( ! test_bit ( QEDF_CMD_SRR_SENT , & io_req - > flags ) ) {
io_req - > rx_buf_off =
cqe - > cqe_info . err_info . rx_buf_off ;
io_req - > tx_buf_off =
cqe - > cqe_info . err_info . tx_buf_off ;
io_req - > rx_id = cqe - > cqe_info . err_info . rx_id ;
rval = qedf_send_rec ( io_req ) ;
/*
* We only want to abort the io_req if we
* can ' t queue the REC command as we want to
* keep the exchange open for recovery .
*/
if ( rval )
goto send_abort ;
}
return ;
}
}
send_abort :
init_completion ( & io_req - > abts_done ) ;
rval = qedf_initiate_abts ( io_req , true ) ;
if ( rval )
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Failed to queue ABTS. \n " ) ;
}
/* Cleanup a command when we receive an error detection completion */
void qedf_process_error_detect ( struct qedf_ctx * qedf , struct fcoe_cqe * cqe ,
struct qedf_ioreq * io_req )
{
int rval ;
if ( ! cqe )
return ;
QEDF_ERR ( & ( io_req - > fcport - > qedf - > dbg_ctx ) , " Error detection CQE, "
" xid=0x%x \n " , io_req - > xid ) ;
QEDF_ERR ( & ( io_req - > fcport - > qedf - > dbg_ctx ) ,
" err_warn_bitmap=%08x:%08x \n " ,
le32_to_cpu ( cqe - > cqe_info . err_info . err_warn_bitmap_hi ) ,
le32_to_cpu ( cqe - > cqe_info . err_info . err_warn_bitmap_lo ) ) ;
QEDF_ERR ( & ( io_req - > fcport - > qedf - > dbg_ctx ) , " tx_buff_off=%08x, "
" rx_buff_off=%08x, rx_id=%04x \n " ,
le32_to_cpu ( cqe - > cqe_info . err_info . tx_buf_off ) ,
le32_to_cpu ( cqe - > cqe_info . err_info . rx_buf_off ) ,
le32_to_cpu ( cqe - > cqe_info . err_info . rx_id ) ) ;
if ( qedf - > stop_io_on_error ) {
qedf_stop_all_io ( qedf ) ;
return ;
}
init_completion ( & io_req - > abts_done ) ;
rval = qedf_initiate_abts ( io_req , true ) ;
if ( rval )
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Failed to queue ABTS. \n " ) ;
}
static void qedf_flush_els_req ( struct qedf_ctx * qedf ,
struct qedf_ioreq * els_req )
{
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" Flushing ELS request xid=0x%x refcount=%d. \n " , els_req - > xid ,
2017-02-23 07:01:03 -08:00
kref_read ( & els_req - > refcount ) ) ;
2017-02-15 06:28:23 -08:00
/*
* Need to distinguish this from a timeout when calling the
* els_req - > cb_func .
*/
els_req - > event = QEDF_IOREQ_EV_ELS_FLUSH ;
/* Cancel the timer */
cancel_delayed_work_sync ( & els_req - > timeout_work ) ;
/* Call callback function to complete command */
if ( els_req - > cb_func & & els_req - > cb_arg ) {
els_req - > cb_func ( els_req - > cb_arg ) ;
els_req - > cb_arg = NULL ;
}
/* Release kref for original initiate_els */
kref_put ( & els_req - > refcount , qedf_release_cmd ) ;
}
/* A value of -1 for lun is a wild card that means flush all
* active SCSI I / Os for the target .
*/
void qedf_flush_active_ios ( struct qedf_rport * fcport , int lun )
{
struct qedf_ioreq * io_req ;
struct qedf_ctx * qedf ;
struct qedf_cmd_mgr * cmd_mgr ;
int i , rc ;
if ( ! fcport )
return ;
qedf = fcport - > qedf ;
cmd_mgr = qedf - > cmd_mgr ;
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO , " Flush active i/o's. \n " ) ;
for ( i = 0 ; i < FCOE_PARAMS_NUM_TASKS ; i + + ) {
io_req = & cmd_mgr - > cmds [ i ] ;
if ( ! io_req )
continue ;
if ( io_req - > fcport ! = fcport )
continue ;
if ( io_req - > cmd_type = = QEDF_ELS ) {
rc = kref_get_unless_zero ( & io_req - > refcount ) ;
if ( ! rc ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) ,
" Could not get kref for io_req=0x%p. \n " ,
io_req ) ;
continue ;
}
qedf_flush_els_req ( qedf , io_req ) ;
/*
* Release the kref and go back to the top of the
* loop .
*/
goto free_cmd ;
}
if ( ! io_req - > sc_cmd )
continue ;
if ( lun > 0 ) {
if ( io_req - > sc_cmd - > device - > lun ! =
( u64 ) lun )
continue ;
}
/*
* Use kref_get_unless_zero in the unlikely case the command
* we ' re about to flush was completed in the normal SCSI path
*/
rc = kref_get_unless_zero ( & io_req - > refcount ) ;
if ( ! rc ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Could not get kref for "
" io_req=0x%p \n " , io_req ) ;
continue ;
}
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_IO ,
" Cleanup xid=0x%x. \n " , io_req - > xid ) ;
/* Cleanup task and return I/O mid-layer */
qedf_initiate_cleanup ( io_req , true ) ;
free_cmd :
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
}
}
/*
* Initiate a ABTS middle path command . Note that we don ' t have to initialize
* the task context for an ABTS task .
*/
int qedf_initiate_abts ( struct qedf_ioreq * io_req , bool return_scsi_cmd_on_abts )
{
struct fc_lport * lport ;
struct qedf_rport * fcport = io_req - > fcport ;
2017-05-31 06:33:52 -07:00
struct fc_rport_priv * rdata ;
struct qedf_ctx * qedf ;
2017-02-15 06:28:23 -08:00
u16 xid ;
u32 r_a_tov = 0 ;
int rc = 0 ;
unsigned long flags ;
2017-03-11 18:39:18 +02:00
struct fcoe_wqe * sqe ;
u16 sqe_idx ;
2017-02-15 06:28:23 -08:00
2017-05-31 06:33:52 -07:00
/* Sanity check qedf_rport before dereferencing any pointers */
2017-02-15 06:28:23 -08:00
if ( ! test_bit ( QEDF_RPORT_SESSION_READY , & fcport - > flags ) ) {
2017-05-31 06:33:52 -07:00
QEDF_ERR ( NULL , " tgt not offloaded \n " ) ;
2017-02-15 06:28:23 -08:00
rc = 1 ;
goto abts_err ;
}
2017-05-31 06:33:52 -07:00
rdata = fcport - > rdata ;
r_a_tov = rdata - > r_a_tov ;
qedf = fcport - > qedf ;
lport = qedf - > lport ;
2017-02-15 06:28:23 -08:00
if ( lport - > state ! = LPORT_ST_READY | | ! ( lport - > link_up ) ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " link is not ready \n " ) ;
rc = 1 ;
goto abts_err ;
}
if ( atomic_read ( & qedf - > link_down_tmo_valid ) > 0 ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " link_down_tmo active. \n " ) ;
rc = 1 ;
goto abts_err ;
}
/* Ensure room on SQ */
if ( ! atomic_read ( & fcport - > free_sqes ) ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " No SQ entries available \n " ) ;
rc = 1 ;
goto abts_err ;
}
kref_get ( & io_req - > refcount ) ;
xid = io_req - > xid ;
qedf - > control_requests + + ;
qedf - > packet_aborts + + ;
/* Set the return CPU to be the same as the request one */
io_req - > cpu = smp_processor_id ( ) ;
/* Set the command type to abort */
io_req - > cmd_type = QEDF_ABTS ;
io_req - > return_scsi_cmd_on_abts = return_scsi_cmd_on_abts ;
set_bit ( QEDF_CMD_IN_ABORT , & io_req - > flags ) ;
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_SCSI_TM , " ABTS io_req xid = "
" 0x%x \n " , xid ) ;
qedf_cmd_timer_set ( qedf , io_req , QEDF_ABORT_TIMEOUT * HZ ) ;
spin_lock_irqsave ( & fcport - > rport_lock , flags ) ;
2017-03-11 18:39:18 +02:00
sqe_idx = qedf_get_sqe_idx ( fcport ) ;
sqe = & fcport - > sq [ sqe_idx ] ;
memset ( sqe , 0 , sizeof ( struct fcoe_wqe ) ) ;
io_req - > task_params - > sqe = sqe ;
2017-02-15 06:28:23 -08:00
2017-03-11 18:39:18 +02:00
init_initiator_abort_fcoe_task ( io_req - > task_params ) ;
2017-02-15 06:28:23 -08:00
qedf_ring_doorbell ( fcport ) ;
spin_unlock_irqrestore ( & fcport - > rport_lock , flags ) ;
return rc ;
abts_err :
/*
* If the ABTS task fails to queue then we need to cleanup the
* task at the firmware .
*/
qedf_initiate_cleanup ( io_req , return_scsi_cmd_on_abts ) ;
return rc ;
}
void qedf_process_abts_compl ( struct qedf_ctx * qedf , struct fcoe_cqe * cqe ,
struct qedf_ioreq * io_req )
{
uint32_t r_ctl ;
uint16_t xid ;
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_SCSI_TM , " Entered with xid = "
" 0x%x cmd_type = %d \n " , io_req - > xid , io_req - > cmd_type ) ;
cancel_delayed_work ( & io_req - > timeout_work ) ;
xid = io_req - > xid ;
r_ctl = cqe - > cqe_info . abts_info . r_ctl ;
switch ( r_ctl ) {
case FC_RCTL_BA_ACC :
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_SCSI_TM ,
" ABTS response - ACC Send RRQ after R_A_TOV \n " ) ;
io_req - > event = QEDF_IOREQ_EV_ABORT_SUCCESS ;
/*
* Dont release this cmd yet . It will be relesed
* after we get RRQ response
*/
kref_get ( & io_req - > refcount ) ;
queue_delayed_work ( qedf - > dpc_wq , & io_req - > rrq_work ,
msecs_to_jiffies ( qedf - > lport - > r_a_tov ) ) ;
break ;
/* For error cases let the cleanup return the command */
case FC_RCTL_BA_RJT :
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_SCSI_TM ,
" ABTS response - RJT \n " ) ;
io_req - > event = QEDF_IOREQ_EV_ABORT_FAILED ;
break ;
default :
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Unknown ABTS response \n " ) ;
break ;
}
clear_bit ( QEDF_CMD_IN_ABORT , & io_req - > flags ) ;
if ( io_req - > sc_cmd ) {
if ( io_req - > return_scsi_cmd_on_abts )
qedf_scsi_done ( qedf , io_req , DID_ERROR ) ;
}
/* Notify eh_abort handler that ABTS is complete */
complete ( & io_req - > abts_done ) ;
kref_put ( & io_req - > refcount , qedf_release_cmd ) ;
}
int qedf_init_mp_req ( struct qedf_ioreq * io_req )
{
struct qedf_mp_req * mp_req ;
2017-03-11 18:39:18 +02:00
struct scsi_sge * mp_req_bd ;
struct scsi_sge * mp_resp_bd ;
2017-02-15 06:28:23 -08:00
struct qedf_ctx * qedf = io_req - > fcport - > qedf ;
dma_addr_t addr ;
uint64_t sz ;
QEDF_INFO ( & ( qedf - > dbg_ctx ) , QEDF_LOG_MP_REQ , " Entered. \n " ) ;
mp_req = ( struct qedf_mp_req * ) & ( io_req - > mp_req ) ;
memset ( mp_req , 0 , sizeof ( struct qedf_mp_req ) ) ;
if ( io_req - > cmd_type ! = QEDF_ELS ) {
mp_req - > req_len = sizeof ( struct fcp_cmnd ) ;
io_req - > data_xfer_len = mp_req - > req_len ;
} else
mp_req - > req_len = io_req - > data_xfer_len ;
mp_req - > req_buf = dma_alloc_coherent ( & qedf - > pdev - > dev , QEDF_PAGE_SIZE ,
& mp_req - > req_buf_dma , GFP_KERNEL ) ;
if ( ! mp_req - > req_buf ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Unable to alloc MP req buffer \n " ) ;
qedf_free_mp_resc ( io_req ) ;
return - ENOMEM ;
}
mp_req - > resp_buf = dma_alloc_coherent ( & qedf - > pdev - > dev ,
QEDF_PAGE_SIZE , & mp_req - > resp_buf_dma , GFP_KERNEL ) ;
if ( ! mp_req - > resp_buf ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Unable to alloc TM resp "
" buffer \n " ) ;
qedf_free_mp_resc ( io_req ) ;
return - ENOMEM ;
}
/* Allocate and map mp_req_bd and mp_resp_bd */
2017-03-11 18:39:18 +02:00
sz = sizeof ( struct scsi_sge ) ;
2017-02-15 06:28:23 -08:00
mp_req - > mp_req_bd = dma_alloc_coherent ( & qedf - > pdev - > dev , sz ,
& mp_req - > mp_req_bd_dma , GFP_KERNEL ) ;
if ( ! mp_req - > mp_req_bd ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Unable to alloc MP req bd \n " ) ;
qedf_free_mp_resc ( io_req ) ;
return - ENOMEM ;
}
mp_req - > mp_resp_bd = dma_alloc_coherent ( & qedf - > pdev - > dev , sz ,
& mp_req - > mp_resp_bd_dma , GFP_KERNEL ) ;
if ( ! mp_req - > mp_resp_bd ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " Unable to alloc MP resp bd \n " ) ;
qedf_free_mp_resc ( io_req ) ;
return - ENOMEM ;
}
/* Fill bd table */
addr = mp_req - > req_buf_dma ;
mp_req_bd = mp_req - > mp_req_bd ;
mp_req_bd - > sge_addr . lo = U64_LO ( addr ) ;
mp_req_bd - > sge_addr . hi = U64_HI ( addr ) ;
2017-03-11 18:39:18 +02:00
mp_req_bd - > sge_len = QEDF_PAGE_SIZE ;
2017-02-15 06:28:23 -08:00
/*
* MP buffer is either a task mgmt command or an ELS .
* So the assumption is that it consumes a single bd
* entry in the bd table
*/
mp_resp_bd = mp_req - > mp_resp_bd ;
addr = mp_req - > resp_buf_dma ;
mp_resp_bd - > sge_addr . lo = U64_LO ( addr ) ;
mp_resp_bd - > sge_addr . hi = U64_HI ( addr ) ;
2017-03-11 18:39:18 +02:00
mp_resp_bd - > sge_len = QEDF_PAGE_SIZE ;
2017-02-15 06:28:23 -08:00
return 0 ;
}
/*
* Last ditch effort to clear the port if it ' s stuck . Used only after a
* cleanup task times out .
*/
static void qedf_drain_request ( struct qedf_ctx * qedf )
{
if ( test_bit ( QEDF_DRAIN_ACTIVE , & qedf - > flags ) ) {
QEDF_ERR ( & ( qedf - > dbg_ctx ) , " MCP drain already active. \n " ) ;
return ;
}
/* Set bit to return all queuecommand requests as busy */
set_bit ( QEDF_DRAIN_ACTIVE , & qedf - > flags ) ;
/* Call qed drain request for function. Should be synchronous */
qed_ops - > common - > drain ( qedf - > cdev ) ;
/* Settle time for CQEs to be returned */
msleep ( 100 ) ;
/* Unplug and continue */
clear_bit ( QEDF_DRAIN_ACTIVE , & qedf - > flags ) ;
}
/*
* Returns SUCCESS if the cleanup task does not timeout , otherwise return
* FAILURE .
*/
int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts)
{
	struct qedf_rport *fcport;
	struct qedf_ctx *qedf;
	uint16_t xid;
	struct fcoe_task_context *task;
	int tmo = 0;
	int rc = SUCCESS;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	fcport = io_req->fcport;
	if (!fcport) {
		QEDF_ERR(NULL, "fcport is NULL.\n");
		return SUCCESS;
	}

	/* Sanity check qedf_rport before dereferencing any pointers */
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		rc = 1;
		return SUCCESS;
	}

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return SUCCESS;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
			  "cleanup processing or already completed.\n",
			  io_req->xid);
		return SUCCESS;
	}

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
		  io_req->xid);

	/* Cleanup cmds re-use the same TID as the original I/O */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		/* Timeout case */
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
			  "xid=%x.\n", io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		/* Issue a drain request if cleanup task times out */
		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}
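
/*
 * Fast path handler for the cleanup CQE: clear the in-cleanup flag and
 * complete tm_done so the waiter in qedf_initiate_cleanup() can finish
 * tearing down the I/O.
 */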
void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
		  io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->tm_done);
}
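
/*
 * Build and post a task management request (per tm_flags, e.g. a LUN
 * reset) on the rport's send queue, then wait up to QEDF_TM_TIMEOUT
 * seconds for qedf_process_tmf_compl() to signal tm_done. Affected
 * active I/Os are flushed before the result is returned to the caller.
 */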
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		return FAILED;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
		   "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
		rc = -EAGAIN;
		goto reset_tmf_err;
	}

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Set the return CPU to be the same as the request one */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = true;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
		   "0x%x\n", xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
	} else {
		/* Check TMF response code */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
	else
		qedf_flush_active_ios(fcport, -1);

	kref_put(&io_req->refcount, qedf_release_cmd);

	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
reset_tmf_err:
	return rc;
}
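
/*
 * Entry point for task management requests from the SCSI layer. Validates
 * remote port readiness and local port/link state before handing the
 * request off to qedf_execute_tmf().
 */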
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport;
	int rc = SUCCESS;
	int rval;

	rval = fc_remote_port_chkready(rport);

	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (fcport == NULL) {
		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	qedf = fcport->qedf;
	lport = qedf->lport;

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	return rc;
}
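
/*
 * Illustrative sketch only (the error-handler wiring is not shown in this
 * file, and the handler name below is an assumption): the SCSI midlayer
 * error handlers are the expected callers of qedf_initiate_tmf(), e.g.
 *
 *	static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
 *	{
 *		return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 *	}
 *
 * with such a handler plugged into the host template's
 * .eh_device_reset_handler field.
 */

/*
 * Fast path completion for a task management request: parse the FCP
 * response so qedf_execute_tmf() can inspect fcp_rsp_code, drop the
 * sc_cmd reference and wake the waiter via tm_done.
 */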
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
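
/*
 * Handle an unsolicited frame delivered through the BDQ: validate the
 * buffer index, copy the frame into a newly allocated fc_frame and defer
 * it to qedf_io_wq so libfc can process it in a non-atomic context. The
 * BDQ producer index is advanced even on error so the firmware can reuse
 * the buffer.
 */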
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
	    "address.hi=%x address.lo=%x opaque_data.hi=%x "
	    "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
	    qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
		    bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
		    "unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		    "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
		    (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
		    "work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);

increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index wraps at uint16_t boundary */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}