/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

static void qla_nvme_unregister_remote_port(struct work_struct *);
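
/*
 * qla_nvme_register_remote - register an fcport with the FC-NVMe transport
 *
 * Translates the fcport's PRLI service parameters into nvme_fc port roles
 * and registers the port via nvme_fc_register_remoteport(). Returns 0 when
 * NVMe is disabled on the host or the port is already registered.
 */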
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;
	list_add_tail(&rport->list, &vha->nvme_rport_list);

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/*
 * Allocate a queue for NVMe traffic: queue index 0 is remapped to index 1,
 * and an existing qpair is reused when one is already mapped at the
 * requested index.
 */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}
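
/*
 * Completion callback for an NVMe LS srb: record the completion status and
 * defer the transport's done() callback to ls_work, which runs in process
 * context.
 */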
static void qla_nvme_sp_ls_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_ls_req *fd;
	struct nvme_private *priv;

	if (atomic_read(&sp->ref_count) == 0) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
		return;
	}

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res)
		res = -EINVAL;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;
	priv = fd->private;
	priv->comp_status = res;
	schedule_work(&priv->ls_work);
	/* work schedule doesn't need the sp */
	qla2x00_rel_sp(sp);
}
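
/*
 * Completion callback for an NVMe FCP srb: map the driver result onto an
 * NVMe status code, record the received response length and complete the
 * request back to the FC-NVMe transport.
 */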
static void qla_nvme_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct srb_iocb *nvme;
	struct nvmefc_fcp_req *fd;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	if (!atomic_dec_and_test(&sp->ref_count))
		return;

	if (res == QLA_SUCCESS)
		fd->status = 0;
	else
		fd->status = NVME_SC_INTERNAL;

	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	fd->done(fd);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
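
/*
 * Deferred abort handler: issue the firmware abort for the srb stashed in
 * the request's private data by the ls_abort/fcp_abort entry points.
 */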
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);
}
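
/* Transport ls_abort entry point: defer the abort to a workqueue. */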
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);
	struct nvmefc_ls_req *fd = priv->fd;

	fd->done(fd, priv->comp_status);
}
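
/*
 * Transport ls_req entry point: allocate an srb for the LS payload, map
 * the request buffer for DMA and start the IOCB via qla2x00_start_sp().
 */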
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	vha = fcport->vha;
	ha = vha->hw;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	atomic_set(&sp->ref_count, 1);
	nvme = &sp->u.iocb_cmd;
	priv->sp = sp;
	priv->fd = fd;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
		return rval;
	}

	return rval;
}
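
/* Transport fcp_abort entry point: the abort is deferred to qla_nvme_abort_work(). */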
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}
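
/* Transport poll_queue entry point: drain the qpair's response queue. */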
static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
{
	struct qla_qpair *qpair = hw_queue_handle;
	unsigned long flags;
	struct scsi_qla_host *vha = lport->private;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}
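
/*
 * Build and post a Command Type NVME IOCB on the qpair's request queue:
 * claim an outstanding-command slot, verify ring space, fill in the NVMe
 * command and response DSDs, chain Continuation Type 1 IOCBs for any
 * additional data segments, then update the chip's request-queue index.
 */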
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer -- how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			*((uint32_t *)(&cont_pkt->entry_type)) =
			    cpu_to_le32(CONTINUE_A64_TYPE);

			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
	return rval;
}

/* Post an FCP command to the firmware (the transport's fcp_io entry point) */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!fd || !qpair) {
		ql_log(ql_log_warn, NULL, 0x2134,
		    "NO NVMe request or Queue Handle\n");
		return rval;
	}

	priv = fd->private;
	fcport = qla_rport->fcport;
	if (!fcport) {
		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
		return rval;
	}

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return rval;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO Q. This happens
	 * when the link goes away and fw hasn't notified us yet, but IO's
	 * are being returned. If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	atomic_set(&sp->ref_count, 1);
	init_waitqueue_head(&sp->nvme_ls_waitq);
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->qpair = qpair;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		atomic_dec(&sp->ref_count);
		wake_up(&sp->nvme_ls_waitq);
	}

	return rval;
}
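
/*
 * Transport localport_delete callback: clear the cached local port and
 * signal anyone waiting in qla_nvme_delete().
 */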
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}
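
/*
 * Transport remoteport_delete callback: unlink the qla_nvme_rport from the
 * host's rport list, clear the registration flags and, unless the driver
 * is unloading, schedule session teardown.
 */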
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private, *trport;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			list_del(&qla_rport->list);
			break;
		}
	}
	complete(&fcport->nvme_del_done);

	if (!test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
		INIT_WORK(&fcport->free_work, qlt_free_session_done);
		schedule_work(&fcport->free_work);
	}

	fcport->nvme_flag &= ~(NVME_FLAG_REGISTERED | NVME_FLAG_DELETING);
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p completed.\n", fcport);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.poll_queue = qla_nvme_poll,
	.max_hw_queues = 8,
	.max_sgl_segments = 128,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};
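
/* Seconds to poll for an aborted command to complete. */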
#define NVME_ABORT_POLLING_PERIOD    2
static int qla_nvme_wait_on_command(srb_t *sp)
{
	int ret = QLA_SUCCESS;

	wait_event_timeout(sp->nvme_ls_waitq, (atomic_read(&sp->ref_count) > 1),
	    NVME_ABORT_POLLING_PERIOD * HZ);

	if (atomic_read(&sp->ref_count) > 1)
		ret = QLA_FUNCTION_FAILED;

	return ret;
}
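
/*
 * Abort an NVMe srb: if firmware is started, issue the abort and wait for
 * the command to complete; otherwise complete the srb directly with the
 * given result.
 */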
void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
	int rval;

	if (ha->flags.fw_started) {
		rval = ha->isp_ops->abort_command(sp);
		if (!rval && !qla_nvme_wait_on_command(sp))
			ql_log(ql_log_warn, NULL, 0x2112,
			    "timed out waiting on sp=%p\n", sp);
	} else {
		sp->done(sp, res);
	}
}
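
/*
 * Deferred unregistration of an fcport's NVMe remote port: locate the
 * matching entry on the host's rport list, unregister it with the
 * transport and wait for the delete to complete.
 */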
static void qla_nvme_unregister_remote_port(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, struct fc_port,
	    nvme_del_work);
	struct qla_nvme_rport *qla_rport, *trport;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p\n", __func__, fcport);

	list_for_each_entry_safe(qla_rport, trport,
	    &fcport->vha->nvme_rport_list, list) {
		if (qla_rport->fcport == fcport) {
			ql_log(ql_log_info, fcport->vha, 0x2113,
			    "%s: fcport=%p\n", __func__, fcport);
			init_completion(&fcport->nvme_del_done);
			nvme_fc_unregister_remoteport(
			    fcport->nvme_remote_port);
			wait_for_completion(&fcport->nvme_del_done);
			break;
		}
	}
}
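
/*
 * Host-wide NVMe teardown: zero dev_loss_tmo on every registered remote
 * port so the transport deletes it immediately, then unregister the local
 * port and wait for its deletion to complete.
 */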
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	struct qla_nvme_rport *qla_rport, *trport;
	fc_port_t *fcport;
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	list_for_each_entry_safe(qla_rport, trport,
	    &vha->nvme_rport_list, list) {
		fcport = qla_rport->fcport;

		ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
		    __func__, fcport);

		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
	}

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}
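
/*
 * Register this host with the FC-NVMe transport as a local port. The
 * number of hardware queues advertised is capped at the HBA's
 * max_req_queues minus two.
 */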
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}