/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

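/*
 * Register an FC-NVMe remote port with the transport, based on the roles
 * (initiator/target/discovery) advertised in the fcport's PRLI service
 * parameters. Registers the local port first if that has not happened yet.
 */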
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name, req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
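/*
 * The queue handle handed back to the transport is a qla_qpair: qidx 0
 * (the NVMe admin queue) is mapped onto hardware queue 1, an existing
 * qpair at a given index is reused, and a new qpair is created otherwise.
 */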
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!qidx)
		qidx++;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	if (ha->queue_pair_map[qidx]) {
		*handle = ha->queue_pair_map[qidx];
		ql_log(ql_log_info, vha, 0x2121,
		    "Returning existing qpair of %p for idx=%x\n",
		    *handle, qidx);
		return 0;
	}

	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
	if (qpair == NULL) {
		ql_log(ql_log_warn, vha, 0x2122,
		    "Failed to allocate qpair\n");
		return -EINVAL;
	}
	*handle = qpair;

	return 0;
}

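/*
 * Final kref release for an FCP command: detach the srb from its
 * nvme_private under cmd_lock, report the received response length back
 * to the transport (zeroing it on error), complete the request, and
 * return the srb to its qpair.
 */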
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
	}
	fd->status = 0;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

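/*
 * Final kref release for an LS request: detach the srb under cmd_lock,
 * complete the request with its recorded completion status, and free the
 * srb.
 */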
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

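/* Work handler that completes an LS request from process context. */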
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

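/*
 * Done callback for an LS srb: record the completion status and defer
 * the actual completion to a workqueue.
 */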
static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

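/*
 * Worker that aborts an outstanding NVMe command. If the firmware has
 * stopped and the port is gone there is nothing to abort; if the host is
 * shutting down the srb is completed directly instead. Either way, the
 * kref taken when the work was scheduled is dropped on exit.
 */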
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
	    container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	    "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	    __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	/* The kref was taken before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

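/*
 * Transport ls_abort entry point: take a reference on the srb under
 * cmd_lock (bailing out if the command has already completed) and defer
 * the abort to a workqueue.
 */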
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

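/*
 * Transport ls_req entry point: allocate an SRB_NVME_LS srb, map the
 * request buffer for DMA, and hand the srb to the firmware.
 */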
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = (void *)priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

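/*
 * Transport fcp_abort entry point: same scheme as qla_nvme_ls_abort().
 * Grab a reference on the srb under cmd_lock and queue the abort work.
 */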
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

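/*
 * Build and post a Command Type NVME IOCB for an FCP request on the
 * qpair's request ring, chaining Continuation Type 1 IOCBs when the
 * scatter list needs more DSDs than fit in the command packet, then ring
 * the request-queue doorbell. Takes the qpair lock itself.
 */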
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}

	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;

		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer - how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = CF_READ_DATA;
		vha->qla_stats.input_bytes += fd->payload_length;
		vha->qla_stats.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = CF_WRITE_DATA;
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					CF_NVME_FIRST_BURST_ENABLE;
		}
		vha->qla_stats.output_bytes += fd->payload_length;
		vha->qla_stats.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
			    &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

/* Post a command */
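/*
 * Transport fcp_io entry point. IO is rejected with -EBUSY while the
 * fcport is resetting so the transport retries instead of failing the
 * request outright.
 */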
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval = -ENODEV;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;

	if (!qpair || !fcport || !qpair->fw_started || fcport->deleted)
		return rval;

	vha = fcport->vha;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IOs, return busy back to stall the IO queue. This happens
	 * when the link goes away and fw hasn't notified us yet, but IOs
	 * are being returned. If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = (void *)priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

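/* Transport callback: the local port deletion has completed. */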
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

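/* Transport callback: the remote port deletion has completed. */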
static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;

	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.module = THIS_MODULE,
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.max_hw_queues = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

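/*
 * Unregister the remote port from the FC-NVMe transport and wait for the
 * deletion to complete. If the driver is being removed, dev_loss_tmo is
 * forced to zero first so the transport tears the port down immediately.
 */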
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

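/*
 * Unregister the local port from the FC-NVMe transport and wait for the
 * deletion to complete.
 */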
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

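/*
 * Register the HBA as an FC-NVMe local port. max_hw_queues is clamped to
 * the hardware request queues available for NVMe (max_req_queues - 2).
 */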
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);
	WARN_ON(ha->max_req_queues < 3);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_req_queues - 2));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}