/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
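
/*
 * Worked example (illustrative, not in the original source): the first
 * Command Type 2 IOCB holds 3 DSDs and each Continuation Type 0 IOCB
 * holds 7, so 12 data segments need 1 + ceil((12 - 3) / 7) = 3 entries,
 * i.e. qla2x00_calc_iocbs_32(12) == 3.
 */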
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
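
/*
 * Worked example (illustrative, not in the original source): 64-bit DSDs
 * are larger, so the first Command Type 3 IOCB holds only 2 and each
 * Continuation Type 1 IOCB holds 5; 12 data segments therefore need
 * 1 + ceil((12 - 2) / 5) = 3 entries.
 */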
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
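
/*
 * Summary (added commentary, not in the original source): STRIP ops ask
 * the firmware to remove protection data in flight (PO_MODE_DIF_REMOVE),
 * INSERT ops ask it to generate protection data (PO_MODE_DIF_INSERT),
 * and PASS ops forward the tuples unchanged; for PASS on a host using
 * the IP-checksum DIX guard, PO_MODE_DIF_TCP_CKSUM is selected instead
 * of PO_MODE_DIF_PASS.
 */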
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
		} else if (IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *)mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
					MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return 0;
	}

	vha = sp->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
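
/*
 * Worked example (illustrative, not in the original source): this is a
 * plain ceiling division, so dsds == 2 * QLA_DSDS_PER_IOCB + 1 yields 3
 * DSD lists, while an exact multiple yields dsds / QLA_DSDS_PER_IOCB.
 */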
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @req: pointer to request queue
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds, struct req_que *req)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return;
	}

	vha = sp->vha;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};
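
/*
 * Added commentary (not in the original source): a 0xff byte in a mask
 * asks the firmware to validate/replace the corresponding tag byte,
 * while 0x00 leaves that byte unchecked; qla24xx_set_t10dif_tags()
 * below fills the masks according to the DIF type in use.
 */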
/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
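
/*
 * Added commentary (not in the original source): each call carves the
 * next chunk off the data scatterlist at a blk_sz (protection interval)
 * boundary, returning 1 with sgx->dma_addr/dma_len describing the chunk;
 * *partial is 0 when a full block boundary was reached and 1 when the
 * current s/g element ended mid-block. The walk returns 0 once all of
 * sgx->tot_bytes has been consumed.
 */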
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	uint32_t prot_int; /* protection interval */
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
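
/*
 * Added commentary (not in the original source): with DIF bundling off,
 * after every completed protection interval of data the walker above
 * emits one extra 8-byte DSD pointing into the protection scatterlist,
 * so the firmware receives data blocks and their protection tuples
 * interleaved in a single DSD chain.
 */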
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
	} else if (tc) {
		sgl = tc->sg;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				*tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to pass to the firmware
 */
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;
	fcp_cmnd->task_attribute = TSK_SIMPLE;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;
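	/*
	 * Added commentary (not in the original source): each protection
	 * interval (one logical block of blk_size bytes) carries one
	 * 8-byte DIF tuple, hence (data_bytes / blk_size) * 8.
	 */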
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *)&crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
			tot_prot_dsds);
		cur_dsd = (uint32_t *)&crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *)&crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
2009-04-06 22:33:41 -07:00
2010-05-04 15:01:30 -07:00
/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	req = vha->req;
	rsp = req->rsp;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;
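	/*
	 * A marker IOCB resynchronizes command delivery with the firmware
	 * after a reset or loop event, so it must go out before any new
	 * SCSI command is queued.
	 */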
	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
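	/*
	 * Handle 0 is reserved as the "free" marker, so the search wraps
	 * back to 1 and walks circularly from the last handle issued.
	 */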
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;
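		/*
		 * When the HBA inserts or strips the protection data, the
		 * transfer must be presented in sector-size blocks, so the
		 * segment count is recomputed by walking the scatterlist
		 * one block at a time.
		 */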
		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;

	if (req->cnt < (req_cnt + 2)) {
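		/*
		 * Shadow-register capable ISPs DMA the request-queue out
		 * pointer into host memory, so it can be read there instead
		 * of through a slow MMIO access.
		 */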
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
qla2xxx_start_scsi_mq(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
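	/*
	 * Leave a two-entry cushion so the ring is never filled
	 * completely; a full ring would be indistinguishable from an
	 * empty one when the in and out indices are compared.
	 */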
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
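	/*
	 * The +2 below skips the first two dwords (entry header and
	 * handle), which were just initialized; only the rest of the
	 * entry is zeroed.
	 */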
	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	cmd_pkt->task = TSK_SIMPLE;

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2xxx_dif_start_scsi_mq(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;
	struct qla_qpair	*qpair = sp->qpair;

#define QDSS_GOT_Q_SPACE	BIT_0
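	/*
	 * Commands must not be queued while the queue pair is offline, or
	 * when protected I/O is requested on a qpair without DIF/DIX
	 * support; complete them immediately with DID_NO_CONNECT.
	 */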
	/* Check for host side state */
	if (!qpair->online) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	if (!qpair->difdix_supported &&
	    scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
		cmd->result = DID_NO_CONNECT << 16;
		return QLA_INTERFACE_ERROR;
	}

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla2xxx_start_scsi_mq(sp);
	}

	spin_lock_irqsave(&qpair->qp_lock, flags);

	/* Setup qpair pointers */
	rsp = qpair->rsp;
	req = qpair->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			spin_unlock_irqrestore(&qpair->qp_lock, flags);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}
	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}
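	/*
	 * A ring slot is owned from this point on; QDSS_GOT_Q_SPACE tells
	 * the error path to hand the slot and ring credits back.
	 */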
	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	cmd_pkt->timeout = cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}

	/* Cleanup will be performed by the caller (queuecommand) */
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/* Generic Control-SRB manipulation functions. */

/* hardware_lock assumed to be held. */
void *
__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = qpair->req;
	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
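	/*
	 * Internal IOCBs carry no SRB and take no slot in the
	 * outstanding-command array; they only need ring space.
	 */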
	handle = 0;
	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		if (qpair->use_shadow_reg)
			cnt = *req->out_ptr;
		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);

	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

	/*
	 * Return before the error label: falling through would charge
	 * every successful allocation to the failure counter.
	 */
	return pkt;

queuing_error:
	qpair->tgt_counters.num_alloc_iocb_failed++;
	return pkt;
}

void *
qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
{
	scsi_qla_host_t *vha = qpair->vha;

	if (qla2x00_reset_active(vha))
		return NULL;

	return __qla2x00_alloc_iocbs(qpair, sp);
}

void *
qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
{
	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
}

static void
qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
		logio->control_flags |= LCF_NVME_PRLI;

	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);

	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
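	/*
	 * Keep the N_Port handle across the implicit LOGO while a target
	 * session still references it; otherwise let the firmware free
	 * the handle.
	 */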
	if (!sp->fcport->se_sess ||
	    !sp->fcport->keep_nport_handle)
		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}

static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint64_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
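	/*
	 * Timeout is 2 * R_A_TOV. The /10 assumes ha->r_a_tov is held in
	 * 100 ms units, giving a value in seconds before doubling.
	 */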
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}

static void
qla2x00_els_dcmd_sp_free(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	kfree(sp->fcport);

	if (elsio->u.els_logo.els_logo_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_logo.els_logo_pyld,
		    elsio->u.els_logo.els_logo_pyld_dma);

	del_timer(&elsio->timer);
	qla2x00_rel_sp(sp);
}

static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_io, vha, 0x3069,
	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	complete(&lio->u.els_logo.comp);
}

static void
qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	ql_dbg(ql_dbg_io, vha, 0x3072,
	    "%s hdl=%x, portid=%02x%02x%02x done\n",
	    sp->name, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	complete(&lio->u.els_logo.comp);
}
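
/*
 * Issue a driver-generated ELS (currently LOGO) synchronously: a
 * temporary fcport and SRB are built, the IOCB is queued, and the
 * caller sleeps on a completion until the firmware answers or the
 * timer fires.
 */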
int
qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
    port_id_t remote_did)
{
	srb_t *sp;
	fc_port_t *fcport = NULL;
	struct srb_iocb *elsio = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct els_logo_payload logo_pyld;
	int rval = QLA_SUCCESS;

	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
		return -ENOMEM;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		kfree(fcport);
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}

	elsio = &sp->u.iocb_cmd;
	fcport->loop_id = 0xFFFF;
	fcport->d_id.b.domain = remote_did.b.domain;
	fcport->d_id.b.area = remote_did.b.area;
	fcport->d_id.b.al_pa = remote_did.b.al_pa;

	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	sp->type = SRB_ELS_DCMD;
	sp->name = "ELS_DCMD";
	sp->fcport = fcport;
	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
	sp->done = qla2x00_els_dcmd_sp_done;
	sp->free = qla2x00_els_dcmd_sp_free;

	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
	    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
	    GFP_KERNEL);

	if (!elsio->u.els_logo.els_logo_pyld) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));

	elsio->u.els_logo.els_cmd = els_opcode;
	logo_pyld.opcode = els_opcode;
	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
	logo_pyld.s_id[1] = vha->d_id.b.area;
	logo_pyld.s_id[2] = vha->d_id.b.domain;
	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);

	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
	    sizeof(struct els_logo_payload));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_io, vha, 0x3074,
	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	wait_for_completion(&elsio->u.els_logo.comp);

	sp->free(sp);
	return rval;
}

static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	scsi_qla_host_t *vha = sp->vha;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;
	uint32_t	dsd_len = 24;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = 1;
	els_iocb->vp_index = vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = 0;
	els_iocb->opcode = elsio->u.els_logo.els_cmd;

	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->s_id[0] = vha->d_id.b.al_pa;
	els_iocb->s_id[1] = vha->d_id.b.area;
	els_iocb->s_id[2] = vha->d_id.b.domain;
	els_iocb->control_flags = 0;

	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
		els_iocb->tx_byte_count = sizeof(struct els_plogi_payload);
		els_iocb->tx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
		els_iocb->tx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
		els_iocb->tx_len = dsd_len;

		els_iocb->rx_dsd_count = 1;
		els_iocb->rx_byte_count = sizeof(struct els_plogi_payload);
		els_iocb->rx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
		els_iocb->rx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
		els_iocb->rx_len = dsd_len;
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
		    "PLOGI ELS IOCB:\n");
		ql_dump_buffer(ql_log_info, vha, 0x0109,
		    (uint8_t *)els_iocb, 0x70);
	} else {
		els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
		els_iocb->tx_address[0] =
		    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
		els_iocb->tx_address[1] =
		    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));

		els_iocb->rx_byte_count = 0;
		els_iocb->rx_address[0] = 0;
		els_iocb->rx_address[1] = 0;
		els_iocb->rx_len = 0;
	}

	sp->vha->qla_stats.control_requests++;
}

static void
qla2x00_els_dcmd2_sp_free(void *data)
{
	srb_t *sp = data;
	struct srb_iocb *elsio = &sp->u.iocb_cmd;

	if (elsio->u.els_plogi.els_plogi_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_plogi.els_plogi_pyld,
		    elsio->u.els_plogi.els_plogi_pyld_dma);

	if (elsio->u.els_plogi.els_resp_pyld)
		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
		    elsio->u.els_plogi.els_resp_pyld,
		    elsio->u.els_plogi.els_resp_pyld_dma);

	del_timer(&elsio->timer);
	qla2x00_rel_sp(sp);
}

static void
qla2x00_els_dcmd2_iocb_timeout(void *data)
{
	srb_t *sp = data;
	fc_port_t *fcport = sp->fcport;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	unsigned long flags = 0;
	int res;

	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);

	/* Abort the exchange */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	res = ha->isp_ops->abort_command(sp);
	ql_dbg(ql_dbg_io, vha, 0x3070,
	    "mbx abort_command %s\n",
	    (res == QLA_SUCCESS) ? "successful" : "failed");
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	complete(&lio->u.els_plogi.comp);
}

static void
qla2x00_els_dcmd2_sp_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	fc_port_t *fcport = sp->fcport;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct scsi_qla_host *vha = sp->vha;

	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
	    "%s ELS hdl=%x, portid=%06x done %8phC\n",
	    sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);

	complete(&lio->u.els_plogi.comp);
}

int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
    fc_port_t *fcport, port_id_t remote_did)
{
	srb_t *sp;
	struct srb_iocb *elsio = NULL;
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_SUCCESS;
	void	*ptr, *resp_ptr;
	dma_addr_t ptr_dma;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_info, vha, 0x70e6,
		    "SRB allocation failed\n");
		return -ENOMEM;
	}

	elsio = &sp->u.iocb_cmd;
	fcport->d_id.b.domain = remote_did.b.domain;
	fcport->d_id.b.area = remote_did.b.area;
	fcport->d_id.b.al_pa = remote_did.b.al_pa;

	ql_dbg(ql_dbg_io, vha, 0x3073,
	    "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);

	sp->type = SRB_ELS_DCMD;
	sp->name = "ELS_DCMD";
	sp->fcport = fcport;

	elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
	init_completion(&elsio->u.els_plogi.comp);
	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);

	sp->done = qla2x00_els_dcmd2_sp_done;
	sp->free = qla2x00_els_dcmd2_sp_free;

	ptr = elsio->u.els_plogi.els_plogi_pyld =
	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
	ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;

	if (!elsio->u.els_plogi.els_plogi_pyld) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
	    dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
		&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);

	if (!elsio->u.els_plogi.els_resp_pyld) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);

	memset(ptr, 0, sizeof(struct els_plogi_payload));
	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
	elsio->u.els_plogi.els_cmd = els_opcode;
	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
	qla24xx_get_port_login_templ(vha, ptr_dma + 4,
		&elsio->u.els_plogi.els_plogi_pyld->data[0],
		sizeof(struct els_plogi_payload));

	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109,
	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		rval = QLA_FUNCTION_FAILED;
		goto out;
	}

	ql_dbg(ql_dbg_io, vha, 0x3074,
	    "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n",
	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24);

	wait_for_completion(&elsio->u.els_plogi.comp);

	if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
		rval = QLA_FUNCTION_FAILED;

out:
	sp->free(sp);
	return rval;
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_request->rqst_data.r_els.els_code :
	    bsg_request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));

	sp->vha->qla_stats.control_requests++;
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = cpu_to_le16(0);
	ct_iocb->control_flags = cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
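	/*
	 * The MS IOCB itself carries the first response DSD; additional
	 * reply segments spill into Continuation Type 1 IOCBs below.
	 */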
	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t       sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;

	sp->vha->qla_stats.control_requests++;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t        avail_dsds;
	uint32_t        *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t cmd_dsds, rsp_dsds;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	int entry_count = 1;
	cont_a64_entry_t *cont_pkt = NULL;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->vha->vp_idx;
	ct_iocb->comp_status = cpu_to_le16(0);

	cmd_dsds = bsg_job->request_payload.sg_cnt;
	rsp_dsds = bsg_job->reply_payload.sg_cnt;

	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
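	/*
	 * Two DSDs are embedded in the CT IOCB; the command and response
	 * scatterlists consume them in order before spilling into
	 * Continuation Type 1 IOCBs.
	 */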
	avail_dsds = 2;
	cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
		dma_addr_t       sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(
			    vha, ha->req_q_map[0]);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	index = 0;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
		dma_addr_t       sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

/*
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long   flags;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;

	/* Setup device pointers. */
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
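	/*
	 * Above ql2xshiftctondsd segments, switch from Command Type 7 to
	 * Command Type 6, which carries the FCP_CMND IU in a separate DMA
	 * buffer and chains DSD lists instead of continuation IOCBs.
	 */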
	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
				&reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number*/
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;
	/* Set chip new ring index. */
	/* write, read and verify logic */
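	/*
	 * Ring the ISP82xx doorbell. When ql2xdbwr is set, a single
	 * indirect register write suffices; otherwise the value is
	 * written directly and then read back, rewriting until the
	 * doorbell reflects it.
	 */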
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
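
/**
 * qla24xx_abort_iocb() - Build an Abort IOCB for a previously issued command.
 * @sp: SRB carrying the abort request
 * @abt_iocb: Abort IOCB entry on the request ring to populate
 *
 * Fills in the target's N_Port ID and loop ID, plus the handle of the
 * command being aborted (handle_to_abort) and the request queue on which
 * that command was issued.
 */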
static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
				    aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);

	/* Send the command to the firmware */
	wmb();
}
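
/**
 * qla2x00_mb_iocb() - Build a Mailbox-passthrough IOCB.
 * @sp: SRB carrying the mailbox command
 * @mbx: MBX IOCB entry to populate
 *
 * Copies as many outbound mailbox registers as fit in the IOCB,
 * converting each to little-endian.
 */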
static void
qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
{
	int i, sz;

	mbx->entry_type = MBX_IOCB_TYPE;
	mbx->handle = sp->handle;
	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));

	for (i = 0; i < sz; i++)
		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
}
static void
qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
{
	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
	ct_pkt->handle = sp->handle;
}
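
/**
 * qla2x00_send_notify_ack_iocb() - Build a Notify Acknowledge IOCB.
 * @sp: SRB carrying the acknowledgement
 * @nack: Notify-Ack IOCB entry to populate
 *
 * Mirrors the relevant fields of the received immediate notify back to
 * the firmware; the SRR fields are echoed with no reject code set.
 */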
static void qla2x00_send_notify_ack_iocb(srb_t *sp,
	struct nack_to_isp *nack)
{
	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;

	nack->entry_type = NOTIFY_ACK_TYPE;
	nack->entry_count = 1;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = sp->handle;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = 0;
	nack->u.isp24.srr_reject_code = 0;
	nack->u.isp24.srr_reject_code_expl = 0;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}
/*
 * Build NVME LS request
 */
static int
qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
{
	struct srb_iocb *nvme;
	int rval = QLA_SUCCESS;

	nvme = &sp->u.iocb_cmd;
	cmd_pkt->entry_type = PT_LS4_REQUEST;
	cmd_pkt->entry_count = 1;
	cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;

	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	cmd_pkt->tx_dseg_count = 1;
	cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));

	cmd_pkt->rx_dseg_count = 1;
	cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
	cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
	cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));

	return rval;
}
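
/**
 * qla25xx_ctrlvp_iocb() - Build a VP Control IOCB.
 * @sp: SRB carrying the VP control command
 * @vce: VP control IOCB entry to populate
 *
 * Sets the bit for the targeted vp_index in the vp_idx_map bitmap;
 * e.g. vp_index 10 lands in map = (10 - 1) / 8 = 1 at
 * pos = (10 - 1) & 7 = 1.
 */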
static void
qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
{
	int map, pos;

	vce->entry_type = VP_CTRL_IOCB_TYPE;
	vce->handle = sp->handle;
	vce->entry_count = 1;
	vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
	vce->vp_count = cpu_to_le16(1);

	/*
	 * The index map in firmware starts at 1, so decrement the index;
	 * this is OK as index 0 is never used.
	 */
	map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
	pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
	vce->vp_idx_map[map] |= 1 << pos;
}
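
/**
 * qla24xx_prlo_iocb() - Build a PRLO (process logout) IOCB.
 * @sp: SRB carrying the PRLO request
 * @logio: login/logout IOCB entry to populate
 */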
static void
qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_PRLO | LCF_IMPL_PRLO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}
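
/**
 * qla2x00_start_sp() - Allocate an IOCB, build it per SRB type and
 * notify the firmware.
 * @sp: SRB to issue
 *
 * Dispatches on sp->type to the matching IOCB builder, falling back to
 * the pre-FWI2 variants where the hardware requires it. Takes and
 * releases the hardware_lock internally, so the caller must not hold it.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED if no IOCB space is
 * available.
 */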
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	scsi_qla_host_t *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_PRLI_CMD:
		qla24xx_prli_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_NVME_LS:
		qla_nvme_ls(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	case SRB_ELS_DCMD:
		qla24xx_els_logo_iocb(sp, pkt);
		break;
	case SRB_CT_PTHRU_CMD:
		qla2x00_ctpthru_cmd_iocb(sp, pkt);
		break;
	case SRB_MB_IOCB:
		qla2x00_mb_iocb(sp, pkt);
		break;
	case SRB_NACK_PLOGI:
	case SRB_NACK_PRLI:
	case SRB_NACK_LOGO:
		qla2x00_send_notify_ack_iocb(sp, pkt);
		break;
	case SRB_CTRL_VP:
		qla25xx_ctrlvp_iocb(sp, pkt);
		break;
	case SRB_PRLO_CMD:
		qla24xx_prlo_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
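
/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @cmd_pkt: bidirectional IOCB entry to populate
 * @tot_dsds: total number of data segment descriptors (write plus read)
 *
 * Lays out the write (request payload) DSDs first, then the read (reply
 * payload) DSDs, spilling into Continuation Type 1 IOCBs of five DSDs
 * each once the single DSD in the command IOCB itself is consumed.
 */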
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
	struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; here both flags are set.
	 * Also set the BD_WRAP_BACK flag; the firmware takes care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available in the bidirectional IOCB itself;
	 * the remaining DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;

	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* For a read request the DSDs always go to a continuation IOCB
	 * following the write DSDs. If there is room on the current IOCB
	 * each DSD is added to it, else a new continuation IOCB is
	 * allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* Continuation Type 1 IOCB can accommodate
			 * 5 DSDS
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should match the number of IOCBs required for this cmd */
	cmd_pkt->entry_count = entry_count;
}
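
/**
 * qla2x00_start_bidir() - Issue a bidirectional command to the firmware.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY if no free handle or
 * ring space is available, or EXT_STATUS_MAILBOX if the sync marker fails.
 */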
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCB required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha) */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t)rsp->id;
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}