/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003 - 2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}
	return (cflags);
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
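
/*
 * Illustrative example (not part of the driver logic): the Command
 * Type 2 IOCB carries 3 DSDs and each Continuation Type 0 IOCB carries
 * 7 more, so a 12-segment transfer needs 1 + ceil((12 - 3) / 7) = 3
 * IOCB entries, i.e. qla2x00_calc_iocbs_32(12) == 3.
 */
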
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
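
/*
 * Illustrative example (not part of the driver logic): 64-bit DSDs are
 * wider, so only 2 fit in the command IOCB and 5 in each Continuation
 * Type 1 IOCB; the same 12-segment transfer therefore needs
 * 1 + ceil((12 - 2) / 5) = 3 IOCB entries.
 */
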
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t	guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
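
/*
 * Summary of the mapping above (informational, derived from the switch
 * statement): STRIP operations ask the firmware to validate and remove
 * protection data, INSERT operations ask it to generate protection
 * data, and PASS operations hand existing protection data through,
 * using an IP-style checksum guard instead of CRC when the SCSI host
 * advertises SHOST_DIX_GUARD_IP.
 */
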
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32 bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64 bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	} else {
		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
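
/*
 * Worked example of the free-slot arithmetic above (informational
 * only, with a hypothetical 128-entry request ring): if ring_index is
 * 120 and the chip's consumer index cnt is 10, the producer has
 * wrapped, so free space is req->length - (ring_index - cnt) =
 * 128 - 110 = 18 entries; the "+ 2" headroom keeps the producer from
 * overrunning the consumer.
 */
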
/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_P3P_TYPE(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_QLAFX00(ha)) {
			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint64_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24 = NULL;

	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *)mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16((uint16_t)lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
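
/*
 * Illustrative example (not part of the driver logic): this is a plain
 * ceiling division, so assuming QLA_DSDS_PER_IOCB were 37, a
 * 40-segment command would need 40 / 37 = 1 full DSD list plus one
 * partial list, i.e. qla24xx_calc_dsd_lists(40) == 2.
 */
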
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
		vha->qla_stats.output_requests++;
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
		vha->qla_stats.input_requests++;
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
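
/*
 * Informational note on the masks above: each 0xff byte in
 * ref_tag_mask tells the firmware to validate (and, when replacing,
 * rewrite) that byte of the 32-bit reference tag. For Types 0, 1 and 2
 * the reference tag is seeded with the low 32 bits of the command's
 * LBA, while Type 3 clears the mask entirely because only the 16-bit
 * guard is checked.
 */
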
struct qla2_sgx {
	dma_addr_t	dma_addr;	/* OUT */
	uint32_t	dma_len;	/* OUT */

	uint32_t	tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t	bytes_consumed;
	uint32_t	num_bytes;
	uint32_t	tot_partial;

	/* for debugging */
	uint32_t	num_sg;
	srb_t		*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
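
/*
 * Worked example of the block carving above (informational only): with
 * a 512-byte protection interval and scatter elements of 300 and 724
 * bytes, the first call returns a 300-byte partial run (*partial == 1),
 * the second returns the 212 bytes completing the first block
 * (*partial == 0), and later calls keep carving 512-byte blocks across
 * element boundaries until tot_bytes is consumed.
 */
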
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint32_t	prot_int; /* protection interval */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd;
	struct scsi_qla_host *vha;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	if (sp) {
		vha = sp->fcport->vha;
		cmd = GET_CMD_SP(sp);
		prot_int = cmd->device->sector_size;

		sgx.tot_bytes = scsi_bufflen(cmd);
		sgx.cur_sg = scsi_sglist(cmd);
		sgx.sp = sp;

		sg_prot = scsi_prot_sglist(cmd);
	} else if (tc) {
		vha = tc->vha;
		prot_int = tc->blk_sz;
		sgx.tot_bytes = tc->bufflen;
		sgx.cur_sg = tc->sg;
		sg_prot = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_sglist(cmd);
		vha = sp->fcport->vha;
	} else if (tc) {
		sgl = tc->sg;
		vha = tc->vha;
	} else {
		BUG();
		return 1;
	}

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg, *sgl;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	struct scsi_qla_host *vha;

	if (sp) {
		cmd = GET_CMD_SP(sp);
		sgl = scsi_prot_sglist(cmd);
		vha = sp->fcport->vha;
	} else if (tc) {
		vha = tc->vha;
		sgl = tc->prot_sg;
	} else {
		BUG();
		return 1;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe021,
		"%s: enter\n", __func__);

	for_each_sg(sgl, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			if (sp) {
				list_add_tail(&dsd_ptr->list,
				    &((struct crc_context *)
					    sp->u.scmd.ctx)->dsd_list);

				sp->flags |= SRB_CRC_CTX_DSD_VALID;
			} else {
				list_add_tail(&dsd_ptr->list,
				    &(tc->ctx->dsd_list));
				tc->ctx_dsd_alloced = 1;
			}

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}
/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	int			sgc;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;
	char			tag[2];

	cmd = GET_CMD_SP(sp);

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = TSK_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = TSK_SIMPLE;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *)&crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *)&crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
			cur_dsd, tot_dsds, NULL))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
			(tot_dsds - tot_prot_dsds), NULL))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *)&crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
				tot_prot_dsds, NULL))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
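
/*
 * Informational note on the DIF accounting above: each protection
 * interval carries an 8-byte tuple (guard, app tag, ref tag), so a
 * 64 KiB transfer with 512-byte sectors adds (65536 / 512) * 8 = 1024
 * DIF bytes; total_bytes (the fcp_dl value) is what travels on the
 * wire, while the CRC context's byte_count describes the host-side
 * data buffer.
 */
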
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		default:
			cmd_pkt->task = TSK_SIMPLE;
			break;
		}
	} else {
		cmd_pkt->task = TSK_SIMPLE;
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
	    QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t)rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
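
/*
 * Editor's note: a small, hypothetical sketch of the protection-segment
 * arithmetic in the READ_INSERT/WRITE_STRIP branches above. The HBA adds
 * or removes one DIF tuple per logical block, so the protection-segment
 * count is simply the transfer length divided by the block size.
 */
static inline uint32_t demo_dif_prot_segs(uint32_t data_len,
    uint32_t sector_size)
{
	/* e.g. 65536 bytes on 512-byte blocks -> 128 protection segments */
	return data_len / sector_size;
}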

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
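
/*
 * Editor's note: the CPU-to-response-queue mapping above, restated as a
 * hypothetical helper. Queue 0 is the default; CPUs 0..(max_rsp_queues - 2)
 * map onto the dedicated queues 1..(max_rsp_queues - 1).
 */
static inline int demo_rsp_queue_for_cpu(int cpu, int max_rsp_queues,
    int affinity_enabled)
{
	if (affinity_enabled && cpu >= 0 && cpu < max_rsp_queues - 1)
		return cpu + 1;	/* dedicated per-CPU response queue */
	return 0;		/* default response queue */
}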

/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_P3P_TYPE(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else if (IS_QLAFX00(ha))
			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);

	if (IS_QLAFX00(ha)) {
		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
	} else {
		pkt->entry_count = req_cnt;
		pkt->handle = handle;
	}

queuing_error:
	return pkt;
}
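
/*
 * Editor's note: a hypothetical restatement of the free-slot computation
 * repeated throughout this file. While the producer (ring_index) trails
 * the consumer index read back from the chip, the gap between them is
 * free; once the producer has wrapped past the consumer, the free space
 * is the slots to the end of the ring plus those before the consumer.
 */
static inline uint16_t demo_req_ring_space(uint16_t ring_index, uint16_t out,
    uint16_t length)
{
	if (ring_index < out)
		return out - ring_index;
	return length - (ring_index - out);
}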

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
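
/*
 * Editor's note: a hypothetical sketch of how qla2x00_adisc_iocb() above
 * spreads a 64-bit DMA address across 16-bit mailbox registers. MSD takes
 * the upper 32 bits of the address and MSW/LSW take the upper/lower 16
 * bits of each 32-bit half (macro names as used by the driver; this
 * helper itself is invented).
 */
static inline void demo_split_dma_addr(uint64_t dma, uint16_t mb[4])
{
	mb[0] = (dma >> 16) & 0xffff;	/* MSW(dma):      bits 31:16 -> mb2 */
	mb[1] = dma & 0xffff;		/* LSW(dma):      bits 15:0  -> mb3 */
	mb[2] = (dma >> 48) & 0xffff;	/* MSW(MSD(dma)): bits 63:48 -> mb6 */
	mb[3] = (dma >> 32) & 0xffff;	/* LSW(MSD(dma)): bits 47:32 -> mb7 */
}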

static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	uint64_t lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));

	sp->fcport->vha->qla_stats.control_requests++;
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;

	sp->fcport->vha->qla_stats.control_requests++;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iterartion = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iterartion++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
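
/*
 * Editor's note: a hypothetical helper showing the entry-count bookkeeping
 * both CT pass-through builders above perform implicitly. The base IOCB
 * carries one response DSD; every further group of five DSDs consumes one
 * Continuation Type 1 IOCB.
 */
static inline int demo_ct_entry_count(uint16_t rsp_dsds)
{
	int entries = 1;	/* the base CT IOCB */

	if (rsp_dsds > 1)
		entries += DIV_ROUND_UP(rsp_dsds - 1, 5);
	return entries;		/* e.g. 7 DSDs -> 1 base + 2 continuation */
}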

/*
 * qla82xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla82xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_82xx __iomem *reg;
	uint32_t dbval;
	uint32_t *fcp_dl;
	uint8_t additional_cdb_len;
	struct ct6_dsd *ctx;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	reg = &ha->iobase->isp82;
	cmd = GET_CMD_SP(sp);
	req = vha->req;
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	dbval = 0x04 | (ha->portnum << 5);

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x300c,
			    "qla2x00_marker failed for cmd=%p.\n", cmd);
			return QLA_FUNCTION_FAILED;
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
				&reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/* SCSI command bigger than 16 bytes must be
				 * a multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* build FCP_CMND IU */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen
		 */
		cmd_pkt->entry_status = (uint8_t)rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
					(req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/* Zero out remaining portion of packet. */
		/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/* Specify response queue number where
		 * completion should happen.
		 */
		cmd_pkt->entry_status = (uint8_t)rsp->id;
	}
	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	/* write, read and verify logic */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
			(unsigned long __iomem *)ha->nxdb_wr_ptr,
			dbval);
		wmb();
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
				(unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
		}
	}

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}
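
/*
 * Editor's note: a hypothetical restatement of the FCP_CMND IU sizing in
 * qla82xx_start_scsi() above: a 12-byte fixed header (LUN, CRN, task
 * attribute, task-management flags, additional-CDB-length byte), the CDB
 * itself (16 bytes plus any 4-byte-aligned extension), and the trailing
 * 4-byte FCP_DL count.
 */
static inline uint16_t demo_fcp_cmnd_iu_len(uint8_t cdb_len)
{
	uint8_t cdb_area = cdb_len > 16 ? cdb_len : 16;

	return 12 + cdb_area + 4;	/* e.g. a 32-byte CDB yields 48 */
}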

void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
	struct srb_iocb *aio = &sp->u.iocb_cmd;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct req_que *req = vha->req;

	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
	abt_iocb->entry_type = ABORT_IOCB_TYPE;
	abt_iocb->entry_count = 1;
	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	abt_iocb->handle_to_abort =
	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	abt_iocb->vp_index = vha->vp_idx;
	abt_iocb->req_que_no = cpu_to_le16(req->id);
	/* Send the command to the firmware */
	wmb();
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_tm_iocb(sp, pkt) :
		    qla24xx_tm_iocb(sp, pkt);
		break;
	case SRB_FXIOCB_DCMD:
	case SRB_FXIOCB_BCMD:
		qlafx00_fxdisc_iocb(sp, pkt);
		break;
	case SRB_ABT_CMD:
		IS_QLAFX00(ha) ?
		    qlafx00_abort_iocb(sp, pkt) :
		    qla24xx_abort_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
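
/*
 * Editor's note: a hypothetical caller illustrating the dispatch contract
 * of qla2x00_start_sp() above. The caller selects the IOCB builder by
 * setting sp->type and fills the matching payload under sp->u before
 * handing over the SRB; demo_send_lun_reset() and its parameters are
 * invented for illustration.
 */
static int demo_send_lun_reset(srb_t *sp, uint64_t lun)
{
	sp->type = SRB_TM_CMD;			/* routes to the TM builders */
	sp->u.iocb_cmd.u.tmf.flags = TCF_LUN_RESET;
	sp->u.iocb_cmd.u.tmf.lun = lun;
	return qla2x00_start_sp(sp);		/* builds and rings the IOCB */
}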

static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
	struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate bidir command */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);

	/* Set the transfer direction; in this case set both flags.
	 * Also set the BD_WRAP_BACK flag; the firmware will take care of
	 * assigning DID=SID for outgoing pkts.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;

	vha->qla_stats.output_bytes += req_data_len;
	vha->qla_stats.output_requests++;

	/* Only one DSD is available for a bidirectional IOCB; the remaining
	 * DSDs are bundled in continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* For a read request the DSDs always go to a continuation IOCB
	 * and follow the write DSDs. If there is room on the current IOCB
	 * they are added there; otherwise a new continuation IOCB is
	 * allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/* A Continuation Type 1 IOCB can accommodate
			 * 5 DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* This value should be the same as the number of IOCBs required
	 * for this cmd.
	 */
	cmd_pkt->entry_count = entry_count;
}

int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rval = QLA_SUCCESS;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate number of IOCB required */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID (of vha). */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t)rsp->id;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
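
/*
 * Editor's note: a hypothetical helper distilling the outstanding-command
 * handle search that each start routine above open-codes. Handle 0 is
 * reserved; the scan resumes after the last handle issued, wraps around,
 * and reports 0 when every slot is occupied.
 */
static inline uint32_t demo_find_free_handle(srb_t **cmds, uint32_t num_cmds,
    uint32_t last_handle)
{
	uint32_t index, handle = last_handle;

	for (index = 1; index < num_cmds; index++) {
		if (++handle == num_cmds)
			handle = 1;	/* wrap, skipping handle 0 */
		if (!cmds[handle])
			return handle;	/* free slot found */
	}
	return 0;	/* ring of handles exhausted */
}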