2012-05-20 19:59:14 +04:00
/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
# include <linux/kernel.h>
# include <linux/module.h>
# include <linux/ratelimit.h>
2014-01-08 02:44:57 +04:00
# include <linux/crc-t10dif.h>
2016-11-15 02:47:14 +03:00
# include <linux/t10-pi.h>
2012-05-20 19:59:14 +04:00
# include <asm/unaligned.h>
2015-05-08 11:11:12 +03:00
# include <scsi/scsi_proto.h>
2013-08-20 02:20:28 +04:00
# include <scsi/scsi_tcq.h>
2012-05-20 19:59:14 +04:00
# include <target/target_core_base.h>
# include <target/target_core_backend.h>
# include <target/target_core_fabric.h>
# include "target_core_internal.h"
# include "target_core_ua.h"
2013-12-17 12:18:49 +04:00
# include "target_core_alua.h"
2012-05-20 19:59:14 +04:00
2015-02-14 04:32:11 +03:00
/* Forward declarations; parameter names kept for readability. */
static sense_reason_t sbc_check_prot(struct se_device *dev, struct se_cmd *cmd,
				     unsigned char *cdb, u32 sectors,
				     bool is_write);
static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
2015-02-14 04:32:11 +03:00
2012-11-07 00:24:09 +04:00
static sense_reason_t
sbc_emulate_readcapacity ( struct se_cmd * cmd )
2012-05-20 19:59:15 +04:00
{
struct se_device * dev = cmd - > se_dev ;
2013-06-27 04:36:19 +04:00
unsigned char * cdb = cmd - > t_task_cdb ;
2012-05-20 19:59:15 +04:00
unsigned long long blocks_long = dev - > transport - > get_blocks ( dev ) ;
2012-09-07 19:30:40 +04:00
unsigned char * rbuf ;
unsigned char buf [ 8 ] ;
2012-05-20 19:59:15 +04:00
u32 blocks ;
2013-06-27 04:36:19 +04:00
/*
* SBC - 2 says :
* If the PMI bit is set to zero and the LOGICAL BLOCK
* ADDRESS field is not set to zero , the device server shall
* terminate the command with CHECK CONDITION status with
* the sense key set to ILLEGAL REQUEST and the additional
* sense code set to INVALID FIELD IN CDB .
*
* In SBC - 3 , these fields are obsolete , but some SCSI
* compliance tests actually check this , so we might as well
* follow SBC - 2.
*/
if ( ! ( cdb [ 8 ] & 1 ) & & ! ! ( cdb [ 2 ] | cdb [ 3 ] | cdb [ 4 ] | cdb [ 5 ] ) )
return TCM_INVALID_CDB_FIELD ;
2012-05-20 19:59:15 +04:00
if ( blocks_long > = 0x00000000ffffffff )
blocks = 0xffffffff ;
else
blocks = ( u32 ) blocks_long ;
2017-05-24 02:48:27 +03:00
put_unaligned_be32 ( blocks , & buf [ 0 ] ) ;
put_unaligned_be32 ( dev - > dev_attrib . block_size , & buf [ 4 ] ) ;
2012-05-20 19:59:15 +04:00
2012-09-07 19:30:40 +04:00
rbuf = transport_kmap_data_sg ( cmd ) ;
2013-01-30 02:18:00 +04:00
if ( rbuf ) {
memcpy ( rbuf , buf , min_t ( u32 , sizeof ( buf ) , cmd - > data_length ) ) ;
transport_kunmap_data_sg ( cmd ) ;
}
2012-05-20 19:59:15 +04:00
2014-06-10 22:07:47 +04:00
target_complete_cmd_with_length ( cmd , GOOD , 8 ) ;
2012-05-20 19:59:15 +04:00
return 0 ;
}
2012-11-07 00:24:09 +04:00
static sense_reason_t
sbc_emulate_readcapacity_16 ( struct se_cmd * cmd )
2012-05-20 19:59:15 +04:00
{
struct se_device * dev = cmd - > se_dev ;
2014-04-03 00:34:04 +04:00
struct se_session * sess = cmd - > se_sess ;
2015-02-28 09:05:33 +03:00
int pi_prot_type = dev - > dev_attrib . pi_prot_type ;
2012-09-07 19:30:40 +04:00
unsigned char * rbuf ;
unsigned char buf [ 32 ] ;
2012-05-20 19:59:15 +04:00
unsigned long long blocks = dev - > transport - > get_blocks ( dev ) ;
2012-09-07 19:30:40 +04:00
memset ( buf , 0 , sizeof ( buf ) ) ;
2017-05-24 02:48:27 +03:00
put_unaligned_be64 ( blocks , & buf [ 0 ] ) ;
put_unaligned_be32 ( dev - > dev_attrib . block_size , & buf [ 8 ] ) ;
2013-12-24 00:36:09 +04:00
/*
* Set P_TYPE and PROT_EN bits for DIF support
*/
2014-04-03 00:34:04 +04:00
if ( sess - > sup_prot_ops & ( TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS ) ) {
2015-02-28 09:05:33 +03:00
/*
* Only override a device ' s pi_prot_type if no T10 - PI is
* available , and sess_prot_type has been explicitly enabled .
*/
if ( ! pi_prot_type )
pi_prot_type = sess - > sess_prot_type ;
if ( pi_prot_type )
buf [ 12 ] = ( pi_prot_type - 1 ) < < 1 | 0x1 ;
2014-04-03 00:34:04 +04:00
}
2013-11-11 20:59:17 +04:00
if ( dev - > transport - > get_lbppbe )
buf [ 13 ] = dev - > transport - > get_lbppbe ( dev ) & 0x0f ;
if ( dev - > transport - > get_alignment_offset_lbas ) {
u16 lalba = dev - > transport - > get_alignment_offset_lbas ( dev ) ;
2017-05-24 02:48:27 +03:00
put_unaligned_be16 ( lalba , & buf [ 14 ] ) ;
2013-11-11 20:59:17 +04:00
}
2012-05-20 19:59:15 +04:00
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY ( 16 ) byte 14 if emulate_tpu or emulate_tpws is enabled .
*/
2015-11-30 01:44:57 +03:00
if ( dev - > dev_attrib . emulate_tpu | | dev - > dev_attrib . emulate_tpws ) {
2013-11-11 20:59:17 +04:00
buf [ 14 ] | = 0x80 ;
2012-05-20 19:59:15 +04:00
2015-11-30 01:44:57 +03:00
/*
* LBPRZ signifies that zeroes will be read back from an LBA after
* an UNMAP or WRITE SAME w / unmap bit ( sbc3r36 5.16 .2 )
*/
if ( dev - > dev_attrib . unmap_zeroes_data )
buf [ 14 ] | = 0x40 ;
}
2012-09-07 19:30:40 +04:00
rbuf = transport_kmap_data_sg ( cmd ) ;
2013-01-30 02:18:00 +04:00
if ( rbuf ) {
memcpy ( rbuf , buf , min_t ( u32 , sizeof ( buf ) , cmd - > data_length ) ) ;
transport_kunmap_data_sg ( cmd ) ;
}
2012-05-20 19:59:15 +04:00
2014-06-10 22:07:47 +04:00
target_complete_cmd_with_length ( cmd , GOOD , 32 ) ;
2012-05-20 19:59:15 +04:00
return 0 ;
}
2015-07-24 01:27:46 +03:00
static sense_reason_t
sbc_emulate_startstop ( struct se_cmd * cmd )
{
unsigned char * cdb = cmd - > t_task_cdb ;
/*
* See sbc3r36 section 5.25
* Immediate bit should be set since there is nothing to complete
* POWER CONDITION MODIFIER 0 h
*/
if ( ! ( cdb [ 1 ] & 1 ) | | cdb [ 2 ] | | cdb [ 3 ] )
return TCM_INVALID_CDB_FIELD ;
/*
* See sbc3r36 section 5.25
* POWER CONDITION 0 h START_VALID - process START and LOEJ
*/
if ( cdb [ 4 ] > > 4 & 0xf )
return TCM_INVALID_CDB_FIELD ;
/*
* See sbc3r36 section 5.25
* LOEJ 0 h - nothing to load or unload
* START 1 h - we are ready
*/
if ( ! ( cdb [ 4 ] & 1 ) | | ( cdb [ 4 ] & 2 ) | | ( cdb [ 4 ] & 4 ) )
return TCM_INVALID_CDB_FIELD ;
target_complete_cmd ( cmd , SAM_STAT_GOOD ) ;
return 0 ;
}
2013-02-22 21:52:57 +04:00
sector_t sbc_get_write_same_sectors ( struct se_cmd * cmd )
2012-05-20 19:59:15 +04:00
{
u32 num_blocks ;
if ( cmd - > t_task_cdb [ 0 ] = = WRITE_SAME )
num_blocks = get_unaligned_be16 ( & cmd - > t_task_cdb [ 7 ] ) ;
else if ( cmd - > t_task_cdb [ 0 ] = = WRITE_SAME_16 )
num_blocks = get_unaligned_be32 ( & cmd - > t_task_cdb [ 10 ] ) ;
else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
num_blocks = get_unaligned_be32 ( & cmd - > t_task_cdb [ 28 ] ) ;
/*
* Use the explicit range when non zero is supplied , otherwise calculate
* the remaining range based on - > get_blocks ( ) - starting LBA .
*/
2012-06-18 02:40:54 +04:00
if ( num_blocks )
return num_blocks ;
2012-05-20 19:59:15 +04:00
2012-06-18 02:40:54 +04:00
return cmd - > se_dev - > transport - > get_blocks ( cmd - > se_dev ) -
cmd - > t_task_lba + 1 ;
2012-05-20 19:59:15 +04:00
}
2013-02-22 21:52:57 +04:00
EXPORT_SYMBOL ( sbc_get_write_same_sectors ) ;
2012-05-20 19:59:15 +04:00
2015-06-19 16:11:00 +03:00
static sense_reason_t
sbc_execute_write_same_unmap ( struct se_cmd * cmd )
{
struct sbc_ops * ops = cmd - > protocol_data ;
sector_t nolb = sbc_get_write_same_sectors ( cmd ) ;
sense_reason_t ret ;
if ( nolb ) {
ret = ops - > execute_unmap ( cmd , cmd - > t_task_lba , nolb ) ;
if ( ret )
return ret ;
}
target_complete_cmd ( cmd , GOOD ) ;
return 0 ;
}
2012-11-07 00:24:09 +04:00
static sense_reason_t
2012-11-07 08:59:41 +04:00
sbc_emulate_noop ( struct se_cmd * cmd )
2012-10-24 17:53:58 +04:00
{
target_complete_cmd ( cmd , GOOD ) ;
return 0 ;
}
2012-05-20 19:59:14 +04:00
/* Convert a sector count to a byte count using the device block size. */
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? cdb[4] : 256;
}
/* TRANSFER LENGTH: big-endian 16-bit value at CDB bytes 7-8. */
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return get_unaligned_be16(cdb + 7);
}
/* TRANSFER LENGTH: big-endian 32-bit value at CDB bytes 6-9. */
static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return get_unaligned_be32(cdb + 6);
}
/* TRANSFER LENGTH: big-endian 32-bit value at CDB bytes 10-13. */
static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return get_unaligned_be32(cdb + 10);
}
/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	/* TRANSFER LENGTH: big-endian 32-bit value at CDB bytes 28-31. */
	return get_unaligned_be32(cdb + 28);
}
/* 21-bit LBA stored in the low bits of CDB bytes 1-3 (6-byte CDBs). */
static inline u32 transport_lba_21(unsigned char *cdb)
{
	return get_unaligned_be24(cdb + 1) & 0x1fffff;
}
/* 32-bit LBA: big-endian value at CDB bytes 2-5. */
static inline u32 transport_lba_32(unsigned char *cdb)
{
	return get_unaligned_be32(cdb + 2);
}
/* 64-bit LBA: big-endian value at CDB bytes 2-9 (16-byte CDBs). */
static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	return get_unaligned_be64(cdb + 2);
}
/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	/* 64-bit LBA: big-endian value at CDB bytes 12-19. */
	return get_unaligned_be64(cdb + 12);
}
2012-11-08 08:01:10 +04:00
static sense_reason_t
sbc_setup_write_same ( struct se_cmd * cmd , unsigned char * flags , struct sbc_ops * ops )
2012-05-20 19:59:14 +04:00
{
2015-02-14 01:09:47 +03:00
struct se_device * dev = cmd - > se_dev ;
sector_t end_lba = dev - > transport - > get_blocks ( dev ) + 1 ;
2013-02-22 21:52:57 +04:00
unsigned int sectors = sbc_get_write_same_sectors ( cmd ) ;
2015-02-14 04:32:11 +03:00
sense_reason_t ret ;
2012-11-15 23:02:49 +04:00
2012-05-20 19:59:14 +04:00
if ( ( flags [ 0 ] & 0x04 ) | | ( flags [ 0 ] & 0x02 ) ) {
pr_err ( " WRITE_SAME PBDATA and LBDATA "
" bits not supported for Block Discard "
" Emulation \n " ) ;
2012-11-08 08:01:10 +04:00
return TCM_UNSUPPORTED_SCSI_OPCODE ;
2012-05-20 19:59:14 +04:00
}
2012-11-15 23:02:49 +04:00
if ( sectors > cmd - > se_dev - > dev_attrib . max_write_same_len ) {
pr_warn ( " WRITE_SAME sectors: %u exceeds max_write_same_len: %u \n " ,
sectors , cmd - > se_dev - > dev_attrib . max_write_same_len ) ;
return TCM_INVALID_CDB_FIELD ;
}
2015-02-14 01:09:47 +03:00
/*
* Sanity check for LBA wrap and request past end of device .
*/
if ( ( ( cmd - > t_task_lba + sectors ) < cmd - > t_task_lba ) | |
( ( cmd - > t_task_lba + sectors ) > end_lba ) ) {
pr_err ( " WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u) \n " ,
( unsigned long long ) end_lba , cmd - > t_task_lba , sectors ) ;
return TCM_ADDRESS_OUT_OF_RANGE ;
}
2013-10-15 02:49:23 +04:00
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
if ( flags [ 0 ] & 0x10 ) {
pr_warn ( " WRITE SAME with ANCHOR not supported \n " ) ;
return TCM_INVALID_CDB_FIELD ;
}
2012-05-20 19:59:14 +04:00
/*
2012-11-08 08:01:10 +04:00
* Special case for WRITE_SAME w / UNMAP = 1 that ends up getting
* translated into block discard requests within backend code .
2012-05-20 19:59:14 +04:00
*/
2012-11-08 08:01:10 +04:00
if ( flags [ 0 ] & 0x08 ) {
2015-06-19 16:11:00 +03:00
if ( ! ops - > execute_unmap )
2012-11-08 08:01:10 +04:00
return TCM_UNSUPPORTED_SCSI_OPCODE ;
2015-02-14 04:56:19 +03:00
if ( ! dev - > dev_attrib . emulate_tpws ) {
pr_err ( " Got WRITE_SAME w/ UNMAP=1, but backend device "
" has emulate_tpws disabled \n " ) ;
return TCM_UNSUPPORTED_SCSI_OPCODE ;
}
2015-06-19 16:11:00 +03:00
cmd - > execute_cmd = sbc_execute_write_same_unmap ;
2012-11-08 08:01:10 +04:00
return 0 ;
2012-05-20 19:59:14 +04:00
}
2012-11-08 08:01:10 +04:00
if ( ! ops - > execute_write_same )
return TCM_UNSUPPORTED_SCSI_OPCODE ;
2012-05-20 19:59:14 +04:00
2015-02-14 04:32:11 +03:00
ret = sbc_check_prot ( dev , cmd , & cmd - > t_task_cdb [ 0 ] , sectors , true ) ;
if ( ret )
return ret ;
2012-11-08 08:01:10 +04:00
cmd - > execute_cmd = ops - > execute_write_same ;
2012-05-20 19:59:14 +04:00
return 0 ;
}
2015-11-06 10:37:59 +03:00
/*
 * Completion callback for XDWRITEREAD (10): XOR the data-out payload
 * into the BIDI read buffer so the initiator receives the XOR result.
 */
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
					   int *post_ret)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;

	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}

	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents,
			  buf, cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_mem_bidi_list
	 */
	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			addr[sg->offset + i] ^= buf[offset + i];

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}
2013-08-20 10:57:30 +04:00
static sense_reason_t
sbc_execute_rw ( struct se_cmd * cmd )
{
2015-06-19 16:10:58 +03:00
struct sbc_ops * ops = cmd - > protocol_data ;
return ops - > execute_rw ( cmd , cmd - > t_data_sg , cmd - > t_data_nents ,
2013-08-20 10:57:30 +04:00
cmd - > data_direction ) ;
}
2015-11-06 10:37:59 +03:00
/*
 * Completion callback for the WRITE half of COMPARE AND WRITE.
 * Releases dev->caw_sem taken in sbc_compare_and_write().
 */
static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret = TCM_NO_SENSE;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if (cmd->transport_state & CMD_T_SENT) {
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		*post_ret = 1;

		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return ret;
}
2015-11-06 10:37:59 +03:00
/*
 * Completion callback for the READ half of COMPARE AND WRITE: compare
 * the read-back data (t_bidi_data_sg) against the verify payload
 * (t_data_sg); on match, rewire the command to submit the WRITE half
 * via sbc_execute_rw().  On miscompare or failure, dev->caw_sem (taken
 * in sbc_compare_and_write()) is released here.
 */
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
						 int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_debug("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		*post_ret = 1;
		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
				 GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare against SCSI READ payload against verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		/* Only compare up to the remaining verify length. */
		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			/*
			 * NOTE(review): offset is taken as sg->offset plus
			 * one block_size — presumably to skip the verify
			 * payload and address the write payload within the
			 * same page; confirm against the SGL layout built
			 * by transport_generic_new_cmd().
			 */
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset + block_size);
		} else {
			/* Skip one full page of verify payload. */
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd, false);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}
static sense_reason_t
sbc_compare_and_write ( struct se_cmd * cmd )
{
2015-06-19 16:10:58 +03:00
struct sbc_ops * ops = cmd - > protocol_data ;
2013-08-20 02:20:28 +04:00
struct se_device * dev = cmd - > se_dev ;
sense_reason_t ret ;
int rc ;
/*
* Submit the READ first for COMPARE_AND_WRITE to perform the
* comparision using SGLs at cmd - > t_bidi_data_sg . .
*/
rc = down_interruptible ( & dev - > caw_sem ) ;
2015-05-20 01:10:44 +03:00
if ( rc ! = 0 ) {
2013-08-20 02:20:28 +04:00
cmd - > transport_complete_callback = NULL ;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE ;
}
2013-10-02 03:46:37 +04:00
/*
* Reset cmd - > data_length to individual block_size in order to not
* confuse backend drivers that depend on this value matching the
* size of the I / O being submitted .
*/
cmd - > data_length = cmd - > t_task_nolb * dev - > dev_attrib . block_size ;
2013-08-20 02:20:28 +04:00
2015-06-19 16:10:58 +03:00
ret = ops - > execute_rw ( cmd , cmd - > t_bidi_data_sg , cmd - > t_bidi_data_nents ,
2013-08-20 02:20:28 +04:00
DMA_FROM_DEVICE ) ;
if ( ret ) {
cmd - > transport_complete_callback = NULL ;
up ( & dev - > caw_sem ) ;
return ret ;
}
/*
* Unlock of dev - > caw_sem to occur in compare_and_write_callback ( )
* upon MISCOMPARE , or in compare_and_write_done ( ) upon completion
* of WRITE instance user - data .
*/
return TCM_NO_SENSE ;
}
2014-02-19 19:50:15 +04:00
/*
 * Program cmd->prot_op and cmd->prot_checks from the CDB's 3-bit
 * RDPROTECT/WRPROTECT field (see SBC-3 tables for PROTECT decoding).
 * fabric_prot selects the strip/insert ops when the fabric, not the
 * backend, owns protection information.
 *
 * Returns 0 on success, -EINVAL for an unsupported protect value.
 */
static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
			       protect ? TARGET_PROT_DOUT_PASS :
			       TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			/* REFTAG-only check, and only meaningful for Type 1 */
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
			       protect ? TARGET_PROT_DIN_PASS :
			       TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			/* REFTAG-only check, and only meaningful for Type 1 */
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}
2015-02-14 01:49:38 +03:00
/*
 * Validate the RDPROTECT/WRPROTECT bits of a R/W CDB against the
 * device's configured T10-PI type and the session's fabric protection
 * capabilities, then set up cmd->prot_type/prot_length/reftag_seed and
 * the prot_op/prot_checks via sbc_set_prot_op_checks().
 *
 * Returns TCM_NO_SENSE on success, TCM_INVALID_CDB_FIELD when the
 * PROTECT bits cannot be honoured.
 */
static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;	/* top 3 bits of CDB byte 1 */
	int sp_ops = cmd->se_sess->sup_prot_ops;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;
	bool fabric_prot = false;

	/* No protection SGLs were allocated for this command. */
	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (unlikely(protect &&
		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
			pr_err("CDB contains protect bit, but device + fabric does"
			       " not advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		/* Type 3 has no reference-tag relationship to the LBA. */
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		/* Type 2 requires RDPROTECT/WRPROTECT == 0. */
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * See if the fabric supports T10-PI, and the session has been
		 * configured to allow export PROTECT=1 feature bit with backend
		 * devices that don't support T10-PI.
		 */
		fabric_prot = is_write ?
			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

		if (fabric_prot && cmd->se_sess->sess_prot_type) {
			pi_prot_type = cmd->se_sess->sess_prot_type;
			break;
		}
		if (!protect)
			return TCM_NO_SENSE;

		/* Fallthrough */
	default:
		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
		       "PROTECT: 0x%02x\n", cdb[0], protect);
		return TCM_INVALID_CDB_FIELD;
	}

	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/**
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length
	 **/
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}
2015-02-14 02:28:27 +03:00
static int
sbc_check_dpofua ( struct se_device * dev , struct se_cmd * cmd , unsigned char * cdb )
{
if ( cdb [ 1 ] & 0x10 ) {
2015-04-20 16:00:30 +03:00
/* see explanation in spc_emulate_modesense */
if ( ! target_check_fua ( dev ) ) {
2015-02-14 02:28:27 +03:00
pr_err ( " Got CDB: 0x%02x with DPO bit set, but device "
" does not advertise support for DPO \n " , cdb [ 0 ] ) ;
return - EINVAL ;
}
}
if ( cdb [ 1 ] & 0x8 ) {
2015-04-20 16:00:30 +03:00
if ( ! target_check_fua ( dev ) ) {
2015-02-14 02:28:27 +03:00
pr_err ( " Got CDB: 0x%02x with FUA bit set, but device "
" does not advertise support for FUA write \n " ,
cdb [ 0 ] ) ;
return - EINVAL ;
}
cmd - > se_cmd_flags | = SCF_FUA ;
}
return 0 ;
2013-12-24 00:30:03 +04:00
}
2012-11-07 00:24:09 +04:00
/*
 * sbc_parse_cdb - parse an SBC (direct-access block) CDB
 *
 * Decodes the opcode-specific LBA and transfer-length fields out of
 * cmd->t_task_cdb, validates DPO/FUA and protection-information bits where
 * the opcode carries them, and wires cmd->execute_cmd up to either a
 * backend handler from @ops or a local emulation.  Opcodes not handled
 * here fall through to spc_parse_cdb().
 *
 * Returns TCM_NO_SENSE (0) on success or a sense_reason_t error code
 * (TCM_INVALID_CDB_FIELD, TCM_UNSUPPORTED_SCSI_OPCODE,
 * TCM_ADDRESS_OUT_OF_RANGE, or whatever spc_parse_cdb /
 * target_cmd_size_check report).
 */
sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	/* Stash the backend ops for the execute_cmd handlers. */
	cmd->protocol_data = ops;

	switch (cdb[0]) {
	case READ_6:
		/* READ(6) has no DPO/FUA or protection fields to check. */
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		/* WRITE(6) likewise carries no DPO/FUA/protection bits. */
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		/* WRITE AND VERIFY(10) is handled as a plain WRITE(10). */
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
	case WRITE_VERIFY_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		/* Bidirectional command: requires DMA_TO_DEVICE plus SCF_BIDI. */
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run during after I/O
			 * completion.
			 */
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			/* WSNZ=1 is advertised, so a zero-block WRITE SAME is invalid. */
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
	case COMPARE_AND_WRITE:
		if (!dev->dev_attrib.emulate_caw) {
			pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject"
				" COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name,
				dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		/* Service action lives in the low 5 bits of byte 1. */
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
				cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		/* ALLOCATION LENGTH for SERVICE ACTION IN(16) is bytes 10..13. */
		size = get_unaligned_be32(&cdb[10]);
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			/* Still range-check lba/sectors even for the cache flush. */
			goto check_lba;
		}
		/* Backend has no cache to flush; return GOOD without data. */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpu) {
			pr_err("Got UNMAP, but backend device has"
			       " emulate_tpu disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = sbc_execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
	case VERIFY_16:
		/* Emulated as a no-op, but the LBA range is still validated. */
		size = 0;
		if (cdb[0] == VERIFY) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case START_STOP:
		size = 0;
		cmd->execute_cmd = sbc_emulate_startstop;
		break;
	default:
		/* Not an SBC opcode; let the SPC parser have a look. */
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		/* Catch both u64 wrap-around and past-end-of-device ranges. */
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu"
			       " (lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		/* COMPARE AND WRITE already set size to twice the data length. */
		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);
2012-10-07 18:55:53 +04:00
/*
 * Report the SCSI peripheral device type for SBC backends.  Always
 * TYPE_DISK: this file only implements direct-access block devices.
 */
u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);
2013-02-25 10:03:46 +04:00
2015-06-19 16:10:59 +03:00
static sense_reason_t
sbc_execute_unmap ( struct se_cmd * cmd )
2013-02-25 10:03:46 +04:00
{
2015-06-19 16:10:59 +03:00
struct sbc_ops * ops = cmd - > protocol_data ;
2013-02-25 10:03:46 +04:00
struct se_device * dev = cmd - > se_dev ;
unsigned char * buf , * ptr = NULL ;
sector_t lba ;
int size ;
u32 range ;
sense_reason_t ret = 0 ;
int dl , bd_dl ;
/* We never set ANC_SUP */
if ( cmd - > t_task_cdb [ 1 ] )
return TCM_INVALID_CDB_FIELD ;
if ( cmd - > data_length = = 0 ) {
target_complete_cmd ( cmd , SAM_STAT_GOOD ) ;
return 0 ;
}
if ( cmd - > data_length < 8 ) {
pr_warn ( " UNMAP parameter list length %u too small \n " ,
cmd - > data_length ) ;
return TCM_PARAMETER_LIST_LENGTH_ERROR ;
}
buf = transport_kmap_data_sg ( cmd ) ;
if ( ! buf )
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE ;
dl = get_unaligned_be16 ( & buf [ 0 ] ) ;
bd_dl = get_unaligned_be16 ( & buf [ 2 ] ) ;
size = cmd - > data_length - 8 ;
if ( bd_dl > size )
pr_warn ( " UNMAP parameter list length %u too small, ignoring bd_dl %u \n " ,
cmd - > data_length , bd_dl ) ;
else
size = bd_dl ;
if ( size / 16 > dev - > dev_attrib . max_unmap_block_desc_count ) {
ret = TCM_INVALID_PARAMETER_LIST ;
goto err ;
}
/* First UNMAP block descriptor starts at 8 byte offset */
ptr = & buf [ 8 ] ;
pr_debug ( " UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u "
" ptr: %p \n " , dev - > transport - > name , dl , bd_dl , size , ptr ) ;
while ( size > = 16 ) {
lba = get_unaligned_be64 ( & ptr [ 0 ] ) ;
range = get_unaligned_be32 ( & ptr [ 8 ] ) ;
pr_debug ( " UNMAP: Using lba: %llu and range: %u \n " ,
( unsigned long long ) lba , range ) ;
if ( range > dev - > dev_attrib . max_unmap_lba_count ) {
ret = TCM_INVALID_PARAMETER_LIST ;
goto err ;
}
if ( lba + range > dev - > transport - > get_blocks ( dev ) + 1 ) {
ret = TCM_ADDRESS_OUT_OF_RANGE ;
goto err ;
}
2017-12-14 00:55:13 +03:00
if ( range ) {
ret = ops - > execute_unmap ( cmd , lba , range ) ;
if ( ret )
goto err ;
}
2013-02-25 10:03:46 +04:00
ptr + = 16 ;
size - = 16 ;
}
err :
transport_kunmap_data_sg ( cmd ) ;
if ( ! ret )
target_complete_cmd ( cmd , GOOD ) ;
return ret ;
}
2014-01-08 02:44:57 +04:00
2014-04-03 01:19:09 +04:00
/*
 * sbc_dif_generate - insert T10-PI protection tuples for a command
 *
 * Walks the protection scatterlist (cmd->t_prot_sg) and, for each
 * t10_pi_tuple slot, computes the CRC16 guard tag over one logical block
 * of cmd->t_data_sg, filling in guard/ref/app tags.  The data and
 * protection scatterlists are advanced independently, and a data block
 * may straddle a data-sg boundary (handled via crc_t10dif_update on the
 * remainder in the next sg entry).
 *
 * NOTE(review): each kmap_atomic of a data sg entry is paired with a
 * kunmap_atomic before moving to the next entry; early returns on a NULL
 * sg_next unmap the protection page first.  Ordering here is deliberate —
 * do not reorder the map/unmap calls.
 */
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg, *psg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;	/* offset: bytes consumed within current data sg */
	unsigned int block_size = dev->dev_attrib.block_size;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		/* One protection tuple per logical block. */
		for (j = 0; j < psg->length;
				j += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			/* Current data sg exhausted: advance to the next one. */
			if (offset >= dsg->length) {
				offset -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + j;

			/* CRC what is available in this sg entry ... */
			avail = min(block_size, dsg->length - offset);
			crc = crc_t10dif(daddr + offset, avail);
			if (avail < block_size) {
				/* ... and finish the block in the next entry. */
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				offset = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, offset);
			} else {
				offset += block_size;
			}

			sdt->guard_tag = cpu_to_be16(crc);
			/* Type 1 protection carries the lower 32 LBA bits as ref tag. */
			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (cmd->data_direction == DMA_TO_DEVICE) ?
				 "WRITE" : "READ", (unsigned long long)sector,
				 sdt->guard_tag, sdt->app_tag,
				 be32_to_cpu(sdt->ref_tag));

			sector++;
		}

		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}
}
2014-01-08 02:44:57 +04:00
static sense_reason_t
2015-06-29 13:08:19 +03:00
sbc_dif_v1_verify ( struct se_cmd * cmd , struct t10_pi_tuple * sdt ,
2015-05-01 09:23:51 +03:00
__u16 crc , sector_t sector , unsigned int ei_lba )
2014-01-08 02:44:57 +04:00
{
__be16 csum ;
2015-04-14 21:57:43 +03:00
if ( ! ( cmd - > prot_checks & TARGET_DIF_CHECK_GUARD ) )
goto check_ref ;
2015-05-01 09:23:51 +03:00
csum = cpu_to_be16 ( crc ) ;
2014-01-08 02:44:57 +04:00
if ( sdt - > guard_tag ! = csum ) {
pr_err ( " DIFv1 checksum failed on sector %llu guard tag 0x%04x "
" csum 0x%04x \n " , ( unsigned long long ) sector ,
be16_to_cpu ( sdt - > guard_tag ) , be16_to_cpu ( csum ) ) ;
return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED ;
}
2015-04-14 21:57:43 +03:00
check_ref :
if ( ! ( cmd - > prot_checks & TARGET_DIF_CHECK_REFTAG ) )
return 0 ;
2015-02-28 09:42:11 +03:00
if ( cmd - > prot_type = = TARGET_DIF_TYPE1_PROT & &
2014-01-08 02:44:57 +04:00
be32_to_cpu ( sdt - > ref_tag ) ! = ( sector & 0xffffffff ) ) {
pr_err ( " DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x "
" sector MSB: 0x%08x \n " , ( unsigned long long ) sector ,
be32_to_cpu ( sdt - > ref_tag ) , ( u32 ) ( sector & 0xffffffff ) ) ;
return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED ;
}
2015-02-28 09:42:11 +03:00
if ( cmd - > prot_type = = TARGET_DIF_TYPE2_PROT & &
2014-01-08 02:44:57 +04:00
be32_to_cpu ( sdt - > ref_tag ) ! = ei_lba ) {
pr_err ( " DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x "
" ei_lba: 0x%08x \n " , ( unsigned long long ) sector ,
be32_to_cpu ( sdt - > ref_tag ) , ei_lba ) ;
return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED ;
}
return 0 ;
}
2015-04-19 20:27:19 +03:00
/*
 * sbc_dif_copy_prot - copy protection tuples between cmd->t_prot_sg and @sg
 *
 * Copies sectors * dev->prot_length bytes of protection information.
 * When @read is true, bytes flow from @sg into cmd->t_prot_sg; otherwise
 * from cmd->t_prot_sg into @sg.  @sg_off is the starting byte offset into
 * the first @sg entry.  A NULL @sg is a no-op.
 *
 * NOTE(review): kunmap_atomic(addr - sg->offset - offset) is evaluated
 * after offset has been advanced by len, so the argument is not the
 * original mapped base address — it still lands in the same atomic-kmap
 * slot, but confirm against the kmap_atomic contract before touching
 * this arithmetic.
 */
void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		       struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	if (!sg)
		return;

	/* Total protection bytes to move. */
	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		/* Drain this protection sg entry, crossing @sg boundaries as needed. */
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			kunmap_atomic(addr - sg->offset - offset);

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
		}
		kunmap_atomic(paddr - psg->offset);
	}
}
EXPORT_SYMBOL(sbc_dif_copy_prot);
2014-01-08 02:44:57 +04:00
/*
 * sbc_dif_verify - verify T10-PI protection tuples for [start, start+sectors)
 *
 * For each logical block, computes the CRC16 guard over the data
 * scatterlist (cmd->t_data_sg) and checks it, plus the reference tag,
 * via sbc_dif_v1_verify().  @psg/@psg_off locate the first protection
 * tuple; @ei_lba is the expected initial reference tag for Type 2.
 * Blocks whose app_tag equals T10_PI_APP_ESCAPE (0xffff) are skipped,
 * per the T10 escape convention.
 *
 * Like sbc_dif_generate(), data and protection scatterlists advance
 * independently and a data block may straddle a data-sg boundary.
 *
 * Returns 0 on success; on a check failure, records the failing LBA in
 * cmd->bad_sector and returns the sense code from sbc_dif_v1_verify().
 */
sense_reason_t
sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct t10_pi_tuple *sdt;
	struct scatterlist *dsg = cmd->t_data_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i;
	sense_reason_t rc;
	int dsg_off = 0;	/* byte offset within the current data sg entry */
	unsigned int block_size = dev->dev_attrib.block_size;

	for (; psg && sector < start + sectors; psg = sg_next(psg)) {
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;

		for (i = psg_off; i < psg->length &&
				sector < start + sectors;
				i += sizeof(*sdt)) {
			__u16 crc;
			unsigned int avail;

			/* Current data sg exhausted: advance to the next one. */
			if (dsg_off >= dsg->length) {
				dsg_off -= dsg->length;
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
			}

			sdt = paddr + i;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			/* Escape app tag: skip verification for this block. */
			if (sdt->app_tag == T10_PI_APP_ESCAPE) {
				dsg_off += block_size;
				goto next;
			}

			/* CRC what is available in this sg entry ... */
			avail = min(block_size, dsg->length - dsg_off);
			crc = crc_t10dif(daddr + dsg_off, avail);
			if (avail < block_size) {
				/* ... and finish the block in the next entry. */
				kunmap_atomic(daddr - dsg->offset);
				dsg = sg_next(dsg);
				if (!dsg) {
					kunmap_atomic(paddr - psg->offset);
					return 0;
				}
				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
				dsg_off = block_size - avail;
				crc = crc_t10dif_update(crc, daddr, dsg_off);
			} else {
				dsg_off += block_size;
			}

			rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
			if (rc) {
				kunmap_atomic(daddr - dsg->offset);
				kunmap_atomic(paddr - psg->offset);
				cmd->bad_sector = sector;
				return rc;
			}
next:
			sector++;
			ei_lba++;
		}

		/* psg_off only applies to the very first protection sg entry. */
		psg_off = 0;
		kunmap_atomic(daddr - dsg->offset);
		kunmap_atomic(paddr - psg->offset);
	}

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify);