// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */

#include <linux/t10-pi.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <net/checksum.h>
/* Signature shared by the guard-tag checksum helpers below. */
typedef __be16 (csum_fn) (void *, unsigned int);

/* Guard tag computed with the T10-DIF CRC16. */
static __be16 t10_pi_crc_fn(void *data, unsigned int len)
{
	return cpu_to_be16(crc_t10dif(data, len));
}
/* Guard tag computed with the IP checksum. */
static __be16 t10_pi_ip_fn(void *data, unsigned int len)
{
	return (__force __be16)ip_compute_csum(data, len);
}
/*
* Type 1 and Type 2 protection use the same format : 16 bit guard tag ,
* 16 bit app tag , 32 bit reference tag . Type 3 does not define the ref
* tag .
*/
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_generate ( struct blk_integrity_iter * iter ,
2019-09-16 18:44:28 +03:00
csum_fn * fn , enum t10_dif_type type )
2014-09-27 03:20:07 +04:00
{
unsigned int i ;
for ( i = 0 ; i < iter - > data_size ; i + = iter - > interval ) {
struct t10_pi_tuple * pi = iter - > prot_buf ;
pi - > guard_tag = fn ( iter - > data_buf , iter - > interval ) ;
pi - > app_tag = 0 ;
2019-09-16 18:44:28 +03:00
if ( type = = T10_PI_TYPE1_PROTECTION )
2014-09-27 03:20:07 +04:00
pi - > ref_tag = cpu_to_be32 ( lower_32_bits ( iter - > seed ) ) ;
else
pi - > ref_tag = 0 ;
iter - > data_buf + = iter - > interval ;
iter - > prot_buf + = sizeof ( struct t10_pi_tuple ) ;
iter - > seed + + ;
}
2017-06-03 10:38:06 +03:00
return BLK_STS_OK ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_verify ( struct blk_integrity_iter * iter ,
2019-09-16 18:44:28 +03:00
csum_fn * fn , enum t10_dif_type type )
2014-09-27 03:20:07 +04:00
{
unsigned int i ;
2019-09-22 12:46:55 +03:00
BUG_ON ( type = = T10_PI_TYPE0_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
for ( i = 0 ; i < iter - > data_size ; i + = iter - > interval ) {
struct t10_pi_tuple * pi = iter - > prot_buf ;
__be16 csum ;
2019-09-22 12:46:55 +03:00
if ( type = = T10_PI_TYPE1_PROTECTION | |
type = = T10_PI_TYPE2_PROTECTION ) {
2017-06-29 21:31:12 +03:00
if ( pi - > app_tag = = T10_PI_APP_ESCAPE )
2014-09-27 03:20:07 +04:00
goto next ;
if ( be32_to_cpu ( pi - > ref_tag ) ! =
lower_32_bits ( iter - > seed ) ) {
pr_err ( " %s: ref tag error at location %llu " \
" (rcvd %u) \n " , iter - > disk_name ,
( unsigned long long )
iter - > seed , be32_to_cpu ( pi - > ref_tag ) ) ;
2017-06-13 18:07:33 +03:00
return BLK_STS_PROTECTION ;
2014-09-27 03:20:07 +04:00
}
2019-09-22 12:46:55 +03:00
} else if ( type = = T10_PI_TYPE3_PROTECTION ) {
2017-06-29 21:31:12 +03:00
if ( pi - > app_tag = = T10_PI_APP_ESCAPE & &
pi - > ref_tag = = T10_PI_REF_ESCAPE )
2014-09-27 03:20:07 +04:00
goto next ;
}
csum = fn ( iter - > data_buf , iter - > interval ) ;
if ( pi - > guard_tag ! = csum ) {
pr_err ( " %s: guard tag error at sector %llu " \
" (rcvd %04x, want %04x) \n " , iter - > disk_name ,
( unsigned long long ) iter - > seed ,
be16_to_cpu ( pi - > guard_tag ) , be16_to_cpu ( csum ) ) ;
2017-06-03 10:38:06 +03:00
return BLK_STS_PROTECTION ;
2014-09-27 03:20:07 +04:00
}
next :
iter - > data_buf + = iter - > interval ;
iter - > prot_buf + = sizeof ( struct t10_pi_tuple ) ;
iter - > seed + + ;
}
2017-06-03 10:38:06 +03:00
return BLK_STS_OK ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_generate_crc ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_generate ( iter , t10_pi_crc_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_generate_ip ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_generate ( iter , t10_pi_ip_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_verify_crc ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_verify ( iter , t10_pi_crc_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_verify_ip ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_verify ( iter , t10_pi_ip_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2018-07-30 00:15:32 +03:00
/**
2019-09-16 18:44:29 +03:00
* t10_pi_type1_prepare - prepare PI prior submitting request to device
2018-07-30 00:15:32 +03:00
* @ rq : request with PI that should be prepared
*
* For Type 1 / Type 2 , the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage . Due to
* partitioning , MD / DM cloning , etc . the actual physical start sector is
* likely to be different . Remap protection information to match the
* physical LBA .
*/
2019-09-16 18:44:29 +03:00
static void t10_pi_type1_prepare ( struct request * rq )
2018-07-30 00:15:32 +03:00
{
const int tuple_sz = rq - > q - > integrity . tuple_size ;
u32 ref_tag = t10_pi_ref_tag ( rq ) ;
struct bio * bio ;
__rq_for_each_bio ( bio , rq ) {
struct bio_integrity_payload * bip = bio_integrity ( bio ) ;
u32 virt = bip_get_seed ( bip ) & 0xffffffff ;
struct bio_vec iv ;
struct bvec_iter iter ;
/* Already remapped? */
if ( bip - > bip_flags & BIP_MAPPED_INTEGRITY )
break ;
bip_for_each_vec ( iv , bip , iter ) {
void * p , * pmap ;
unsigned int j ;
pmap = kmap_atomic ( iv . bv_page ) ;
p = pmap + iv . bv_offset ;
for ( j = 0 ; j < iv . bv_len ; j + = tuple_sz ) {
struct t10_pi_tuple * pi = p ;
if ( be32_to_cpu ( pi - > ref_tag ) = = virt )
pi - > ref_tag = cpu_to_be32 ( ref_tag ) ;
virt + + ;
ref_tag + + ;
p + = tuple_sz ;
}
kunmap_atomic ( pmap ) ;
}
bip - > bip_flags | = BIP_MAPPED_INTEGRITY ;
}
}
/**
2019-09-16 18:44:29 +03:00
* t10_pi_type1_complete - prepare PI prior returning request to the blk layer
2018-07-30 00:15:32 +03:00
* @ rq : request with PI that should be prepared
2019-09-16 18:44:29 +03:00
* @ nr_bytes : total bytes to prepare
2018-07-30 00:15:32 +03:00
*
* For Type 1 / Type 2 , the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage . Due to
* partitioning , MD / DM cloning , etc . the actual physical start sector is
* likely to be different . Since the physical start sector was submitted
* to the device , we should remap it back to virtual values expected by the
* block layer .
*/
2019-09-16 18:44:29 +03:00
static void t10_pi_type1_complete ( struct request * rq , unsigned int nr_bytes )
2018-07-30 00:15:32 +03:00
{
2019-09-16 18:44:29 +03:00
unsigned intervals = nr_bytes > > rq - > q - > integrity . interval_exp ;
2018-07-30 00:15:32 +03:00
const int tuple_sz = rq - > q - > integrity . tuple_size ;
u32 ref_tag = t10_pi_ref_tag ( rq ) ;
struct bio * bio ;
__rq_for_each_bio ( bio , rq ) {
struct bio_integrity_payload * bip = bio_integrity ( bio ) ;
u32 virt = bip_get_seed ( bip ) & 0xffffffff ;
struct bio_vec iv ;
struct bvec_iter iter ;
bip_for_each_vec ( iv , bip , iter ) {
void * p , * pmap ;
unsigned int j ;
pmap = kmap_atomic ( iv . bv_page ) ;
p = pmap + iv . bv_offset ;
for ( j = 0 ; j < iv . bv_len & & intervals ; j + = tuple_sz ) {
struct t10_pi_tuple * pi = p ;
if ( be32_to_cpu ( pi - > ref_tag ) = = ref_tag )
pi - > ref_tag = cpu_to_be32 ( virt ) ;
virt + + ;
ref_tag + + ;
intervals - - ;
p + = tuple_sz ;
}
kunmap_atomic ( pmap ) ;
}
}
}
2019-09-16 18:44:29 +03:00
static blk_status_t t10_pi_type3_generate_crc ( struct blk_integrity_iter * iter )
{
return t10_pi_generate ( iter , t10_pi_crc_fn , T10_PI_TYPE3_PROTECTION ) ;
}
/* Type 3, IP-checksum guard: generate PI tuples for outgoing data. */
static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3, CRC guard: verify PI tuples on completed data. */
static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3, IP-checksum guard: verify PI tuples on completed data. */
static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}
/* Type 1 protection with a CRC16 guard tag. */
const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
/* Type 1 protection with an IP-checksum guard tag. */
const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
/* Type 3 protection with a CRC16 guard tag. */
const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
/* Type 3 protection with an IP-checksum guard tag. */
const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
MODULE_LICENSE("GPL");