// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 * Information.
 */
# include <linux/t10-pi.h>
2021-09-20 15:33:27 +03:00
# include <linux/blk-integrity.h>
2014-09-27 03:20:07 +04:00
# include <linux/crc-t10dif.h>
2022-03-03 23:13:11 +03:00
# include <linux/crc64.h>
2019-12-23 11:13:51 +03:00
# include <linux/module.h>
2014-09-27 03:20:07 +04:00
# include <net/checksum.h>
2022-03-03 23:13:11 +03:00
# include <asm/unaligned.h>
2014-09-27 03:20:07 +04:00
/*
 * Checksum callback: fold @len bytes of data into the running 16-bit
 * value and return the updated (big-endian) result.
 */
typedef __be16 ( csum_fn ) ( __be16 , void * , unsigned int ) ;
2014-09-27 03:20:07 +04:00
2024-02-01 16:01:24 +03:00
static __be16 t10_pi_crc_fn ( __be16 crc , void * data , unsigned int len )
2014-09-27 03:20:07 +04:00
{
2024-02-01 16:01:24 +03:00
return cpu_to_be16 ( crc_t10dif_update ( be16_to_cpu ( crc ) , data , len ) ) ;
2014-09-27 03:20:07 +04:00
}
2024-02-01 16:01:24 +03:00
static __be16 t10_pi_ip_fn ( __be16 csum , void * data , unsigned int len )
2014-09-27 03:20:07 +04:00
{
return ( __force __be16 ) ip_compute_csum ( data , len ) ;
}
/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag.  Type 3 does not define the
 * ref tag.
 */
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_generate ( struct blk_integrity_iter * iter ,
2019-09-16 18:44:28 +03:00
csum_fn * fn , enum t10_dif_type type )
2014-09-27 03:20:07 +04:00
{
2024-02-01 16:01:25 +03:00
u8 offset = iter - > pi_offset ;
2014-09-27 03:20:07 +04:00
unsigned int i ;
for ( i = 0 ; i < iter - > data_size ; i + = iter - > interval ) {
2024-02-01 16:01:25 +03:00
struct t10_pi_tuple * pi = iter - > prot_buf + offset ;
2014-09-27 03:20:07 +04:00
2024-02-01 16:01:24 +03:00
pi - > guard_tag = fn ( 0 , iter - > data_buf , iter - > interval ) ;
2024-02-01 16:01:25 +03:00
if ( offset )
pi - > guard_tag = fn ( pi - > guard_tag , iter - > prot_buf ,
offset ) ;
2014-09-27 03:20:07 +04:00
pi - > app_tag = 0 ;
2019-09-16 18:44:28 +03:00
if ( type = = T10_PI_TYPE1_PROTECTION )
2014-09-27 03:20:07 +04:00
pi - > ref_tag = cpu_to_be32 ( lower_32_bits ( iter - > seed ) ) ;
else
pi - > ref_tag = 0 ;
iter - > data_buf + = iter - > interval ;
2022-03-03 23:13:05 +03:00
iter - > prot_buf + = iter - > tuple_size ;
2014-09-27 03:20:07 +04:00
iter - > seed + + ;
}
2017-06-03 10:38:06 +03:00
return BLK_STS_OK ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_verify ( struct blk_integrity_iter * iter ,
2019-09-16 18:44:28 +03:00
csum_fn * fn , enum t10_dif_type type )
2014-09-27 03:20:07 +04:00
{
2024-02-01 16:01:25 +03:00
u8 offset = iter - > pi_offset ;
2014-09-27 03:20:07 +04:00
unsigned int i ;
2019-09-22 12:46:55 +03:00
BUG_ON ( type = = T10_PI_TYPE0_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
for ( i = 0 ; i < iter - > data_size ; i + = iter - > interval ) {
2024-02-01 16:01:25 +03:00
struct t10_pi_tuple * pi = iter - > prot_buf + offset ;
2014-09-27 03:20:07 +04:00
__be16 csum ;
2019-09-22 12:46:55 +03:00
if ( type = = T10_PI_TYPE1_PROTECTION | |
type = = T10_PI_TYPE2_PROTECTION ) {
2017-06-29 21:31:12 +03:00
if ( pi - > app_tag = = T10_PI_APP_ESCAPE )
2014-09-27 03:20:07 +04:00
goto next ;
if ( be32_to_cpu ( pi - > ref_tag ) ! =
lower_32_bits ( iter - > seed ) ) {
pr_err ( " %s: ref tag error at location %llu " \
" (rcvd %u) \n " , iter - > disk_name ,
( unsigned long long )
iter - > seed , be32_to_cpu ( pi - > ref_tag ) ) ;
2017-06-13 18:07:33 +03:00
return BLK_STS_PROTECTION ;
2014-09-27 03:20:07 +04:00
}
2019-09-22 12:46:55 +03:00
} else if ( type = = T10_PI_TYPE3_PROTECTION ) {
2017-06-29 21:31:12 +03:00
if ( pi - > app_tag = = T10_PI_APP_ESCAPE & &
pi - > ref_tag = = T10_PI_REF_ESCAPE )
2014-09-27 03:20:07 +04:00
goto next ;
}
2024-02-01 16:01:24 +03:00
csum = fn ( 0 , iter - > data_buf , iter - > interval ) ;
2024-02-01 16:01:25 +03:00
if ( offset )
csum = fn ( csum , iter - > prot_buf , offset ) ;
2014-09-27 03:20:07 +04:00
if ( pi - > guard_tag ! = csum ) {
pr_err ( " %s: guard tag error at sector %llu " \
" (rcvd %04x, want %04x) \n " , iter - > disk_name ,
( unsigned long long ) iter - > seed ,
be16_to_cpu ( pi - > guard_tag ) , be16_to_cpu ( csum ) ) ;
2017-06-03 10:38:06 +03:00
return BLK_STS_PROTECTION ;
2014-09-27 03:20:07 +04:00
}
next :
iter - > data_buf + = iter - > interval ;
2022-03-03 23:13:05 +03:00
iter - > prot_buf + = iter - > tuple_size ;
2014-09-27 03:20:07 +04:00
iter - > seed + + ;
}
2017-06-03 10:38:06 +03:00
return BLK_STS_OK ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_generate_crc ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_generate ( iter , t10_pi_crc_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_generate_ip ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_generate ( iter , t10_pi_ip_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_verify_crc ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_verify ( iter , t10_pi_crc_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2017-06-03 10:38:06 +03:00
static blk_status_t t10_pi_type1_verify_ip ( struct blk_integrity_iter * iter )
2014-09-27 03:20:07 +04:00
{
2019-09-16 18:44:28 +03:00
return t10_pi_verify ( iter , t10_pi_ip_fn , T10_PI_TYPE1_PROTECTION ) ;
2014-09-27 03:20:07 +04:00
}
2018-07-30 00:15:32 +03:00
/**
2019-09-16 18:44:29 +03:00
* t10_pi_type1_prepare - prepare PI prior submitting request to device
2018-07-30 00:15:32 +03:00
* @ rq : request with PI that should be prepared
*
* For Type 1 / Type 2 , the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage . Due to
* partitioning , MD / DM cloning , etc . the actual physical start sector is
* likely to be different . Remap protection information to match the
* physical LBA .
*/
2019-09-16 18:44:29 +03:00
static void t10_pi_type1_prepare ( struct request * rq )
2018-07-30 00:15:32 +03:00
{
2024-02-01 16:01:25 +03:00
struct blk_integrity * bi = & rq - > q - > integrity ;
const int tuple_sz = bi - > tuple_size ;
2018-07-30 00:15:32 +03:00
u32 ref_tag = t10_pi_ref_tag ( rq ) ;
2024-02-01 16:01:25 +03:00
u8 offset = bi - > pi_offset ;
2018-07-30 00:15:32 +03:00
struct bio * bio ;
__rq_for_each_bio ( bio , rq ) {
struct bio_integrity_payload * bip = bio_integrity ( bio ) ;
u32 virt = bip_get_seed ( bip ) & 0xffffffff ;
struct bio_vec iv ;
struct bvec_iter iter ;
/* Already remapped? */
if ( bip - > bip_flags & BIP_MAPPED_INTEGRITY )
break ;
bip_for_each_vec ( iv , bip , iter ) {
unsigned int j ;
2021-07-27 08:56:45 +03:00
void * p ;
2018-07-30 00:15:32 +03:00
2021-07-27 08:56:45 +03:00
p = bvec_kmap_local ( & iv ) ;
2018-07-30 00:15:32 +03:00
for ( j = 0 ; j < iv . bv_len ; j + = tuple_sz ) {
2024-02-01 16:01:25 +03:00
struct t10_pi_tuple * pi = p + offset ;
2018-07-30 00:15:32 +03:00
if ( be32_to_cpu ( pi - > ref_tag ) = = virt )
pi - > ref_tag = cpu_to_be32 ( ref_tag ) ;
virt + + ;
ref_tag + + ;
p + = tuple_sz ;
}
2021-07-27 08:56:45 +03:00
kunmap_local ( p ) ;
2018-07-30 00:15:32 +03:00
}
bip - > bip_flags | = BIP_MAPPED_INTEGRITY ;
}
}
/**
2019-09-16 18:44:29 +03:00
* t10_pi_type1_complete - prepare PI prior returning request to the blk layer
2018-07-30 00:15:32 +03:00
* @ rq : request with PI that should be prepared
2019-09-16 18:44:29 +03:00
* @ nr_bytes : total bytes to prepare
2018-07-30 00:15:32 +03:00
*
* For Type 1 / Type 2 , the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage . Due to
* partitioning , MD / DM cloning , etc . the actual physical start sector is
* likely to be different . Since the physical start sector was submitted
* to the device , we should remap it back to virtual values expected by the
* block layer .
*/
2019-09-16 18:44:29 +03:00
static void t10_pi_type1_complete ( struct request * rq , unsigned int nr_bytes )
2018-07-30 00:15:32 +03:00
{
2024-02-01 16:01:25 +03:00
struct blk_integrity * bi = & rq - > q - > integrity ;
unsigned intervals = nr_bytes > > bi - > interval_exp ;
const int tuple_sz = bi - > tuple_size ;
2018-07-30 00:15:32 +03:00
u32 ref_tag = t10_pi_ref_tag ( rq ) ;
2024-02-01 16:01:25 +03:00
u8 offset = bi - > pi_offset ;
2018-07-30 00:15:32 +03:00
struct bio * bio ;
__rq_for_each_bio ( bio , rq ) {
struct bio_integrity_payload * bip = bio_integrity ( bio ) ;
u32 virt = bip_get_seed ( bip ) & 0xffffffff ;
struct bio_vec iv ;
struct bvec_iter iter ;
bip_for_each_vec ( iv , bip , iter ) {
unsigned int j ;
2021-07-27 08:56:45 +03:00
void * p ;
2018-07-30 00:15:32 +03:00
2021-07-27 08:56:45 +03:00
p = bvec_kmap_local ( & iv ) ;
2018-07-30 00:15:32 +03:00
for ( j = 0 ; j < iv . bv_len & & intervals ; j + = tuple_sz ) {
2024-02-01 16:01:25 +03:00
struct t10_pi_tuple * pi = p + offset ;
2018-07-30 00:15:32 +03:00
if ( be32_to_cpu ( pi - > ref_tag ) = = ref_tag )
pi - > ref_tag = cpu_to_be32 ( virt ) ;
virt + + ;
ref_tag + + ;
intervals - - ;
p + = tuple_sz ;
}
2021-07-27 08:56:45 +03:00
kunmap_local ( p ) ;
2018-07-30 00:15:32 +03:00
}
}
}
2019-09-16 18:44:29 +03:00
static blk_status_t t10_pi_type3_generate_crc ( struct blk_integrity_iter * iter )
{
return t10_pi_generate ( iter , t10_pi_crc_fn , T10_PI_TYPE3_PROTECTION ) ;
}
/* Type 3 generate, IP checksum guard. */
static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3 verify, CRC16 guard. */
static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3 verify, IP checksum guard. */
static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
{
	return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_prepare(struct request *rq)
{
}
/* Type 3 does not have a reference tag so no remapping is required. */
static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
{
}
const struct blk_integrity_profile t10_pi_type1_crc = {
	.name			= "T10-DIF-TYPE1-CRC",
	.generate_fn		= t10_pi_type1_generate_crc,
	.verify_fn		= t10_pi_type1_verify_crc,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_crc);
const struct blk_integrity_profile t10_pi_type1_ip = {
	.name			= "T10-DIF-TYPE1-IP",
	.generate_fn		= t10_pi_type1_generate_ip,
	.verify_fn		= t10_pi_type1_verify_ip,
	.prepare_fn		= t10_pi_type1_prepare,
	.complete_fn		= t10_pi_type1_complete,
};
EXPORT_SYMBOL(t10_pi_type1_ip);
const struct blk_integrity_profile t10_pi_type3_crc = {
	.name			= "T10-DIF-TYPE3-CRC",
	.generate_fn		= t10_pi_type3_generate_crc,
	.verify_fn		= t10_pi_type3_verify_crc,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_crc);
const struct blk_integrity_profile t10_pi_type3_ip = {
	.name			= "T10-DIF-TYPE3-IP",
	.generate_fn		= t10_pi_type3_generate_ip,
	.verify_fn		= t10_pi_type3_verify_ip,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL(t10_pi_type3_ip);
2019-12-23 11:13:51 +03:00
2024-02-01 16:01:24 +03:00
static __be64 ext_pi_crc64 ( u64 crc , void * data , unsigned int len )
2022-03-03 23:13:11 +03:00
{
2024-02-01 16:01:24 +03:00
return cpu_to_be64 ( crc64_rocksoft_update ( crc , data , len ) ) ;
2022-03-03 23:13:11 +03:00
}
static blk_status_t ext_pi_crc64_generate ( struct blk_integrity_iter * iter ,
enum t10_dif_type type )
{
2024-02-01 16:01:25 +03:00
u8 offset = iter - > pi_offset ;
2022-03-03 23:13:11 +03:00
unsigned int i ;
for ( i = 0 ; i < iter - > data_size ; i + = iter - > interval ) {
2024-02-01 16:01:25 +03:00
struct crc64_pi_tuple * pi = iter - > prot_buf + offset ;
2022-03-03 23:13:11 +03:00
2024-02-01 16:01:24 +03:00
pi - > guard_tag = ext_pi_crc64 ( 0 , iter - > data_buf , iter - > interval ) ;
2024-02-01 16:01:25 +03:00
if ( offset )
pi - > guard_tag = ext_pi_crc64 ( be64_to_cpu ( pi - > guard_tag ) ,
iter - > prot_buf , offset ) ;
2022-03-03 23:13:11 +03:00
pi - > app_tag = 0 ;
if ( type = = T10_PI_TYPE1_PROTECTION )
put_unaligned_be48 ( iter - > seed , pi - > ref_tag ) ;
else
put_unaligned_be48 ( 0ULL , pi - > ref_tag ) ;
iter - > data_buf + = iter - > interval ;
iter - > prot_buf + = iter - > tuple_size ;
iter - > seed + + ;
}
return BLK_STS_OK ;
}
/* True if @ref_tag holds the 48-bit all-ones escape value. */
static bool ext_pi_ref_escape(u8 *ref_tag)
{
	static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return !memcmp(ref_tag, ref_escape, sizeof(ref_escape));
}
static blk_status_t ext_pi_crc64_verify ( struct blk_integrity_iter * iter ,
enum t10_dif_type type )
{
2024-02-01 16:01:25 +03:00
u8 offset = iter - > pi_offset ;
2022-03-03 23:13:11 +03:00
unsigned int i ;
for ( i = 0 ; i < iter - > data_size ; i + = iter - > interval ) {
2024-02-01 16:01:25 +03:00
struct crc64_pi_tuple * pi = iter - > prot_buf + offset ;
2022-03-03 23:13:11 +03:00
u64 ref , seed ;
__be64 csum ;
if ( type = = T10_PI_TYPE1_PROTECTION ) {
if ( pi - > app_tag = = T10_PI_APP_ESCAPE )
goto next ;
ref = get_unaligned_be48 ( pi - > ref_tag ) ;
seed = lower_48_bits ( iter - > seed ) ;
if ( ref ! = seed ) {
pr_err ( " %s: ref tag error at location %llu (rcvd %llu) \n " ,
iter - > disk_name , seed , ref ) ;
return BLK_STS_PROTECTION ;
}
} else if ( type = = T10_PI_TYPE3_PROTECTION ) {
if ( pi - > app_tag = = T10_PI_APP_ESCAPE & &
ext_pi_ref_escape ( pi - > ref_tag ) )
goto next ;
}
2024-02-01 16:01:24 +03:00
csum = ext_pi_crc64 ( 0 , iter - > data_buf , iter - > interval ) ;
2024-02-01 16:01:25 +03:00
if ( offset )
csum = ext_pi_crc64 ( be64_to_cpu ( csum ) , iter - > prot_buf ,
offset ) ;
2022-03-03 23:13:11 +03:00
if ( pi - > guard_tag ! = csum ) {
pr_err ( " %s: guard tag error at sector %llu " \
" (rcvd %016llx, want %016llx) \n " ,
iter - > disk_name , ( unsigned long long ) iter - > seed ,
be64_to_cpu ( pi - > guard_tag ) , be64_to_cpu ( csum ) ) ;
return BLK_STS_PROTECTION ;
}
next :
iter - > data_buf + = iter - > interval ;
iter - > prot_buf + = iter - > tuple_size ;
iter - > seed + + ;
}
return BLK_STS_OK ;
}
/* Extended Type 1 verify, CRC64 guard. */
static blk_status_t ext_pi_type1_verify_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_verify(iter, T10_PI_TYPE1_PROTECTION);
}
/* Extended Type 1 generate, CRC64 guard. */
static blk_status_t ext_pi_type1_generate_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_generate(iter, T10_PI_TYPE1_PROTECTION);
}
static void ext_pi_type1_prepare ( struct request * rq )
{
2024-02-01 16:01:25 +03:00
struct blk_integrity * bi = & rq - > q - > integrity ;
const int tuple_sz = bi - > tuple_size ;
2022-03-03 23:13:11 +03:00
u64 ref_tag = ext_pi_ref_tag ( rq ) ;
2024-02-01 16:01:25 +03:00
u8 offset = bi - > pi_offset ;
2022-03-03 23:13:11 +03:00
struct bio * bio ;
__rq_for_each_bio ( bio , rq ) {
struct bio_integrity_payload * bip = bio_integrity ( bio ) ;
u64 virt = lower_48_bits ( bip_get_seed ( bip ) ) ;
struct bio_vec iv ;
struct bvec_iter iter ;
/* Already remapped? */
if ( bip - > bip_flags & BIP_MAPPED_INTEGRITY )
break ;
bip_for_each_vec ( iv , bip , iter ) {
unsigned int j ;
void * p ;
p = bvec_kmap_local ( & iv ) ;
for ( j = 0 ; j < iv . bv_len ; j + = tuple_sz ) {
2024-02-01 16:01:25 +03:00
struct crc64_pi_tuple * pi = p + offset ;
2022-03-03 23:13:11 +03:00
u64 ref = get_unaligned_be48 ( pi - > ref_tag ) ;
if ( ref = = virt )
put_unaligned_be48 ( ref_tag , pi - > ref_tag ) ;
virt + + ;
ref_tag + + ;
p + = tuple_sz ;
}
kunmap_local ( p ) ;
}
bip - > bip_flags | = BIP_MAPPED_INTEGRITY ;
}
}
static void ext_pi_type1_complete ( struct request * rq , unsigned int nr_bytes )
{
2024-02-01 16:01:25 +03:00
struct blk_integrity * bi = & rq - > q - > integrity ;
unsigned intervals = nr_bytes > > bi - > interval_exp ;
const int tuple_sz = bi - > tuple_size ;
2022-03-03 23:13:11 +03:00
u64 ref_tag = ext_pi_ref_tag ( rq ) ;
2024-02-01 16:01:25 +03:00
u8 offset = bi - > pi_offset ;
2022-03-03 23:13:11 +03:00
struct bio * bio ;
__rq_for_each_bio ( bio , rq ) {
struct bio_integrity_payload * bip = bio_integrity ( bio ) ;
u64 virt = lower_48_bits ( bip_get_seed ( bip ) ) ;
struct bio_vec iv ;
struct bvec_iter iter ;
bip_for_each_vec ( iv , bip , iter ) {
unsigned int j ;
void * p ;
p = bvec_kmap_local ( & iv ) ;
for ( j = 0 ; j < iv . bv_len & & intervals ; j + = tuple_sz ) {
2024-02-01 16:01:25 +03:00
struct crc64_pi_tuple * pi = p + offset ;
2022-03-03 23:13:11 +03:00
u64 ref = get_unaligned_be48 ( pi - > ref_tag ) ;
if ( ref = = ref_tag )
put_unaligned_be48 ( virt , pi - > ref_tag ) ;
virt + + ;
ref_tag + + ;
intervals - - ;
p + = tuple_sz ;
}
kunmap_local ( p ) ;
}
}
}
/* Extended Type 3 verify, CRC64 guard. */
static blk_status_t ext_pi_type3_verify_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_verify(iter, T10_PI_TYPE3_PROTECTION);
}
/* Extended Type 3 generate, CRC64 guard. */
static blk_status_t ext_pi_type3_generate_crc64(struct blk_integrity_iter *iter)
{
	return ext_pi_crc64_generate(iter, T10_PI_TYPE3_PROTECTION);
}
const struct blk_integrity_profile ext_pi_type1_crc64 = {
	.name			= "EXT-DIF-TYPE1-CRC64",
	.generate_fn		= ext_pi_type1_generate_crc64,
	.verify_fn		= ext_pi_type1_verify_crc64,
	.prepare_fn		= ext_pi_type1_prepare,
	.complete_fn		= ext_pi_type1_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type1_crc64);
/* Type 3 has no ref tag, so the generic no-op prepare/complete hooks apply. */
const struct blk_integrity_profile ext_pi_type3_crc64 = {
	.name			= "EXT-DIF-TYPE3-CRC64",
	.generate_fn		= ext_pi_type3_generate_crc64,
	.verify_fn		= ext_pi_type3_verify_crc64,
	.prepare_fn		= t10_pi_type3_prepare,
	.complete_fn		= t10_pi_type3_complete,
};
EXPORT_SYMBOL_GPL(ext_pi_type3_crc64);
/* Duplicate MODULE_LICENSE("GPL") removed; declare the license once. */
MODULE_LICENSE("GPL");