/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
	int post_process;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_blkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
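
/*
 * Worked example for "plain" (assuming a 16-byte IV): sector 256 yields
 * cpu_to_le32(256) in the first four bytes and zero padding after it,
 * i.e. 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00.
 */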

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_set_buf(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_blkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}
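
/*
 * Worked example for the shift above (assuming AES with a 16-byte block):
 * log = ilog2(16) = 4, so benbi_shift = 9 - 4 = 5 and each 512-byte sector
 * holds 1 << 5 = 32 cipher blocks; sector s therefore starts at
 * narrow-block count (s << 5) + 1, which is what crypt_iv_benbi_gen emits.
 */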

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
                          struct scatterlist *in, unsigned int length,
                          int write, sector_t sector)
{
	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
	struct blkcipher_desc desc = {
		.tfm = cc->tfm,
		.info = iv,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
	} else {
		if (write)
			r = crypto_blkcipher_encrypt(&desc, out, in, length);
		else
			r = crypto_blkcipher_decrypt(&desc, out, in, length);
	}

	return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
                   struct bio *bio_out, struct bio *bio_in,
                   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
{
	int r = 0;

	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
		                              ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
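
/*
 * Worked example (assuming 4 KiB pages and 512-byte sectors): a bio
 * carrying one full page passes through the loop above eight times, each
 * pass converting a single sector with its own IV, so the ciphertext of a
 * sector never depends on how the request was split.
 */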

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *
crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
                   struct bio *base_bio, unsigned int *bio_vec_idx)
{
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	if (base_bio) {
		clone = bio_alloc_bioset(GFP_NOIO, base_bio->bi_max_vecs, cc->bs);
		__bio_clone(clone, base_bio);
	} else
		clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);

	if (!clone)
		return NULL;

	clone->bi_destructor = dm_crypt_bio_destructor;

	/* if the last bio was not complete, continue where that one ended */
	clone->bi_idx = *bio_vec_idx;
	clone->bi_vcnt = *bio_vec_idx;
	clone->bi_size = 0;
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	/* clone->bi_idx pages have already been allocated */
	size -= clone->bi_idx * PAGE_SIZE;

	for (i = clone->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(clone, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if ((i - clone->bi_idx) == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		clone->bi_size += bv->bv_len;
		clone->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	/*
	 * Remember the last bio_vec allocated to be able
	 * to correctly continue after the splitting.
	 */
	*bio_vec_idx = clone->bi_vcnt;

	return clone;
}
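
/*
 * Usage note (a sketch of the behaviour above): the first MIN_BIO_PAGES
 * page allocations may block, so a clone normally carries at least eight
 * pages; past that point an allocation failure simply yields a shorter
 * bio, and process_write() loops until the remainder is covered.
 */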

static void crypt_free_buffer_pages(struct crypt_config *cc,
                                    struct bio *clone, unsigned int bytes)
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
	i = clone->bi_vcnt - 1;
	bv = bio_iovec_idx(clone, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - clone->bi_size;
	start = end - bytes;

	start >>= PAGE_SHIFT;
	if (!clone->bi_size)
		end = clone->bi_vcnt;
	else
		end >>= PAGE_SHIFT;

	for (i = start; i < end; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
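
/*
 * Worked example for the position calculation above (assuming 4 KiB pages
 * and a clone of four whole pages): if bi_size = 4096 bytes remain and
 * bytes = 8192 just completed, end = (3 << 12) + 4096 - 4096 = 12288 and
 * start = 12288 - 8192 = 4096, so pages 1 and 2 are freed, exactly the
 * 8 KiB that finished, without ever touching bi_idx.
 */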

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	if (io->first_clone)
		bio_put(io->first_clone);

	bio_endio(io->base_bio, io->base_bio->bi_size, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 */
static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work);

static void kcryptd_queue_io(struct crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work);
	queue_work(_kcryptd_workqueue, &io->work);
}

static int crypt_endio(struct bio *clone, unsigned int done, int error)
{
	struct crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned read_io = bio_data_dir(clone) == READ;

	/*
	 * free the processed pages, even if
	 * it's only a partially completed write
	 */
	if (!read_io)
		crypt_free_buffer_pages(cc, clone, done);

	/* keep going - not finished yet */
	if (unlikely(clone->bi_size))
		return 1;

	if (!read_io)
		goto out;

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
		error = -EIO;
		goto out;
	}

	bio_put(clone);
	io->post_process = 1;
	kcryptd_queue_io(io);
	return 0;

out:
	bio_put(clone);
	dec_pending(io, error);
	return error;
}

static void clone_init(struct crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
}

static void process_read(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	sector_t sector = base_bio->bi_sector - io->target->begin;

	atomic_inc(&io->pending);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		dec_pending(io, -ENOMEM);
		return;
	}

	clone_init(io, clone);
	clone->bi_destructor = dm_crypt_bio_destructor;
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void process_write(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;
	struct convert_context ctx;
	unsigned remaining = base_bio->bi_size;
	sector_t sector = base_bio->bi_sector - io->target->begin;
	unsigned bvec_idx = 0;

	atomic_inc(&io->pending);

	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(cc, base_bio->bi_size,
					   io->first_clone, &bvec_idx);
		if (unlikely(!clone)) {
			dec_pending(io, -ENOMEM);
			return;
		}

		ctx.bio_out = clone;

		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
			crypt_free_buffer_pages(cc, clone, clone->bi_size);
			bio_put(clone);
			dec_pending(io, -EIO);
			return;
		}

		clone_init(io, clone);
		clone->bi_sector = cc->start + sector;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		/* prevent bio_put of first_clone */
		if (remaining)
			atomic_inc(&io->pending);

		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
			congestion_wait(bio_data_dir(clone), HZ/100);
	}
}

static void process_read_endio(struct crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct convert_context ctx;

	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
			   io->base_bio->bi_sector - io->target->begin, 0);

	dec_pending(io, crypt_convert(cc, &ctx));
}

static void kcryptd_do_work(struct work_struct *work)
{
	struct crypt_io *io = container_of(work, struct crypt_io, work);

	if (io->post_process)
		process_read_endio(io);
	else if (bio_data_dir(io->base_bio) == READ)
		process_read(io);
	else
		process_write(io);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
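
/*
 * Example: crypt_decode_key(key, "cafe", 2) yields key[0] = 0xca and
 * key[1] = 0xfe; size counts output bytes, so the hex string must be
 * exactly 2 * size characters long or -EINVAL is returned.
 */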

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
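
/*
 * Example table line (all values hypothetical): a 256 MiB region of
 * /dev/sdb encrypted with AES in CBC mode using ESSIV(sha256) IVs could
 * be loaded with "dmsetup create" as
 *
 *   0 524288 crypt aes-cbc-essiv:sha256 <64-hex-digit-key> 0 /dev/sdb 0
 *
 * where the five trailing fields are the constructor arguments above.
 */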
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_blkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
		     cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad1;
	}

	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad1;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	cc->iv_size = crypto_blkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad4;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
	                  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_blkcipher(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	crypto_free_blkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = ti->private;
	struct crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->first_clone = NULL;
	io->error = io->post_process = 0;
	atomic_set(&io->pending, 0);
	kcryptd_queue_io(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
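
/*
 * Example (hypothetical device name): while the mapping is suspended,
 *
 *   dmsetup message cryptdev 0 key wipe
 *
 * clears the key from memory, and "key set <hexkey>" loads a new one
 * before the device is resumed.
 */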
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static struct target_type crypt_target = {
	.name    = "crypt",
	.version = {1, 3, 0},
	.module  = THIS_MODULE,
	.ctr     = crypt_ctr,
	.dtr     = crypt_dtr,
	.map     = crypt_map,
	.status  = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume  = crypt_resume,
	.message = crypt_message,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
	                                   sizeof(struct crypt_io),
	                                   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR("couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");