/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include "dm.h"

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
};

struct dm_crypt_request {
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;
	wait_queue_head_t writeq;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	union {
		struct crypto_cipher *essiv_tfm;
		int benbi_shift;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	char cipher[CRYPTO_MAX_ALG_NAME];
	char chainmode[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possible other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

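/*
 * Worked example: with a 16-byte IV, sector 5 becomes 05 00 00 00
 * followed by twelve zero bytes; only the low 32 bits of the sector
 * number are used.
 */
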
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm;
	struct crypto_hash *hash_tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;
	int err;

	if (opts == NULL) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		return PTR_ERR(hash_tfm);
	}

	saltsize = crypto_hash_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		crypto_free_hash(hash_tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
	crypto_free_hash(hash_tfm);

	if (err) {
		ti->error = "Error calculating hash in ESSIV";
		kfree(salt);
		return err;
	}

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return PTR_ERR(essiv_tfm);
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		kfree(salt);
		return err;
	}
	kfree(salt);

	cc->iv_gen_private.essiv_tfm = essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
	cc->iv_gen_private.essiv_tfm = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
	return 0;
}

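/*
 * In short, ESSIV computes iv = E_salt(sector) with salt = H(key), so
 * the IV of any given sector cannot be predicted without the key.
 */
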
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi_shift = 9 - log;

	return 0;
}

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

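/*
 * Worked example: for AES (16-byte blocks), ilog2(16) = 4, so
 * benbi_shift = 9 - 4 = 5.  Sector 2 therefore yields the block count
 * (2 << 5) + 1 = 65, stored big-endian in the last 8 bytes of the IV.
 */
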
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
	atomic_set(&ctx->pending, 1);
}

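/*
 * Note that ctx->pending starts at 1: this extra reference is held by
 * the caller of crypt_convert() and dropped only after the conversion
 * loop finishes, so async completions arriving in the meantime cannot
 * declare the whole context done prematurely.
 */
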
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

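/*
 * Each call above converts exactly one 512-byte sector
 * (1 << SECTOR_SHIFT) with its own IV, which is what allows random
 * access: any sector can be encrypted or decrypted independently.
 */
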
static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done, ctx);
}

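/*
 * cc->req caches a single pre-allocated request: it is reused for each
 * synchronously completed block and only surrendered (cc->req is set
 * to NULL in crypt_convert) once an operation goes asynchronous, at
 * which point kcryptd_async_done() returns it to the mempool.
 */
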
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	if (!atomic_dec_and_test(&io->pending))
		return;

	bio_endio(io->base_bio, io->error);
	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */

static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	generic_make_request(clone);
	wake_up(&cc->writeq);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;
	io->sector += bio_sectors(clone);

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned remaining = io->base_bio->bi_size;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;

		r = crypt_convert(cc, &io->ctx);

		if (atomic_dec_and_test(&io->ctx.pending)) {
			/* processed, no running async crypto */
			crypt_inc_pending(io);
			kcryptd_crypt_write_io_submit(io, r, 0);
			if (unlikely(r < 0)) {
				crypt_dec_pending(io);
				break;
			}
		} else
			crypt_inc_pending(io);

		/* out of memory -> run queues */
		if (unlikely(remaining)) {
			/* wait for async crypto then reinitialize pending */
			wait_event(cc->writeq, !atomic_read(&io->ctx.pending));
			atomic_set(&io->ctx.pending, 1);
			congestion_wait(WRITE, HZ/100);
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct convert_context *ctx = async_req->data;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(ablkcipher_request_cast(async_req), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

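/*
 * Note: -EINPROGRESS here is the backlog notification: a request that
 * previously returned -EBUSY has now been accepted, so ctx->restart is
 * completed to let crypt_convert() resume submitting blocks.
 */
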
static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

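/*
 * Example: crypt_decode_key(key, "cafe", 2) stores {0xca, 0xfe}; the
 * endp and trailing-NUL checks reject any non-hex character and
 * strings with an odd number of hex digits.
 */
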
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return 0;
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return 0;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
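/*
 * For example, the target arguments (illustrative device and key)
 *
 *   aes-cbc-essiv:sha256 <key in hex> 0 /dev/sdb1 0
 *
 * parse below as cipher "aes", chainmode "cbc", ivmode "essiv" and
 * ivopts "sha256".
 */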
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_ablkcipher *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			"Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	if (crypt_set_key(cc, argv[1])) {
		ti->error = "Error decoding key";
		goto bad_cipher;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "This chaining mode requires an IV mechanism";
		goto bad_cipher;
	}

	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     chainmode, cipher) >= CRYPTO_MAX_ALG_NAME) {
		ti->error = "Chain mode + cipher name is too long";
		goto bad_cipher;
	}

	tfm = crypto_alloc_ablkcipher(cc->cipher, 0, 0);
	if (IS_ERR(tfm)) {
		ti->error = "Error allocating crypto tfm";
		goto bad_cipher;
	}

	strcpy(cc->cipher, cipher);
	strcpy(cc->chainmode, chainmode);
	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
	 * See comments at iv code
	 */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ti->error = "Invalid IV mode";
		goto bad_ivmode;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad_ivmode;

	cc->iv_size = crypto_ablkcipher_ivsize(tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		if (cc->iv_gen_ops) {
			DMWARN("Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad_slab_pool;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad_req_pool;
	}
	cc->req = NULL;
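
	/*
	 * Each req_pool element is now a single buffer laid out as
	 * described in struct crypt_config: the ablkcipher request and
	 * its driver context first, then (at offset dmreq_start) the
	 * dm_crypt_request, then the per-sector IV, with alignment
	 * padding in between.
	 */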

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad_page_pool;
	}

	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad_bs;
	}

	if (crypto_ablkcipher_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = "Error setting key";
		goto bad_device;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad_device;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad_device;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad_device;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = "Error kmallocing iv_mode string";
			goto bad_ivmode_string;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad_io_queue;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad_crypt_queue;
	}

	init_waitqueue_head(&cc->writeq);
	ti->private = cc;
	return 0;

bad_crypt_queue:
	destroy_workqueue(cc->io_queue);
bad_io_queue:
	kfree(cc->iv_mode);
bad_ivmode_string:
	dm_put_device(ti, cc->dev);
bad_device:
	bioset_free(cc->bs);
bad_bs:
	mempool_destroy(cc->page_pool);
bad_page_pool:
	mempool_destroy(cc->req_pool);
bad_req_pool:
	mempool_destroy(cc->io_pool);
bad_slab_pool:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad_ivmode:
	crypto_free_ablkcipher(tfm);
bad_cipher:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	destroy_workqueue(cc->io_queue);
	destroy_workqueue(cc->crypt_queue);

	if (cc->req)
		mempool_free(cc->req, cc->req_pool);

	bioset_free(cc->bs);
	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->req_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	crypto_free_ablkcipher(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;

	io = crypt_io_alloc(ti, bio, bio->bi_sector - ti->begin);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
			       cc->iv_mode);
		else
			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
			return crypt_set_key(cc, argv[2]);
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
			return crypt_wipe_key(cc);
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static struct target_type crypt_target = {
	.name        = "crypt",
	.version     = {1, 6, 0},
	.module      = THIS_MODULE,
	.ctr         = crypt_ctr,
	.dtr         = crypt_dtr,
	.map         = crypt_map,
	.status      = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume   = crypt_preresume,
	.resume      = crypt_resume,
	.message     = crypt_message,
	.merge       = crypt_merge,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR("unregister failed %d", r);

	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");