2005-04-17 02:20:36 +04:00
/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
2009-12-11 02:51:57 +03:00
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
2005-04-17 02:20:36 +04:00
 *
 * This file is released under the GPL.
 */
2008-02-08 05:11:09 +03:00
#include <linux/completion.h>
2006-08-22 14:29:17 +04:00
#include <linux/err.h>
2005-04-17 02:20:36 +04:00
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
2006-10-20 10:28:16 +04:00
#include <linux/backing-dev.h>
2011-01-13 22:59:53 +03:00
#include <linux/percpu.h>
2005-04-17 02:20:36 +04:00
#include <asm/atomic.h>
2005-09-17 11:55:31 +04:00
#include <linux/scatterlist.h>
2005-04-17 02:20:36 +04:00
#include <asm/page.h>
2006-09-03 02:56:39 +04:00
#include <asm/unaligned.h>
2011-01-13 22:59:55 +03:00
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
2005-04-17 02:20:36 +04:00
2008-10-21 20:44:59 +04:00
#include <linux/device-mapper.h>
2005-04-17 02:20:36 +04:00
2006-06-26 11:27:35 +04:00
#define DM_MSG_PREFIX "crypt"
2006-10-03 12:15:37 +04:00
#define MESG_STR(x) x, sizeof(x)
2005-04-17 02:20:36 +04:00
/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
2008-02-08 05:11:09 +03:00
struct completion restart ;
2005-04-17 02:20:36 +04:00
struct bio * bio_in ;
struct bio * bio_out ;
unsigned int offset_in ;
unsigned int offset_out ;
unsigned int idx_in ;
unsigned int idx_out ;
sector_t sector ;
2008-02-08 05:11:09 +03:00
atomic_t pending ;
2005-04-17 02:20:36 +04:00
} ;
2008-02-08 05:10:38 +03:00
/*
* per bio private data
*/
struct dm_crypt_io {
struct dm_target * target ;
struct bio * base_bio ;
struct work_struct work ;
struct convert_context ctx ;
atomic_t pending ;
int error ;
2008-02-08 05:10:54 +03:00
sector_t sector ;
2008-10-21 20:45:02 +04:00
struct dm_crypt_io * base_io ;
2008-02-08 05:10:38 +03:00
} ;
2008-02-08 05:11:04 +03:00
struct dm_crypt_request {
2009-03-16 20:44:33 +03:00
struct convert_context * ctx ;
2008-02-08 05:11:04 +03:00
struct scatterlist sg_in ;
struct scatterlist sg_out ;
2011-01-13 22:59:54 +03:00
sector_t iv_sector ;
2008-02-08 05:11:04 +03:00
} ;
2005-04-17 02:20:36 +04:00
struct crypt_config ;
struct crypt_iv_operations {
int ( * ctr ) ( struct crypt_config * cc , struct dm_target * ti ,
2007-10-20 01:42:37 +04:00
const char * opts ) ;
2005-04-17 02:20:36 +04:00
void ( * dtr ) ( struct crypt_config * cc ) ;
2009-12-11 02:51:56 +03:00
int ( * init ) ( struct crypt_config * cc ) ;
2009-12-11 02:51:57 +03:00
int ( * wipe ) ( struct crypt_config * cc ) ;
2011-01-13 22:59:54 +03:00
int ( * generator ) ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq ) ;
int ( * post ) ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq ) ;
2005-04-17 02:20:36 +04:00
} ;
2009-12-11 02:51:55 +03:00
struct iv_essiv_private {
2009-12-11 02:51:56 +03:00
struct crypto_hash * hash_tfm ;
u8 * salt ;
2009-12-11 02:51:55 +03:00
} ;
struct iv_benbi_private {
int shift ;
} ;
2011-01-13 22:59:55 +03:00
#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
struct crypto_shash * hash_tfm ;
u8 * seed ;
} ;
2005-04-17 02:20:36 +04:00
/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
*/
2006-10-03 12:15:37 +04:00
enum flags { DM_CRYPT_SUSPENDED , DM_CRYPT_KEY_VALID } ;
2011-01-13 22:59:53 +03:00
/*
 * Duplicated per-CPU state for cipher.
*/
struct crypt_cpu {
struct ablkcipher_request * req ;
/* ESSIV: struct crypto_cipher *essiv_tfm */
void * iv_private ;
2011-01-13 22:59:54 +03:00
struct crypto_ablkcipher * tfms [ 0 ] ;
2011-01-13 22:59:53 +03:00
} ;
/*
 * The fields in here must be read only after initialization,
 * changing state should be in crypt_cpu.
*/
2005-04-17 02:20:36 +04:00
struct crypt_config {
struct dm_dev * dev ;
sector_t start ;
/*
2008-02-08 05:11:07 +03:00
 * pool for per bio private data, crypto requests and
 * encryption requests/buffer pages
2005-04-17 02:20:36 +04:00
*/
mempool_t * io_pool ;
2008-02-08 05:11:07 +03:00
mempool_t * req_pool ;
2005-04-17 02:20:36 +04:00
mempool_t * page_pool ;
2006-10-03 12:15:40 +04:00
struct bio_set * bs ;
2005-04-17 02:20:36 +04:00
2007-10-20 01:38:58 +04:00
struct workqueue_struct * io_queue ;
struct workqueue_struct * crypt_queue ;
2008-03-29 00:16:07 +03:00
2010-08-12 07:14:07 +04:00
char * cipher ;
2011-01-13 22:59:52 +03:00
char * cipher_string ;
2010-08-12 07:14:07 +04:00
2005-04-17 02:20:36 +04:00
struct crypt_iv_operations * iv_gen_ops ;
2006-12-06 00:41:52 +03:00
union {
2009-12-11 02:51:55 +03:00
struct iv_essiv_private essiv ;
struct iv_benbi_private benbi ;
2011-01-13 22:59:55 +03:00
struct iv_lmk_private lmk ;
2006-12-06 00:41:52 +03:00
} iv_gen_private ;
2005-04-17 02:20:36 +04:00
sector_t iv_offset ;
unsigned int iv_size ;
2011-01-13 22:59:53 +03:00
/*
 * Duplicated per cpu state. Access through
 * per_cpu_ptr() only.
*/
struct crypt_cpu __percpu * cpu ;
2011-01-13 22:59:54 +03:00
unsigned tfms_count ;
2011-01-13 22:59:53 +03:00
2008-02-08 05:11:07 +03:00
/*
 * Layout of each crypto request:
 *
 *   struct ablkcipher_request
 *      context
 *      padding
 *   struct dm_crypt_request
 *      padding
 *   IV
 *
 * The padding is added so that dm_crypt_request and the IV are
 * correctly aligned.
*/
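/*
 * dmreq_start is the byte offset of struct dm_crypt_request within each
 * such allocation; it is computed in crypt_ctr() and used by
 * dmreq_of_req() and req_of_dmreq() below.
 */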
unsigned int dmreq_start ;
2006-10-03 12:15:37 +04:00
unsigned long flags ;
2005-04-17 02:20:36 +04:00
unsigned int key_size ;
2011-01-13 22:59:54 +03:00
unsigned int key_parts ;
2005-04-17 02:20:36 +04:00
u8 key [ 0 ] ;
} ;
2006-10-03 12:15:40 +04:00
#define MIN_IOS 16
2005-04-17 02:20:36 +04:00
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
2006-12-07 07:33:20 +03:00
static struct kmem_cache * _crypt_io_pool ;
2005-04-17 02:20:36 +04:00
2007-07-12 20:26:32 +04:00
static void clone_init ( struct dm_crypt_io * , struct bio * ) ;
2008-02-08 05:10:52 +03:00
static void kcryptd_queue_crypt ( struct dm_crypt_io * io ) ;
2011-01-13 22:59:54 +03:00
static u8 * iv_of_dmreq ( struct crypt_config * cc , struct dm_crypt_request * dmreq ) ;
2007-05-09 13:32:52 +04:00
2011-01-13 22:59:53 +03:00
static struct crypt_cpu * this_crypt_config ( struct crypt_config * cc )
{
return this_cpu_ptr ( cc - > cpu ) ;
}
/*
* Use this to access cipher attributes that are the same for each CPU .
*/
static struct crypto_ablkcipher * any_tfm ( struct crypt_config * cc )
{
2011-01-13 22:59:54 +03:00
return __this_cpu_ptr ( cc - > cpu ) - > tfms [ 0 ] ;
2011-01-13 22:59:53 +03:00
}
2005-04-17 02:20:36 +04:00
/*
 * Different IV generation algorithms:
 *
2006-09-02 12:17:33 +04:00
 * plain: the initial vector is the 32-bit little-endian version of the sector
2007-10-20 01:10:43 +04:00
 *        number, padded with zeros if necessary.
2005-04-17 02:20:36 +04:00
 *
2009-12-11 02:52:25 +03:00
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
2006-09-02 12:17:33 +04:00
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
2005-04-17 02:20:36 +04:00
 *
2006-09-03 02:56:39 +04:00
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
2007-05-09 13:32:55 +04:00
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
2011-01-13 22:59:55 +03:00
 * lmk: Compatible implementation of the block chaining mode used
 *      by the Loop-AES block device encryption system
 *      designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
 *      It operates on full 512 byte sectors and uses CBC
 *      with an IV derived from the sector number, the data and
 *      optionally an extra IV seed.
 *      This means that after decryption the first block
 *      of the sector must be tweaked according to the decrypted data.
 *      Loop-AES can use three encryption schemes:
 *        version 1: is plain aes-cbc mode
 *        version 2: uses a 64-key (multikey) scheme with the lmk IV generator
 *        version 3: the same as version 2 with an additional IV seed
 *                   (it uses 65 keys, the last key is used as the IV seed)
 *
2005-04-17 02:20:36 +04:00
 * plumb: unimplemented, see:
 *   http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
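/*
 * Worked example (illustrative sector value only): with a 16-byte IV and
 * sector number 0x112345678, "plain" keeps only the low 32 bits and yields
 * the IV bytes 78 56 34 12 00 00 ... 00, while "plain64" keeps all 64 bits
 * and yields 78 56 34 12 01 00 00 00 00 ... 00 (little-endian, zero padded
 * to iv_size).
 */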
2011-01-13 22:59:54 +03:00
static int crypt_iv_plain_gen ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq )
2005-04-17 02:20:36 +04:00
{
memset ( iv , 0 , cc - > iv_size ) ;
2011-01-13 22:59:54 +03:00
* ( u32 * ) iv = cpu_to_le32 ( dmreq - > iv_sector & 0xffffffff ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2009-12-11 02:52:25 +03:00
static int crypt_iv_plain64_gen ( struct crypt_config * cc , u8 * iv ,
2011-01-13 22:59:54 +03:00
struct dm_crypt_request * dmreq )
2009-12-11 02:52:25 +03:00
{
memset ( iv , 0 , cc - > iv_size ) ;
2011-01-13 22:59:54 +03:00
* ( u64 * ) iv = cpu_to_le64 ( dmreq - > iv_sector ) ;
2009-12-11 02:52:25 +03:00
return 0 ;
}
2009-12-11 02:51:56 +03:00
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init ( struct crypt_config * cc )
{
struct iv_essiv_private * essiv = & cc - > iv_gen_private . essiv ;
struct hash_desc desc ;
struct scatterlist sg ;
2011-01-13 22:59:53 +03:00
struct crypto_cipher * essiv_tfm ;
int err , cpu ;
2009-12-11 02:51:56 +03:00
sg_init_one ( & sg , cc - > key , cc - > key_size ) ;
desc . tfm = essiv - > hash_tfm ;
desc . flags = CRYPTO_TFM_REQ_MAY_SLEEP ;
err = crypto_hash_digest ( & desc , & sg , cc - > key_size , essiv - > salt ) ;
if ( err )
return err ;
2011-01-13 22:59:53 +03:00
for_each_possible_cpu ( cpu ) {
essiv_tfm = per_cpu_ptr ( cc - > cpu , cpu ) - > iv_private ,
err = crypto_cipher_setkey ( essiv_tfm , essiv - > salt ,
2009-12-11 02:51:56 +03:00
crypto_hash_digestsize ( essiv - > hash_tfm ) ) ;
2011-01-13 22:59:53 +03:00
if ( err )
return err ;
}
return 0 ;
2009-12-11 02:51:56 +03:00
}
2009-12-11 02:51:57 +03:00
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe ( struct crypt_config * cc )
{
struct iv_essiv_private * essiv = & cc - > iv_gen_private . essiv ;
unsigned salt_size = crypto_hash_digestsize ( essiv - > hash_tfm ) ;
2011-01-13 22:59:53 +03:00
struct crypto_cipher * essiv_tfm ;
int cpu , r , err = 0 ;
2009-12-11 02:51:57 +03:00
memset ( essiv - > salt , 0 , salt_size ) ;
2011-01-13 22:59:53 +03:00
for_each_possible_cpu ( cpu ) {
essiv_tfm = per_cpu_ptr ( cc - > cpu , cpu ) - > iv_private ;
r = crypto_cipher_setkey ( essiv_tfm , essiv - > salt , salt_size ) ;
if ( r )
err = r ;
}
return err ;
}
/* Set up per cpu cipher state */
static struct crypto_cipher * setup_essiv_cpu ( struct crypt_config * cc ,
struct dm_target * ti ,
u8 * salt , unsigned saltsize )
{
struct crypto_cipher * essiv_tfm ;
int err ;
/* Setup the essiv_tfm with the given salt */
essiv_tfm = crypto_alloc_cipher ( cc - > cipher , 0 , CRYPTO_ALG_ASYNC ) ;
if ( IS_ERR ( essiv_tfm ) ) {
ti - > error = " Error allocating crypto tfm for ESSIV " ;
return essiv_tfm ;
}
if ( crypto_cipher_blocksize ( essiv_tfm ) ! =
crypto_ablkcipher_ivsize ( any_tfm ( cc ) ) ) {
ti - > error = " Block size of ESSIV cipher does "
" not match IV size of block cipher " ;
crypto_free_cipher ( essiv_tfm ) ;
return ERR_PTR ( - EINVAL ) ;
}
err = crypto_cipher_setkey ( essiv_tfm , salt , saltsize ) ;
if ( err ) {
ti - > error = " Failed to set key for ESSIV cipher " ;
crypto_free_cipher ( essiv_tfm ) ;
return ERR_PTR ( err ) ;
}
return essiv_tfm ;
2009-12-11 02:51:57 +03:00
}
2009-12-11 02:51:55 +03:00
static void crypt_iv_essiv_dtr ( struct crypt_config * cc )
{
2011-01-13 22:59:53 +03:00
int cpu ;
struct crypt_cpu * cpu_cc ;
struct crypto_cipher * essiv_tfm ;
2009-12-11 02:51:55 +03:00
struct iv_essiv_private * essiv = & cc - > iv_gen_private . essiv ;
2009-12-11 02:51:56 +03:00
crypto_free_hash ( essiv - > hash_tfm ) ;
essiv - > hash_tfm = NULL ;
kzfree ( essiv - > salt ) ;
essiv - > salt = NULL ;
2011-01-13 22:59:53 +03:00
for_each_possible_cpu ( cpu ) {
cpu_cc = per_cpu_ptr ( cc - > cpu , cpu ) ;
essiv_tfm = cpu_cc - > iv_private ;
if ( essiv_tfm )
crypto_free_cipher ( essiv_tfm ) ;
cpu_cc - > iv_private = NULL ;
}
2009-12-11 02:51:55 +03:00
}
2005-04-17 02:20:36 +04:00
static int crypt_iv_essiv_ctr ( struct crypt_config * cc , struct dm_target * ti ,
2007-10-20 01:42:37 +04:00
const char * opts )
2005-04-17 02:20:36 +04:00
{
2009-12-11 02:51:56 +03:00
struct crypto_cipher * essiv_tfm = NULL ;
struct crypto_hash * hash_tfm = NULL ;
u8 * salt = NULL ;
2011-01-13 22:59:53 +03:00
int err , cpu ;
2005-04-17 02:20:36 +04:00
2009-12-11 02:51:56 +03:00
if ( ! opts ) {
2006-06-26 11:27:35 +04:00
ti - > error = " Digest algorithm missing for ESSIV mode " ;
2005-04-17 02:20:36 +04:00
return - EINVAL ;
}
2009-12-11 02:51:56 +03:00
/* Allocate hash algorithm */
2006-08-24 13:10:20 +04:00
hash_tfm = crypto_alloc_hash ( opts , 0 , CRYPTO_ALG_ASYNC ) ;
if ( IS_ERR ( hash_tfm ) ) {
2006-06-26 11:27:35 +04:00
ti - > error = " Error initializing ESSIV hash " ;
2009-12-11 02:51:56 +03:00
err = PTR_ERR ( hash_tfm ) ;
goto bad ;
2005-04-17 02:20:36 +04:00
}
2009-12-11 02:51:56 +03:00
salt = kzalloc ( crypto_hash_digestsize ( hash_tfm ) , GFP_KERNEL ) ;
2009-12-11 02:51:56 +03:00
if ( ! salt ) {
2006-06-26 11:27:35 +04:00
ti - > error = " Error kmallocing salt storage in ESSIV " ;
2009-12-11 02:51:56 +03:00
err = - ENOMEM ;
goto bad ;
2005-04-17 02:20:36 +04:00
}
2009-12-11 02:51:56 +03:00
cc - > iv_gen_private . essiv . salt = salt ;
cc - > iv_gen_private . essiv . hash_tfm = hash_tfm ;
2011-01-13 22:59:53 +03:00
for_each_possible_cpu ( cpu ) {
essiv_tfm = setup_essiv_cpu ( cc , ti , salt ,
crypto_hash_digestsize ( hash_tfm ) ) ;
if ( IS_ERR ( essiv_tfm ) ) {
crypt_iv_essiv_dtr ( cc ) ;
return PTR_ERR ( essiv_tfm ) ;
}
per_cpu_ptr ( cc - > cpu , cpu ) - > iv_private = essiv_tfm ;
}
2005-04-17 02:20:36 +04:00
return 0 ;
2009-12-11 02:51:56 +03:00
bad :
if ( hash_tfm & & ! IS_ERR ( hash_tfm ) )
crypto_free_hash ( hash_tfm ) ;
2009-12-11 02:51:56 +03:00
kfree ( salt ) ;
2009-12-11 02:51:56 +03:00
return err ;
2005-04-17 02:20:36 +04:00
}
2011-01-13 22:59:54 +03:00
static int crypt_iv_essiv_gen ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq )
2005-04-17 02:20:36 +04:00
{
2011-01-13 22:59:53 +03:00
struct crypto_cipher * essiv_tfm = this_crypt_config ( cc ) - > iv_private ;
2005-04-17 02:20:36 +04:00
memset ( iv , 0 , cc - > iv_size ) ;
2011-01-13 22:59:54 +03:00
* ( u64 * ) iv = cpu_to_le64 ( dmreq - > iv_sector ) ;
2011-01-13 22:59:53 +03:00
crypto_cipher_encrypt_one ( essiv_tfm , iv , iv ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2006-09-03 02:56:39 +04:00
static int crypt_iv_benbi_ctr ( struct crypt_config * cc , struct dm_target * ti ,
const char * opts )
{
2011-01-13 22:59:53 +03:00
unsigned bs = crypto_ablkcipher_blocksize ( any_tfm ( cc ) ) ;
2006-12-08 13:37:49 +03:00
int log = ilog2 ( bs ) ;
2006-09-03 02:56:39 +04:00
/* We need to calculate how far we must shift the sector count
 * to get the cipher block count; we use this shift in _gen. */
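/* For example (hypothetical block size): a cipher with 16-byte blocks gives
 * log = 4, so shift = 9 - 4 = 5 and each 512-byte sector corresponds to
 * 1 << 5 = 32 cipher blocks. */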
if ( 1 < < log ! = bs ) {
ti - > error = " cypher blocksize is not a power of 2 " ;
return - EINVAL ;
}
if ( log > 9 ) {
ti - > error = " cypher blocksize is > 512 " ;
return - EINVAL ;
}
2009-12-11 02:51:55 +03:00
cc - > iv_gen_private . benbi . shift = 9 - log ;
2006-09-03 02:56:39 +04:00
return 0 ;
}
static void crypt_iv_benbi_dtr ( struct crypt_config * cc )
{
}
2011-01-13 22:59:54 +03:00
static int crypt_iv_benbi_gen ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq )
2006-09-03 02:56:39 +04:00
{
2006-12-06 00:41:52 +03:00
__be64 val ;
2006-09-03 02:56:39 +04:00
memset ( iv , 0 , cc - > iv_size - sizeof ( u64 ) ) ; /* rest is cleared below */
2006-12-06 00:41:52 +03:00
2011-01-13 22:59:54 +03:00
val = cpu_to_be64 ( ( ( u64 ) dmreq - > iv_sector < < cc - > iv_gen_private . benbi . shift ) + 1 ) ;
2006-12-06 00:41:52 +03:00
put_unaligned ( val , ( __be64 * ) ( iv + cc - > iv_size - sizeof ( u64 ) ) ) ;
2006-09-03 02:56:39 +04:00
2005-04-17 02:20:36 +04:00
return 0 ;
}
2011-01-13 22:59:54 +03:00
static int crypt_iv_null_gen ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq )
2007-05-09 13:32:55 +04:00
{
memset ( iv , 0 , cc - > iv_size ) ;
return 0 ;
}
2011-01-13 22:59:55 +03:00
static void crypt_iv_lmk_dtr ( struct crypt_config * cc )
{
struct iv_lmk_private * lmk = & cc - > iv_gen_private . lmk ;
if ( lmk - > hash_tfm & & ! IS_ERR ( lmk - > hash_tfm ) )
crypto_free_shash ( lmk - > hash_tfm ) ;
lmk - > hash_tfm = NULL ;
kzfree ( lmk - > seed ) ;
lmk - > seed = NULL ;
}
static int crypt_iv_lmk_ctr ( struct crypt_config * cc , struct dm_target * ti ,
const char * opts )
{
struct iv_lmk_private * lmk = & cc - > iv_gen_private . lmk ;
lmk - > hash_tfm = crypto_alloc_shash ( " md5 " , 0 , 0 ) ;
if ( IS_ERR ( lmk - > hash_tfm ) ) {
ti - > error = " Error initializing LMK hash " ;
return PTR_ERR ( lmk - > hash_tfm ) ;
}
/* No seed in LMK version 2 */
if ( cc - > key_parts = = cc - > tfms_count ) {
lmk - > seed = NULL ;
return 0 ;
}
lmk - > seed = kzalloc ( LMK_SEED_SIZE , GFP_KERNEL ) ;
if ( ! lmk - > seed ) {
crypt_iv_lmk_dtr ( cc ) ;
ti - > error = " Error kmallocing seed storage in LMK " ;
return - ENOMEM ;
}
return 0 ;
}
static int crypt_iv_lmk_init ( struct crypt_config * cc )
{
struct iv_lmk_private * lmk = & cc - > iv_gen_private . lmk ;
int subkey_size = cc - > key_size / cc - > key_parts ;
/* LMK seed is on the position of LMK_KEYS + 1 key */
if ( lmk - > seed )
memcpy ( lmk - > seed , cc - > key + ( cc - > tfms_count * subkey_size ) ,
crypto_shash_digestsize ( lmk - > hash_tfm ) ) ;
return 0 ;
}
static int crypt_iv_lmk_wipe ( struct crypt_config * cc )
{
struct iv_lmk_private * lmk = & cc - > iv_gen_private . lmk ;
if ( lmk - > seed )
memset ( lmk - > seed , 0 , LMK_SEED_SIZE ) ;
return 0 ;
}
static int crypt_iv_lmk_one ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq ,
u8 * data )
{
struct iv_lmk_private * lmk = & cc - > iv_gen_private . lmk ;
struct {
struct shash_desc desc ;
char ctx [ crypto_shash_descsize ( lmk - > hash_tfm ) ] ;
} sdesc ;
struct md5_state md5state ;
u32 buf [ 4 ] ;
int i , r ;
sdesc . desc . tfm = lmk - > hash_tfm ;
sdesc . desc . flags = CRYPTO_TFM_REQ_MAY_SLEEP ;
r = crypto_shash_init ( & sdesc . desc ) ;
if ( r )
return r ;
if ( lmk - > seed ) {
r = crypto_shash_update ( & sdesc . desc , lmk - > seed , LMK_SEED_SIZE ) ;
if ( r )
return r ;
}
/* Sector is always 512B, block size 16, add data of blocks 1-31 */
r = crypto_shash_update ( & sdesc . desc , data + 16 , 16 * 31 ) ;
if ( r )
return r ;
/* Sector is cropped to 56 bits here */
buf [ 0 ] = cpu_to_le32 ( dmreq - > iv_sector & 0xFFFFFFFF ) ;
buf [ 1 ] = cpu_to_le32 ( ( ( ( u64 ) dmreq - > iv_sector > > 32 ) & 0x00FFFFFF ) | 0x80000000 ) ;
buf [ 2 ] = cpu_to_le32 ( 4024 ) ;
buf [ 3 ] = 0 ;
r = crypto_shash_update ( & sdesc . desc , ( u8 * ) buf , sizeof ( buf ) ) ;
if ( r )
return r ;
/* No MD5 padding here */
r = crypto_shash_export ( & sdesc . desc , & md5state ) ;
if ( r )
return r ;
for ( i = 0 ; i < MD5_HASH_WORDS ; i + + )
__cpu_to_le32s ( & md5state . hash [ i ] ) ;
memcpy ( iv , & md5state . hash , cc - > iv_size ) ;
return 0 ;
}
static int crypt_iv_lmk_gen ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq )
{
u8 * src ;
int r = 0 ;
if ( bio_data_dir ( dmreq - > ctx - > bio_in ) = = WRITE ) {
src = kmap_atomic ( sg_page ( & dmreq - > sg_in ) , KM_USER0 ) ;
r = crypt_iv_lmk_one ( cc , iv , dmreq , src + dmreq - > sg_in . offset ) ;
kunmap_atomic ( src , KM_USER0 ) ;
} else
memset ( iv , 0 , cc - > iv_size ) ;
return r ;
}
static int crypt_iv_lmk_post ( struct crypt_config * cc , u8 * iv ,
struct dm_crypt_request * dmreq )
{
u8 * dst ;
int r ;
if ( bio_data_dir ( dmreq - > ctx - > bio_in ) = = WRITE )
return 0 ;
dst = kmap_atomic ( sg_page ( & dmreq - > sg_out ) , KM_USER0 ) ;
r = crypt_iv_lmk_one ( cc , iv , dmreq , dst + dmreq - > sg_out . offset ) ;
/* Tweak the first block of plaintext sector */
if ( ! r )
crypto_xor ( dst + dmreq - > sg_out . offset , iv , cc - > iv_size ) ;
kunmap_atomic ( dst , KM_USER0 ) ;
return r ;
}
2005-04-17 02:20:36 +04:00
static struct crypt_iv_operations crypt_iv_plain_ops = {
. generator = crypt_iv_plain_gen
} ;
2009-12-11 02:52:25 +03:00
static struct crypt_iv_operations crypt_iv_plain64_ops = {
. generator = crypt_iv_plain64_gen
} ;
2005-04-17 02:20:36 +04:00
static struct crypt_iv_operations crypt_iv_essiv_ops = {
. ctr = crypt_iv_essiv_ctr ,
. dtr = crypt_iv_essiv_dtr ,
2009-12-11 02:51:56 +03:00
. init = crypt_iv_essiv_init ,
2009-12-11 02:51:57 +03:00
. wipe = crypt_iv_essiv_wipe ,
2005-04-17 02:20:36 +04:00
. generator = crypt_iv_essiv_gen
} ;
2006-09-03 02:56:39 +04:00
static struct crypt_iv_operations crypt_iv_benbi_ops = {
. ctr = crypt_iv_benbi_ctr ,
. dtr = crypt_iv_benbi_dtr ,
. generator = crypt_iv_benbi_gen
} ;
2005-04-17 02:20:36 +04:00
2007-05-09 13:32:55 +04:00
static struct crypt_iv_operations crypt_iv_null_ops = {
. generator = crypt_iv_null_gen
} ;
2011-01-13 22:59:55 +03:00
static struct crypt_iv_operations crypt_iv_lmk_ops = {
. ctr = crypt_iv_lmk_ctr ,
. dtr = crypt_iv_lmk_dtr ,
. init = crypt_iv_lmk_init ,
. wipe = crypt_iv_lmk_wipe ,
. generator = crypt_iv_lmk_gen ,
. post = crypt_iv_lmk_post
} ;
2007-10-20 01:42:37 +04:00
static void crypt_convert_init ( struct crypt_config * cc ,
struct convert_context * ctx ,
struct bio * bio_out , struct bio * bio_in ,
2008-02-08 05:10:41 +03:00
sector_t sector )
2005-04-17 02:20:36 +04:00
{
ctx - > bio_in = bio_in ;
ctx - > bio_out = bio_out ;
ctx - > offset_in = 0 ;
ctx - > offset_out = 0 ;
ctx - > idx_in = bio_in ? bio_in - > bi_idx : 0 ;
ctx - > idx_out = bio_out ? bio_out - > bi_idx : 0 ;
ctx - > sector = sector + cc - > iv_offset ;
2008-02-08 05:11:09 +03:00
init_completion ( & ctx - > restart ) ;
2005-04-17 02:20:36 +04:00
}
2009-03-16 20:44:33 +03:00
static struct dm_crypt_request * dmreq_of_req ( struct crypt_config * cc ,
struct ablkcipher_request * req )
{
return ( struct dm_crypt_request * ) ( ( char * ) req + cc - > dmreq_start ) ;
}
static struct ablkcipher_request * req_of_dmreq ( struct crypt_config * cc ,
struct dm_crypt_request * dmreq )
{
return ( struct ablkcipher_request * ) ( ( char * ) dmreq - cc - > dmreq_start ) ;
}
2011-01-13 22:59:54 +03:00
static u8 * iv_of_dmreq ( struct crypt_config * cc ,
struct dm_crypt_request * dmreq )
{
return ( u8 * ) ALIGN ( ( unsigned long ) ( dmreq + 1 ) ,
crypto_ablkcipher_alignmask ( any_tfm ( cc ) ) + 1 ) ;
}
2008-02-08 05:11:04 +03:00
static int crypt_convert_block ( struct crypt_config * cc ,
2008-02-08 05:11:14 +03:00
struct convert_context * ctx ,
struct ablkcipher_request * req )
2008-02-08 05:11:04 +03:00
{
struct bio_vec * bv_in = bio_iovec_idx ( ctx - > bio_in , ctx - > idx_in ) ;
struct bio_vec * bv_out = bio_iovec_idx ( ctx - > bio_out , ctx - > idx_out ) ;
2008-02-08 05:11:14 +03:00
struct dm_crypt_request * dmreq ;
u8 * iv ;
int r = 0 ;
2009-03-16 20:44:33 +03:00
dmreq = dmreq_of_req ( cc , req ) ;
2011-01-13 22:59:54 +03:00
iv = iv_of_dmreq ( cc , dmreq ) ;
2008-02-08 05:11:04 +03:00
2011-01-13 22:59:54 +03:00
dmreq - > iv_sector = ctx - > sector ;
2009-03-16 20:44:33 +03:00
dmreq - > ctx = ctx ;
2008-02-08 05:11:14 +03:00
sg_init_table ( & dmreq - > sg_in , 1 ) ;
sg_set_page ( & dmreq - > sg_in , bv_in - > bv_page , 1 < < SECTOR_SHIFT ,
2008-02-08 05:11:04 +03:00
bv_in - > bv_offset + ctx - > offset_in ) ;
2008-02-08 05:11:14 +03:00
sg_init_table ( & dmreq - > sg_out , 1 ) ;
sg_set_page ( & dmreq - > sg_out , bv_out - > bv_page , 1 < < SECTOR_SHIFT ,
2008-02-08 05:11:04 +03:00
bv_out - > bv_offset + ctx - > offset_out ) ;
ctx - > offset_in + = 1 < < SECTOR_SHIFT ;
if ( ctx - > offset_in > = bv_in - > bv_len ) {
ctx - > offset_in = 0 ;
ctx - > idx_in + + ;
}
ctx - > offset_out + = 1 < < SECTOR_SHIFT ;
if ( ctx - > offset_out > = bv_out - > bv_len ) {
ctx - > offset_out = 0 ;
ctx - > idx_out + + ;
}
2008-02-08 05:11:14 +03:00
if ( cc - > iv_gen_ops ) {
2011-01-13 22:59:54 +03:00
r = cc - > iv_gen_ops - > generator ( cc , iv , dmreq ) ;
2008-02-08 05:11:14 +03:00
if ( r < 0 )
return r ;
}
ablkcipher_request_set_crypt ( req , & dmreq - > sg_in , & dmreq - > sg_out ,
1 < < SECTOR_SHIFT , iv ) ;
if ( bio_data_dir ( ctx - > bio_in ) = = WRITE )
r = crypto_ablkcipher_encrypt ( req ) ;
else
r = crypto_ablkcipher_decrypt ( req ) ;
2011-01-13 22:59:54 +03:00
if ( ! r & & cc - > iv_gen_ops & & cc - > iv_gen_ops - > post )
r = cc - > iv_gen_ops - > post ( cc , iv , dmreq ) ;
2008-02-08 05:11:14 +03:00
return r ;
2008-02-08 05:11:04 +03:00
}
2008-02-08 05:11:12 +03:00
static void kcryptd_async_done ( struct crypto_async_request * async_req ,
int error ) ;
2011-01-13 22:59:53 +03:00
2008-02-08 05:11:07 +03:00
static void crypt_alloc_req ( struct crypt_config * cc ,
struct convert_context * ctx )
{
2011-01-13 22:59:53 +03:00
struct crypt_cpu * this_cc = this_crypt_config ( cc ) ;
2011-01-13 22:59:54 +03:00
unsigned key_index = ctx - > sector & ( cc - > tfms_count - 1 ) ;
2011-01-13 22:59:53 +03:00
if ( ! this_cc - > req )
this_cc - > req = mempool_alloc ( cc - > req_pool , GFP_NOIO ) ;
2011-01-13 22:59:54 +03:00
ablkcipher_request_set_tfm ( this_cc - > req , this_cc - > tfms [ key_index ] ) ;
2011-01-13 22:59:53 +03:00
ablkcipher_request_set_callback ( this_cc - > req ,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP ,
kcryptd_async_done , dmreq_of_req ( cc , this_cc - > req ) ) ;
2008-02-08 05:11:07 +03:00
}
2005-04-17 02:20:36 +04:00
/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
*/
static int crypt_convert ( struct crypt_config * cc ,
2007-10-20 01:42:37 +04:00
struct convert_context * ctx )
2005-04-17 02:20:36 +04:00
{
2011-01-13 22:59:53 +03:00
struct crypt_cpu * this_cc = this_crypt_config ( cc ) ;
2008-03-29 00:16:07 +03:00
int r ;
2005-04-17 02:20:36 +04:00
2008-10-10 16:37:08 +04:00
atomic_set ( & ctx - > pending , 1 ) ;
2005-04-17 02:20:36 +04:00
while ( ctx - > idx_in < ctx - > bio_in - > bi_vcnt & &
ctx - > idx_out < ctx - > bio_out - > bi_vcnt ) {
2008-02-08 05:11:14 +03:00
crypt_alloc_req ( cc , ctx ) ;
2008-03-29 00:16:07 +03:00
atomic_inc ( & ctx - > pending ) ;
2011-01-13 22:59:53 +03:00
r = crypt_convert_block ( cc , ctx , this_cc - > req ) ;
2008-02-08 05:11:14 +03:00
switch ( r ) {
2008-03-29 00:16:07 +03:00
/* async */
2008-02-08 05:11:14 +03:00
case - EBUSY :
wait_for_completion ( & ctx - > restart ) ;
INIT_COMPLETION ( ctx - > restart ) ;
/* fall through*/
case - EINPROGRESS :
2011-01-13 22:59:53 +03:00
this_cc - > req = NULL ;
2008-03-29 00:16:07 +03:00
ctx - > sector + + ;
continue ;
/* sync */
2008-02-08 05:11:14 +03:00
case 0 :
2008-03-29 00:16:07 +03:00
atomic_dec ( & ctx - > pending ) ;
2008-02-08 05:11:14 +03:00
ctx - > sector + + ;
2008-07-02 12:34:28 +04:00
cond_resched ( ) ;
2008-02-08 05:11:14 +03:00
continue ;
2008-03-29 00:16:07 +03:00
/* error */
default :
atomic_dec ( & ctx - > pending ) ;
return r ;
}
2005-04-17 02:20:36 +04:00
}
2008-03-29 00:16:07 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2007-10-20 01:42:37 +04:00
static void dm_crypt_bio_destructor ( struct bio * bio )
{
2007-07-12 20:26:32 +04:00
struct dm_crypt_io * io = bio - > bi_private ;
2006-10-03 12:15:40 +04:00
struct crypt_config * cc = io - > target - > private ;
bio_free ( bio , cc - > bs ) ;
2007-10-20 01:42:37 +04:00
}
2006-10-03 12:15:40 +04:00
2005-04-17 02:20:36 +04:00
/*
* Generate a new unfragmented bio with the given size
* This should never violate the device limitations
2008-10-10 16:37:08 +04:00
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
2005-04-17 02:20:36 +04:00
*/
2008-10-10 16:37:08 +04:00
static struct bio * crypt_alloc_buffer ( struct dm_crypt_io * io , unsigned size ,
unsigned * out_of_pages )
2005-04-17 02:20:36 +04:00
{
2007-05-09 13:32:52 +04:00
struct crypt_config * cc = io - > target - > private ;
2006-10-03 12:15:37 +04:00
struct bio * clone ;
2005-04-17 02:20:36 +04:00
unsigned int nr_iovecs = ( size + PAGE_SIZE - 1 ) > > PAGE_SHIFT ;
2005-10-21 11:22:34 +04:00
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM ;
2007-12-13 17:16:10 +03:00
unsigned i , len ;
struct page * page ;
2005-04-17 02:20:36 +04:00
2007-05-09 13:32:53 +04:00
clone = bio_alloc_bioset ( GFP_NOIO , nr_iovecs , cc - > bs ) ;
2006-10-03 12:15:37 +04:00
if ( ! clone )
2005-04-17 02:20:36 +04:00
return NULL ;
2007-05-09 13:32:52 +04:00
clone_init ( io , clone ) ;
2008-10-10 16:37:08 +04:00
* out_of_pages = 0 ;
2006-10-03 12:15:40 +04:00
2007-05-09 13:32:54 +04:00
for ( i = 0 ; i < nr_iovecs ; i + + ) {
2007-12-13 17:16:10 +03:00
page = mempool_alloc ( cc - > page_pool , gfp_mask ) ;
2008-10-10 16:37:08 +04:00
if ( ! page ) {
* out_of_pages = 1 ;
2005-04-17 02:20:36 +04:00
break ;
2008-10-10 16:37:08 +04:00
}
2005-04-17 02:20:36 +04:00
/*
 * if additional pages cannot be allocated without waiting,
 * return a partially allocated bio, the caller will then try
 * to allocate additional bios while submitting this partial bio
*/
2007-05-09 13:32:54 +04:00
if ( i = = ( MIN_BIO_PAGES - 1 ) )
2005-04-17 02:20:36 +04:00
gfp_mask = ( gfp_mask | __GFP_NOWARN ) & ~ __GFP_WAIT ;
2007-12-13 17:16:10 +03:00
len = ( size > PAGE_SIZE ) ? PAGE_SIZE : size ;
if ( ! bio_add_page ( clone , page , len , 0 ) ) {
mempool_free ( page , cc - > page_pool ) ;
break ;
}
2005-04-17 02:20:36 +04:00
2007-12-13 17:16:10 +03:00
size - = len ;
2005-04-17 02:20:36 +04:00
}
2006-10-03 12:15:37 +04:00
if ( ! clone - > bi_size ) {
bio_put ( clone ) ;
2005-04-17 02:20:36 +04:00
return NULL ;
}
2006-10-03 12:15:37 +04:00
return clone ;
2005-04-17 02:20:36 +04:00
}
2007-10-16 15:48:46 +04:00
static void crypt_free_buffer_pages ( struct crypt_config * cc , struct bio * clone )
2005-04-17 02:20:36 +04:00
{
2007-10-16 15:48:46 +04:00
unsigned int i ;
2005-04-17 02:20:36 +04:00
struct bio_vec * bv ;
2007-10-16 15:48:46 +04:00
for ( i = 0 ; i < clone - > bi_vcnt ; i + + ) {
2006-10-03 12:15:37 +04:00
bv = bio_iovec_idx ( clone , i ) ;
2005-04-17 02:20:36 +04:00
BUG_ON ( ! bv - > bv_page ) ;
mempool_free ( bv - > bv_page , cc - > page_pool ) ;
bv - > bv_page = NULL ;
}
}
2008-10-10 16:37:03 +04:00
static struct dm_crypt_io * crypt_io_alloc ( struct dm_target * ti ,
struct bio * bio , sector_t sector )
{
struct crypt_config * cc = ti - > private ;
struct dm_crypt_io * io ;
io = mempool_alloc ( cc - > io_pool , GFP_NOIO ) ;
io - > target = ti ;
io - > base_bio = bio ;
io - > sector = sector ;
io - > error = 0 ;
2008-10-21 20:45:02 +04:00
io - > base_io = NULL ;
2008-10-10 16:37:03 +04:00
atomic_set ( & io - > pending , 0 ) ;
return io ;
}
2008-10-10 16:37:02 +04:00
static void crypt_inc_pending ( struct dm_crypt_io * io )
{
atomic_inc ( & io - > pending ) ;
}
2005-04-17 02:20:36 +04:00
/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
2008-10-21 20:45:02 +04:00
 * If base_io is set, wait for the last fragment to complete.
2005-04-17 02:20:36 +04:00
*/
2008-02-08 05:10:43 +03:00
static void crypt_dec_pending ( struct dm_crypt_io * io )
2005-04-17 02:20:36 +04:00
{
2008-02-08 05:10:43 +03:00
struct crypt_config * cc = io - > target - > private ;
2009-03-16 20:44:36 +03:00
struct bio * base_bio = io - > base_bio ;
struct dm_crypt_io * base_io = io - > base_io ;
int error = io - > error ;
2005-04-17 02:20:36 +04:00
if ( ! atomic_dec_and_test ( & io - > pending ) )
return ;
2009-03-16 20:44:36 +03:00
mempool_free ( io , cc - > io_pool ) ;
if ( likely ( ! base_io ) )
bio_endio ( base_bio , error ) ;
2008-10-21 20:45:02 +04:00
else {
2009-03-16 20:44:36 +03:00
if ( error & & ! base_io - > error )
base_io - > error = error ;
crypt_dec_pending ( base_io ) ;
2008-10-21 20:45:02 +04:00
}
2005-04-17 02:20:36 +04:00
}
/*
2007-10-20 01:38:58 +04:00
 * kcryptd/kcryptd_io:
2005-04-17 02:20:36 +04:00
 *
 * Needed because it would be very unwise to do decryption in an
2006-10-03 12:15:39 +04:00
 * interrupt context.
2007-10-20 01:38:58 +04:00
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
2011-01-13 22:59:53 +03:00
 *
 * The work is done per CPU, globally for all dm-crypt instances.
 * The per-CPU workers should not depend on each other and do not block.
2005-04-17 02:20:36 +04:00
*/
2007-09-27 14:47:43 +04:00
static void crypt_endio ( struct bio * clone , int error )
2006-10-03 12:15:37 +04:00
{
2007-07-12 20:26:32 +04:00
struct dm_crypt_io * io = clone - > bi_private ;
2006-10-03 12:15:37 +04:00
struct crypt_config * cc = io - > target - > private ;
2008-02-08 05:10:46 +03:00
unsigned rw = bio_data_dir ( clone ) ;
2006-10-03 12:15:37 +04:00
2007-12-13 17:15:51 +03:00
if ( unlikely ( ! bio_flagged ( clone , BIO_UPTODATE ) & & ! error ) )
error = - EIO ;
2006-10-03 12:15:37 +04:00
/*
2007-09-27 14:47:43 +04:00
* free the processed pages
2006-10-03 12:15:37 +04:00
*/
2008-02-08 05:10:46 +03:00
if ( rw = = WRITE )
2007-10-16 15:48:46 +04:00
crypt_free_buffer_pages ( cc , clone ) ;
2006-10-03 12:15:37 +04:00
bio_put ( clone ) ;
2008-02-08 05:10:46 +03:00
if ( rw = = READ & & ! error ) {
kcryptd_queue_crypt ( io ) ;
return ;
}
2008-02-08 05:10:43 +03:00
if ( unlikely ( error ) )
io - > error = error ;
crypt_dec_pending ( io ) ;
2006-10-03 12:15:37 +04:00
}
2007-07-12 20:26:32 +04:00
static void clone_init ( struct dm_crypt_io * io , struct bio * clone )
2006-10-03 12:15:37 +04:00
{
struct crypt_config * cc = io - > target - > private ;
clone - > bi_private = io ;
clone - > bi_end_io = crypt_endio ;
clone - > bi_bdev = cc - > dev - > bdev ;
clone - > bi_rw = io - > base_bio - > bi_rw ;
2007-05-09 13:32:52 +04:00
clone - > bi_destructor = dm_crypt_bio_destructor ;
2006-10-03 12:15:37 +04:00
}
2011-01-13 22:59:53 +03:00
static int kcryptd_io_read ( struct dm_crypt_io * io , gfp_t gfp )
2006-10-03 12:15:37 +04:00
{
struct crypt_config * cc = io - > target - > private ;
struct bio * base_bio = io - > base_bio ;
struct bio * clone ;
2006-10-03 12:15:38 +04:00
2006-10-03 12:15:37 +04:00
/*
 * The block layer might modify the bvec array, so always
 * copy the required bvecs because we need the original
 * one in order to decrypt the whole bio data *afterwards*.
*/
2011-01-13 22:59:53 +03:00
clone = bio_alloc_bioset ( gfp , bio_segments ( base_bio ) , cc - > bs ) ;
2011-03-10 10:52:07 +03:00
if ( ! clone )
2011-01-13 22:59:53 +03:00
return 1 ;
2006-10-03 12:15:37 +04:00
2011-01-13 22:59:53 +03:00
crypt_inc_pending ( io ) ;
2006-10-03 12:15:37 +04:00
clone_init ( io , clone ) ;
clone - > bi_idx = 0 ;
clone - > bi_vcnt = bio_segments ( base_bio ) ;
clone - > bi_size = base_bio - > bi_size ;
2008-02-08 05:10:54 +03:00
clone - > bi_sector = cc - > start + io - > sector ;
2006-10-03 12:15:37 +04:00
memcpy ( clone - > bi_io_vec , bio_iovec ( base_bio ) ,
sizeof ( struct bio_vec ) * clone - > bi_vcnt ) ;
2006-10-03 12:15:38 +04:00
generic_make_request ( clone ) ;
2011-01-13 22:59:53 +03:00
return 0 ;
2006-10-03 12:15:37 +04:00
}
2008-02-08 05:10:49 +03:00
static void kcryptd_io_write ( struct dm_crypt_io * io )
{
2008-02-08 05:11:12 +03:00
struct bio * clone = io - > ctx . bio_out ;
generic_make_request ( clone ) ;
2008-02-08 05:10:49 +03:00
}
2008-02-08 05:10:52 +03:00
static void kcryptd_io ( struct work_struct * work )
{
struct dm_crypt_io * io = container_of ( work , struct dm_crypt_io , work ) ;
2011-01-13 22:59:53 +03:00
if ( bio_data_dir ( io - > base_bio ) = = READ ) {
crypt_inc_pending ( io ) ;
if ( kcryptd_io_read ( io , GFP_NOIO ) )
io - > error = - ENOMEM ;
crypt_dec_pending ( io ) ;
} else
2008-02-08 05:10:52 +03:00
kcryptd_io_write ( io ) ;
}
static void kcryptd_queue_io ( struct dm_crypt_io * io )
{
struct crypt_config * cc = io - > target - > private ;
INIT_WORK ( & io - > work , kcryptd_io ) ;
queue_work ( cc - > io_queue , & io - > work ) ;
}
2008-02-08 05:11:12 +03:00
static void kcryptd_crypt_write_io_submit ( struct dm_crypt_io * io ,
int error , int async )
2008-02-08 05:10:49 +03:00
{
2008-02-08 05:10:57 +03:00
struct bio * clone = io - > ctx . bio_out ;
struct crypt_config * cc = io - > target - > private ;
if ( unlikely ( error < 0 ) ) {
crypt_free_buffer_pages ( cc , clone ) ;
bio_put ( clone ) ;
io - > error = - EIO ;
2008-10-10 16:37:06 +04:00
crypt_dec_pending ( io ) ;
2008-02-08 05:10:57 +03:00
return ;
}
/* crypt_convert should have filled the clone bio */
BUG_ON ( io - > ctx . idx_out < clone - > bi_vcnt ) ;
clone - > bi_sector = cc - > start + io - > sector ;
2008-02-08 05:11:02 +03:00
2008-02-08 05:11:12 +03:00
if ( async )
kcryptd_queue_io ( io ) ;
2008-10-10 16:37:05 +04:00
else
2008-02-08 05:11:12 +03:00
generic_make_request ( clone ) ;
2008-02-08 05:10:49 +03:00
}
2008-10-10 16:37:04 +04:00
static void kcryptd_crypt_write_convert ( struct dm_crypt_io * io )
2006-10-03 12:15:37 +04:00
{
struct crypt_config * cc = io - > target - > private ;
struct bio * clone ;
2008-10-21 20:45:02 +04:00
struct dm_crypt_io * new_io ;
2008-10-10 16:37:08 +04:00
int crypt_finished ;
2008-10-10 16:37:08 +04:00
unsigned out_of_pages = 0 ;
2008-02-08 05:10:57 +03:00
unsigned remaining = io - > base_bio - > bi_size ;
2008-10-21 20:45:00 +04:00
sector_t sector = io - > sector ;
2008-02-08 05:10:57 +03:00
int r ;
2006-10-03 12:15:37 +04:00
2008-10-10 16:37:04 +04:00
/*
 * Prevent io from disappearing until this function completes.
*/
crypt_inc_pending ( io ) ;
2008-10-21 20:45:00 +04:00
crypt_convert_init ( cc , & io - > ctx , NULL , io - > base_bio , sector ) ;
2008-10-10 16:37:04 +04:00
2006-10-03 12:15:38 +04:00
/*
 * The allocated buffers can be smaller than the whole bio,
 * so repeat the whole process until all the data can be handled.
*/
while ( remaining ) {
2008-10-10 16:37:08 +04:00
clone = crypt_alloc_buffer ( io , remaining , & out_of_pages ) ;
2006-10-03 12:15:39 +04:00
if ( unlikely ( ! clone ) ) {
2008-02-08 05:10:43 +03:00
io - > error = - ENOMEM ;
2008-10-10 16:37:04 +04:00
break ;
2006-10-03 12:15:39 +04:00
}
2006-10-03 12:15:38 +04:00
2008-02-08 05:10:38 +03:00
io - > ctx . bio_out = clone ;
io - > ctx . idx_out = 0 ;
2006-10-03 12:15:38 +04:00
2008-02-08 05:10:57 +03:00
remaining - = clone - > bi_size ;
2008-10-21 20:45:00 +04:00
sector + = bio_sectors ( clone ) ;
2006-10-03 12:15:38 +04:00
2008-10-10 16:37:07 +04:00
crypt_inc_pending ( io ) ;
2008-02-08 05:10:57 +03:00
r = crypt_convert ( cc , & io - > ctx ) ;
2008-10-10 16:37:08 +04:00
crypt_finished = atomic_dec_and_test ( & io - > ctx . pending ) ;
2007-05-09 13:32:54 +04:00
2008-10-10 16:37:08 +04:00
/* Encryption was already finished, submit io now */
if ( crypt_finished ) {
2008-02-08 05:11:14 +03:00
kcryptd_crypt_write_io_submit ( io , r , 0 ) ;
2008-10-10 16:37:08 +04:00
/*
 * If there was an error, do not try next fragments.
 * For async, error is processed in async handler.
*/
2008-10-10 16:37:06 +04:00
if ( unlikely ( r < 0 ) )
2008-10-10 16:37:04 +04:00
break ;
2008-10-21 20:45:00 +04:00
io - > sector = sector ;
2008-10-10 16:37:07 +04:00
}
2006-10-03 12:15:38 +04:00
2008-10-10 16:37:08 +04:00
/*
 * Out of memory -> run queues
 * But don't wait if split was due to the io size restriction
*/
if ( unlikely ( out_of_pages ) )
2009-07-09 16:52:32 +04:00
congestion_wait ( BLK_RW_ASYNC , HZ / 100 ) ;
2008-10-10 16:37:08 +04:00
2008-10-21 20:45:02 +04:00
/*
* With async crypto it is unsafe to share the crypto context
 * between fragments, so switch to a new dm_crypt_io structure.
*/
if ( unlikely ( ! crypt_finished & & remaining ) ) {
new_io = crypt_io_alloc ( io - > target , io - > base_bio ,
sector ) ;
crypt_inc_pending ( new_io ) ;
crypt_convert_init ( cc , & new_io - > ctx , NULL ,
io - > base_bio , sector ) ;
new_io - > ctx . idx_in = io - > ctx . idx_in ;
new_io - > ctx . offset_in = io - > ctx . offset_in ;
/*
* Fragments after the first use the base_io
* pending count .
*/
if ( ! io - > base_io )
new_io - > base_io = io ;
else {
new_io - > base_io = io - > base_io ;
crypt_inc_pending ( io - > base_io ) ;
crypt_dec_pending ( io ) ;
}
io = new_io ;
}
2006-10-03 12:15:38 +04:00
}
2008-02-08 05:11:02 +03:00
crypt_dec_pending ( io ) ;
2008-02-08 05:10:59 +03:00
}
2008-02-08 05:10:49 +03:00
static void kcryptd_crypt_read_done ( struct dm_crypt_io * io , int error )
2008-02-08 05:10:43 +03:00
{
if ( unlikely ( error < 0 ) )
io - > error = - EIO ;
crypt_dec_pending ( io ) ;
}
2008-02-08 05:10:49 +03:00
static void kcryptd_crypt_read_convert ( struct dm_crypt_io * io )
2006-10-03 12:15:37 +04:00
{
struct crypt_config * cc = io - > target - > private ;
2008-02-08 05:10:43 +03:00
int r = 0 ;
2005-04-17 02:20:36 +04:00
2008-10-10 16:37:02 +04:00
crypt_inc_pending ( io ) ;
2008-02-08 05:11:14 +03:00
2008-02-08 05:10:38 +03:00
crypt_convert_init ( cc , & io - > ctx , io - > base_bio , io - > base_bio ,
2008-02-08 05:10:54 +03:00
io - > sector ) ;
2005-04-17 02:20:36 +04:00
2008-02-08 05:10:43 +03:00
r = crypt_convert ( cc , & io - > ctx ) ;
2008-03-29 00:16:07 +03:00
if ( atomic_dec_and_test ( & io - > ctx . pending ) )
2008-02-08 05:11:14 +03:00
kcryptd_crypt_read_done ( io , r ) ;
crypt_dec_pending ( io ) ;
2005-04-17 02:20:36 +04:00
}
2008-02-08 05:11:12 +03:00
static void kcryptd_async_done ( struct crypto_async_request * async_req ,
int error )
{
2009-03-16 20:44:33 +03:00
struct dm_crypt_request * dmreq = async_req - > data ;
struct convert_context * ctx = dmreq - > ctx ;
2008-02-08 05:11:12 +03:00
struct dm_crypt_io * io = container_of ( ctx , struct dm_crypt_io , ctx ) ;
struct crypt_config * cc = io - > target - > private ;
if ( error = = - EINPROGRESS ) {
complete ( & ctx - > restart ) ;
return ;
}
2011-01-13 22:59:54 +03:00
if ( ! error & & cc - > iv_gen_ops & & cc - > iv_gen_ops - > post )
error = cc - > iv_gen_ops - > post ( cc , iv_of_dmreq ( cc , dmreq ) , dmreq ) ;
2009-03-16 20:44:33 +03:00
mempool_free ( req_of_dmreq ( cc , dmreq ) , cc - > req_pool ) ;
2008-02-08 05:11:12 +03:00
if ( ! atomic_dec_and_test ( & ctx - > pending ) )
return ;
if ( bio_data_dir ( io - > base_bio ) = = READ )
kcryptd_crypt_read_done ( io , error ) ;
else
kcryptd_crypt_write_io_submit ( io , error , 1 ) ;
}
2008-02-08 05:10:52 +03:00
static void kcryptd_crypt ( struct work_struct * work )
2005-04-17 02:20:36 +04:00
{
2007-07-12 20:26:32 +04:00
struct dm_crypt_io * io = container_of ( work , struct dm_crypt_io , work ) ;
2006-10-03 12:15:37 +04:00
2007-10-20 01:38:58 +04:00
if ( bio_data_dir ( io - > base_bio ) = = READ )
2008-02-08 05:10:52 +03:00
kcryptd_crypt_read_convert ( io ) ;
2008-02-08 05:10:49 +03:00
else
2008-02-08 05:10:52 +03:00
kcryptd_crypt_write_convert ( io ) ;
2007-10-20 01:38:58 +04:00
}
2008-02-08 05:10:52 +03:00
static void kcryptd_queue_crypt ( struct dm_crypt_io * io )
2007-10-20 01:38:58 +04:00
{
2008-02-08 05:10:52 +03:00
struct crypt_config * cc = io - > target - > private ;
2007-10-20 01:38:58 +04:00
2008-02-08 05:10:52 +03:00
INIT_WORK ( & io - > work , kcryptd_crypt ) ;
queue_work ( cc - > crypt_queue , & io - > work ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Decode key from its hex representation
*/
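/* For example, the 8-character hex string "deadbeef" (illustrative value
 * only) decodes to the four key bytes 0xde 0xad 0xbe 0xef. */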
static int crypt_decode_key ( u8 * key , char * hex , unsigned int size )
{
char buffer [ 3 ] ;
char * endp ;
unsigned int i ;
buffer [ 2 ] = ' \0 ' ;
2006-10-03 12:15:37 +04:00
for ( i = 0 ; i < size ; i + + ) {
2005-04-17 02:20:36 +04:00
buffer [ 0 ] = * hex + + ;
buffer [ 1 ] = * hex + + ;
key [ i ] = ( u8 ) simple_strtoul ( buffer , & endp , 16 ) ;
if ( endp ! = & buffer [ 2 ] )
return - EINVAL ;
}
if ( * hex ! = ' \0 ' )
return - EINVAL ;
return 0 ;
}
/*
* Encode key into its hex representation
*/
static void crypt_encode_key ( char * hex , u8 * key , unsigned int size )
{
unsigned int i ;
2006-10-03 12:15:37 +04:00
for ( i = 0 ; i < size ; i + + ) {
2005-04-17 02:20:36 +04:00
sprintf ( hex , " %02x " , * key ) ;
hex + = 2 ;
key + + ;
}
}
2011-01-13 22:59:54 +03:00
static void crypt_free_tfms ( struct crypt_config * cc , int cpu )
{
struct crypt_cpu * cpu_cc = per_cpu_ptr ( cc - > cpu , cpu ) ;
unsigned i ;
for ( i = 0 ; i < cc - > tfms_count ; i + + )
if ( cpu_cc - > tfms [ i ] & & ! IS_ERR ( cpu_cc - > tfms [ i ] ) ) {
crypto_free_ablkcipher ( cpu_cc - > tfms [ i ] ) ;
cpu_cc - > tfms [ i ] = NULL ;
}
}
static int crypt_alloc_tfms ( struct crypt_config * cc , int cpu , char * ciphermode )
{
struct crypt_cpu * cpu_cc = per_cpu_ptr ( cc - > cpu , cpu ) ;
unsigned i ;
int err ;
for ( i = 0 ; i < cc - > tfms_count ; i + + ) {
cpu_cc - > tfms [ i ] = crypto_alloc_ablkcipher ( ciphermode , 0 , 0 ) ;
if ( IS_ERR ( cpu_cc - > tfms [ i ] ) ) {
err = PTR_ERR ( cpu_cc - > tfms [ i ] ) ;
crypt_free_tfms ( cc , cpu ) ;
return err ;
}
}
return 0 ;
}
2011-01-13 22:59:53 +03:00
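/*
 * Key layout sketch (assumed from the code below and crypt_iv_lmk_init()):
 * the key string is split across the cipher instances, tfm i getting the
 * i-th subkey of key_size >> ilog2(tfms_count) bytes; LMK version 3 keeps
 * one extra trailing chunk as the IV seed.
 *
 *   | subkey 0 | subkey 1 | ... | subkey N-1 | [optional LMK IV seed] |
 */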
static int crypt_setkey_allcpus ( struct crypt_config * cc )
{
2011-01-13 22:59:54 +03:00
unsigned subkey_size = cc - > key_size > > ilog2 ( cc - > tfms_count ) ;
int cpu , err = 0 , i , r ;
2011-01-13 22:59:53 +03:00
for_each_possible_cpu ( cpu ) {
2011-01-13 22:59:54 +03:00
for ( i = 0 ; i < cc - > tfms_count ; i + + ) {
r = crypto_ablkcipher_setkey ( per_cpu_ptr ( cc - > cpu , cpu ) - > tfms [ i ] ,
cc - > key + ( i * subkey_size ) , subkey_size ) ;
if ( r )
err = r ;
}
2011-01-13 22:59:53 +03:00
}
return err ;
}
2006-10-03 12:15:37 +04:00
static int crypt_set_key ( struct crypt_config * cc , char * key )
{
2011-03-24 16:54:27 +03:00
int r = - EINVAL ;
int key_string_len = strlen ( key ) ;
2011-01-13 22:59:49 +03:00
/* The key size may not be changed. */
2011-03-24 16:54:27 +03:00
if ( cc - > key_size ! = ( key_string_len > > 1 ) )
goto out ;
2006-10-03 12:15:37 +04:00
2011-01-13 22:59:49 +03:00
/* Hyphen (which gives a key_size of zero) means there is no key. */
if ( ! cc - > key_size & & strcmp ( key , " - " ) )
2011-03-24 16:54:27 +03:00
goto out ;
2006-10-03 12:15:37 +04:00
2011-01-13 22:59:49 +03:00
if ( cc - > key_size & & crypt_decode_key ( cc - > key , key , cc - > key_size ) < 0 )
2011-03-24 16:54:27 +03:00
goto out ;
2006-10-03 12:15:37 +04:00
set_bit ( DM_CRYPT_KEY_VALID , & cc - > flags ) ;
2011-03-24 16:54:27 +03:00
r = crypt_setkey_allcpus ( cc ) ;
out :
/* Hex key string not needed after here, so wipe it. */
memset ( key , ' 0 ' , key_string_len ) ;
return r ;
2006-10-03 12:15:37 +04:00
}
static int crypt_wipe_key ( struct crypt_config * cc )
{
clear_bit ( DM_CRYPT_KEY_VALID , & cc - > flags ) ;
memset ( & cc - > key , 0 , cc - > key_size * sizeof ( u8 ) ) ;
2011-01-13 22:59:53 +03:00
return crypt_setkey_allcpus ( cc ) ;
2006-10-03 12:15:37 +04:00
}
2010-08-12 07:14:06 +04:00
static void crypt_dtr ( struct dm_target * ti )
{
struct crypt_config * cc = ti - > private ;
2011-01-13 22:59:53 +03:00
struct crypt_cpu * cpu_cc ;
int cpu ;
2010-08-12 07:14:06 +04:00
ti - > private = NULL ;
if ( ! cc )
return ;
if ( cc - > io_queue )
destroy_workqueue ( cc - > io_queue ) ;
if ( cc - > crypt_queue )
destroy_workqueue ( cc - > crypt_queue ) ;
2011-01-13 22:59:53 +03:00
if ( cc - > cpu )
for_each_possible_cpu ( cpu ) {
cpu_cc = per_cpu_ptr ( cc - > cpu , cpu ) ;
if ( cpu_cc - > req )
mempool_free ( cpu_cc - > req , cc - > req_pool ) ;
2011-01-13 22:59:54 +03:00
crypt_free_tfms ( cc , cpu ) ;
2011-01-13 22:59:53 +03:00
}
2010-08-12 07:14:06 +04:00
if ( cc - > bs )
bioset_free ( cc - > bs ) ;
if ( cc - > page_pool )
mempool_destroy ( cc - > page_pool ) ;
if ( cc - > req_pool )
mempool_destroy ( cc - > req_pool ) ;
if ( cc - > io_pool )
mempool_destroy ( cc - > io_pool ) ;
if ( cc - > iv_gen_ops & & cc - > iv_gen_ops - > dtr )
cc - > iv_gen_ops - > dtr ( cc ) ;
if ( cc - > dev )
dm_put_device ( ti , cc - > dev ) ;
2011-01-13 22:59:53 +03:00
if ( cc - > cpu )
free_percpu ( cc - > cpu ) ;
2010-08-12 07:14:07 +04:00
kzfree ( cc - > cipher ) ;
2011-01-13 22:59:52 +03:00
kzfree ( cc - > cipher_string ) ;
2010-08-12 07:14:06 +04:00
/* Must zero key material before freeing */
kzfree ( cc ) ;
}
2010-08-12 07:14:07 +04:00
static int crypt_ctr_cipher ( struct dm_target * ti ,
char * cipher_in , char * key )
2005-04-17 02:20:36 +04:00
{
2010-08-12 07:14:07 +04:00
struct crypt_config * cc = ti - > private ;
2011-01-13 22:59:54 +03:00
char * tmp , * cipher , * chainmode , * ivmode , * ivopts , * keycount ;
2010-08-12 07:14:07 +04:00
char * cipher_api = NULL ;
2011-01-13 22:59:53 +03:00
int cpu , ret = - EINVAL ;
2005-04-17 02:20:36 +04:00
2010-08-12 07:14:07 +04:00
/* Convert to crypto api definition? */
if ( strchr ( cipher_in , ' ( ' ) ) {
ti - > error = " Bad cipher specification " ;
2005-04-17 02:20:36 +04:00
return - EINVAL ;
}
2011-01-13 22:59:52 +03:00
cc - > cipher_string = kstrdup ( cipher_in , GFP_KERNEL ) ;
if ( ! cc - > cipher_string )
goto bad_mem ;
2010-08-12 07:14:07 +04:00
/*
 * Legacy dm-crypt cipher specification
2011-01-13 22:59:54 +03:00
 * cipher[:keycount]-mode-iv:ivopts
2010-08-12 07:14:07 +04:00
*/
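/*
 * For example: "aes-cbc-essiv:sha256" for a single-key mapping, or
 * "aes:64-cbc-lmk" for a Loop-AES compatible 64-key mapping (illustrative
 * strings; any cipher/mode known to the crypto API may be used).
 */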
tmp = cipher_in ;
2011-01-13 22:59:54 +03:00
keycount = strsep ( & tmp , " - " ) ;
cipher = strsep ( & keycount , " : " ) ;
if ( ! keycount )
cc - > tfms_count = 1 ;
else if ( sscanf ( keycount , " %u " , & cc - > tfms_count ) ! = 1 | |
! is_power_of_2 ( cc - > tfms_count ) ) {
ti - > error = " Bad cipher key count specification " ;
return - EINVAL ;
}
cc - > key_parts = cc - > tfms_count ;
2010-08-12 07:14:07 +04:00
cc - > cipher = kstrdup ( cipher , GFP_KERNEL ) ;
if ( ! cc - > cipher )
goto bad_mem ;
2005-04-17 02:20:36 +04:00
chainmode = strsep ( & tmp , " - " ) ;
ivopts = strsep ( & tmp , " - " ) ;
ivmode = strsep ( & ivopts , " : " ) ;
if ( tmp )
2010-08-12 07:14:07 +04:00
DMWARN ( " Ignoring unexpected additional cipher options " ) ;
2005-04-17 02:20:36 +04:00
2011-01-13 22:59:54 +03:00
cc - > cpu = __alloc_percpu ( sizeof ( * ( cc - > cpu ) ) +
cc - > tfms_count * sizeof ( * ( cc - > cpu - > tfms ) ) ,
__alignof__ ( struct crypt_cpu ) ) ;
2011-01-13 22:59:53 +03:00
if ( ! cc - > cpu ) {
ti - > error = " Cannot allocate per cpu state " ;
goto bad_mem ;
}
2011-01-13 22:59:52 +03:00
/*
 * For compatibility with the original dm-crypt mapping format, if
 * only the cipher name is supplied, use cbc-plain.
*/
2010-08-12 07:14:07 +04:00
if ( ! chainmode | | ( ! strcmp ( chainmode , " plain " ) & & ! ivmode ) ) {
2005-04-17 02:20:36 +04:00
chainmode = " cbc " ;
ivmode = " plain " ;
}
2006-08-22 14:29:17 +04:00
if ( strcmp ( chainmode , " ecb " ) & & ! ivmode ) {
2010-08-12 07:14:07 +04:00
ti - > error = " IV mechanism required " ;
return - EINVAL ;
2005-04-17 02:20:36 +04:00
}
2010-08-12 07:14:07 +04:00
cipher_api = kmalloc ( CRYPTO_MAX_ALG_NAME , GFP_KERNEL ) ;
if ( ! cipher_api )
goto bad_mem ;
ret = snprintf ( cipher_api , CRYPTO_MAX_ALG_NAME ,
" %s(%s) " , chainmode , cipher ) ;
if ( ret < 0 ) {
kfree ( cipher_api ) ;
goto bad_mem ;
2005-04-17 02:20:36 +04:00
}
2010-08-12 07:14:07 +04:00
/* Allocate cipher */
2011-01-13 22:59:53 +03:00
for_each_possible_cpu ( cpu ) {
2011-01-13 22:59:54 +03:00
ret = crypt_alloc_tfms ( cc , cpu , cipher_api ) ;
if ( ret < 0 ) {
2011-01-13 22:59:53 +03:00
ti - > error = " Error allocating crypto tfm " ;
goto bad ;
}
2005-04-17 02:20:36 +04:00
}
2010-08-12 07:14:07 +04:00
/* Initialize and set key */
ret = crypt_set_key ( cc , key ) ;
2010-08-12 07:14:06 +04:00
if ( ret < 0 ) {
2009-12-11 02:51:55 +03:00
ti - > error = " Error decoding and setting key " ;
2010-08-12 07:14:06 +04:00
goto bad ;
2009-12-11 02:51:55 +03:00
}
2010-08-12 07:14:07 +04:00
/* Initialize IV */
2011-01-13 22:59:53 +03:00
cc - > iv_size = crypto_ablkcipher_ivsize ( any_tfm ( cc ) ) ;
2010-08-12 07:14:07 +04:00
if ( cc - > iv_size )
/* at least a 64 bit sector number should fit in our buffer */
cc - > iv_size = max ( cc - > iv_size ,
( unsigned int ) ( sizeof ( u64 ) / sizeof ( u8 ) ) ) ;
else if ( ivmode ) {
DMWARN ( " Selected cipher does not support IVs " ) ;
ivmode = NULL ;
}
/* Choose ivmode, see comments at iv code. */
2005-04-17 02:20:36 +04:00
if ( ivmode = = NULL )
cc - > iv_gen_ops = NULL ;
else if ( strcmp ( ivmode , " plain " ) = = 0 )
cc - > iv_gen_ops = & crypt_iv_plain_ops ;
2009-12-11 02:52:25 +03:00
else if ( strcmp ( ivmode , " plain64 " ) = = 0 )
cc - > iv_gen_ops = & crypt_iv_plain64_ops ;
2005-04-17 02:20:36 +04:00
else if ( strcmp ( ivmode , " essiv " ) = = 0 )
cc - > iv_gen_ops = & crypt_iv_essiv_ops ;
2006-09-03 02:56:39 +04:00
else if ( strcmp ( ivmode , " benbi " ) = = 0 )
cc - > iv_gen_ops = & crypt_iv_benbi_ops ;
2007-05-09 13:32:55 +04:00
else if ( strcmp ( ivmode , " null " ) = = 0 )
cc - > iv_gen_ops = & crypt_iv_null_ops ;
2011-01-13 22:59:55 +03:00
else if ( strcmp ( ivmode , " lmk " ) = = 0 ) {
cc - > iv_gen_ops = & crypt_iv_lmk_ops ;
/* Versions 2 and 3 are recognised according
 * to the length of the provided multi-key string.
 * If present (version 3), the last key is used as the IV seed.
*/
if ( cc - > key_size % cc - > key_parts )
cc - > key_parts + + ;
} else {
2010-08-12 07:14:07 +04:00
ret = - EINVAL ;
2006-06-26 11:27:35 +04:00
ti - > error = " Invalid IV mode " ;
2010-08-12 07:14:06 +04:00
goto bad ;
2005-04-17 02:20:36 +04:00
}
2010-08-12 07:14:06 +04:00
/* Allocate IV */
if ( cc - > iv_gen_ops & & cc - > iv_gen_ops - > ctr ) {
ret = cc - > iv_gen_ops - > ctr ( cc , ti , ivopts ) ;
if ( ret < 0 ) {
ti - > error = " Error creating IV " ;
goto bad ;
}
}
2005-04-17 02:20:36 +04:00
2010-08-12 07:14:06 +04:00
/* Initialize IV (set keys for ESSIV etc) */
if ( cc - > iv_gen_ops & & cc - > iv_gen_ops - > init ) {
ret = cc - > iv_gen_ops - > init ( cc ) ;
if ( ret < 0 ) {
ti - > error = " Error initialising IV " ;
goto bad ;
}
2009-12-11 02:51:56 +03:00
}
2010-08-12 07:14:07 +04:00
ret = 0 ;
bad :
kfree ( cipher_api ) ;
return ret ;
bad_mem :
ti - > error = " Cannot allocate cipher strings " ;
return - ENOMEM ;
}
/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
*/
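/*
 * An illustrative table line for this target (values assumed, not from a
 * real setup) would be:
 *   0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits of key> 0 /dev/sdb 0
 */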
static int crypt_ctr ( struct dm_target * ti , unsigned int argc , char * * argv )
{
struct crypt_config * cc ;
unsigned int key_size ;
unsigned long long tmpll ;
int ret ;
if ( argc ! = 5 ) {
ti - > error = " Not enough arguments " ;
return - EINVAL ;
2005-04-17 02:20:36 +04:00
}
2010-08-12 07:14:07 +04:00
key_size = strlen ( argv [ 1 ] ) > > 1 ;
cc = kzalloc ( sizeof ( * cc ) + key_size * sizeof ( u8 ) , GFP_KERNEL ) ;
if ( ! cc ) {
ti - > error = " Cannot allocate encryption context " ;
return - ENOMEM ;
}
2011-01-13 22:59:49 +03:00
cc - > key_size = key_size ;
2010-08-12 07:14:07 +04:00
ti - > private = cc ;
ret = crypt_ctr_cipher ( ti , argv [ 0 ] , argv [ 1 ] ) ;
if ( ret < 0 )
goto bad ;
2010-08-12 07:14:06 +04:00
ret = - ENOMEM ;
2006-03-26 13:37:50 +04:00
cc - > io_pool = mempool_create_slab_pool ( MIN_IOS , _crypt_io_pool ) ;
2005-04-17 02:20:36 +04:00
if ( ! cc - > io_pool ) {
2006-06-26 11:27:35 +04:00
ti - > error = " Cannot allocate crypt io mempool " ;
2010-08-12 07:14:06 +04:00
goto bad ;
2005-04-17 02:20:36 +04:00
}
	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;
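
	/*
	 * Two single-threaded workqueues (max_active = 1) are used below:
	 * io_queue offloads bio submission work, while crypt_queue performs
	 * the actual encryption and decryption (hence WQ_CPU_INTENSIVE).
	 * Both are created with WQ_MEM_RECLAIM because they sit in the
	 * block I/O path and must make progress under memory pressure.
	 */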
	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}
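
/*
 * Map incoming bios: FLUSH requests carry no data and are remapped
 * straight to the underlying device; READs are submitted directly when a
 * non-blocking clone allocation succeeds and are deferred to the io
 * workqueue otherwise; WRITEs always go through the crypt workqueue so
 * the data is encrypted before submission.
 */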
static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (bio->bi_rw & REQ_FLUSH) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}
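
/*
 * STATUSTYPE_TABLE below reports the same five fields the constructor
 * accepts (<cipher> <key> <iv_offset> <dev_path> <start>), with '-'
 * standing in for an empty key.
 */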
static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}
/* Message interface
 *	key set <key>
 *	key wipe
 */
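/*
 * Illustrative usage (the device name "cryptdev" is made up): while the
 * device is suspended, the key can be replaced or destroyed from
 * userspace with, e.g.:
 *
 *   dmsetup message cryptdev 0 key set <hex-encoded key>
 *   dmsetup message cryptdev 0 key wipe
 */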
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}
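
/*
 * Pass bvec merge decisions through to the underlying device's queue,
 * translating the sector to its remapped location first, so bio sizing
 * respects any limits the backing device imposes.
 */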
static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}
static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 10, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};
static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");