/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

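/*
 * Walk flags.  The descriptions below summarize how the flags are used in
 * this file:
 *
 *   SKCIPHER_WALK_PHYS  - physical-address walk set up by
 *                         skcipher_walk_async(); destination writebacks are
 *                         queued on walk->buffers and flushed by
 *                         skcipher_walk_complete().
 *   SKCIPHER_WALK_SLOW  - the current chunk went through a bounce buffer
 *                         because it straddled a scatterlist entry.
 *   SKCIPHER_WALK_COPY  - the current chunk was copied through walk->page
 *                         to satisfy the algorithm's alignment mask.
 *   SKCIPHER_WALK_DIFF  - source and destination are mapped separately.
 *   SKCIPHER_WALK_SLEEP - allocations may use GFP_KERNEL and the walk may
 *                         yield the CPU between chunks.
 */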
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

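/*
 * A pending destination writeback for a physical-address walk: the bounce
 * data in buffer (or in the page pointed to by data) is copied out to dst
 * by skcipher_walk_complete().
 */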
struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
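/* For example, with 4 KiB pages, a start of 0x1ff8 and len of 16 ends at
 * 0x2007; end_page is then 0x2000, so max() bumps the spot to the start of
 * the next page and the chunk no longer straddles a page boundary.
 */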
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
}
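
/*
 * Finish one step of a walk: err is either a negative error code or the
 * number of bytes of this step that were left unprocessed.  On success the
 * scatterlist walkers are advanced and, if data remains, the next step is
 * mapped; returns 0 on success or a negative error code.
 */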
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

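	/*
	 * A negative err means the caller aborted this step, so do not
	 * advance the scatterlist walkers at all: scatterwalk_done() may
	 * flush the dcache of the page *before* the current offset, and
	 * calling it after advancing zero bytes can touch an unrelated (or
	 * invalid) page when the buffer is page-aligned.
	 */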
	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			/* unexpected case; didn't process all bytes */
			err = -EINVAL;
			goto finish;
		}
		skcipher_done_slow(walk, n);
		goto already_advanced;
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
already_advanced:
	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}
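
/*
 * If the caller's IV is not aligned to the algorithm's alignmask, work on
 * an aligned copy placed in walk->buffer; the copy is written back to the
 * original IV when the walk finishes (see skcipher_walk_done() and
 * skcipher_walk_complete()).
 */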
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
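
/*
 * A minimal sketch of how a cipher implementation typically drives the
 * virtual-address walk above; the encrypt callback and block handling are
 * hypothetical and only illustrate the walk API:
 *
 *	static int example_encrypt(struct skcipher_request *req)
 *	{
 *		struct skcipher_walk walk;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *		while (walk.nbytes) {
 *			unsigned int n = walk.nbytes -
 *					 (walk.nbytes % walk.blocksize);
 *
 *			// process n bytes from walk.src.virt.addr into
 *			// walk.dst.virt.addr, using walk.iv as needed ...
 *			err = skcipher_walk_done(&walk, walk.nbytes - n);
 *		}
 *		return err;
 *	}
 */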

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
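
/*
 * For decryption the walk only covers the ciphertext proper: the
 * authentication tag at the end of req->cryptlen is excluded from
 * walk->total.
 */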
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
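
/*
 * The remainder of this file implements the crypto_skcipher frontend: the
 * compatibility shims that run skcipher requests on top of the legacy
 * blkcipher and ablkcipher types, plus tfm initialisation, procfs/netlink
 * reporting and algorithm registration.
 */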
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;
	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;
	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	return 0;
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (err)
		return err;

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (skcipher->keysize)
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
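
/*
 * A minimal sketch of allocating and using an skcipher from other kernel
 * code (error handling elided); the algorithm name, key, scatterlists and
 * IV below are placeholders:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */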

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");