2012-03-12 11:03:32 +04:00
# define pr_fmt(fmt) "IPsec: " fmt
2008-01-29 06:35:05 +03:00
# include <crypto/aead.h>
# include <crypto/authenc.h>
2006-07-30 09:41:01 +04:00
# include <linux/err.h>
2005-04-17 02:20:36 +04:00
# include <linux/module.h>
# include <net/ip.h>
# include <net/xfrm.h>
# include <net/esp.h>
2007-10-27 09:53:58 +04:00
# include <linux/scatterlist.h>
2005-10-11 08:11:08 +04:00
# include <linux/kernel.h>
2005-04-17 02:20:36 +04:00
# include <linux/pfkeyv2.h>
2008-01-29 06:35:05 +03:00
# include <linux/rtnetlink.h>
# include <linux/slab.h>
2007-10-10 00:33:35 +04:00
# include <linux/spinlock.h>
2007-12-11 03:53:05 +03:00
# include <linux/in6.h>
2005-04-17 02:20:36 +04:00
# include <net/icmp.h>
2005-12-27 07:43:12 +03:00
# include <net/protocol.h>
2005-04-17 02:20:36 +04:00
# include <net/udp.h>
2017-01-17 12:22:57 +03:00
# include <linux/highmem.h>
2008-01-29 06:35:05 +03:00
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;	/* must stay first: aliased by the xfrm core */
	void *tmp;			/* per-packet scratch (IV/req/SG), freed on completion */
};
2016-06-18 08:03:36 +03:00
/* Extra output state kept in the scratch buffer for ESN processing. */
struct esp_output_extra {
	__be32 seqhi;	/* saved field overwritten by the high ESN bits */
	u32 esphoff;	/* ESP header offset from the transport header */
};
2008-01-29 06:35:05 +03:00
# define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
2010-12-08 07:37:50 +03:00
static u32 esp4_get_mtu ( struct xfrm_state * x , int mtu ) ;
2008-01-29 06:35:05 +03:00
/*
* Allocate an AEAD request structure with extra space for SG and IV .
*
* For alignment considerations the IV is placed at the front , followed
* by the request and finally the SG list .
*
* TODO : Use spare space in skb for this where possible .
*/
2016-06-18 08:03:36 +03:00
static void * esp_alloc_tmp ( struct crypto_aead * aead , int nfrags , int extralen )
2008-01-29 06:35:05 +03:00
{
unsigned int len ;
2016-06-18 08:03:36 +03:00
len = extralen ;
2011-03-08 03:07:14 +03:00
len + = crypto_aead_ivsize ( aead ) ;
2008-01-29 06:35:05 +03:00
if ( len ) {
len + = crypto_aead_alignmask ( aead ) &
~ ( crypto_tfm_ctx_alignment ( ) - 1 ) ;
len = ALIGN ( len , crypto_tfm_ctx_alignment ( ) ) ;
}
2015-05-27 11:03:46 +03:00
len + = sizeof ( struct aead_request ) + crypto_aead_reqsize ( aead ) ;
2008-01-29 06:35:05 +03:00
len = ALIGN ( len , __alignof__ ( struct scatterlist ) ) ;
len + = sizeof ( struct scatterlist ) * nfrags ;
return kmalloc ( len , GFP_ATOMIC ) ;
}
2016-06-18 08:03:36 +03:00
static inline void * esp_tmp_extra ( void * tmp )
2011-03-08 03:07:14 +03:00
{
2016-06-18 08:03:36 +03:00
return PTR_ALIGN ( tmp , __alignof__ ( struct esp_output_extra ) ) ;
2011-03-08 03:07:14 +03:00
}
2016-06-18 08:03:36 +03:00
static inline u8 * esp_tmp_iv ( struct crypto_aead * aead , void * tmp , int extralen )
2008-01-29 06:35:05 +03:00
{
return crypto_aead_ivsize ( aead ) ?
2016-06-18 08:03:36 +03:00
PTR_ALIGN ( ( u8 * ) tmp + extralen ,
crypto_aead_alignmask ( aead ) + 1 ) : tmp + extralen ;
2008-01-29 06:35:05 +03:00
}
/* aead_request lives right after the IV, ctx-aligned; tfm pre-bound. */
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
/* Scatterlist array follows the request plus its crypto context. */
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
2017-01-17 12:22:57 +03:00
static void esp_ssg_unref ( struct xfrm_state * x , void * tmp )
{
struct esp_output_extra * extra = esp_tmp_extra ( tmp ) ;
struct crypto_aead * aead = x - > data ;
int extralen = 0 ;
u8 * iv ;
struct aead_request * req ;
struct scatterlist * sg ;
if ( x - > props . flags & XFRM_STATE_ESN )
extralen + = sizeof ( * extra ) ;
extra = esp_tmp_extra ( tmp ) ;
iv = esp_tmp_iv ( aead , tmp , extralen ) ;
req = esp_tmp_req ( aead , iv ) ;
/* Unref skb_frag_pages in the src scatterlist if necessary.
* Skip the first sg which comes from skb - > data .
*/
if ( req - > src ! = req - > dst )
for ( sg = sg_next ( req - > src ) ; sg ; sg = sg_next ( sg ) )
put_page ( sg_page ( sg ) ) ;
}
2008-01-29 06:35:05 +03:00
static void esp_output_done ( struct crypto_async_request * base , int err )
{
struct sk_buff * skb = base - > data ;
2017-01-17 12:22:57 +03:00
void * tmp ;
struct dst_entry * dst = skb_dst ( skb ) ;
struct xfrm_state * x = dst - > xfrm ;
2008-01-29 06:35:05 +03:00
2017-01-17 12:22:57 +03:00
tmp = ESP_SKB_CB ( skb ) - > tmp ;
esp_ssg_unref ( x , tmp ) ;
kfree ( tmp ) ;
2008-01-29 06:35:05 +03:00
xfrm_output_resume ( skb , err ) ;
}
2015-05-27 11:03:46 +03:00
/* Move ESP header back into place. */
static void esp_restore_header ( struct sk_buff * skb , unsigned int offset )
{
struct ip_esp_hdr * esph = ( void * ) ( skb - > data + offset ) ;
void * tmp = ESP_SKB_CB ( skb ) - > tmp ;
2016-06-18 08:03:36 +03:00
__be32 * seqhi = esp_tmp_extra ( tmp ) ;
2015-05-27 11:03:46 +03:00
esph - > seq_no = esph - > spi ;
esph - > spi = * seqhi ;
}
static void esp_output_restore_header ( struct sk_buff * skb )
{
2016-06-18 08:03:36 +03:00
void * tmp = ESP_SKB_CB ( skb ) - > tmp ;
struct esp_output_extra * extra = esp_tmp_extra ( tmp ) ;
esp_restore_header ( skb , skb_transport_offset ( skb ) + extra - > esphoff -
sizeof ( __be32 ) ) ;
2015-05-27 11:03:46 +03:00
}
2017-01-17 12:22:57 +03:00
static struct ip_esp_hdr * esp_output_set_extra ( struct sk_buff * skb ,
struct ip_esp_hdr * esph ,
struct esp_output_extra * extra )
{
struct xfrm_state * x = skb_dst ( skb ) - > xfrm ;
/* For ESN we move the header forward by 4 bytes to
* accomodate the high bits . We will move it back after
* encryption .
*/
if ( ( x - > props . flags & XFRM_STATE_ESN ) ) {
extra - > esphoff = ( unsigned char * ) esph -
skb_transport_header ( skb ) ;
esph = ( struct ip_esp_hdr * ) ( ( unsigned char * ) esph - 4 ) ;
extra - > seqhi = esph - > spi ;
esph - > seq_no = htonl ( XFRM_SKB_CB ( skb ) - > seq . output . hi ) ;
}
esph - > spi = x - > id . spi ;
return esph ;
}
2015-05-27 11:03:46 +03:00
static void esp_output_done_esn ( struct crypto_async_request * base , int err )
{
struct sk_buff * skb = base - > data ;
esp_output_restore_header ( skb ) ;
esp_output_done ( base , err ) ;
}
2005-04-17 02:20:36 +04:00
static int esp_output ( struct xfrm_state * x , struct sk_buff * skb )
{
2016-06-18 08:03:36 +03:00
struct esp_output_extra * extra ;
2017-01-17 12:22:57 +03:00
int err = - ENOMEM ;
2005-04-17 02:20:36 +04:00
struct ip_esp_hdr * esph ;
2008-01-29 06:35:05 +03:00
struct crypto_aead * aead ;
2015-05-27 11:03:46 +03:00
struct aead_request * req ;
2017-01-17 12:22:57 +03:00
struct scatterlist * sg , * dsg ;
2005-04-17 02:20:36 +04:00
struct sk_buff * trailer ;
2017-01-17 12:22:57 +03:00
struct page * page ;
2008-01-29 06:35:05 +03:00
void * tmp ;
u8 * iv ;
2007-04-20 07:29:13 +04:00
u8 * tail ;
2017-01-17 12:22:57 +03:00
u8 * vaddr ;
2005-04-17 02:20:36 +04:00
int blksize ;
int clen ;
int alen ;
2010-12-08 07:37:50 +03:00
int plen ;
2015-05-27 11:03:46 +03:00
int ivlen ;
2010-12-08 07:37:50 +03:00
int tfclen ;
2005-04-17 02:20:36 +04:00
int nfrags ;
2011-03-08 03:07:14 +03:00
int assoclen ;
2016-06-18 08:03:36 +03:00
int extralen ;
2017-01-17 12:22:57 +03:00
int tailen ;
2015-05-27 11:03:46 +03:00
__be64 seqno ;
2017-01-17 12:22:57 +03:00
__u8 proto = * skb_mac_header ( skb ) ;
2005-04-17 02:20:36 +04:00
2007-10-11 02:44:06 +04:00
/* skb is pure payload to encrypt */
2005-04-17 02:20:36 +04:00
2013-10-18 14:09:05 +04:00
aead = x - > data ;
2008-01-29 06:35:05 +03:00
alen = crypto_aead_authsize ( aead ) ;
2015-05-27 11:03:46 +03:00
ivlen = crypto_aead_ivsize ( aead ) ;
2008-01-29 06:35:05 +03:00
2010-12-08 07:37:50 +03:00
tfclen = 0 ;
if ( x - > tfcpad ) {
struct xfrm_dst * dst = ( struct xfrm_dst * ) skb_dst ( skb ) ;
u32 padto ;
padto = min ( x - > tfcpad , esp4_get_mtu ( x , dst - > child_mtu_cached ) ) ;
if ( skb - > len < padto )
tfclen = padto - skb - > len ;
}
2008-01-29 06:35:05 +03:00
blksize = ALIGN ( crypto_aead_blocksize ( aead ) , 4 ) ;
2010-12-08 07:37:50 +03:00
clen = ALIGN ( skb - > len + 2 + tfclen , blksize ) ;
plen = clen - skb - > len - tfclen ;
2017-01-17 12:22:57 +03:00
tailen = tfclen + plen + alen ;
2011-03-08 03:07:14 +03:00
assoclen = sizeof ( * esph ) ;
2016-06-18 08:03:36 +03:00
extralen = 0 ;
2011-03-08 03:07:14 +03:00
if ( x - > props . flags & XFRM_STATE_ESN ) {
2016-06-18 08:03:36 +03:00
extralen + = sizeof ( * extra ) ;
assoclen + = sizeof ( __be32 ) ;
2011-03-08 03:07:14 +03:00
}
2007-10-11 02:44:44 +04:00
* skb_mac_header ( skb ) = IPPROTO_ESP ;
2017-01-17 12:22:57 +03:00
esph = ip_esp_hdr ( skb ) ;
2005-04-17 02:20:36 +04:00
/* this is non-NULL only with UDP Encapsulation */
if ( x - > encap ) {
struct xfrm_encap_tmpl * encap = x - > encap ;
struct udphdr * uh ;
2006-11-08 11:23:14 +03:00
__be32 * udpdata32 ;
2008-03-18 08:50:23 +03:00
__be16 sport , dport ;
2008-01-29 06:35:05 +03:00
int encap_type ;
spin_lock_bh ( & x - > lock ) ;
sport = encap - > encap_sport ;
dport = encap - > encap_dport ;
encap_type = encap - > encap_type ;
spin_unlock_bh ( & x - > lock ) ;
2005-04-17 02:20:36 +04:00
uh = ( struct udphdr * ) esph ;
2008-01-29 06:35:05 +03:00
uh - > source = sport ;
uh - > dest = dport ;
2017-01-17 12:22:57 +03:00
uh - > len = htons ( skb - > len + tailen
- skb_transport_offset ( skb ) ) ;
2005-04-17 02:20:36 +04:00
uh - > check = 0 ;
2008-01-29 06:35:05 +03:00
switch ( encap_type ) {
2005-04-17 02:20:36 +04:00
default :
case UDP_ENCAP_ESPINUDP :
esph = ( struct ip_esp_hdr * ) ( uh + 1 ) ;
break ;
case UDP_ENCAP_ESPINUDP_NON_IKE :
2006-11-08 11:23:14 +03:00
udpdata32 = ( __be32 * ) ( uh + 1 ) ;
2005-04-17 02:20:36 +04:00
udpdata32 [ 0 ] = udpdata32 [ 1 ] = 0 ;
esph = ( struct ip_esp_hdr * ) ( udpdata32 + 2 ) ;
break ;
}
2007-10-11 02:44:44 +04:00
* skb_mac_header ( skb ) = IPPROTO_UDP ;
}
2005-04-17 02:20:36 +04:00
2017-01-17 12:22:57 +03:00
if ( ! skb_cloned ( skb ) ) {
if ( tailen < = skb_availroom ( skb ) ) {
nfrags = 1 ;
trailer = skb ;
tail = skb_tail_pointer ( trailer ) ;
2005-04-17 02:20:36 +04:00
2017-01-17 12:22:57 +03:00
goto skip_cow ;
} else if ( ( skb_shinfo ( skb ) - > nr_frags < MAX_SKB_FRAGS )
& & ! skb_has_frag_list ( skb ) ) {
int allocsize ;
struct sock * sk = skb - > sk ;
struct page_frag * pfrag = & x - > xfrag ;
2015-05-27 11:03:46 +03:00
2017-01-17 12:22:57 +03:00
allocsize = ALIGN ( tailen , L1_CACHE_BYTES ) ;
spin_lock_bh ( & x - > lock ) ;
if ( unlikely ( ! skb_page_frag_refill ( allocsize , pfrag , GFP_ATOMIC ) ) ) {
spin_unlock_bh ( & x - > lock ) ;
goto cow ;
}
page = pfrag - > page ;
get_page ( page ) ;
vaddr = kmap_atomic ( page ) ;
tail = vaddr + pfrag - > offset ;
/* Fill padding... */
if ( tfclen ) {
memset ( tail , 0 , tfclen ) ;
tail + = tfclen ;
}
do {
int i ;
for ( i = 0 ; i < plen - 2 ; i + + )
tail [ i ] = i + 1 ;
} while ( 0 ) ;
tail [ plen - 2 ] = plen - 2 ;
tail [ plen - 1 ] = proto ;
kunmap_atomic ( vaddr ) ;
nfrags = skb_shinfo ( skb ) - > nr_frags ;
__skb_fill_page_desc ( skb , nfrags , page , pfrag - > offset ,
tailen ) ;
skb_shinfo ( skb ) - > nr_frags = + + nfrags ;
pfrag - > offset = pfrag - > offset + allocsize ;
nfrags + + ;
skb - > len + = tailen ;
skb - > data_len + = tailen ;
skb - > truesize + = tailen ;
if ( sk )
atomic_add ( tailen , & sk - > sk_wmem_alloc ) ;
skb_push ( skb , - skb_network_offset ( skb ) ) ;
esph - > seq_no = htonl ( XFRM_SKB_CB ( skb ) - > seq . output . low ) ;
esph - > spi = x - > id . spi ;
tmp = esp_alloc_tmp ( aead , nfrags + 2 , extralen ) ;
if ( ! tmp ) {
spin_unlock_bh ( & x - > lock ) ;
err = - ENOMEM ;
goto error ;
}
extra = esp_tmp_extra ( tmp ) ;
iv = esp_tmp_iv ( aead , tmp , extralen ) ;
req = esp_tmp_req ( aead , iv ) ;
sg = esp_req_sg ( aead , req ) ;
dsg = & sg [ nfrags ] ;
esph = esp_output_set_extra ( skb , esph , extra ) ;
sg_init_table ( sg , nfrags ) ;
skb_to_sgvec ( skb , sg ,
( unsigned char * ) esph - skb - > data ,
assoclen + ivlen + clen + alen ) ;
allocsize = ALIGN ( skb - > data_len , L1_CACHE_BYTES ) ;
if ( unlikely ( ! skb_page_frag_refill ( allocsize , pfrag , GFP_ATOMIC ) ) ) {
spin_unlock_bh ( & x - > lock ) ;
err = - ENOMEM ;
goto error ;
}
skb_shinfo ( skb ) - > nr_frags = 1 ;
page = pfrag - > page ;
get_page ( page ) ;
/* replace page frags in skb with new page */
__skb_fill_page_desc ( skb , 0 , page , pfrag - > offset , skb - > data_len ) ;
pfrag - > offset = pfrag - > offset + allocsize ;
sg_init_table ( dsg , skb_shinfo ( skb ) - > nr_frags + 1 ) ;
skb_to_sgvec ( skb , dsg ,
( unsigned char * ) esph - skb - > data ,
assoclen + ivlen + clen + alen ) ;
spin_unlock_bh ( & x - > lock ) ;
goto skip_cow2 ;
}
2015-05-27 11:03:46 +03:00
}
2017-01-17 12:22:57 +03:00
cow :
err = skb_cow_data ( skb , tailen , & trailer ) ;
if ( err < 0 )
goto error ;
nfrags = err ;
tail = skb_tail_pointer ( trailer ) ;
esph = ip_esp_hdr ( skb ) ;
skip_cow :
/* Fill padding... */
if ( tfclen ) {
memset ( tail , 0 , tfclen ) ;
tail + = tfclen ;
}
do {
int i ;
for ( i = 0 ; i < plen - 2 ; i + + )
tail [ i ] = i + 1 ;
} while ( 0 ) ;
tail [ plen - 2 ] = plen - 2 ;
tail [ plen - 1 ] = proto ;
pskb_put ( skb , trailer , clen - skb - > len + alen ) ;
skb_push ( skb , - skb_network_offset ( skb ) ) ;
esph - > seq_no = htonl ( XFRM_SKB_CB ( skb ) - > seq . output . low ) ;
2015-05-27 11:03:46 +03:00
esph - > spi = x - > id . spi ;
2017-01-17 12:22:57 +03:00
tmp = esp_alloc_tmp ( aead , nfrags , extralen ) ;
if ( ! tmp ) {
err = - ENOMEM ;
goto error ;
}
extra = esp_tmp_extra ( tmp ) ;
iv = esp_tmp_iv ( aead , tmp , extralen ) ;
req = esp_tmp_req ( aead , iv ) ;
sg = esp_req_sg ( aead , req ) ;
dsg = sg ;
esph = esp_output_set_extra ( skb , esph , extra ) ;
2008-01-29 06:35:05 +03:00
sg_init_table ( sg , nfrags ) ;
skb_to_sgvec ( skb , sg ,
2015-05-27 11:03:46 +03:00
( unsigned char * ) esph - skb - > data ,
assoclen + ivlen + clen + alen ) ;
2011-03-08 03:07:14 +03:00
2017-01-17 12:22:57 +03:00
skip_cow2 :
if ( ( x - > props . flags & XFRM_STATE_ESN ) )
aead_request_set_callback ( req , 0 , esp_output_done_esn , skb ) ;
else
aead_request_set_callback ( req , 0 , esp_output_done , skb ) ;
aead_request_set_crypt ( req , sg , dsg , ivlen + clen , iv ) ;
2015-05-27 11:03:46 +03:00
aead_request_set_ad ( req , assoclen ) ;
seqno = cpu_to_be64 ( XFRM_SKB_CB ( skb ) - > seq . output . low +
( ( u64 ) XFRM_SKB_CB ( skb ) - > seq . output . hi < < 32 ) ) ;
memset ( iv , 0 , ivlen ) ;
memcpy ( iv + ivlen - min ( ivlen , 8 ) , ( u8 * ) & seqno + 8 - min ( ivlen , 8 ) ,
min ( ivlen , 8 ) ) ;
2008-01-29 06:35:05 +03:00
ESP_SKB_CB ( skb ) - > tmp = tmp ;
2015-05-27 11:03:46 +03:00
err = crypto_aead_encrypt ( req ) ;
switch ( err ) {
case - EINPROGRESS :
2008-01-29 06:35:05 +03:00
goto error ;
2005-04-17 02:20:36 +04:00
2015-05-27 11:03:46 +03:00
case - EBUSY :
2008-01-29 06:35:05 +03:00
err = NET_XMIT_DROP ;
2015-05-27 11:03:46 +03:00
break ;
case 0 :
if ( ( x - > props . flags & XFRM_STATE_ESN ) )
esp_output_restore_header ( skb ) ;
}
2005-04-17 02:20:36 +04:00
2017-01-17 12:22:57 +03:00
if ( sg ! = dsg )
esp_ssg_unref ( x , tmp ) ;
2008-01-29 06:35:05 +03:00
kfree ( tmp ) ;
2007-10-10 00:33:35 +04:00
2005-04-17 02:20:36 +04:00
error :
return err ;
}
2008-01-29 06:35:05 +03:00
static int esp_input_done2 ( struct sk_buff * skb , int err )
2005-04-17 02:20:36 +04:00
{
2011-04-22 08:53:02 +04:00
const struct iphdr * iph ;
2008-01-29 06:35:05 +03:00
struct xfrm_state * x = xfrm_input_state ( skb ) ;
2013-10-18 14:09:05 +04:00
struct crypto_aead * aead = x - > data ;
2008-01-29 06:35:05 +03:00
int alen = crypto_aead_authsize ( aead ) ;
int hlen = sizeof ( struct ip_esp_hdr ) + crypto_aead_ivsize ( aead ) ;
int elen = skb - > len - hlen ;
2006-05-28 10:06:13 +04:00
int ihl ;
2006-02-28 00:00:01 +03:00
u8 nexthdr [ 2 ] ;
int padlen ;
2005-04-17 02:20:36 +04:00
2008-01-29 06:35:05 +03:00
kfree ( ESP_SKB_CB ( skb ) - > tmp ) ;
2007-11-14 08:45:58 +03:00
2006-07-30 09:41:01 +04:00
if ( unlikely ( err ) )
2007-12-17 02:55:02 +03:00
goto out ;
2005-04-17 02:20:36 +04:00
2006-02-28 00:00:01 +03:00
if ( skb_copy_bits ( skb , skb - > len - alen - 2 , nexthdr , 2 ) )
BUG ( ) ;
2005-04-17 02:20:36 +04:00
2007-12-17 02:55:02 +03:00
err = - EINVAL ;
2006-02-28 00:00:01 +03:00
padlen = nexthdr [ 0 ] ;
2008-01-29 06:35:05 +03:00
if ( padlen + 2 + alen > = elen )
2006-02-28 00:00:01 +03:00
goto out ;
2005-04-17 02:20:36 +04:00
2007-02-09 17:24:47 +03:00
/* ... check padding bits here. Silly. :-) */
2005-04-17 02:20:36 +04:00
2007-04-21 09:47:35 +04:00
iph = ip_hdr ( skb ) ;
2006-05-28 10:06:13 +04:00
ihl = iph - > ihl * 4 ;
2006-02-28 00:00:40 +03:00
if ( x - > encap ) {
struct xfrm_encap_tmpl * encap = x - > encap ;
2007-04-11 07:50:43 +04:00
struct udphdr * uh = ( void * ) ( skb_network_header ( skb ) + ihl ) ;
2006-02-28 00:00:40 +03:00
/*
* 1 ) if the NAT - T peer ' s IP or port changed then
* advertize the change to the keying daemon .
* This is an inbound SA , so just compare
* SRC ports .
*/
if ( iph - > saddr ! = x - > props . saddr . a4 | |
uh - > source ! = encap - > encap_sport ) {
xfrm_address_t ipaddr ;
ipaddr . a4 = iph - > saddr ;
km_new_mapping ( x , & ipaddr , uh - > source ) ;
2007-02-09 17:24:47 +03:00
2006-02-28 00:00:40 +03:00
/* XXX: perhaps add an extra
* policy check here , to see
* if we should allow or
* reject a packet from a
* different source
* address / port .
*/
2005-04-17 02:20:36 +04:00
}
2007-02-09 17:24:47 +03:00
2006-02-28 00:00:40 +03:00
/*
* 2 ) ignore UDP / TCP checksums in case
* of NAT - T in Transport Mode , or
* perform other post - processing fixes
* as per draft - ietf - ipsec - udp - encaps - 06 ,
* section 3.1 .2
*/
2007-10-11 02:41:41 +04:00
if ( x - > props . mode = = XFRM_MODE_TRANSPORT )
2006-02-28 00:00:40 +03:00
skb - > ip_summed = CHECKSUM_UNNECESSARY ;
2005-04-17 02:20:36 +04:00
}
2006-02-28 00:00:01 +03:00
pskb_trim ( skb , skb - > len - alen - padlen - 2 ) ;
2008-01-29 06:35:05 +03:00
__skb_pull ( skb , hlen ) ;
2012-12-28 12:07:16 +04:00
if ( x - > props . mode = = XFRM_MODE_TUNNEL )
skb_reset_transport_header ( skb ) ;
else
skb_set_transport_header ( skb , - ihl ) ;
2006-02-28 00:00:01 +03:00
2008-01-29 06:35:05 +03:00
err = nexthdr [ 1 ] ;
/* RFC4303: Drop dummy packets without any error */
if ( err = = IPPROTO_NONE )
err = - EINVAL ;
out :
return err ;
}
static void esp_input_done ( struct crypto_async_request * base , int err )
{
struct sk_buff * skb = base - > data ;
xfrm_input_resume ( skb , esp_input_done2 ( skb , err ) ) ;
}
2015-05-27 11:03:46 +03:00
/* Input-side restore: header sits at offset 0; drop the 4-byte shift. */
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
2017-01-17 12:22:57 +03:00
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;	/* save the word we clobber */
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
2015-05-27 11:03:46 +03:00
static void esp_input_done_esn ( struct crypto_async_request * base , int err )
{
struct sk_buff * skb = base - > data ;
esp_input_restore_header ( skb ) ;
esp_input_done ( base , err ) ;
}
2008-01-29 06:35:05 +03:00
/*
* Note : detecting truncated vs . non - truncated authentication data is very
* expensive , so we only support truncated data , which is the recommended
* and common case .
*/
static int esp_input ( struct xfrm_state * x , struct sk_buff * skb )
{
struct ip_esp_hdr * esph ;
2013-10-18 14:09:05 +04:00
struct crypto_aead * aead = x - > data ;
2008-01-29 06:35:05 +03:00
struct aead_request * req ;
struct sk_buff * trailer ;
2015-05-27 11:03:46 +03:00
int ivlen = crypto_aead_ivsize ( aead ) ;
int elen = skb - > len - sizeof ( * esph ) - ivlen ;
2008-01-29 06:35:05 +03:00
int nfrags ;
2011-03-08 03:07:14 +03:00
int assoclen ;
int seqhilen ;
__be32 * seqhi ;
2008-01-29 06:35:05 +03:00
void * tmp ;
u8 * iv ;
struct scatterlist * sg ;
int err = - EINVAL ;
2015-05-27 11:03:46 +03:00
if ( ! pskb_may_pull ( skb , sizeof ( * esph ) + ivlen ) )
2008-01-29 06:35:05 +03:00
goto out ;
if ( elen < = 0 )
goto out ;
2011-03-08 03:07:14 +03:00
assoclen = sizeof ( * esph ) ;
seqhilen = 0 ;
if ( x - > props . flags & XFRM_STATE_ESN ) {
seqhilen + = sizeof ( __be32 ) ;
assoclen + = seqhilen ;
}
2017-01-17 12:22:57 +03:00
if ( ! skb_cloned ( skb ) ) {
if ( ! skb_is_nonlinear ( skb ) ) {
nfrags = 1 ;
goto skip_cow ;
} else if ( ! skb_has_frag_list ( skb ) ) {
nfrags = skb_shinfo ( skb ) - > nr_frags ;
nfrags + + ;
goto skip_cow ;
}
}
err = skb_cow_data ( skb , 0 , & trailer ) ;
if ( err < 0 )
goto out ;
nfrags = err ;
skip_cow :
2008-01-29 06:35:05 +03:00
err = - ENOMEM ;
2015-05-27 11:03:46 +03:00
tmp = esp_alloc_tmp ( aead , nfrags , seqhilen ) ;
2008-01-29 06:35:05 +03:00
if ( ! tmp )
goto out ;
ESP_SKB_CB ( skb ) - > tmp = tmp ;
2016-06-18 08:03:36 +03:00
seqhi = esp_tmp_extra ( tmp ) ;
2011-03-08 03:07:14 +03:00
iv = esp_tmp_iv ( aead , tmp , seqhilen ) ;
2008-01-29 06:35:05 +03:00
req = esp_tmp_req ( aead , iv ) ;
2015-05-27 11:03:46 +03:00
sg = esp_req_sg ( aead , req ) ;
2008-01-29 06:35:05 +03:00
2017-01-17 12:22:57 +03:00
esp_input_set_header ( skb , seqhi ) ;
2008-01-29 06:35:05 +03:00
2017-01-17 12:22:57 +03:00
sg_init_table ( sg , nfrags ) ;
skb_to_sgvec ( skb , sg , 0 , skb - > len ) ;
2008-01-29 06:35:05 +03:00
2017-01-17 12:22:57 +03:00
skb - > ip_summed = CHECKSUM_NONE ;
2011-03-08 03:07:14 +03:00
2017-01-17 12:22:57 +03:00
if ( ( x - > props . flags & XFRM_STATE_ESN ) )
2015-05-27 11:03:46 +03:00
aead_request_set_callback ( req , 0 , esp_input_done_esn , skb ) ;
2017-01-17 12:22:57 +03:00
else
aead_request_set_callback ( req , 0 , esp_input_done , skb ) ;
2015-05-27 11:03:46 +03:00
aead_request_set_crypt ( req , sg , sg , elen + ivlen , iv ) ;
aead_request_set_ad ( req , assoclen ) ;
2008-01-29 06:35:05 +03:00
err = crypto_aead_decrypt ( req ) ;
if ( err = = - EINPROGRESS )
goto out ;
2015-05-27 11:03:46 +03:00
if ( ( x - > props . flags & XFRM_STATE_ESN ) )
esp_input_restore_header ( skb ) ;
2008-01-29 06:35:05 +03:00
err = esp_input_done2 ( skb , err ) ;
2005-04-17 02:20:36 +04:00
out :
2007-12-17 02:55:02 +03:00
return err ;
2005-04-17 02:20:36 +04:00
}
2007-04-09 22:47:18 +04:00
static u32 esp4_get_mtu ( struct xfrm_state * x , int mtu )
2005-04-17 02:20:36 +04:00
{
2013-10-18 14:09:05 +04:00
struct crypto_aead * aead = x - > data ;
u32 blksize = ALIGN ( crypto_aead_blocksize ( aead ) , 4 ) ;
2012-05-24 15:32:38 +04:00
unsigned int net_adj ;
2006-10-04 10:47:05 +04:00
switch ( x - > props . mode ) {
case XFRM_MODE_TRANSPORT :
case XFRM_MODE_BEET :
2012-05-24 15:32:38 +04:00
net_adj = sizeof ( struct iphdr ) ;
2006-10-04 10:47:05 +04:00
break ;
2012-05-24 15:32:38 +04:00
case XFRM_MODE_TUNNEL :
net_adj = 0 ;
break ;
default :
BUG ( ) ;
2005-04-17 02:20:36 +04:00
}
2006-10-04 10:47:05 +04:00
2013-10-18 14:09:05 +04:00
return ( ( mtu - x - > props . header_len - crypto_aead_authsize ( aead ) -
2013-10-18 14:09:04 +04:00
net_adj ) & ~ ( blksize - 1 ) ) + net_adj - 2 ;
2005-04-17 02:20:36 +04:00
}
2014-02-21 11:41:08 +04:00
/* ICMP error handler: react to PMTU and redirect notifications only. */
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}
static void esp_destroy ( struct xfrm_state * x )
{
2013-10-18 14:09:05 +04:00
struct crypto_aead * aead = x - > data ;
2005-04-17 02:20:36 +04:00
2013-10-18 14:09:05 +04:00
if ( ! aead )
2005-04-17 02:20:36 +04:00
return ;
2013-10-18 14:09:05 +04:00
crypto_free_aead ( aead ) ;
2005-04-17 02:20:36 +04:00
}
2008-01-29 06:37:29 +03:00
static int esp_init_aead ( struct xfrm_state * x )
2005-04-17 02:20:36 +04:00
{
2015-05-27 11:03:46 +03:00
char aead_name [ CRYPTO_MAX_ALG_NAME ] ;
2008-01-29 06:37:29 +03:00
struct crypto_aead * aead ;
int err ;
2015-05-27 11:03:46 +03:00
err = - ENAMETOOLONG ;
if ( snprintf ( aead_name , CRYPTO_MAX_ALG_NAME , " %s(%s) " ,
x - > geniv , x - > aead - > alg_name ) > = CRYPTO_MAX_ALG_NAME )
goto error ;
aead = crypto_alloc_aead ( aead_name , 0 , 0 ) ;
2008-01-29 06:37:29 +03:00
err = PTR_ERR ( aead ) ;
if ( IS_ERR ( aead ) )
goto error ;
2013-10-18 14:09:05 +04:00
x - > data = aead ;
2008-01-29 06:37:29 +03:00
err = crypto_aead_setkey ( aead , x - > aead - > alg_key ,
( x - > aead - > alg_key_len + 7 ) / 8 ) ;
if ( err )
goto error ;
err = crypto_aead_setauthsize ( aead , x - > aead - > alg_icv_len / 8 ) ;
if ( err )
goto error ;
error :
return err ;
}
static int esp_init_authenc ( struct xfrm_state * x )
{
2008-01-29 06:35:05 +03:00
struct crypto_aead * aead ;
struct crypto_authenc_key_param * param ;
struct rtattr * rta ;
char * key ;
char * p ;
char authenc_name [ CRYPTO_MAX_ALG_NAME ] ;
unsigned int keylen ;
int err ;
2005-04-17 02:20:36 +04:00
2008-01-29 06:37:29 +03:00
err = - EINVAL ;
2015-04-03 11:17:26 +03:00
if ( ! x - > ealg )
2008-01-29 06:37:29 +03:00
goto error ;
2008-01-29 06:35:05 +03:00
2008-01-29 06:37:29 +03:00
err = - ENAMETOOLONG ;
2011-03-08 03:07:14 +03:00
if ( ( x - > props . flags & XFRM_STATE_ESN ) ) {
if ( snprintf ( authenc_name , CRYPTO_MAX_ALG_NAME ,
2015-05-27 11:03:46 +03:00
" %s%sauthencesn(%s,%s)%s " ,
x - > geniv ? : " " , x - > geniv ? " ( " : " " ,
2011-03-08 03:07:14 +03:00
x - > aalg ? x - > aalg - > alg_name : " digest_null " ,
2015-05-27 11:03:46 +03:00
x - > ealg - > alg_name ,
x - > geniv ? " ) " : " " ) > = CRYPTO_MAX_ALG_NAME )
2011-03-08 03:07:14 +03:00
goto error ;
} else {
if ( snprintf ( authenc_name , CRYPTO_MAX_ALG_NAME ,
2015-05-27 11:03:46 +03:00
" %s%sauthenc(%s,%s)%s " ,
x - > geniv ? : " " , x - > geniv ? " ( " : " " ,
2011-03-08 03:07:14 +03:00
x - > aalg ? x - > aalg - > alg_name : " digest_null " ,
2015-05-27 11:03:46 +03:00
x - > ealg - > alg_name ,
x - > geniv ? " ) " : " " ) > = CRYPTO_MAX_ALG_NAME )
2011-03-08 03:07:14 +03:00
goto error ;
}
2008-01-29 06:35:05 +03:00
aead = crypto_alloc_aead ( authenc_name , 0 , 0 ) ;
err = PTR_ERR ( aead ) ;
if ( IS_ERR ( aead ) )
goto error ;
2013-10-18 14:09:05 +04:00
x - > data = aead ;
2008-01-29 06:35:05 +03:00
keylen = ( x - > aalg ? ( x - > aalg - > alg_key_len + 7 ) / 8 : 0 ) +
( x - > ealg - > alg_key_len + 7 ) / 8 + RTA_SPACE ( sizeof ( * param ) ) ;
err = - ENOMEM ;
key = kmalloc ( keylen , GFP_KERNEL ) ;
if ( ! key )
goto error ;
p = key ;
rta = ( void * ) p ;
rta - > rta_type = CRYPTO_AUTHENC_KEYA_PARAM ;
rta - > rta_len = RTA_LENGTH ( sizeof ( * param ) ) ;
param = RTA_DATA ( rta ) ;
p + = RTA_SPACE ( sizeof ( * param ) ) ;
2005-04-17 02:20:36 +04:00
if ( x - > aalg ) {
struct xfrm_algo_desc * aalg_desc ;
2008-01-29 06:35:05 +03:00
memcpy ( p , x - > aalg - > alg_key , ( x - > aalg - > alg_key_len + 7 ) / 8 ) ;
p + = ( x - > aalg - > alg_key_len + 7 ) / 8 ;
2005-04-17 02:20:36 +04:00
aalg_desc = xfrm_aalg_get_byname ( x - > aalg - > alg_name , 0 ) ;
BUG_ON ( ! aalg_desc ) ;
2008-01-29 06:35:05 +03:00
err = - EINVAL ;
2014-11-06 02:36:08 +03:00
if ( aalg_desc - > uinfo . auth . icv_fullbits / 8 ! =
2008-01-29 06:35:05 +03:00
crypto_aead_authsize ( aead ) ) {
2014-11-06 02:36:08 +03:00
pr_info ( " ESP: %s digestsize %u != %hu \n " ,
x - > aalg - > alg_name ,
crypto_aead_authsize ( aead ) ,
aalg_desc - > uinfo . auth . icv_fullbits / 8 ) ;
2008-01-29 06:35:05 +03:00
goto free_key ;
2005-04-17 02:20:36 +04:00
}
2008-01-29 06:35:05 +03:00
err = crypto_aead_setauthsize (
2009-11-25 03:29:53 +03:00
aead , x - > aalg - > alg_trunc_len / 8 ) ;
2008-01-29 06:35:05 +03:00
if ( err )
goto free_key ;
2005-04-17 02:20:36 +04:00
}
2007-10-09 04:13:44 +04:00
2008-01-29 06:35:05 +03:00
param - > enckeylen = cpu_to_be32 ( ( x - > ealg - > alg_key_len + 7 ) / 8 ) ;
memcpy ( p , x - > ealg - > alg_key , ( x - > ealg - > alg_key_len + 7 ) / 8 ) ;
err = crypto_aead_setkey ( aead , key , keylen ) ;
free_key :
kfree ( key ) ;
2008-01-29 06:37:29 +03:00
error :
return err ;
}
static int esp_init_state ( struct xfrm_state * x )
{
struct crypto_aead * aead ;
u32 align ;
int err ;
2013-10-18 14:09:05 +04:00
x - > data = NULL ;
2008-01-29 06:37:29 +03:00
if ( x - > aead )
err = esp_init_aead ( x ) ;
else
err = esp_init_authenc ( x ) ;
2008-01-29 06:35:05 +03:00
if ( err )
2005-04-17 02:20:36 +04:00
goto error ;
2008-01-29 06:35:05 +03:00
2013-10-18 14:09:05 +04:00
aead = x - > data ;
2008-01-29 06:37:29 +03:00
2008-01-29 06:35:05 +03:00
x - > props . header_len = sizeof ( struct ip_esp_hdr ) +
crypto_aead_ivsize ( aead ) ;
2006-09-23 02:05:15 +04:00
if ( x - > props . mode = = XFRM_MODE_TUNNEL )
2005-04-17 02:20:36 +04:00
x - > props . header_len + = sizeof ( struct iphdr ) ;
2008-08-06 13:39:30 +04:00
else if ( x - > props . mode = = XFRM_MODE_BEET & & x - > sel . family ! = AF_INET6 )
2007-04-09 22:47:58 +04:00
x - > props . header_len + = IPV4_BEET_PHMAXLEN ;
2005-04-17 02:20:36 +04:00
if ( x - > encap ) {
struct xfrm_encap_tmpl * encap = x - > encap ;
switch ( encap - > encap_type ) {
default :
goto error ;
case UDP_ENCAP_ESPINUDP :
x - > props . header_len + = sizeof ( struct udphdr ) ;
break ;
case UDP_ENCAP_ESPINUDP_NON_IKE :
x - > props . header_len + = sizeof ( struct udphdr ) + 2 * sizeof ( u32 ) ;
break ;
}
}
2008-01-29 06:35:05 +03:00
align = ALIGN ( crypto_aead_blocksize ( aead ) , 4 ) ;
2013-10-18 14:09:05 +04:00
x - > props . trailer_len = align + 1 + crypto_aead_authsize ( aead ) ;
2005-04-17 02:20:36 +04:00
error :
2008-01-29 06:35:05 +03:00
return err ;
2005-04-17 02:20:36 +04:00
}
2014-02-21 11:41:08 +04:00
/* Per-protocol receive callback; ESP needs no extra processing here. */
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
2008-01-31 06:11:50 +03:00
static const struct xfrm_type esp_type =
2005-04-17 02:20:36 +04:00
{
. description = " ESP4 " ,
. owner = THIS_MODULE ,
. proto = IPPROTO_ESP ,
2007-10-09 04:25:53 +04:00
. flags = XFRM_TYPE_REPLAY_PROT ,
2005-04-17 02:20:36 +04:00
. init_state = esp_init_state ,
. destructor = esp_destroy ,
2007-04-09 22:47:18 +04:00
. get_mtu = esp4_get_mtu ,
2005-04-17 02:20:36 +04:00
. input = esp_input ,
. output = esp_output
} ;
2014-02-21 11:41:08 +04:00
static struct xfrm4_protocol esp4_protocol = {
2005-04-17 02:20:36 +04:00
. handler = xfrm4_rcv ,
2014-02-21 11:41:08 +04:00
. input_handler = xfrm_input ,
. cb_handler = esp4_rcv_cb ,
2005-04-17 02:20:36 +04:00
. err_handler = esp4_err ,
2014-02-21 11:41:08 +04:00
. priority = 0 ,
2005-04-17 02:20:36 +04:00
} ;
/* Module init: register the ESP xfrm type and the IPPROTO_ESP handler. */
static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);	/* roll back */
		return -EAGAIN;
	}
	return 0;
}
/* Module exit: unregister in the reverse order of esp4_init(). */
static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);