2019-05-19 16:51:43 +03:00
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */
2012-05-15 18:11:53 +04:00
# define pr_fmt(fmt) "IPv6: " fmt
2008-01-29 06:35:05 +03:00
# include <crypto/aead.h>
# include <crypto/authenc.h>
2006-07-30 09:41:01 +04:00
# include <linux/err.h>
2005-04-17 02:20:36 +04:00
# include <linux/module.h>
# include <net/ip.h>
# include <net/xfrm.h>
# include <net/esp.h>
2007-10-27 09:53:58 +04:00
# include <linux/scatterlist.h>
2005-10-11 08:11:08 +04:00
# include <linux/kernel.h>
2005-04-17 02:20:36 +04:00
# include <linux/pfkeyv2.h>
# include <linux/random.h>
2008-01-29 06:35:05 +03:00
# include <linux/slab.h>
2007-10-10 00:33:35 +04:00
# include <linux/spinlock.h>
2020-04-27 18:59:34 +03:00
# include <net/ip6_checksum.h>
2012-06-16 01:54:11 +04:00
# include <net/ip6_route.h>
2005-04-17 02:20:36 +04:00
# include <net/icmp.h>
# include <net/ipv6.h>
2005-12-27 07:43:12 +03:00
# include <net/protocol.h>
2020-04-27 18:59:34 +03:00
# include <net/udp.h>
2005-04-17 02:20:36 +04:00
# include <linux/icmpv6.h>
2020-04-27 18:59:35 +03:00
# include <net/tcp.h>
# include <net/espintcp.h>
# include <net/inet6_hashtables.h>
2005-04-17 02:20:36 +04:00
2017-01-17 12:23:03 +03:00
# include <linux/highmem.h>
2008-01-29 06:35:05 +03:00
/* Per-skb ESP state, stored in skb->cb for the lifetime of the crypto
 * operation.  @tmp is the scratch buffer from esp_alloc_tmp() holding the
 * ESN words, IV, AEAD request and SG list; freed on completion.
 */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};
2020-04-27 18:59:34 +03:00
/* Extra output-path state kept at the front of the esp_alloc_tmp() buffer
 * when ESN is enabled: the saved SPI word displaced by the high sequence
 * bits (@seqhi) and the ESP header's offset from the transport header
 * (@esphoff), needed to restore the header after encryption.
 */
struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};
2008-01-29 06:35:05 +03:00
#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	/* Optional ESN high-bits word first, then the IV. */
	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		/* Reserve slack so the IV can be aligned to the AEAD's
		 * alignmask, then round up to the tfm context alignment
		 * that the request structure requires.
		 */
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	/* GFP_ATOMIC: may be called from softirq/xmit context. */
	return kmalloc(len, GFP_ATOMIC);
}
2020-04-27 18:59:34 +03:00
static inline void * esp_tmp_extra ( void * tmp )
2011-03-08 03:07:51 +03:00
{
2020-04-27 18:59:34 +03:00
return PTR_ALIGN ( tmp , __alignof__ ( struct esp_output_extra ) ) ;
2011-03-08 03:07:51 +03:00
}
static inline u8 * esp_tmp_iv ( struct crypto_aead * aead , void * tmp , int seqhilen )
2008-01-29 06:35:05 +03:00
{
return crypto_aead_ivsize ( aead ) ?
2011-03-08 03:07:51 +03:00
PTR_ALIGN ( ( u8 * ) tmp + seqhilen ,
crypto_aead_alignmask ( aead ) + 1 ) : tmp + seqhilen ;
2008-01-29 06:35:05 +03:00
}
/* Locate the aead_request after the IV in the tmp buffer and bind it to
 * the transform so it is ready for aead_request_set_*() calls.
 */
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	u8 *end = iv + crypto_aead_ivsize(aead);
	struct aead_request *req;

	req = (void *)PTR_ALIGN(end, crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);

	return req;
}
/* Locate the scatterlist array that follows the request (and its
 * transform-specific context) in the tmp buffer.
 */
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	unsigned long addr = (unsigned long)(req + 1) + crypto_aead_reqsize(aead);

	return (void *)ALIGN(addr, __alignof__(struct scatterlist));
}
2017-01-17 12:23:03 +03:00
/* Drop the page references taken for an out-of-place destination buffer.
 * Re-derives the request location inside @tmp (same layout walk as
 * esp6_output_tail()) to reach the source/destination scatterlists.
 */
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	/* ESN stores an extra header word at the front of tmp; account for
	 * it so the IV/request offsets match what was allocated.
	 */
	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}
2020-04-27 18:59:35 +03:00
#ifdef CONFIG_INET6_ESPINTCP
/* RCU-deferred holder for a cached TCP encapsulation socket reference. */
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

/* RCU callback: release the socket reference once all readers of the old
 * x->encap_sk pointer are done.
 */
static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}
/* Find (and cache in x->encap_sk) the established TCP socket used for
 * ESP-in-TCP encapsulation of state @x.
 *
 * Caller must hold rcu_read_lock().  Returns the socket or an ERR_PTR:
 * -ENOMEM, -ENOENT (no established socket), -EINVAL (socket lacks the
 * espintcp ULP), or -EREMCHG if the template ports changed underneath us.
 */
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	/* Fast path: cached socket still established. */
	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		/* Cached socket is stale (not established): drop the cache.
		 * The sock_put() is deferred via RCU so concurrent readers
		 * of x->encap_sk remain safe.
		 */
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	/* Look up the established TCP socket matching the encap template
	 * (lookup takes a reference).
	 */
	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	/* Re-check under the lock before publishing the new cache entry:
	 * the template ports may have changed, or someone else may have
	 * cached a socket already.
	 */
	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
static int esp_output_tcp_finish ( struct xfrm_state * x , struct sk_buff * skb )
{
struct sock * sk ;
int err ;
rcu_read_lock ( ) ;
sk = esp6_find_tcp_sk ( x ) ;
err = PTR_ERR_OR_ZERO ( sk ) ;
if ( err )
goto out ;
bh_lock_sock ( sk ) ;
if ( sock_owned_by_user ( sk ) )
err = espintcp_queue_out ( sk , skb ) ;
else
err = espintcp_push_skb ( sk , skb ) ;
bh_unlock_sock ( sk ) ;
out :
rcu_read_unlock ( ) ;
return err ;
}
static int esp_output_tcp_encap_cb ( struct net * net , struct sock * sk ,
struct sk_buff * skb )
{
struct dst_entry * dst = skb_dst ( skb ) ;
struct xfrm_state * x = dst - > xfrm ;
return esp_output_tcp_finish ( x , skb ) ;
}
/* Queue an ESP-in-TCP packet for transmission in process-safe context.
 * Returns -EINPROGRESS on the success path (see comment below).
 */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
/* Stub when ESP-in-TCP support is not built: drop the packet. */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif
2020-04-27 18:59:34 +03:00
static void esp_output_encap_csum ( struct sk_buff * skb )
{
/* UDP encap with IPv6 requires a valid checksum */
if ( * skb_mac_header ( skb ) = = IPPROTO_UDP ) {
struct udphdr * uh = udp_hdr ( skb ) ;
struct ipv6hdr * ip6h = ipv6_hdr ( skb ) ;
int len = ntohs ( uh - > len ) ;
unsigned int offset = skb_transport_offset ( skb ) ;
__wsum csum = skb_checksum ( skb , offset , skb - > len - offset , 0 ) ;
uh - > check = csum_ipv6_magic ( & ip6h - > saddr , & ip6h - > daddr ,
len , IPPROTO_UDP , csum ) ;
if ( uh - > check = = 0 )
uh - > check = CSUM_MANGLED_0 ;
}
}
2008-01-29 06:35:05 +03:00
/* AEAD encryption completion callback (also called synchronously from
 * esp6_output_tail() on immediate completion via esp_output_done_esn).
 * Frees the tmp buffer, fixes the encap checksum and resumes output.
 */
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		/* HW-offload resume path: the state comes from the sec_path,
		 * not from the dst.
		 */
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		/* ESP-in-TCP packets are handed to the TCP socket instead of
		 * resuming the normal xfrm output path.
		 */
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}
2015-05-27 11:03:47 +03:00
/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	/* The original SPI was saved at the front of the tmp buffer when the
	 * header was shifted for ESN (see esp_output_set_esn /
	 * esp_input_set_header).
	 */
	__be32 *seqhi = esp_tmp_extra(tmp);

	/* Undo the 4-byte shift: spi slot currently holds seq_no. */
	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}
static void esp_output_restore_header ( struct sk_buff * skb )
{
2020-04-27 18:59:34 +03:00
void * tmp = ESP_SKB_CB ( skb ) - > tmp ;
struct esp_output_extra * extra = esp_tmp_extra ( tmp ) ;
esp_restore_header ( skb , skb_transport_offset ( skb ) + extra - > esphoff -
sizeof ( __be32 ) ) ;
2015-05-27 11:03:47 +03:00
}
2017-01-17 12:23:03 +03:00
/* Prepare the ESP header for encryption.  With ESN enabled, shift the
 * header down 4 bytes so the high sequence bits can be authenticated;
 * the displaced SPI is stashed in @extra for esp_output_restore_header().
 * Returns the (possibly moved) header location.
 */
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		/* Offloaded packets carry the sequence number in xo. */
		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
2015-05-27 11:03:47 +03:00
/* ESN variant of the encryption completion callback: restore the shifted
 * ESP header before running the common completion.
 */
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}
2020-04-27 18:59:34 +03:00
/* Build the UDP encapsulation header in front of the ESP header.
 * Returns the location where the ESP header should be written, or
 * ERR_PTR(-EMSGSIZE) if the payload exceeds what a UDP length field can
 * carry.  The checksum is left zero here and filled in after encryption
 * by esp_output_encap_csum().
 */
static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	/* NON-IKE style carries two zero marker words after the UDP header. */
	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}
2020-04-27 18:59:35 +03:00
#ifdef CONFIG_INET6_ESPINTCP
/* Build the ESP-in-TCP framing: a 2-byte length prefix precedes the ESP
 * header.  Returns the ESP header location or an ERR_PTR (-EMSGSIZE, or
 * an error from esp6_find_tcp_sk()).
 */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	/* Verify a usable encap TCP socket exists before committing. */
	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
/* Stub when ESP-in-TCP support is not built. */
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif
2020-04-27 18:59:34 +03:00
/* Apply the configured NAT-T encapsulation (UDP or TCP) and record the
 * resulting ESP header position in @esp->esph.  Returns 0 or a negative
 * errno from the encap builder.
 */
static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	/* Snapshot the template under the state lock; it can be updated
	 * concurrently by km_new_mapping().
	 */
	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	/* fallthrough: unknown types are treated as plain ESPINUDP */
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
2017-04-14 11:06:42 +03:00
/* First stage of ESP output: apply encapsulation and append the ESP
 * trailer (TFC pad, padding, pad length, next header, space for the ICV).
 * Tries, in order: existing tailroom, a fresh page fragment appended to
 * the skb (out-of-place encrypt), and finally a full copy via
 * skb_cow_data().  Returns the number of fragments for the scatterlist,
 * or a negative errno.
 */
int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			/* Trailer fits in existing tailroom. */
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			/* Trailer goes into a new page fragment; the cipher
			 * output will also go to separate pages (see
			 * esp6_output_tail()).
			 */
			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			/* x->lock protects the per-state page_frag cache. */
			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			/* +1 for the sg entry covering skb->data. */
			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	/* skb_cow_data() may relocate the data; preserve the ESP header's
	 * offset relative to the transport header across the copy.
	 */
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
2005-04-17 02:20:36 +04:00
2017-04-14 11:06:42 +03:00
/* Second stage of ESP output: build the scatterlists, set up the AEAD
 * request and encrypt.  Must be called after esp6_output_head() has
 * appended the trailer and set esp->nfrags/esph.  Returns 0 on synchronous
 * success, -EINPROGRESS if the cipher completes asynchronously (or the
 * packet was consumed by ESP-in-TCP), or a negative errno.
 */
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* ESN authenticates the high sequence bits too. */
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* +2: extra sg entries for skb->data and the trailer fragment. */
	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		/* Out-of-place: route the cipher output into fresh pages so
		 * a cloned skb's original data is left untouched.
		 */
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	/* The explicit IV carries (up to) the low 64 bits of the sequence
	 * number, right-aligned in the IV field.
	 */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* Async: the completion callback owns tmp/skb now. */
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);
/* xfrm type output handler for ESP over IPv6: compute trailer/padding
 * sizes (including optional TFC padding up to the path MTU), then run the
 * two-stage head/tail output.
 */
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	/* Save the inner protocol; the mac-header byte becomes the outer
	 * protocol (ESP, or UDP/TCP if encapsulated later).
	 */
	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		/* TFC padding, capped at what fits in the state MTU. */
		padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	/* +2 for the pad-length and next-header trailer bytes. */
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}
2008-01-29 06:35:05 +03:00
2017-08-30 11:30:39 +03:00
/* Strip the ESP trailer (padding, pad length, next header) and the ICV
 * after successful decryption.  Returns the inner protocol number, or a
 * negative errno on malformed padding.
 */
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	/* HW offload already removed the trailer; it reports the inner
	 * protocol directly.
	 */
	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	/* Read the 2-byte tail: pad length and next header. */
	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Keep the hardware checksum valid after the trim. */
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}
/* Post-decryption input processing: free the crypto scratch buffer,
 * remove the ESP trailer, handle NAT-T peer address/port changes and
 * strip the ESP header.  Returns the inner protocol number or a negative
 * errno.
 */
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	/* No tmp buffer was allocated when the HW did the crypto. */
	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		/* Skip any extension headers to reach the UDP/TCP encap
		 * header; both pointer views alias the same bytes.
		 */
		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertize the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);
2005-04-17 02:20:36 +04:00
2008-01-29 06:35:05 +03:00
static void esp_input_done ( struct crypto_async_request * base , int err )
{
struct sk_buff * skb = base - > data ;
2017-04-14 11:06:21 +03:00
xfrm_input_resume ( skb , esp6_input_done2 ( skb , err ) ) ;
2008-01-29 06:35:05 +03:00
}
2015-05-27 11:03:47 +03:00
static void esp_input_restore_header ( struct sk_buff * skb )
{
esp_restore_header ( skb , 0 ) ;
__skb_pull ( skb , 4 ) ;
}
2017-01-17 12:23:03 +03:00
/* Prepare the ESP header for decryption with ESN: shift it up 4 bytes so
 * the high sequence bits can be authenticated, saving the displaced SPI
 * into *@seqhi for esp_input_restore_header().
 */
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
2015-05-27 11:03:47 +03:00
/* ESN variant of the decryption completion callback: restore the shifted
 * header before the common completion runs.
 */
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
2006-04-01 12:52:46 +04:00
/* xfrm type input handler for ESP over IPv6: validate lengths, build a
 * scatterlist over the packet and run AEAD decryption.  Returns the inner
 * protocol number, -EINPROGRESS for async crypto, or a negative errno.
 */
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	/* ESP header + IV must be in the linear area. */
	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* ESN: high sequence bits join the associated data. */
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	/* Avoid the copy when the skb is private and either linear or a
	 * simple page-frag chain.
	 */
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	/* In-place decrypt: src and dst are the same scatterlist. */
	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}
2014-03-14 10:28:07 +04:00
/* ICMPv6 error handler for ESP: react only to PMTU and redirect
 * notifications that reference an SPI belonging to one of our states.
 * Always returns 0 (error fully consumed).
 */
static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	/* Only "packet too big" and redirect messages are acted upon. */
	if (type != ICMPV6_PKT_TOOBIG && type != NDISC_REDIRECT)
		return 0;

	/* Ignore notifications for SPIs that do not match a local state. */
	x = xfrm_state_lookup(net, skb->mark,
			      (const xfrm_address_t *)&hdr->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));

	xfrm_state_put(x);

	return 0;
}
static void esp6_destroy ( struct xfrm_state * x )
{
2013-10-18 14:09:05 +04:00
struct crypto_aead * aead = x - > data ;
2005-04-17 02:20:36 +04:00
2013-10-18 14:09:05 +04:00
if ( ! aead )
2005-04-17 02:20:36 +04:00
return ;
2013-10-18 14:09:05 +04:00
crypto_free_aead ( aead ) ;
2005-04-17 02:20:36 +04:00
}
2008-01-29 06:37:29 +03:00
static int esp_init_aead ( struct xfrm_state * x )
{
2015-05-27 11:03:47 +03:00
char aead_name [ CRYPTO_MAX_ALG_NAME ] ;
2008-01-29 06:37:29 +03:00
struct crypto_aead * aead ;
int err ;
2015-05-27 11:03:47 +03:00
err = - ENAMETOOLONG ;
if ( snprintf ( aead_name , CRYPTO_MAX_ALG_NAME , " %s(%s) " ,
x - > geniv , x - > aead - > alg_name ) > = CRYPTO_MAX_ALG_NAME )
goto error ;
2017-12-20 12:41:53 +03:00
aead = crypto_alloc_aead ( aead_name , 0 , 0 ) ;
2008-01-29 06:37:29 +03:00
err = PTR_ERR ( aead ) ;
if ( IS_ERR ( aead ) )
goto error ;
2013-10-18 14:09:05 +04:00
x - > data = aead ;
2008-01-29 06:37:29 +03:00
err = crypto_aead_setkey ( aead , x - > aead - > alg_key ,
( x - > aead - > alg_key_len + 7 ) / 8 ) ;
if ( err )
goto error ;
err = crypto_aead_setauthsize ( aead , x - > aead - > alg_icv_len / 8 ) ;
if ( err )
goto error ;
error :
return err ;
}
/* Set up an authenc(...) or authencesn(...) transform for a state
 * configured with separate encryption and (optional) authentication
 * algorithms.  The key material for both algorithms is packed into a
 * single blob in the rtattr-based format the authenc template expects:
 * [ CRYPTO_AUTHENC_KEYA_PARAM rtattr | auth key | enc key ].
 * Returns 0 on success or a negative errno.
 */
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	/* An encryption algorithm is mandatory in this mode. */
	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	/* ESN states need the authencesn template; the geniv wrapper is
	 * only added when one is configured ("%s%s...%s" collapses to the
	 * plain template name when x->geniv is NULL).
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	/* Attach before keying so esp6_destroy() can free it on failure. */
	x->data = aead;

	/* Total blob size: auth key (if any) + enc key + param header. */
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	/* Lay down the rtattr param header first; the enckeylen field is
	 * filled in below, once the auth key has been copied.
	 */
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* The template's full digest size must match the algorithm
		 * description; the ICV is then truncated to the negotiated
		 * length via setauthsize().
		 */
		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %u\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
/* xfrm_type init_state hook: allocate the crypto transform for the
 * state (AEAD or authenc, depending on configuration) and compute the
 * per-packet header and trailer space ESP will need in the given mode
 * and encapsulation.  Returns 0 on success or a negative errno.
 */
static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	/* ESP header plus the IV emitted in front of the payload. */
	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		/* BEET with an IPv4 inner selector needs room for the
		 * pseudo header and the v6/v4 header size difference.
		 */
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
		/* Unknown modes are treated like transport: no extra
		 * header space (intentional fallthrough).
		 */
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			/* Non-IKE variant carries two extra zero words. */
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	/* Worst-case padding to the cipher block (rounded up to 4),
	 * plus pad-length/next-header bytes, plus the ICV.
	 */
	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}
2014-03-14 10:28:07 +04:00
static int esp6_rcv_cb ( struct sk_buff * skb , int err )
{
return 0 ;
}
2014-08-25 00:53:11 +04:00
/* xfrm_type descriptor registering ESP (protocol 50) handling for
 * IPv6; XFRM_TYPE_REPLAY_PROT enables sequence-number replay checks.
 */
static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};
2014-03-14 10:28:07 +04:00
/* IPv6 protocol handler hooking ESP packets into the generic xfrm
 * input path; priority 0 is the default handler precedence.
 */
static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};
/* Module init: register the ESP xfrm type and the IPv6 protocol
 * handler, unwinding the type registration if the second step fails.
 */
static int __init esp6_init(void)
{
	int ret = 0;

	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		ret = -EAGAIN;
	}

	return ret;
}
/* Module exit: undo esp6_init() registrations.  The type is
 * unregistered unconditionally; only protocol deregistration failure
 * is reported.
 */
static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);

	xfrm_unregister_type(&esp6_type, AF_INET6);
}
/* Module plumbing: entry/exit points, license, and an alias so the
 * module auto-loads when an ESP xfrm state is configured for IPv6.
 */
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);