2019-05-19 15:08:20 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2012-03-12 11:03:32 +04:00
# define pr_fmt(fmt) "IPsec: " fmt
2008-01-29 06:35:05 +03:00
# include <crypto/aead.h>
# include <crypto/authenc.h>
2006-07-30 09:41:01 +04:00
# include <linux/err.h>
2005-04-17 02:20:36 +04:00
# include <linux/module.h>
# include <net/ip.h>
# include <net/xfrm.h>
# include <net/esp.h>
2007-10-27 09:53:58 +04:00
# include <linux/scatterlist.h>
2005-10-11 08:11:08 +04:00
# include <linux/kernel.h>
2005-04-17 02:20:36 +04:00
# include <linux/pfkeyv2.h>
2008-01-29 06:35:05 +03:00
# include <linux/rtnetlink.h>
# include <linux/slab.h>
2007-10-10 00:33:35 +04:00
# include <linux/spinlock.h>
2007-12-11 03:53:05 +03:00
# include <linux/in6.h>
2005-04-17 02:20:36 +04:00
# include <net/icmp.h>
2005-12-27 07:43:12 +03:00
# include <net/protocol.h>
2005-04-17 02:20:36 +04:00
# include <net/udp.h>
2019-11-25 16:49:02 +03:00
# include <net/tcp.h>
# include <net/espintcp.h>
2005-04-17 02:20:36 +04:00
2017-01-17 12:22:57 +03:00
# include <linux/highmem.h>
2008-01-29 06:35:05 +03:00
/* Per-skb ESP state, stored in the skb control buffer. */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;	/* scratch buffer from esp_alloc_tmp(); freed on completion */
};

/* Extra scratch state needed when Extended Sequence Numbers are in use. */
struct esp_output_extra {
	__be32 seqhi;	/* SPI word saved while the ESN high bits occupy its slot */
	u32 esphoff;	/* original ESP header offset from the transport header */
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
/*
* Allocate an AEAD request structure with extra space for SG and IV .
*
* For alignment considerations the IV is placed at the front , followed
* by the request and finally the SG list .
*
* TODO : Use spare space in skb for this where possible .
*/
2016-06-18 08:03:36 +03:00
static void * esp_alloc_tmp ( struct crypto_aead * aead , int nfrags , int extralen )
2008-01-29 06:35:05 +03:00
{
unsigned int len ;
2016-06-18 08:03:36 +03:00
len = extralen ;
2011-03-08 03:07:14 +03:00
len + = crypto_aead_ivsize ( aead ) ;
2008-01-29 06:35:05 +03:00
if ( len ) {
len + = crypto_aead_alignmask ( aead ) &
~ ( crypto_tfm_ctx_alignment ( ) - 1 ) ;
len = ALIGN ( len , crypto_tfm_ctx_alignment ( ) ) ;
}
2015-05-27 11:03:46 +03:00
len + = sizeof ( struct aead_request ) + crypto_aead_reqsize ( aead ) ;
2008-01-29 06:35:05 +03:00
len = ALIGN ( len , __alignof__ ( struct scatterlist ) ) ;
len + = sizeof ( struct scatterlist ) * nfrags ;
return kmalloc ( len , GFP_ATOMIC ) ;
}
2016-06-18 08:03:36 +03:00
static inline void * esp_tmp_extra ( void * tmp )
2011-03-08 03:07:14 +03:00
{
2016-06-18 08:03:36 +03:00
return PTR_ALIGN ( tmp , __alignof__ ( struct esp_output_extra ) ) ;
2011-03-08 03:07:14 +03:00
}
2016-06-18 08:03:36 +03:00
static inline u8 * esp_tmp_iv ( struct crypto_aead * aead , void * tmp , int extralen )
2008-01-29 06:35:05 +03:00
{
return crypto_aead_ivsize ( aead ) ?
2016-06-18 08:03:36 +03:00
PTR_ALIGN ( ( u8 * ) tmp + extralen ,
crypto_aead_alignmask ( aead ) + 1 ) : tmp + extralen ;
2008-01-29 06:35:05 +03:00
}
/* Locate and initialize the AEAD request following the IV. */
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req =
		(void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				  crypto_tfm_ctx_alignment());

	aead_request_set_tfm(req, aead);
	return req;
}
/* Locate the scatterlist array stored after the request and its context. */
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	unsigned long end = (unsigned long)(req + 1) +
			    crypto_aead_reqsize(aead);

	return (void *)ALIGN(end, __alignof__(struct scatterlist));
}
2024-03-08 18:26:00 +03:00
/* Drop the page references taken for an out-of-place encryption source
 * scatterlist stored in @tmp.  A no-op when encryption was in place
 * (src == dst).
 */
static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	/* Recompute the layout of the scratch buffer to find the request. */
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			skb_page_unref(skb, sg_page(sg));
}
2019-11-25 16:49:02 +03:00
# ifdef CONFIG_INET_ESPINTCP
/* Deferred release of a cached ESP-in-TCP socket reference. */
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

/* RCU callback: drop the socket reference after the grace period. */
static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *entry = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(entry->sk);
	kfree(entry);
}
/* Find (and cache in x->encap_sk) the established TCP socket carrying
 * ESP-in-TCP for this state.  Called under rcu_read_lock(); returns a
 * socket or an ERR_PTR().
 */
static struct sock *esp_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct net *net = xs_net(x);
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	/* Fast path: cached socket still established. */
	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		/* Cached socket is stale: clear the cache and release the
		 * reference after an RCU grace period.
		 */
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4,
				     dport, x->props.saddr.a4, sport, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		/* Ports changed while unlocked; use whatever is cached now. */
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		/* Someone else cached the same socket; drop our extra ref. */
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}
/* Push a fully built ESP packet onto the encapsulating TCP socket,
 * queueing it if the socket is currently owned by user context.
 */
static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int ret;

	rcu_read_lock();

	sk = esp_find_tcp_sk(x);
	ret = PTR_ERR_OR_ZERO(sk);
	if (!ret) {
		bh_lock_sock(sk);
		ret = sock_owned_by_user(sk) ?
			espintcp_queue_out(sk, skb) :
			espintcp_push_skb(sk, skb);
		bh_unlock_sock(sk);
	}

	rcu_read_unlock();
	return ret;
}
static int esp_output_tcp_encap_cb ( struct net * net , struct sock * sk ,
struct sk_buff * skb )
{
struct dst_entry * dst = skb_dst ( skb ) ;
struct xfrm_state * x = dst - > xfrm ;
return esp_output_tcp_finish ( x , skb ) ;
}
/* Hand the encrypted packet off to a tasklet for TCP transmission. */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	if (ret)
		return ret;

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return -EINPROGRESS;
}
# else
/* ESP-in-TCP not compiled in (CONFIG_INET_ESPINTCP=n): drop the packet. */
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -EOPNOTSUPP;
}
# endif
2023-02-06 13:22:38 +03:00
/* AEAD encryption completion callback.  Frees the scratch buffer and
 * either resumes hardware-offload processing (XFRM_DEV_RESUME) or hands
 * the packet back to the xfrm output path / ESP-in-TCP tail.
 */
static void esp_output_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		/* Offload resume: the state is the last one on the sec_path. */
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp, skb);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}
2015-05-27 11:03:46 +03:00
/* Move ESP header back into place. */
static void esp_restore_header ( struct sk_buff * skb , unsigned int offset )
{
struct ip_esp_hdr * esph = ( void * ) ( skb - > data + offset ) ;
void * tmp = ESP_SKB_CB ( skb ) - > tmp ;
2016-06-18 08:03:36 +03:00
__be32 * seqhi = esp_tmp_extra ( tmp ) ;
2015-05-27 11:03:46 +03:00
esph - > seq_no = esph - > spi ;
esph - > spi = * seqhi ;
}
static void esp_output_restore_header ( struct sk_buff * skb )
{
2016-06-18 08:03:36 +03:00
void * tmp = ESP_SKB_CB ( skb ) - > tmp ;
struct esp_output_extra * extra = esp_tmp_extra ( tmp ) ;
esp_restore_header ( skb , skb_transport_offset ( skb ) + extra - > esphoff -
sizeof ( __be32 ) ) ;
2015-05-27 11:03:46 +03:00
}
2017-01-17 12:22:57 +03:00
/* Prepare the ESP header for encryption, shifting it for ESN if needed.
 * Returns the (possibly moved) header location.
 */
static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		/* Save the original offset and the SPI word that the shift
		 * overwrites, so esp_output_restore_header() can undo it.
		 */
		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}
2023-02-06 13:22:38 +03:00
/* ESN variant of the encryption completion callback: first undo the
 * header shift, then run the common completion path.
 */
static void esp_output_done_esn(void *data, int err)
{
	esp_output_restore_header(data);
	esp_output_done(data, err);
}
2019-11-25 16:49:01 +03:00
/* Build the UDP encapsulation header in front of the ESP header and
 * return where the ESP header itself starts.
 */
static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb,
					       int encap_type,
					       struct esp_info *esp,
					       __be16 sport,
					       __be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type != UDP_ENCAP_ESPINUDP_NON_IKE)
		return (struct ip_esp_hdr *)(uh + 1);

	/* NON-IKE style inserts two zero marker words before the ESP header. */
	udpdata32 = (__be32 *)(uh + 1);
	udpdata32[0] = udpdata32[1] = 0;
	return (struct ip_esp_hdr *)(udpdata32 + 2);
}
2019-11-25 16:49:02 +03:00
# ifdef CONFIG_INET_ESPINTCP
/* Prepare ESP-in-TCP encapsulation: write the 2-byte length prefix that
 * precedes ESP on the TCP stream and return the ESP header location.
 * Fails if the packet is too large or no suitable TCP socket exists.
 */
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
# else
/* ESP-in-TCP not compiled in (CONFIG_INET_ESPINTCP=n). */
static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x,
					       struct sk_buff *skb,
					       struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
# endif
2019-11-25 16:49:01 +03:00
static int esp_output_encap ( struct xfrm_state * x , struct sk_buff * skb ,
struct esp_info * esp )
{
struct xfrm_encap_tmpl * encap = x - > encap ;
struct ip_esp_hdr * esph ;
__be16 sport , dport ;
int encap_type ;
spin_lock_bh ( & x - > lock ) ;
sport = encap - > encap_sport ;
dport = encap - > encap_dport ;
encap_type = encap - > encap_type ;
spin_unlock_bh ( & x - > lock ) ;
2017-04-14 11:06:33 +03:00
switch ( encap_type ) {
default :
case UDP_ENCAP_ESPINUDP :
case UDP_ENCAP_ESPINUDP_NON_IKE :
2019-11-25 16:49:01 +03:00
esph = esp_output_udp_encap ( skb , encap_type , esp , sport , dport ) ;
2017-04-14 11:06:33 +03:00
break ;
2019-11-25 16:49:02 +03:00
case TCP_ENCAP_ESPINTCP :
esph = esp_output_tcp_encap ( x , skb , esp ) ;
break ;
2010-12-08 07:37:50 +03:00
}
2011-03-08 03:07:14 +03:00
2019-11-25 16:49:01 +03:00
if ( IS_ERR ( esph ) )
return PTR_ERR ( esph ) ;
2017-04-14 11:06:33 +03:00
esp - > esph = esph ;
2019-03-25 16:30:00 +03:00
return 0 ;
2017-04-14 11:06:33 +03:00
}
2011-03-08 03:07:14 +03:00
2017-04-14 11:06:33 +03:00
/* Append the ESP trailer (TFC padding, pad length, next header byte) to
 * the packet and return the number of fragments the skb now occupies, or
 * a negative errno.  Prefers zero-copy strategies: reuse tailroom, or
 * attach a page fragment; falls back to skb_cow_data() otherwise.
 */
int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with TCP/UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	/* Too large for a single page fragment: take the copy path. */
	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			/* Fast path: trailer fits in existing tailroom. */
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			/* Trailer lands in a separate page fragment, so the
			 * cipher cannot write its output in place.
			 */
			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			/* x->lock protects the per-state page frag cache. */
			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			/* One extra slot for the skb->data scatterlist entry. */
			nfrags++;

			skb_len_add(skb, tailen);
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	/* Copy path: skb_cow_data() may relocate the data, so remember the
	 * ESP header offset and recompute it afterwards.
	 */
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);
/* Perform the AEAD encryption of a packet prepared by esp_output_head().
 * Returns 0 or a NET_XMIT code on synchronous completion, -EINPROGRESS
 * if the crypto backend completes asynchronously, or a negative errno.
 */
int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);	/* ESN high bits are part of the AAD */
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* +2 slots: one for the skb->data entry, one spare for the split
	 * src/dst case below.
	 */
	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	/* Seed the IV from the low 64 bits of the sequence number
	 * (right-aligned into the IV buffer).
	 */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* Async: the completion callback owns skb and tmp now. */
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp, skb);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);
/* xfrm output hook: compute padding/trailer sizes, fill in the ESP
 * header fields and hand off to the head/tail helpers for encryption.
 */
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		/* TFC padding: pad up to the configured size, capped at MTU. */
		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	/* +2 covers the pad-length and next-header trailer bytes. */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}
2005-04-17 02:20:36 +04:00
2017-08-30 11:30:39 +03:00
/* Strip the decrypted ESP trailer (padding, pad length, next header,
 * ICV) from the skb.  Returns the next-header protocol number, or a
 * negative errno on malformed padding.
 */
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	/* Read the pad-length and next-header bytes just before the ICV. */
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Subtract the trimmed bytes from the hardware checksum. */
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	ret = pskb_trim(skb, skb->len - trimlen);
	if (unlikely(ret))
		return ret;
	ret = nexthdr[1];

out:
	return ret;
}
2017-04-14 11:06:33 +03:00
/* Post-decryption processing: free the scratch buffer, strip the ESP
 * trailer, apply NAT-T / TCP-encap fixups and reposition the transport
 * header.  Returns the inner protocol number or a negative errno.
 */
int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	/* With CRYPTO_DONE the hardware did the crypto and no scratch
	 * buffer was allocated.
	 */
	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct tcphdr *th = (void *)(skb_network_header(skb) + ihl);
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);
		__be16 source;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);
2008-01-29 06:35:05 +03:00
2023-02-06 13:22:38 +03:00
/* AEAD decryption completion callback: finish input processing and
 * resume the xfrm input path.
 */
static void esp_input_done(void *data, int err)
{
	struct sk_buff *skb = data;
	int ret;

	ret = esp_input_done2(skb, err);
	xfrm_input_resume(skb, ret);
}
2015-05-27 11:03:46 +03:00
static void esp_input_restore_header ( struct sk_buff * skb )
{
esp_restore_header ( skb , 0 ) ;
__skb_pull ( skb , 4 ) ;
}
2017-01-17 12:22:57 +03:00
/* For ESN, shift the ESP header so the sequence-number high bits become
 * part of the authenticated data; @seqhi receives the displaced SPI word
 * so esp_input_restore_header() can undo this after decryption.
 */
static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}
2023-02-06 13:22:38 +03:00
/* ESN variant of the decryption callback: restore the shifted header
 * first, then run the common completion path.
 */
static void esp_input_done_esn(void *data, int err)
{
	esp_input_restore_header(data);
	esp_input_done(data, err);
}
2008-01-29 06:35:05 +03:00
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	/* Avoid skb_cow_data() when the skb is already private. */
	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;	/* one extra entry for skb->data */

			goto skip_cow;
		}
	}

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

skip_cow:
	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(err < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	/* Decrypt in place: src == dst. */
	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
2014-02-21 11:41:08 +04:00
/* ICMP error handler for ESP: react to PMTU and redirect notifications.
 * Only states addressed by the inner (quoted) header's daddr/SPI are
 * updated; all other ICMP types are ignored.
 */
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
	int type = icmp_hdr(skb)->type;
	struct xfrm_state *x;

	/* Accept only frag-needed unreachables and redirects. */
	if (type == ICMP_DEST_UNREACH) {
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	} else if (type != ICMP_REDIRECT) {
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}
static void esp_destroy ( struct xfrm_state * x )
{
2013-10-18 14:09:05 +04:00
struct crypto_aead * aead = x - > data ;
2005-04-17 02:20:36 +04:00
2013-10-18 14:09:05 +04:00
if ( ! aead )
2005-04-17 02:20:36 +04:00
return ;
2013-10-18 14:09:05 +04:00
crypto_free_aead ( aead ) ;
2005-04-17 02:20:36 +04:00
}
2022-09-27 18:45:31 +03:00
static int esp_init_aead ( struct xfrm_state * x , struct netlink_ext_ack * extack )
2005-04-17 02:20:36 +04:00
{
2015-05-27 11:03:46 +03:00
char aead_name [ CRYPTO_MAX_ALG_NAME ] ;
2008-01-29 06:37:29 +03:00
struct crypto_aead * aead ;
int err ;
2015-05-27 11:03:46 +03:00
if ( snprintf ( aead_name , CRYPTO_MAX_ALG_NAME , " %s(%s) " ,
2022-09-27 18:45:31 +03:00
x - > geniv , x - > aead - > alg_name ) > = CRYPTO_MAX_ALG_NAME ) {
NL_SET_ERR_MSG ( extack , " Algorithm name is too long " ) ;
return - ENAMETOOLONG ;
}
2015-05-27 11:03:46 +03:00
2017-12-20 12:41:53 +03:00
aead = crypto_alloc_aead ( aead_name , 0 , 0 ) ;
2008-01-29 06:37:29 +03:00
err = PTR_ERR ( aead ) ;
if ( IS_ERR ( aead ) )
goto error ;
2013-10-18 14:09:05 +04:00
x - > data = aead ;
2008-01-29 06:37:29 +03:00
err = crypto_aead_setkey ( aead , x - > aead - > alg_key ,
( x - > aead - > alg_key_len + 7 ) / 8 ) ;
if ( err )
goto error ;
err = crypto_aead_setauthsize ( aead , x - > aead - > alg_icv_len / 8 ) ;
if ( err )
goto error ;
2022-09-27 18:45:31 +03:00
return 0 ;
2008-01-29 06:37:29 +03:00
error :
2022-09-27 18:45:31 +03:00
NL_SET_ERR_MSG ( extack , " Kernel was unable to initialize cryptographic operations " ) ;
2008-01-29 06:37:29 +03:00
return err ;
}
2022-09-27 18:45:31 +03:00
static int esp_init_authenc ( struct xfrm_state * x ,
struct netlink_ext_ack * extack )
2008-01-29 06:37:29 +03:00
{
2008-01-29 06:35:05 +03:00
struct crypto_aead * aead ;
struct crypto_authenc_key_param * param ;
struct rtattr * rta ;
char * key ;
char * p ;
char authenc_name [ CRYPTO_MAX_ALG_NAME ] ;
unsigned int keylen ;
int err ;
2005-04-17 02:20:36 +04:00
2008-01-29 06:37:29 +03:00
err = - ENAMETOOLONG ;
2011-03-08 03:07:14 +03:00
if ( ( x - > props . flags & XFRM_STATE_ESN ) ) {
if ( snprintf ( authenc_name , CRYPTO_MAX_ALG_NAME ,
2015-05-27 11:03:46 +03:00
" %s%sauthencesn(%s,%s)%s " ,
x - > geniv ? : " " , x - > geniv ? " ( " : " " ,
2011-03-08 03:07:14 +03:00
x - > aalg ? x - > aalg - > alg_name : " digest_null " ,
2015-05-27 11:03:46 +03:00
x - > ealg - > alg_name ,
2022-09-27 18:45:31 +03:00
x - > geniv ? " ) " : " " ) > = CRYPTO_MAX_ALG_NAME ) {
NL_SET_ERR_MSG ( extack , " Algorithm name is too long " ) ;
2011-03-08 03:07:14 +03:00
goto error ;
2022-09-27 18:45:31 +03:00
}
2011-03-08 03:07:14 +03:00
} else {
if ( snprintf ( authenc_name , CRYPTO_MAX_ALG_NAME ,
2015-05-27 11:03:46 +03:00
" %s%sauthenc(%s,%s)%s " ,
x - > geniv ? : " " , x - > geniv ? " ( " : " " ,
2011-03-08 03:07:14 +03:00
x - > aalg ? x - > aalg - > alg_name : " digest_null " ,
2015-05-27 11:03:46 +03:00
x - > ealg - > alg_name ,
2022-09-27 18:45:31 +03:00
x - > geniv ? " ) " : " " ) > = CRYPTO_MAX_ALG_NAME ) {
NL_SET_ERR_MSG ( extack , " Algorithm name is too long " ) ;
2011-03-08 03:07:14 +03:00
goto error ;
2022-09-27 18:45:31 +03:00
}
2011-03-08 03:07:14 +03:00
}
2008-01-29 06:35:05 +03:00
2017-12-20 12:41:53 +03:00
aead = crypto_alloc_aead ( authenc_name , 0 , 0 ) ;
2008-01-29 06:35:05 +03:00
err = PTR_ERR ( aead ) ;
2022-09-27 18:45:31 +03:00
if ( IS_ERR ( aead ) ) {
NL_SET_ERR_MSG ( extack , " Kernel was unable to initialize cryptographic operations " ) ;
2008-01-29 06:35:05 +03:00
goto error ;
2022-09-27 18:45:31 +03:00
}
2008-01-29 06:35:05 +03:00
2013-10-18 14:09:05 +04:00
x - > data = aead ;
2008-01-29 06:35:05 +03:00
keylen = ( x - > aalg ? ( x - > aalg - > alg_key_len + 7 ) / 8 : 0 ) +
( x - > ealg - > alg_key_len + 7 ) / 8 + RTA_SPACE ( sizeof ( * param ) ) ;
err = - ENOMEM ;
key = kmalloc ( keylen , GFP_KERNEL ) ;
if ( ! key )
goto error ;
p = key ;
rta = ( void * ) p ;
rta - > rta_type = CRYPTO_AUTHENC_KEYA_PARAM ;
rta - > rta_len = RTA_LENGTH ( sizeof ( * param ) ) ;
param = RTA_DATA ( rta ) ;
p + = RTA_SPACE ( sizeof ( * param ) ) ;
2005-04-17 02:20:36 +04:00
if ( x - > aalg ) {
struct xfrm_algo_desc * aalg_desc ;
2008-01-29 06:35:05 +03:00
memcpy ( p , x - > aalg - > alg_key , ( x - > aalg - > alg_key_len + 7 ) / 8 ) ;
p + = ( x - > aalg - > alg_key_len + 7 ) / 8 ;
2005-04-17 02:20:36 +04:00
aalg_desc = xfrm_aalg_get_byname ( x - > aalg - > alg_name , 0 ) ;
BUG_ON ( ! aalg_desc ) ;
2008-01-29 06:35:05 +03:00
err = - EINVAL ;
2014-11-06 02:36:08 +03:00
if ( aalg_desc - > uinfo . auth . icv_fullbits / 8 ! =
2008-01-29 06:35:05 +03:00
crypto_aead_authsize ( aead ) ) {
2022-09-27 18:45:31 +03:00
NL_SET_ERR_MSG ( extack , " Kernel was unable to initialize cryptographic operations " ) ;
2008-01-29 06:35:05 +03:00
goto free_key ;
2005-04-17 02:20:36 +04:00
}
2008-01-29 06:35:05 +03:00
err = crypto_aead_setauthsize (
2009-11-25 03:29:53 +03:00
aead , x - > aalg - > alg_trunc_len / 8 ) ;
2022-09-27 18:45:31 +03:00
if ( err ) {
NL_SET_ERR_MSG ( extack , " Kernel was unable to initialize cryptographic operations " ) ;
2008-01-29 06:35:05 +03:00
goto free_key ;
2022-09-27 18:45:31 +03:00
}
2005-04-17 02:20:36 +04:00
}
2007-10-09 04:13:44 +04:00
2008-01-29 06:35:05 +03:00
param - > enckeylen = cpu_to_be32 ( ( x - > ealg - > alg_key_len + 7 ) / 8 ) ;
memcpy ( p , x - > ealg - > alg_key , ( x - > ealg - > alg_key_len + 7 ) / 8 ) ;
err = crypto_aead_setkey ( aead , key , keylen ) ;
free_key :
2023-07-17 12:59:19 +03:00
kfree_sensitive ( key ) ;
2008-01-29 06:35:05 +03:00
2008-01-29 06:37:29 +03:00
error :
return err ;
}
2022-09-27 18:45:29 +03:00
static int esp_init_state ( struct xfrm_state * x , struct netlink_ext_ack * extack )
2008-01-29 06:37:29 +03:00
{
struct crypto_aead * aead ;
u32 align ;
int err ;
2013-10-18 14:09:05 +04:00
x - > data = NULL ;
2008-01-29 06:37:29 +03:00
2022-09-27 18:45:31 +03:00
if ( x - > aead ) {
err = esp_init_aead ( x , extack ) ;
} else if ( x - > ealg ) {
err = esp_init_authenc ( x , extack ) ;
} else {
NL_SET_ERR_MSG ( extack , " ESP: AEAD or CRYPT must be provided " ) ;
err = - EINVAL ;
}
2008-01-29 06:37:29 +03:00
2008-01-29 06:35:05 +03:00
if ( err )
2005-04-17 02:20:36 +04:00
goto error ;
2008-01-29 06:35:05 +03:00
2013-10-18 14:09:05 +04:00
aead = x - > data ;
2008-01-29 06:37:29 +03:00
2008-01-29 06:35:05 +03:00
x - > props . header_len = sizeof ( struct ip_esp_hdr ) +
crypto_aead_ivsize ( aead ) ;
2006-09-23 02:05:15 +04:00
if ( x - > props . mode = = XFRM_MODE_TUNNEL )
2005-04-17 02:20:36 +04:00
x - > props . header_len + = sizeof ( struct iphdr ) ;
2008-08-06 13:39:30 +04:00
else if ( x - > props . mode = = XFRM_MODE_BEET & & x - > sel . family ! = AF_INET6 )
2007-04-09 22:47:58 +04:00
x - > props . header_len + = IPV4_BEET_PHMAXLEN ;
2005-04-17 02:20:36 +04:00
if ( x - > encap ) {
struct xfrm_encap_tmpl * encap = x - > encap ;
switch ( encap - > encap_type ) {
default :
2022-09-27 18:45:31 +03:00
NL_SET_ERR_MSG ( extack , " Unsupported encapsulation type for ESP " ) ;
2018-01-05 14:12:32 +03:00
err = - EINVAL ;
2005-04-17 02:20:36 +04:00
goto error ;
case UDP_ENCAP_ESPINUDP :
x - > props . header_len + = sizeof ( struct udphdr ) ;
break ;
case UDP_ENCAP_ESPINUDP_NON_IKE :
x - > props . header_len + = sizeof ( struct udphdr ) + 2 * sizeof ( u32 ) ;
break ;
2019-11-25 16:49:02 +03:00
# ifdef CONFIG_INET_ESPINTCP
case TCP_ENCAP_ESPINTCP :
/* only the length field, TCP encap is done by
* the socket
*/
x - > props . header_len + = 2 ;
break ;
# endif
2005-04-17 02:20:36 +04:00
}
}
2008-01-29 06:35:05 +03:00
align = ALIGN ( crypto_aead_blocksize ( aead ) , 4 ) ;
2013-10-18 14:09:05 +04:00
x - > props . trailer_len = align + 1 + crypto_aead_authsize ( aead ) ;
2005-04-17 02:20:36 +04:00
error :
2008-01-29 06:35:05 +03:00
return err ;
2005-04-17 02:20:36 +04:00
}
2014-02-21 11:41:08 +04:00
/* Receive callback for the xfrm4 protocol handler; nothing to do for ESP. */
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
2008-01-31 06:11:50 +03:00
static const struct xfrm_type esp_type =
2005-04-17 02:20:36 +04:00
{
. owner = THIS_MODULE ,
. proto = IPPROTO_ESP ,
2007-10-09 04:25:53 +04:00
. flags = XFRM_TYPE_REPLAY_PROT ,
2005-04-17 02:20:36 +04:00
. init_state = esp_init_state ,
. destructor = esp_destroy ,
. input = esp_input ,
2017-04-14 11:06:33 +03:00
. output = esp_output ,
2005-04-17 02:20:36 +04:00
} ;
2014-02-21 11:41:08 +04:00
static struct xfrm4_protocol esp4_protocol = {
2005-04-17 02:20:36 +04:00
. handler = xfrm4_rcv ,
2014-02-21 11:41:08 +04:00
. input_handler = xfrm_input ,
. cb_handler = esp4_rcv_cb ,
2005-04-17 02:20:36 +04:00
. err_handler = esp4_err ,
2014-02-21 11:41:08 +04:00
. priority = 0 ,
2005-04-17 02:20:36 +04:00
} ;
/* Module init: register the ESP xfrm type, then the IPPROTO_ESP handler,
 * unwinding the type registration if the second step fails.
 */
static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		goto fail;
	}

	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		goto fail;
	}

	return 0;

fail:
	return -EAGAIN;
}
/* Module exit: tear down the protocol handler and xfrm type registrations. */
static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);

	xfrm_unregister_type(&esp_type, AF_INET);
}
module_init ( esp4_init ) ;
module_exit ( esp4_fini ) ;
2024-02-08 19:42:41 +03:00
MODULE_DESCRIPTION ( " IPv4 ESP transformation library " ) ;
2005-04-17 02:20:36 +04:00
MODULE_LICENSE ( " GPL " ) ;
2007-06-27 10:57:49 +04:00
MODULE_ALIAS_XFRM_TYPE ( AF_INET , XFRM_PROTO_ESP ) ;