#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>
int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;
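
/* For reference (comment added here, not in the original file): the sysctl
 * above is a bitmap; the TFO_* flags tested in this file are defined in
 * include/net/tcp.h and, at the time of writing, have these values:
 *
 *   TFO_CLIENT_ENABLE           0x1   - client may send data in the SYN
 *   TFO_SERVER_ENABLE           0x2   - server may accept data in the SYN
 *   TFO_CLIENT_NO_COOKIE        0x4   - client sends SYN data w/o a cookie
 *   TFO_SERVER_COOKIE_NOT_REQD  0x200 - server accepts SYN data w/o a cookie
 */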

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);

void tcp_fastopen_init_key_once(bool publish)
{
        static u8 key[TCP_FASTOPEN_KEY_LENGTH];

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        if (net_get_random_once(key, sizeof(key)) && publish)
                tcp_fastopen_reset_cipher(key, sizeof(key));
}
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        crypto_free_cipher(ctx->tfm);
        kfree(ctx);
}

int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
        int err;
        struct tcp_fastopen_context *ctx, *octx;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

        if (IS_ERR(ctx->tfm)) {
                err = PTR_ERR(ctx->tfm);
error:          kfree(ctx);
                pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
                return err;
        }
        err = crypto_cipher_setkey(ctx->tfm, key, len);
        if (err) {
                pr_err("TCP: TFO cipher key error: %d\n", err);
                crypto_free_cipher(ctx->tfm);
                goto error;
        }
        memcpy(ctx->key, key, len);

        spin_lock(&tcp_fastopen_ctx_lock);

        octx = rcu_dereference_protected(tcp_fastopen_ctx,
                                lockdep_is_held(&tcp_fastopen_ctx_lock));
        rcu_assign_pointer(tcp_fastopen_ctx, ctx);
        spin_unlock(&tcp_fastopen_ctx_lock);

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
        return err;
}
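
/* Usage note (comment added here, not in the original file): besides the
 * lazy-init path in tcp_fastopen_init_key_once(), this function is reached
 * from the net.ipv4.tcp_fastopen_key sysctl handler, which parses the key
 * as four dash-separated 32-bit hex words, e.g. (illustrative value only):
 *
 *   echo 00112233-44556677-8899aabb-ccddeeff > \
 *           /proc/sys/net/ipv4/tcp_fastopen_key
 */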
static bool __tcp_fastopen_cookie_gen(const void *path,
                                      struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;
        bool ok = false;

        tcp_fastopen_init_key_once(true);

        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
                crypto_cipher_encrypt_one(ctx->tfm, foc->val, path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                ok = true;
        }
        rcu_read_unlock();
        return ok;
}

/* Generate the fastopen cookie by doing aes128 encryption on both
 * the source and destination addresses. Pad 0s for IPv4 or IPv4-mapped-IPv6
 * addresses. For the longer IPv6 addresses use CBC-MAC.
 *
 * XXX (TFO) - refactor when TCP_FASTOPEN_COOKIE_SIZE != AES_BLOCK_SIZE.
 */
static bool tcp_fastopen_cookie_gen(struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                __be32 path[4] = { iph->saddr, iph->daddr, 0, 0 };
                return __tcp_fastopen_cookie_gen(path, foc);
        }

#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);
                struct tcp_fastopen_cookie tmp;

                if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
                        struct in6_addr *buf = (struct in6_addr *) tmp.val;
                        int i;

                        for (i = 0; i < 4; i++)
                                buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
                        return __tcp_fastopen_cookie_gen(buf, foc);
                }
        }
#endif
        return false;
}
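
/* Sketch of the construction above (comment added here, not in the original
 * file): with E_K() denoting one AES-128 block encryption under the server
 * key,
 *
 *   IPv4:  cookie = E_K(saddr || daddr || 0 || 0)
 *   IPv6:  cookie = E_K(E_K(saddr) XOR daddr)
 *
 * i.e. the two 16-byte IPv6 address blocks are chained CBC-MAC style so the
 * cookie still fits in a single AES block.
 */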
static bool tcp_fastopen_create_child(struct sock *sk,
                                      struct sk_buff *skb,
                                      struct dst_entry *dst,
                                      struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;

        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                return false;

        spin_lock(&queue->fastopenq->lock);
        queue->fastopenq->qlen++;
        spin_unlock(&queue->fastopenq->lock);

        /* Initialize the child socket. Have to fix some values to take
         * into account that the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        tp->fastopen_rsk = req;
        /* Do a hold on the listener sk so that if the listener is being
         * closed, the child that has been accepted can live on and still
         * access listen_lock.
         */
        sock_hold(sk);
        tcp_rsk(req)->listener = sk;

        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the SYN table of the parent
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

        /* Add the child socket directly into the accept queue */
        inet_csk_reqsk_queue_add(sk, req, child);

        /* Now finish processing the fastopen child socket. */
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
        tcp_init_metrics(child);
        tcp_init_buffer_space(child);

        /* Queue the data carried in the SYN packet. We need to first
         * bump skb's refcnt because the caller will attempt to free it.
         *
         * XXX (TFO) - we honor a zero-payload TFO request for now,
         * (any reason not to?) but no need to queue the skb since
         * there is no data. How about SYN+FIN?
         */
        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
                skb = skb_get(skb);
                skb_dst_drop(skb);
                __skb_pull(skb, tcp_hdr(skb)->doff * 4);
                skb_set_owner_r(skb, child);
                __skb_queue_tail(&child->sk_receive_queue, skb);
                tp->syn_data_acked = 1;
        }
        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        sk->sk_data_ready(sk);
        bh_unlock_sock(child);
        sock_put(child);
        WARN_ON(req->sk == NULL);
        return true;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs. a server not supporting Fast Open at all.
         */
        fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
        if (fastopenq == NULL || fastopenq->max_qlen == 0)
                return false;

        if (fastopenq->qlen >= fastopenq->max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
                        spin_unlock(&fastopenq->lock);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_free(req1);
        }
        return true;
}
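
/* Usage note (comment added here, not in the original file): max_qlen above
 * is the value a listener passes via the TCP_FASTOPEN socket option before
 * listen(), e.g.
 *
 *   int qlen = 16;                      (max pending TFO requests)
 *   setsockopt(fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *
 * A zero or unset value disables Fast Open on the listener, which is what
 * the fastopenq == NULL || max_qlen == 0 check above catches.
 */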
/* Returns true if we should perform Fast Open on the SYN. The cookie (foc)
 * may be updated and returned to the client in the SYN-ACK later. E.g., Fast
 * Open cookie request (foc->len == 0).
 */
bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                      struct request_sock *req,
                      struct tcp_fastopen_cookie *foc,
                      struct dst_entry *dst)
{
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;

        if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return false;
        }

        if (syn_data && (sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (tcp_fastopen_cookie_gen(req, skb, &valid_foc) &&
            foc->len == TCP_FASTOPEN_COOKIE_SIZE &&
            foc->len == valid_foc.len &&
            !memcmp(foc->val, valid_foc.val, foc->len)) {
                /* Cookie is valid. Create a (full) child socket to accept
                 * the data in SYN before returning a SYN-ACK to ack the
                 * data. If we fail to create the socket, fall back and
                 * ack the ISN only but include the same cookie.
                 *
                 * Note: Data-less SYN with valid cookie is allowed to send
                 * data in SYN_RECV state.
                 */
fastopen:
                if (tcp_fastopen_create_child(sk, skb, dst, req)) {
                        foc->len = -1;
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPFASTOPENPASSIVE);
                        return true;
                }
        }

        NET_INC_STATS_BH(sock_net(sk), foc->len ?
                         LINUX_MIB_TCPFASTOPENPASSIVEFAIL :
                         LINUX_MIB_TCPFASTOPENCOOKIEREQD);
        *foc = valid_foc;
        return false;
}
EXPORT_SYMBOL(tcp_try_fastopen);
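
/* Client-side note (comment added here, not in the original file): with the
 * default TFO_CLIENT_ENABLE setting above, a client requests Fast Open by
 * sending data with the MSG_FASTOPEN flag instead of calling connect(), e.g.
 *
 *   sendto(fd, buf, len, MSG_FASTOPEN,
 *          (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * The first such call elicits a cookie in the SYN-ACK; later calls carry
 * data in the SYN along with the cached cookie, which the server side
 * validates in tcp_fastopen_cookie_gen() above.
 */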