#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>
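
/* Enables/configures TCP Fast Open; set via the net.ipv4.tcp_fastopen sysctl. */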
int sysctl_tcp_fastopen __read_mostly;
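
/* Currently published cipher context; readers dereference it under RCU,
 * writers replace it while holding tcp_fastopen_ctx_lock.
 */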
struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
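
/* Initialize the Fast Open key once with random bytes. The caller that
 * performs the one-time initialization also publishes the key as the
 * active cipher context when @publish is set.
 */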
void tcp_fastopen_init_key_once(bool publish)
{
        static u8 key[TCP_FASTOPEN_KEY_LENGTH];

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        if (net_get_random_once(key, sizeof(key)) && publish)
                tcp_fastopen_reset_cipher(key, sizeof(key));
}
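
/* RCU callback: free a replaced cipher context once all readers are done. */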
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
                container_of(head, struct tcp_fastopen_context, rcu);

        crypto_free_cipher(ctx->tfm);
        kfree(ctx);
}
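
/* Allocate a new AES cipher context keyed with @key, publish it via RCU
 * as the active context, and schedule the old context (if any) to be
 * freed after a grace period.
 */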
int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
        int err;
        struct tcp_fastopen_context *ctx, *octx;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
        ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(ctx->tfm)) {
                err = PTR_ERR(ctx->tfm);
error:          kfree(ctx);
                pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
                return err;
        }
        err = crypto_cipher_setkey(ctx->tfm, key, len);
        if (err) {
                pr_err("TCP: TFO cipher key error: %d\n", err);
                crypto_free_cipher(ctx->tfm);
                goto error;
        }
        memcpy(ctx->key, key, len);

        /* Publish the new context; the old one is freed only after an
         * RCU grace period, so in-flight readers stay safe.
         */
        spin_lock(&tcp_fastopen_ctx_lock);

        octx = rcu_dereference_protected(tcp_fastopen_ctx,
                                lockdep_is_held(&tcp_fastopen_ctx_lock));
        rcu_assign_pointer(tcp_fastopen_ctx, ctx);
        spin_unlock(&tcp_fastopen_ctx_lock);

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
        return err;
}
/* Computes the fastopen cookie for the IP path.
 * The path is 128 bits long (padded with zeros for IPv4).
 *
 * The caller must check foc->len to determine if a valid cookie
 * has been generated successfully.
 */
void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
                             struct tcp_fastopen_cookie *foc)
{
        __be32 path[4] = { src, dst, 0, 0 };
        struct tcp_fastopen_context *ctx;

        tcp_fastopen_init_key_once(true);

        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
                /* Encrypt the path with the current key to form the cookie. */
                crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
        }
        rcu_read_unlock();
}
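
/* Usage sketch (hypothetical caller, for illustration only). As the
 * comment above notes, a valid cookie must be detected via foc->len:
 *
 *      struct tcp_fastopen_cookie foc = { .len = 0 };
 *
 *      tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &foc);
 *      if (foc.len == TCP_FASTOPEN_COOKIE_SIZE)
 *              use_cookie(&foc);
 *
 * use_cookie() is a placeholder, not a kernel API.
 */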