/* SPDX-License-Identifier: GPL-2.0 */
/*
* Multipath TCP
*
 * Copyright (c) 2017 - 2019, Intel Corporation.
*/
#ifndef __NET_MPTCP_H
#define __NET_MPTCP_H

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/types.h>

struct seq_file;

/* MPTCP sk_buff extension data */
struct mptcp_ext {
        union {
                u64 data_ack;
                u32 data_ack32;
        };

        u64 data_seq;
        u32 subflow_seq;
        u16 data_len;
        u8  use_map:1,
            dsn64:1,
            data_fin:1,
            use_ack:1,
            ack64:1,
            mpc_map:1,
            __unused:2;
        /* one byte hole */
};
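
/* Outgoing MPTCP option state: the mptcp_*_options() helpers below fill this
 * in while TCP builds a packet's option space, and mptcp_write_options()
 * later serializes it into the header.
 */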
struct mptcp_out_options {
#if IS_ENABLED(CONFIG_MPTCP)
        u16 suboptions;
        u64 sndr_key;
        u64 rcvr_key;
        union {
                struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
                struct in6_addr addr6;
#endif
        };
        u8 addr_id;
        u64 ahmac;
        u8 rm_id;
        u8 join_id;
        u8 backup;
        u32 nonce;
        u64 thmac;
        u32 token;
        u8 hmac[20];
        struct mptcp_ext ext_copy;
#endif
};

#ifdef CONFIG_MPTCP

extern struct request_sock_ops mptcp_subflow_request_sock_ops;

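/* One-time MPTCP protocol setup, performed at boot when MPTCP is built in. */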
void mptcp_init(void);

static inline bool sk_is_mptcp(const struct sock *sk)
{
        return tcp_sk(sk)->is_mptcp;
}

static inline bool rsk_is_mptcp(const struct request_sock *req)
{
        return tcp_rsk(req)->is_mptcp;
}

static inline bool rsk_drop_req(const struct request_sock *req)
{
        return tcp_rsk(req)->is_mptcp && tcp_rsk(req)->drop_req;
}

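/* Let MPTCP adjust the receive space advertised on subflow @ssk. */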
void mptcp_space(const struct sock *ssk, int *space, int *full_space);
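
/* Hooks invoked while TCP builds the options of outgoing SYN, SYN/ACK and
 * established-state packets; they return true and set *size when MPTCP
 * options have to be added, filling @opts accordingly.
 */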
bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
                       unsigned int *size, struct mptcp_out_options *opts);
bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
                          struct mptcp_out_options *opts);
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
                               unsigned int *size, unsigned int remaining,
                               struct mptcp_out_options *opts);
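
/* Parse and act on the MPTCP options carried by an incoming skb. */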
void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
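
/* Write the options collected in @opts into the TCP option space at @ptr. */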
void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);

/* move the skb extension ownership, with the assumption that 'to' is
 * newly allocated
 */
static inline void mptcp_skb_ext_move(struct sk_buff *to,
                                      struct sk_buff *from)
{
        if (!skb_ext_exist(from, SKB_EXT_MPTCP))
                return;

        if (WARN_ON_ONCE(to->active_extensions))
                skb_ext_put(to);

        to->active_extensions = from->active_extensions;
        to->extensions = from->extensions;
        from->active_extensions = 0;
}

static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
                                     const struct mptcp_ext *from_ext)
{
        /* MPTCP always clears the ext when adding it to the skb, so
         * holes do not bother us here
         */
        return !from_ext ||
               (to_ext && from_ext &&
                !memcmp(from_ext, to_ext, sizeof(struct mptcp_ext)));
}

/* Check if skbs can be collapsed.
 * MPTCP collapse is allowed if neither @to nor @from carry an MPTCP data
 * mapping, or if the extension of @to is the same as that of @from.
 * Collapsing is not possible if @to lacks an extension, but @from carries one.
 */
static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
                                          const struct sk_buff *from)
{
        return mptcp_ext_matches(skb_ext_find(to, SKB_EXT_MPTCP),
                                 skb_ext_find(from, SKB_EXT_MPTCP));
}
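
/* Illustrative sketch only (not a real caller): a coalescing path is expected
 * to check the MPTCP mapping before merging two queued skbs, e.g.
 *
 *	if (mptcp_skb_can_collapse(to, from))
 *		... append the payload of 'from' to 'to' ...
 *
 * so that only skbs whose MPTCP extensions match end up sharing a mapping.
 */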

void mptcp_seq_show(struct seq_file *seq);

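/* Initialize the MPTCP-specific part of @req when a TCP syncookie for this
 * listener is validated.
 */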
int mptcp_subflow_init_cookie_req(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  struct sk_buff *skb);

#else

static inline void mptcp_init(void)
{
}

static inline bool sk_is_mptcp(const struct sock *sk)
{
        return false;
}

static inline bool rsk_is_mptcp(const struct request_sock *req)
{
        return false;
}

static inline bool rsk_drop_req(const struct request_sock *req)
{
        return false;
}

static inline void mptcp_parse_option(const struct sk_buff *skb,
                                      const unsigned char *ptr, int opsize,
                                      struct tcp_options_received *opt_rx)
{
}

static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
                                     unsigned int *size,
                                     struct mptcp_out_options *opts)
{
        return false;
}

static inline bool mptcp_synack_options(const struct request_sock *req,
                                        unsigned int *size,
                                        struct mptcp_out_options *opts)
{
        return false;
}

static inline bool mptcp_established_options(struct sock *sk,
                                             struct sk_buff *skb,
                                             unsigned int *size,
                                             unsigned int remaining,
                                             struct mptcp_out_options *opts)
{
        return false;
}

static inline void mptcp_incoming_options(struct sock *sk,
                                          struct sk_buff *skb)
{
}

static inline void mptcp_skb_ext_move(struct sk_buff *to,
                                      const struct sk_buff *from)
{
}

static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
                                          const struct sk_buff *from)
{
        return true;
}

static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
static inline void mptcp_seq_show(struct seq_file *seq) { }

static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
                                                const struct sock *sk_listener,
                                                struct sk_buff *skb)
{
        return 0; /* TCP fallback */
}

#endif /* CONFIG_MPTCP */

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int mptcpv6_init(void);
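/* Switch subflow @sk between IPv6 and IPv4-mapped operation. */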
void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
#elif IS_ENABLED(CONFIG_IPV6)
static inline int mptcpv6_init(void) { return 0; }
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif

#endif /* __NET_MPTCP_H */