/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#ifndef __NET_MPTCP_H
#define __NET_MPTCP_H

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/types.h>

struct mptcp_info;
struct mptcp_sock;
struct seq_file;

/* MPTCP sk_buff extension data */
struct mptcp_ext {
	union {
		u64	data_ack;
		u32	data_ack32;
	};
	u64		data_seq;
	u32		subflow_seq;
	u16		data_len;
	__sum16		csum;
	u8		use_map:1,
			dsn64:1,
			data_fin:1,
			use_ack:1,
			ack64:1,
			mpc_map:1,
			frozen:1,
			reset_transient:1;
	u8		reset_reason:4,
			csum_reqd:1,
			infinite_map:1;
};
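
/* Illustrative sketch only (not part of this header's API surface): the
 * extension is attached like any other skb extension.  A hypothetical
 * sender-side path could reserve and fill it roughly as follows, where
 * map_seq, ssn and map_len are made-up mapping values:
 *
 *	struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
 *
 *	if (mpext) {
 *		memset(mpext, 0, sizeof(*mpext));
 *		mpext->data_seq = map_seq;
 *		mpext->subflow_seq = ssn;
 *		mpext->data_len = map_len;
 *		mpext->use_map = 1;
 *		mpext->dsn64 = 1;
 *	}
 */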

#define MPTCPOPT_HMAC_LEN	20

#define MPTCP_RM_IDS_MAX	8

struct mptcp_rm_list {
	u8 ids[MPTCP_RM_IDS_MAX];
	u8 nr;
};
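
/* Example (sketch only, with made-up ids): a list announcing the removal
 * of address ids 1 and 2 would be built as:
 *
 *	struct mptcp_rm_list rm_list = {
 *		.ids = { 1, 2 },
 *		.nr = 2,
 *	};
 */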

struct mptcp_addr_info {
	u8			id;
	sa_family_t		family;
	__be16			port;
	union {
		struct in_addr	addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
		struct in6_addr	addr6;
#endif
	};
};
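
/* Example (sketch only, with made-up values): describing an IPv4
 * endpoint with address id 1:
 *
 *	struct mptcp_addr_info info = {
 *		.id = 1,
 *		.family = AF_INET,
 *		.port = htons(8080),
 *	};
 *
 *	info.addr.s_addr = htonl(INADDR_LOOPBACK);
 */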

struct mptcp_out_options {
#if IS_ENABLED(CONFIG_MPTCP)
	u16 suboptions;
	struct mptcp_rm_list rm_list;
	u8 join_id;
	u8 backup;
	u8 reset_reason:4,
	   reset_transient:1,
	   csum_reqd:1,
	   allow_join_id0:1;
	union {
		struct {
			u64 sndr_key;
			u64 rcvr_key;
			u64 data_seq;
			u32 subflow_seq;
			u16 data_len;
			__sum16 csum;
		};
		struct {
			struct mptcp_addr_info addr;
			u64 ahmac;
		};
		struct {
			struct mptcp_ext ext_copy;
			u64 fail_seq;
		};
		struct {
			u32 nonce;
			u32 token;
			u64 thmac;
			u8 hmac[MPTCPOPT_HMAC_LEN];
		};
	};
#endif
};

#define MPTCP_SCHED_NAME_MAX	16
#define MPTCP_SCHED_MAX		128
#define MPTCP_SCHED_BUF_MAX	(MPTCP_SCHED_NAME_MAX * MPTCP_SCHED_MAX)

#define MPTCP_SUBFLOWS_MAX	8

struct mptcp_sched_data {
	bool	reinject;
	u8	subflows;
	struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX];
};

struct mptcp_sched_ops {
	int (*get_subflow)(struct mptcp_sock *msk,
			   struct mptcp_sched_data *data);

	char			name[MPTCP_SCHED_NAME_MAX];
	struct module		*owner;
	struct list_head	list;

	void (*init)(struct mptcp_sock *msk);
	void (*release)(struct mptcp_sock *msk);
} ____cacheline_aligned_in_smp;
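
/* Minimal scheduler sketch (illustrative only): always pick the first
 * subflow context.  It assumes helpers provided by the MPTCP core rather
 * than this header, i.e. mptcp_subflow_set_scheduled() to mark the chosen
 * subflow and mptcp_register_scheduler() to hook the ops up:
 *
 *	static int first_get_subflow(struct mptcp_sock *msk,
 *				     struct mptcp_sched_data *data)
 *	{
 *		// assumes at least one subflow context is populated
 *		mptcp_subflow_set_scheduled(data->contexts[0], true);
 *		return 0;
 *	}
 *
 *	static struct mptcp_sched_ops mptcp_sched_first = {
 *		.get_subflow	= first_get_subflow,
 *		.name		= "first",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	// at module init: mptcp_register_scheduler(&mptcp_sched_first);
 */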

#ifdef CONFIG_MPTCP

void mptcp_init(void);

static inline bool sk_is_mptcp(const struct sock *sk)
{
	return tcp_sk(sk)->is_mptcp;
}

static inline bool rsk_is_mptcp(const struct request_sock *req)
{
	return tcp_rsk(req)->is_mptcp;
}

static inline bool rsk_drop_req(const struct request_sock *req)
{
	return tcp_rsk(req)->is_mptcp && tcp_rsk(req)->drop_req;
}

void mptcp_space(const struct sock *ssk, int *space, int *full_space);
bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
		       unsigned int *size, struct mptcp_out_options *opts);
bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
			  struct mptcp_out_options *opts);
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
			       unsigned int *size, unsigned int remaining,
			       struct mptcp_out_options *opts);
bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);

void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
			 struct mptcp_out_options *opts);

void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info);

/* move the skb extension ownership, with the assumption that 'to' is
 * newly allocated
 */
static inline void mptcp_skb_ext_move(struct sk_buff *to,
				      struct sk_buff *from)
{
	if (!skb_ext_exist(from, SKB_EXT_MPTCP))
		return;

	if (WARN_ON_ONCE(to->active_extensions))
		skb_ext_put(to);

	to->active_extensions = from->active_extensions;
	to->extensions = from->extensions;
	from->active_extensions = 0;
}

static inline void mptcp_skb_ext_copy(struct sk_buff *to,
				      struct sk_buff *from)
{
	struct mptcp_ext *from_ext;

	from_ext = skb_ext_find(from, SKB_EXT_MPTCP);
	if (!from_ext)
		return;

	from_ext->frozen = 1;
	skb_ext_copy(to, from);
}

static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
				     const struct mptcp_ext *from_ext)
{
	/* MPTCP always clears the ext when adding it to the skb, so
	 * holes do not bother us here
	 */
	return !from_ext ||
	       (to_ext && from_ext &&
		!memcmp(from_ext, to_ext, sizeof(struct mptcp_ext)));
}

/* check if skbs can be collapsed.
 * MPTCP collapse is allowed if neither @to nor @from carries an mptcp data
 * mapping, or if the extension of @to is the same as @from.
 * Collapsing is not possible if @to lacks an extension, but @from carries one.
 */
static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
					  const struct sk_buff *from)
{
	return mptcp_ext_matches(skb_ext_find(to, SKB_EXT_MPTCP),
				 skb_ext_find(from, SKB_EXT_MPTCP));
}

void mptcp_seq_show(struct seq_file *seq);
int mptcp_subflow_init_cookie_req(struct request_sock *req,
				  const struct sock *sk_listener,
				  struct sk_buff *skb);
struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
					       struct sock *sk_listener,
					       bool attach_listener);

__be32 mptcp_get_reset_option(const struct sk_buff *skb);

static inline __be32 mptcp_reset_option(const struct sk_buff *skb)
{
	if (skb_ext_exist(skb, SKB_EXT_MPTCP))
		return mptcp_get_reset_option(skb);

	return htonl(0u);
}
#else

static inline void mptcp_init(void)
{
}

static inline bool sk_is_mptcp(const struct sock *sk)
{
	return false;
}

static inline bool rsk_is_mptcp(const struct request_sock *req)
{
	return false;
}

static inline bool rsk_drop_req(const struct request_sock *req)
{
	return false;
}

static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
				     unsigned int *size,
				     struct mptcp_out_options *opts)
{
	return false;
}

static inline bool mptcp_synack_options(const struct request_sock *req,
					unsigned int *size,
					struct mptcp_out_options *opts)
{
	return false;
}

static inline bool mptcp_established_options(struct sock *sk,
					     struct sk_buff *skb,
					     unsigned int *size,
					     unsigned int remaining,
					     struct mptcp_out_options *opts)
{
	return false;
}

static inline bool mptcp_incoming_options(struct sock *sk,
					  struct sk_buff *skb)
{
	return true;
}

static inline void mptcp_skb_ext_move(struct sk_buff *to,
				      const struct sk_buff *from)
{
}

static inline void mptcp_skb_ext_copy(struct sk_buff *to,
				      struct sk_buff *from)
{
}

static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
					  const struct sk_buff *from)
{
	return true;
}

static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { }
static inline void mptcp_seq_show(struct seq_file *seq) { }

static inline int mptcp_subflow_init_cookie_req(struct request_sock *req,
						const struct sock *sk_listener,
						struct sk_buff *skb)
{
	return 0; /* TCP fallback */
}

static inline struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
							     struct sock *sk_listener,
							     bool attach_listener)
{
	return NULL;
}

static inline __be32 mptcp_reset_option(const struct sk_buff *skb) { return htonl(0u); }
#endif /* CONFIG_MPTCP */

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
int mptcpv6_init(void);
void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
#elif IS_ENABLED(CONFIG_IPV6)
static inline int mptcpv6_init(void) { return 0; }
static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
#endif

#if defined(CONFIG_MPTCP) && defined(CONFIG_BPF_SYSCALL)
struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
#else
static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
#endif

#if !IS_ENABLED(CONFIG_MPTCP)
struct mptcp_sock { };
#endif

#endif /* __NET_MPTCP_H */