// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"
/* path manager command handlers */
int mptcp_pm_announce_addr ( struct mptcp_sock * msk ,
2020-09-24 03:29:50 +03:00
const struct mptcp_addr_info * addr ,
2021-03-26 21:26:31 +03:00
bool echo )
2020-03-28 00:48:38 +03:00
{
2020-12-10 02:51:27 +03:00
u8 add_addr = READ_ONCE ( msk - > pm . addr_signal ) ;
2020-11-19 22:45:59 +03:00
2020-03-28 00:48:41 +03:00
pr_debug ( " msk=%p, local_id=%d " , msk , addr - > id ) ;
2021-02-05 02:23:30 +03:00
lockdep_assert_held ( & msk - > pm . lock ) ;
2020-12-10 02:51:26 +03:00
if ( add_addr ) {
pr_warn ( " addr_signal error, add_addr=%d " , add_addr ) ;
return - EINVAL ;
}
2020-03-28 00:48:41 +03:00
msk - > pm . local = * addr ;
2020-11-19 22:45:59 +03:00
add_addr | = BIT ( MPTCP_ADD_ADDR_SIGNAL ) ;
if ( echo )
add_addr | = BIT ( MPTCP_ADD_ADDR_ECHO ) ;
2020-11-19 22:46:00 +03:00
if ( addr - > family = = AF_INET6 )
add_addr | = BIT ( MPTCP_ADD_ADDR_IPV6 ) ;
2021-03-26 21:26:31 +03:00
if ( addr - > port )
2020-12-10 02:51:24 +03:00
add_addr | = BIT ( MPTCP_ADD_ADDR_PORT ) ;
2020-12-10 02:51:27 +03:00
WRITE_ONCE ( msk - > pm . addr_signal , add_addr ) ;
2020-03-28 00:48:41 +03:00
return 0 ;
2020-03-28 00:48:38 +03:00
}
2021-03-13 04:16:12 +03:00
int mptcp_pm_remove_addr ( struct mptcp_sock * msk , const struct mptcp_rm_list * rm_list )
2020-03-28 00:48:38 +03:00
{
2020-12-10 02:51:27 +03:00
u8 rm_addr = READ_ONCE ( msk - > pm . addr_signal ) ;
2020-12-10 02:51:26 +03:00
2021-03-13 04:16:12 +03:00
pr_debug ( " msk=%p, rm_list_nr=%d " , msk , rm_list - > nr ) ;
2020-09-24 03:29:54 +03:00
2020-12-10 02:51:26 +03:00
if ( rm_addr ) {
pr_warn ( " addr_signal error, rm_addr=%d " , rm_addr ) ;
return - EINVAL ;
}
2021-03-13 04:16:12 +03:00
msk - > pm . rm_list_tx = * rm_list ;
2020-12-10 02:51:26 +03:00
rm_addr | = BIT ( MPTCP_RM_ADDR_SIGNAL ) ;
2020-12-10 02:51:27 +03:00
WRITE_ONCE ( msk - > pm . addr_signal , rm_addr ) ;
2021-03-26 21:26:41 +03:00
mptcp_pm_nl_addr_send_ack ( msk ) ;
2020-09-24 03:29:54 +03:00
return 0 ;
2020-03-28 00:48:38 +03:00
}
2021-03-13 04:16:16 +03:00
int mptcp_pm_remove_subflow ( struct mptcp_sock * msk , const struct mptcp_rm_list * rm_list )
2020-03-28 00:48:38 +03:00
{
2021-03-13 04:16:16 +03:00
pr_debug ( " msk=%p, rm_list_nr=%d " , msk , rm_list - > nr ) ;
2020-09-24 03:29:55 +03:00
spin_lock_bh ( & msk - > pm . lock ) ;
2021-03-13 04:16:16 +03:00
mptcp_pm_nl_rm_subflow_received ( msk , rm_list ) ;
2020-09-24 03:29:55 +03:00
spin_unlock_bh ( & msk - > pm . lock ) ;
return 0 ;
2020-03-28 00:48:38 +03:00
}
/* path manager event handlers */
2021-02-13 02:59:58 +03:00
void mptcp_pm_new_connection ( struct mptcp_sock * msk , const struct sock * ssk , int server_side )
2020-03-28 00:48:38 +03:00
{
struct mptcp_pm_data * pm = & msk - > pm ;
pr_debug ( " msk=%p, token=%u side=%d " , msk , msk - > token , server_side ) ;
WRITE_ONCE ( pm - > server_side , server_side ) ;
2021-02-13 03:00:01 +03:00
mptcp_event ( MPTCP_EVENT_CREATED , msk , ssk , GFP_ATOMIC ) ;
2020-03-28 00:48:38 +03:00
}
bool mptcp_pm_allow_new_subflow ( struct mptcp_sock * msk )
{
2020-03-28 00:48:41 +03:00
struct mptcp_pm_data * pm = & msk - > pm ;
2021-02-02 02:09:07 +03:00
unsigned int subflows_max ;
2020-09-24 03:29:53 +03:00
int ret = 0 ;
2020-03-28 00:48:41 +03:00
2021-02-02 02:09:07 +03:00
subflows_max = mptcp_pm_get_subflows_max ( msk ) ;
2020-03-28 00:48:41 +03:00
pr_debug ( " msk=%p subflows=%d max=%d allow=%d " , msk , pm - > subflows ,
2021-02-02 02:09:07 +03:00
subflows_max , READ_ONCE ( pm - > accept_subflow ) ) ;
2020-03-28 00:48:41 +03:00
/* try to avoid acquiring the lock below */
if ( ! READ_ONCE ( pm - > accept_subflow ) )
return false ;
spin_lock_bh ( & pm - > lock ) ;
2020-09-24 03:29:53 +03:00
if ( READ_ONCE ( pm - > accept_subflow ) ) {
2021-02-02 02:09:07 +03:00
ret = pm - > subflows < subflows_max ;
if ( ret & & + + pm - > subflows = = subflows_max )
2020-09-24 03:29:53 +03:00
WRITE_ONCE ( pm - > accept_subflow , false ) ;
}
2020-03-28 00:48:41 +03:00
spin_unlock_bh ( & pm - > lock ) ;
return ret ;
}
/* return true if the new status bit is currently cleared, that is, this event
* can be server , eventually by an already scheduled work
*/
static bool mptcp_pm_schedule_work ( struct mptcp_sock * msk ,
enum mptcp_pm_status new_status )
{
pr_debug ( " msk=%p status=%x new=%lx " , msk , msk - > pm . status ,
BIT ( new_status ) ) ;
if ( msk - > pm . status & BIT ( new_status ) )
return false ;
msk - > pm . status | = BIT ( new_status ) ;
2020-11-16 12:48:05 +03:00
mptcp_schedule_work ( ( struct sock * ) msk ) ;
2020-03-28 00:48:41 +03:00
return true ;
2020-03-28 00:48:38 +03:00
}
2021-02-13 02:59:58 +03:00
void mptcp_pm_fully_established ( struct mptcp_sock * msk , const struct sock * ssk , gfp_t gfp )
2020-03-28 00:48:38 +03:00
{
2020-03-28 00:48:41 +03:00
struct mptcp_pm_data * pm = & msk - > pm ;
2021-02-13 03:00:01 +03:00
bool announce = false ;
2020-03-28 00:48:41 +03:00
2020-03-28 00:48:38 +03:00
pr_debug ( " msk=%p " , msk ) ;
2020-03-28 00:48:41 +03:00
spin_lock_bh ( & pm - > lock ) ;
2020-12-09 14:03:29 +03:00
/* mptcp_pm_fully_established() can be invoked by multiple
* racing paths - accept ( ) and check_fully_established ( )
* be sure to serve this event only once .
*/
if ( READ_ONCE ( pm - > work_pending ) & &
! ( msk - > pm . status & BIT ( MPTCP_PM_ALREADY_ESTABLISHED ) ) )
2020-03-28 00:48:41 +03:00
mptcp_pm_schedule_work ( msk , MPTCP_PM_ESTABLISHED ) ;
2021-02-13 03:00:01 +03:00
if ( ( msk - > pm . status & BIT ( MPTCP_PM_ALREADY_ESTABLISHED ) ) = = 0 )
announce = true ;
msk - > pm . status | = BIT ( MPTCP_PM_ALREADY_ESTABLISHED ) ;
2020-03-28 00:48:41 +03:00
spin_unlock_bh ( & pm - > lock ) ;
2021-02-13 03:00:01 +03:00
if ( announce )
mptcp_event ( MPTCP_EVENT_ESTABLISHED , msk , ssk , gfp ) ;
2020-03-28 00:48:38 +03:00
}
void mptcp_pm_connection_closed ( struct mptcp_sock * msk )
{
pr_debug ( " msk=%p " , msk ) ;
}
2021-03-26 21:26:33 +03:00
void mptcp_pm_subflow_established ( struct mptcp_sock * msk )
2020-03-28 00:48:38 +03:00
{
2020-03-28 00:48:41 +03:00
struct mptcp_pm_data * pm = & msk - > pm ;
2020-03-28 00:48:38 +03:00
pr_debug ( " msk=%p " , msk ) ;
2020-03-28 00:48:41 +03:00
if ( ! READ_ONCE ( pm - > work_pending ) )
return ;
spin_lock_bh ( & pm - > lock ) ;
if ( READ_ONCE ( pm - > work_pending ) )
mptcp_pm_schedule_work ( msk , MPTCP_PM_SUBFLOW_ESTABLISHED ) ;
spin_unlock_bh ( & pm - > lock ) ;
2020-03-28 00:48:38 +03:00
}
void mptcp_pm_subflow_closed ( struct mptcp_sock * msk , u8 id )
{
pr_debug ( " msk=%p " , msk ) ;
}
void mptcp_pm_add_addr_received ( struct mptcp_sock * msk ,
const struct mptcp_addr_info * addr )
{
2020-03-28 00:48:41 +03:00
struct mptcp_pm_data * pm = & msk - > pm ;
pr_debug ( " msk=%p remote_id=%d accept=%d " , msk , addr - > id ,
READ_ONCE ( pm - > accept_addr ) ) ;
2021-02-13 03:00:01 +03:00
mptcp_event_addr_announced ( msk , addr ) ;
2020-03-28 00:48:41 +03:00
spin_lock_bh ( & pm - > lock ) ;
2020-11-19 22:46:00 +03:00
if ( ! READ_ONCE ( pm - > accept_addr ) ) {
2021-03-26 21:26:31 +03:00
mptcp_pm_announce_addr ( msk , addr , true ) ;
2020-11-19 22:46:00 +03:00
mptcp_pm_add_addr_send_ack ( msk ) ;
} else if ( mptcp_pm_schedule_work ( msk , MPTCP_PM_ADD_ADDR_RECEIVED ) ) {
2020-03-28 00:48:41 +03:00
pm - > remote = * addr ;
2020-11-19 22:46:00 +03:00
}
2020-03-28 00:48:41 +03:00
spin_unlock_bh ( & pm - > lock ) ;
2020-11-19 22:46:00 +03:00
}
2021-03-26 21:26:38 +03:00
void mptcp_pm_add_addr_echoed ( struct mptcp_sock * msk ,
struct mptcp_addr_info * addr )
{
struct mptcp_pm_data * pm = & msk - > pm ;
pr_debug ( " msk=%p " , msk ) ;
spin_lock_bh ( & pm - > lock ) ;
if ( mptcp_lookup_anno_list_by_saddr ( msk , addr ) & & READ_ONCE ( pm - > work_pending ) )
mptcp_pm_schedule_work ( msk , MPTCP_PM_SUBFLOW_ESTABLISHED ) ;
spin_unlock_bh ( & pm - > lock ) ;
}
2020-11-19 22:46:00 +03:00
void mptcp_pm_add_addr_send_ack ( struct mptcp_sock * msk )
{
2021-02-02 02:09:09 +03:00
if ( ! mptcp_pm_should_add_signal ( msk ) )
2020-11-19 22:46:00 +03:00
return ;
mptcp_pm_schedule_work ( msk , MPTCP_PM_ADD_ADDR_SEND_ACK ) ;
2020-03-28 00:48:38 +03:00
}
2021-03-13 04:16:13 +03:00
void mptcp_pm_rm_addr_received ( struct mptcp_sock * msk ,
const struct mptcp_rm_list * rm_list )
2020-09-24 03:29:49 +03:00
{
struct mptcp_pm_data * pm = & msk - > pm ;
2021-03-13 04:16:13 +03:00
u8 i ;
2020-09-24 03:29:49 +03:00
2021-03-13 04:16:13 +03:00
pr_debug ( " msk=%p remote_ids_nr=%d " , msk , rm_list - > nr ) ;
2020-09-24 03:29:49 +03:00
2021-03-13 04:16:13 +03:00
for ( i = 0 ; i < rm_list - > nr ; i + + )
mptcp_event_addr_removed ( msk , rm_list - > ids [ i ] ) ;
2021-02-13 03:00:01 +03:00
2020-09-24 03:29:49 +03:00
spin_lock_bh ( & pm - > lock ) ;
mptcp_pm_schedule_work ( msk , MPTCP_PM_RM_ADDR_RECEIVED ) ;
2021-03-13 04:16:14 +03:00
pm - > rm_list_rx = * rm_list ;
2020-09-24 03:29:49 +03:00
spin_unlock_bh ( & pm - > lock ) ;
}
2021-01-09 03:47:58 +03:00
void mptcp_pm_mp_prio_received ( struct sock * sk , u8 bkup )
{
struct mptcp_subflow_context * subflow = mptcp_subflow_ctx ( sk ) ;
pr_debug ( " subflow->backup=%d, bkup=%d \n " , subflow - > backup , bkup ) ;
subflow - > backup = bkup ;
2021-02-13 03:00:01 +03:00
mptcp_event ( MPTCP_EVENT_SUB_PRIORITY , mptcp_sk ( subflow - > conn ) , sk , GFP_ATOMIC ) ;
2021-01-09 03:47:58 +03:00
}
2020-03-28 00:48:38 +03:00
/* path manager helpers */
2021-08-24 04:05:39 +03:00
bool mptcp_pm_add_addr_signal ( struct mptcp_sock * msk , struct sk_buff * skb ,
unsigned int opt_size , unsigned int remaining ,
struct mptcp_addr_info * saddr , bool * echo ,
bool * port , bool * drop_other_suboptions )
2020-03-28 00:48:38 +03:00
{
2020-03-28 00:48:41 +03:00
int ret = false ;
spin_lock_bh ( & msk - > pm . lock ) ;
/* double check after the lock is acquired */
2020-09-24 03:29:47 +03:00
if ( ! mptcp_pm_should_add_signal ( msk ) )
2020-03-28 00:48:41 +03:00
goto out_unlock ;
2021-08-24 04:05:39 +03:00
/* always drop every other options for pure ack ADD_ADDR; this is a
* plain dup - ack from TCP perspective . The other MPTCP - relevant info ,
* if any , will be carried by the ' original ' TCP ack
*/
if ( skb & & skb_is_tcp_pure_ack ( skb ) ) {
remaining + = opt_size ;
* drop_other_suboptions = true ;
}
2020-11-19 22:45:59 +03:00
* echo = mptcp_pm_should_add_signal_echo ( msk ) ;
2020-12-10 02:51:22 +03:00
* port = mptcp_pm_should_add_signal_port ( msk ) ;
2020-10-03 18:36:56 +03:00
2020-12-10 02:51:22 +03:00
if ( remaining < mptcp_add_addr_len ( msk - > pm . local . family , * echo , * port ) )
2020-03-28 00:48:41 +03:00
goto out_unlock ;
* saddr = msk - > pm . local ;
2020-12-10 02:51:27 +03:00
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
2020-03-28 00:48:41 +03:00
ret = true ;
out_unlock :
spin_unlock_bh ( & msk - > pm . lock ) ;
return ret ;
2020-03-28 00:48:38 +03:00
}
2020-09-24 03:29:48 +03:00
bool mptcp_pm_rm_addr_signal ( struct mptcp_sock * msk , unsigned int remaining ,
2021-03-13 04:16:11 +03:00
struct mptcp_rm_list * rm_list )
2020-09-24 03:29:48 +03:00
{
2021-03-13 04:16:12 +03:00
int ret = false , len ;
2020-09-24 03:29:48 +03:00
spin_lock_bh ( & msk - > pm . lock ) ;
/* double check after the lock is acquired */
if ( ! mptcp_pm_should_rm_signal ( msk ) )
goto out_unlock ;
2021-03-13 04:16:12 +03:00
len = mptcp_rm_addr_len ( & msk - > pm . rm_list_tx ) ;
if ( len < 0 ) {
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
goto out_unlock ;
}
if ( remaining < len )
2020-09-24 03:29:48 +03:00
goto out_unlock ;
2021-03-13 04:16:12 +03:00
* rm_list = msk - > pm . rm_list_tx ;
2020-12-10 02:51:27 +03:00
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
2020-09-24 03:29:48 +03:00
ret = true ;
out_unlock :
spin_unlock_bh ( & msk - > pm . lock ) ;
return ret ;
}
2020-03-28 00:48:38 +03:00
int mptcp_pm_get_local_id ( struct mptcp_sock * msk , struct sock_common * skc )
{
2020-03-28 00:48:51 +03:00
return mptcp_pm_nl_get_local_id ( msk , skc ) ;
2020-03-28 00:48:38 +03:00
}
2021-08-14 01:15:42 +03:00
void mptcp_pm_subflow_chk_stale ( const struct mptcp_sock * msk , struct sock * ssk )
{
struct mptcp_subflow_context * subflow = mptcp_subflow_ctx ( ssk ) ;
u32 rcv_tstamp = READ_ONCE ( tcp_sk ( ssk ) - > rcv_tstamp ) ;
/* keep track of rtx periods with no progress */
if ( ! subflow - > stale_count ) {
subflow - > stale_rcv_tstamp = rcv_tstamp ;
subflow - > stale_count + + ;
} else if ( subflow - > stale_rcv_tstamp = = rcv_tstamp ) {
if ( subflow - > stale_count < U8_MAX )
subflow - > stale_count + + ;
2021-08-14 01:15:45 +03:00
mptcp_pm_nl_subflow_chk_stale ( msk , ssk ) ;
2021-08-14 01:15:42 +03:00
} else {
subflow - > stale_count = 0 ;
2021-08-14 01:15:45 +03:00
mptcp_subflow_set_active ( subflow ) ;
2021-08-14 01:15:42 +03:00
}
}
2020-03-28 00:48:38 +03:00
void mptcp_pm_data_init ( struct mptcp_sock * msk )
{
msk - > pm . add_addr_signaled = 0 ;
msk - > pm . add_addr_accepted = 0 ;
msk - > pm . local_addr_used = 0 ;
msk - > pm . subflows = 0 ;
2021-03-13 04:16:12 +03:00
msk - > pm . rm_list_tx . nr = 0 ;
2021-03-13 04:16:14 +03:00
msk - > pm . rm_list_rx . nr = 0 ;
2020-03-28 00:48:38 +03:00
WRITE_ONCE ( msk - > pm . work_pending , false ) ;
2020-12-10 02:51:27 +03:00
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
2020-03-28 00:48:38 +03:00
WRITE_ONCE ( msk - > pm . accept_addr , false ) ;
WRITE_ONCE ( msk - > pm . accept_subflow , false ) ;
2021-06-22 22:25:20 +03:00
WRITE_ONCE ( msk - > pm . remote_deny_join_id0 , false ) ;
2020-03-28 00:48:38 +03:00
msk - > pm . status = 0 ;
spin_lock_init ( & msk - > pm . lock ) ;
2020-09-24 03:29:54 +03:00
INIT_LIST_HEAD ( & msk - > pm . anno_list ) ;
2020-03-28 00:48:51 +03:00
mptcp_pm_nl_data_init ( msk ) ;
2020-03-28 00:48:38 +03:00
}
2020-06-26 20:29:59 +03:00
void __init mptcp_pm_init ( void )
2020-03-28 00:48:38 +03:00
{
2020-03-28 00:48:51 +03:00
mptcp_pm_nl_init ( ) ;
2020-03-28 00:48:38 +03:00
}