// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
2020-04-03 17:14:08 +08:00
# define pr_fmt(fmt) "MPTCP: " fmt
2020-03-27 14:48:38 -07:00
# include <linux/kernel.h>
# include <net/tcp.h>
# include <net/mptcp.h>
# include "protocol.h"
/* path manager command handlers */
/* Queue an ADD_ADDR signal for @addr on @msk.
 *
 * Caller must hold the PM lock. Only one address signal may be in
 * flight at a time: returns -EINVAL if one is already pending.
 * @echo marks the signal as an echo of a peer announce; @port requests
 * that the port be carried in the option.
 */
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr,
			   bool echo, bool port)
{
	u8 signal = READ_ONCE(msk->pm.addr_signal);

	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	lockdep_assert_held(&msk->pm.lock);

	if (signal) {
		pr_warn("addr_signal error, add_addr=%d", signal);
		return -EINVAL;
	}

	msk->pm.local = *addr;

	signal |= BIT(MPTCP_ADD_ADDR_SIGNAL);
	if (echo)
		signal |= BIT(MPTCP_ADD_ADDR_ECHO);
	if (addr->family == AF_INET6)
		signal |= BIT(MPTCP_ADD_ADDR_IPV6);
	if (port)
		signal |= BIT(MPTCP_ADD_ADDR_PORT);

	/* publish the pending signal for the option-writing path */
	WRITE_ONCE(msk->pm.addr_signal, signal);
	return 0;
}
2021-03-12 17:16:12 -08:00
int mptcp_pm_remove_addr ( struct mptcp_sock * msk , const struct mptcp_rm_list * rm_list )
2020-03-27 14:48:38 -07:00
{
2020-12-09 15:51:27 -08:00
u8 rm_addr = READ_ONCE ( msk - > pm . addr_signal ) ;
2020-12-09 15:51:26 -08:00
2021-03-12 17:16:12 -08:00
pr_debug ( " msk=%p, rm_list_nr=%d " , msk , rm_list - > nr ) ;
2020-09-24 08:29:54 +08:00
2020-12-09 15:51:26 -08:00
if ( rm_addr ) {
pr_warn ( " addr_signal error, rm_addr=%d " , rm_addr ) ;
return - EINVAL ;
}
2021-03-12 17:16:12 -08:00
msk - > pm . rm_list_tx = * rm_list ;
2020-12-09 15:51:26 -08:00
rm_addr | = BIT ( MPTCP_RM_ADDR_SIGNAL ) ;
2020-12-09 15:51:27 -08:00
WRITE_ONCE ( msk - > pm . addr_signal , rm_addr ) ;
2020-09-24 08:29:54 +08:00
return 0 ;
2020-03-27 14:48:38 -07:00
}
2020-09-24 08:29:55 +08:00
int mptcp_pm_remove_subflow ( struct mptcp_sock * msk , u8 local_id )
2020-03-27 14:48:38 -07:00
{
2020-09-24 08:29:55 +08:00
pr_debug ( " msk=%p, local_id=%d " , msk , local_id ) ;
spin_lock_bh ( & msk - > pm . lock ) ;
mptcp_pm_nl_rm_subflow_received ( msk , local_id ) ;
spin_unlock_bh ( & msk - > pm . lock ) ;
return 0 ;
2020-03-27 14:48:38 -07:00
}
/* path manager event handlers */
2021-02-12 15:59:58 -08:00
void mptcp_pm_new_connection ( struct mptcp_sock * msk , const struct sock * ssk , int server_side )
2020-03-27 14:48:38 -07:00
{
struct mptcp_pm_data * pm = & msk - > pm ;
pr_debug ( " msk=%p, token=%u side=%d " , msk , msk - > token , server_side ) ;
WRITE_ONCE ( pm - > server_side , server_side ) ;
2021-02-12 16:00:01 -08:00
mptcp_event ( MPTCP_EVENT_CREATED , msk , ssk , GFP_ATOMIC ) ;
2020-03-27 14:48:38 -07:00
}
bool mptcp_pm_allow_new_subflow ( struct mptcp_sock * msk )
{
2020-03-27 14:48:41 -07:00
struct mptcp_pm_data * pm = & msk - > pm ;
2021-02-01 15:09:07 -08:00
unsigned int subflows_max ;
2020-09-24 08:29:53 +08:00
int ret = 0 ;
2020-03-27 14:48:41 -07:00
2021-02-01 15:09:07 -08:00
subflows_max = mptcp_pm_get_subflows_max ( msk ) ;
2020-03-27 14:48:41 -07:00
pr_debug ( " msk=%p subflows=%d max=%d allow=%d " , msk , pm - > subflows ,
2021-02-01 15:09:07 -08:00
subflows_max , READ_ONCE ( pm - > accept_subflow ) ) ;
2020-03-27 14:48:41 -07:00
/* try to avoid acquiring the lock below */
if ( ! READ_ONCE ( pm - > accept_subflow ) )
return false ;
spin_lock_bh ( & pm - > lock ) ;
2020-09-24 08:29:53 +08:00
if ( READ_ONCE ( pm - > accept_subflow ) ) {
2021-02-01 15:09:07 -08:00
ret = pm - > subflows < subflows_max ;
if ( ret & & + + pm - > subflows = = subflows_max )
2020-09-24 08:29:53 +08:00
WRITE_ONCE ( pm - > accept_subflow , false ) ;
}
2020-03-27 14:48:41 -07:00
spin_unlock_bh ( & pm - > lock ) ;
return ret ;
}
/* return true if the new status bit is currently cleared, that is, this event
* can be server , eventually by an already scheduled work
*/
static bool mptcp_pm_schedule_work ( struct mptcp_sock * msk ,
enum mptcp_pm_status new_status )
{
pr_debug ( " msk=%p status=%x new=%lx " , msk , msk - > pm . status ,
BIT ( new_status ) ) ;
if ( msk - > pm . status & BIT ( new_status ) )
return false ;
msk - > pm . status | = BIT ( new_status ) ;
2020-11-16 10:48:05 +01:00
mptcp_schedule_work ( ( struct sock * ) msk ) ;
2020-03-27 14:48:41 -07:00
return true ;
2020-03-27 14:48:38 -07:00
}
2021-02-12 15:59:58 -08:00
void mptcp_pm_fully_established ( struct mptcp_sock * msk , const struct sock * ssk , gfp_t gfp )
2020-03-27 14:48:38 -07:00
{
2020-03-27 14:48:41 -07:00
struct mptcp_pm_data * pm = & msk - > pm ;
2021-02-12 16:00:01 -08:00
bool announce = false ;
2020-03-27 14:48:41 -07:00
2020-03-27 14:48:38 -07:00
pr_debug ( " msk=%p " , msk ) ;
2020-03-27 14:48:41 -07:00
spin_lock_bh ( & pm - > lock ) ;
2020-12-09 12:03:29 +01:00
/* mptcp_pm_fully_established() can be invoked by multiple
* racing paths - accept ( ) and check_fully_established ( )
* be sure to serve this event only once .
*/
if ( READ_ONCE ( pm - > work_pending ) & &
! ( msk - > pm . status & BIT ( MPTCP_PM_ALREADY_ESTABLISHED ) ) )
2020-03-27 14:48:41 -07:00
mptcp_pm_schedule_work ( msk , MPTCP_PM_ESTABLISHED ) ;
2021-02-12 16:00:01 -08:00
if ( ( msk - > pm . status & BIT ( MPTCP_PM_ALREADY_ESTABLISHED ) ) = = 0 )
announce = true ;
msk - > pm . status | = BIT ( MPTCP_PM_ALREADY_ESTABLISHED ) ;
2020-03-27 14:48:41 -07:00
spin_unlock_bh ( & pm - > lock ) ;
2021-02-12 16:00:01 -08:00
if ( announce )
mptcp_event ( MPTCP_EVENT_ESTABLISHED , msk , ssk , gfp ) ;
2020-03-27 14:48:38 -07:00
}
/* Connection-closed PM hook; currently only traces the event. */
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}
void mptcp_pm_subflow_established ( struct mptcp_sock * msk ,
struct mptcp_subflow_context * subflow )
{
2020-03-27 14:48:41 -07:00
struct mptcp_pm_data * pm = & msk - > pm ;
2020-03-27 14:48:38 -07:00
pr_debug ( " msk=%p " , msk ) ;
2020-03-27 14:48:41 -07:00
if ( ! READ_ONCE ( pm - > work_pending ) )
return ;
spin_lock_bh ( & pm - > lock ) ;
if ( READ_ONCE ( pm - > work_pending ) )
mptcp_pm_schedule_work ( msk , MPTCP_PM_SUBFLOW_ESTABLISHED ) ;
spin_unlock_bh ( & pm - > lock ) ;
2020-03-27 14:48:38 -07:00
}
/* Subflow-closed PM hook; currently only traces the event. */
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}
void mptcp_pm_add_addr_received ( struct mptcp_sock * msk ,
const struct mptcp_addr_info * addr )
{
2020-03-27 14:48:41 -07:00
struct mptcp_pm_data * pm = & msk - > pm ;
pr_debug ( " msk=%p remote_id=%d accept=%d " , msk , addr - > id ,
READ_ONCE ( pm - > accept_addr ) ) ;
2021-02-12 16:00:01 -08:00
mptcp_event_addr_announced ( msk , addr ) ;
2020-03-27 14:48:41 -07:00
spin_lock_bh ( & pm - > lock ) ;
2020-11-19 11:46:00 -08:00
if ( ! READ_ONCE ( pm - > accept_addr ) ) {
2020-12-09 15:51:24 -08:00
mptcp_pm_announce_addr ( msk , addr , true , addr - > port ) ;
2020-11-19 11:46:00 -08:00
mptcp_pm_add_addr_send_ack ( msk ) ;
} else if ( mptcp_pm_schedule_work ( msk , MPTCP_PM_ADD_ADDR_RECEIVED ) ) {
2020-03-27 14:48:41 -07:00
pm - > remote = * addr ;
2020-11-19 11:46:00 -08:00
}
2020-03-27 14:48:41 -07:00
spin_unlock_bh ( & pm - > lock ) ;
2020-11-19 11:46:00 -08:00
}
/* Schedule an ack for a pending ADD_ADDR signal, if any. */
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
	if (!mptcp_pm_should_add_signal(msk))
		return;

	mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}
2021-03-12 17:16:13 -08:00
void mptcp_pm_rm_addr_received ( struct mptcp_sock * msk ,
const struct mptcp_rm_list * rm_list )
2020-09-24 08:29:49 +08:00
{
struct mptcp_pm_data * pm = & msk - > pm ;
2021-03-12 17:16:13 -08:00
u8 i ;
2020-09-24 08:29:49 +08:00
2021-03-12 17:16:13 -08:00
pr_debug ( " msk=%p remote_ids_nr=%d " , msk , rm_list - > nr ) ;
2020-09-24 08:29:49 +08:00
2021-03-12 17:16:13 -08:00
for ( i = 0 ; i < rm_list - > nr ; i + + )
mptcp_event_addr_removed ( msk , rm_list - > ids [ i ] ) ;
2021-02-12 16:00:01 -08:00
2020-09-24 08:29:49 +08:00
spin_lock_bh ( & pm - > lock ) ;
mptcp_pm_schedule_work ( msk , MPTCP_PM_RM_ADDR_RECEIVED ) ;
2021-03-12 17:16:14 -08:00
pm - > rm_list_rx = * rm_list ;
2020-09-24 08:29:49 +08:00
spin_unlock_bh ( & pm - > lock ) ;
}
2021-01-08 16:47:58 -08:00
void mptcp_pm_mp_prio_received ( struct sock * sk , u8 bkup )
{
struct mptcp_subflow_context * subflow = mptcp_subflow_ctx ( sk ) ;
pr_debug ( " subflow->backup=%d, bkup=%d \n " , subflow - > backup , bkup ) ;
subflow - > backup = bkup ;
2021-02-12 16:00:01 -08:00
mptcp_event ( MPTCP_EVENT_SUB_PRIORITY , mptcp_sk ( subflow - > conn ) , sk , GFP_ATOMIC ) ;
2021-01-08 16:47:58 -08:00
}
2020-03-27 14:48:38 -07:00
/* path manager helpers */
2020-09-24 08:29:47 +08:00
bool mptcp_pm_add_addr_signal ( struct mptcp_sock * msk , unsigned int remaining ,
2020-12-09 15:51:22 -08:00
struct mptcp_addr_info * saddr , bool * echo , bool * port )
2020-03-27 14:48:38 -07:00
{
2020-03-27 14:48:41 -07:00
int ret = false ;
spin_lock_bh ( & msk - > pm . lock ) ;
/* double check after the lock is acquired */
2020-09-24 08:29:47 +08:00
if ( ! mptcp_pm_should_add_signal ( msk ) )
2020-03-27 14:48:41 -07:00
goto out_unlock ;
2020-11-19 11:45:59 -08:00
* echo = mptcp_pm_should_add_signal_echo ( msk ) ;
2020-12-09 15:51:22 -08:00
* port = mptcp_pm_should_add_signal_port ( msk ) ;
2020-10-03 17:36:56 +02:00
2020-12-09 15:51:22 -08:00
if ( remaining < mptcp_add_addr_len ( msk - > pm . local . family , * echo , * port ) )
2020-03-27 14:48:41 -07:00
goto out_unlock ;
* saddr = msk - > pm . local ;
2020-12-09 15:51:27 -08:00
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
2020-03-27 14:48:41 -07:00
ret = true ;
out_unlock :
spin_unlock_bh ( & msk - > pm . lock ) ;
return ret ;
2020-03-27 14:48:38 -07:00
}
2020-09-24 08:29:48 +08:00
bool mptcp_pm_rm_addr_signal ( struct mptcp_sock * msk , unsigned int remaining ,
2021-03-12 17:16:11 -08:00
struct mptcp_rm_list * rm_list )
2020-09-24 08:29:48 +08:00
{
2021-03-12 17:16:12 -08:00
int ret = false , len ;
2020-09-24 08:29:48 +08:00
spin_lock_bh ( & msk - > pm . lock ) ;
/* double check after the lock is acquired */
if ( ! mptcp_pm_should_rm_signal ( msk ) )
goto out_unlock ;
2021-03-12 17:16:12 -08:00
len = mptcp_rm_addr_len ( & msk - > pm . rm_list_tx ) ;
if ( len < 0 ) {
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
goto out_unlock ;
}
if ( remaining < len )
2020-09-24 08:29:48 +08:00
goto out_unlock ;
2021-03-12 17:16:12 -08:00
* rm_list = msk - > pm . rm_list_tx ;
2020-12-09 15:51:27 -08:00
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
2020-09-24 08:29:48 +08:00
ret = true ;
out_unlock :
spin_unlock_bh ( & msk - > pm . lock ) ;
return ret ;
}
/* Resolve the local address id for @skc via the netlink PM. */
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
void mptcp_pm_data_init ( struct mptcp_sock * msk )
{
msk - > pm . add_addr_signaled = 0 ;
msk - > pm . add_addr_accepted = 0 ;
msk - > pm . local_addr_used = 0 ;
msk - > pm . subflows = 0 ;
2021-03-12 17:16:12 -08:00
msk - > pm . rm_list_tx . nr = 0 ;
2021-03-12 17:16:14 -08:00
msk - > pm . rm_list_rx . nr = 0 ;
2020-03-27 14:48:38 -07:00
WRITE_ONCE ( msk - > pm . work_pending , false ) ;
2020-12-09 15:51:27 -08:00
WRITE_ONCE ( msk - > pm . addr_signal , 0 ) ;
2020-03-27 14:48:38 -07:00
WRITE_ONCE ( msk - > pm . accept_addr , false ) ;
WRITE_ONCE ( msk - > pm . accept_subflow , false ) ;
msk - > pm . status = 0 ;
spin_lock_init ( & msk - > pm . lock ) ;
2020-09-24 08:29:54 +08:00
INIT_LIST_HEAD ( & msk - > pm . anno_list ) ;
2020-03-27 14:48:51 -07:00
mptcp_pm_nl_data_init ( msk ) ;
2020-03-27 14:48:38 -07:00
}
2020-06-26 19:29:59 +02:00
void __init mptcp_pm_init ( void )
2020-03-27 14:48:38 -07:00
{
2020-03-27 14:48:51 -07:00
mptcp_pm_nl_init ( ) ;
2020-03-27 14:48:38 -07:00
}