// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

#include "mib.h"

/* path manager command handlers */
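
/* Queue an ADD_ADDR (or, when @echo is set, its echo) for transmission on
 * this connection. Must be called with the PM lock held; returns -EINVAL
 * if the same kind of signal is already pending.
 */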
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
                           const struct mptcp_addr_info *addr,
                           bool echo)
{
        u8 add_addr = READ_ONCE(msk->pm.addr_signal);

        pr_debug("msk=%p, local_id=%d, echo=%d", msk, addr->id, echo);

        lockdep_assert_held(&msk->pm.lock);

        if (add_addr &
            (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) {
                pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo);
                return -EINVAL;
        }

        if (echo) {
                msk->pm.remote = *addr;
                add_addr |= BIT(MPTCP_ADD_ADDR_ECHO);
        } else {
                msk->pm.local = *addr;
                add_addr |= BIT(MPTCP_ADD_ADDR_SIGNAL);
        }
        WRITE_ONCE(msk->pm.addr_signal, add_addr);
        return 0;
}
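
/* Queue an RM_ADDR carrying the ids in @rm_list, then trigger an ack to
 * transmit it. Returns -EINVAL if an address signal is already pending.
 */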
int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
        u8 rm_addr = READ_ONCE(msk->pm.addr_signal);

        pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

        if (rm_addr) {
                pr_warn("addr_signal error, rm_addr=%d", rm_addr);
                return -EINVAL;
        }

        msk->pm.rm_list_tx = *rm_list;
        rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL);
        WRITE_ONCE(msk->pm.addr_signal, rm_addr);
        mptcp_pm_nl_addr_send_ack(msk);
        return 0;
}
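
/* Close the subflows matching the local ids in @rm_list, under the PM lock. */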
int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list)
{
        pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr);

        spin_lock_bh(&msk->pm.lock);
        mptcp_pm_nl_rm_subflow_received(msk, rm_list);
        spin_unlock_bh(&msk->pm.lock);
        return 0;
}

/* path manager event handlers */
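
/* Record which end of the connection we are (@server_side) and emit the
 * MPTCP_EVENT_CREATED netlink event for the new connection.
 */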
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side)
{
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

        WRITE_ONCE(pm->server_side, server_side);
        mptcp_event(MPTCP_EVENT_CREATED, msk, ssk, GFP_ATOMIC);
}
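
/* Decide whether a new incoming subflow may be accepted: defer to the
 * userspace PM when one is active, otherwise enforce the configured
 * subflow limit and stop accepting once the limit is reached.
 */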
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
        struct mptcp_pm_data *pm = &msk->pm;
        unsigned int subflows_max;
        int ret = 0;

        if (mptcp_pm_is_userspace(msk))
                return mptcp_userspace_pm_active(msk);

        subflows_max = mptcp_pm_get_subflows_max(msk);

        pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
                 subflows_max, READ_ONCE(pm->accept_subflow));

        /* try to avoid acquiring the lock below */
        if (!READ_ONCE(pm->accept_subflow))
                return false;

        spin_lock_bh(&pm->lock);
        if (READ_ONCE(pm->accept_subflow)) {
                ret = pm->subflows < subflows_max;
                if (ret && ++pm->subflows == subflows_max)
                        WRITE_ONCE(pm->accept_subflow, false);
        }
        spin_unlock_bh(&pm->lock);

        return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, eventually by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
                                   enum mptcp_pm_status new_status)
{
        pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
                 BIT(new_status));
        if (msk->pm.status & BIT(new_status))
                return false;

        msk->pm.status |= BIT(new_status);
        mptcp_schedule_work((struct sock *)msk);
        return true;
}
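
/* Schedule the PM worker and emit the MPTCP_EVENT_ESTABLISHED netlink
 * event; racing callers may invoke this more than once, but the event is
 * served only on the first invocation.
 */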
void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk)
{
        struct mptcp_pm_data *pm = &msk->pm;
        bool announce = false;

        pr_debug("msk=%p", msk);

        spin_lock_bh(&pm->lock);

        /* mptcp_pm_fully_established() can be invoked by multiple
         * racing paths - accept() and check_fully_established();
         * be sure to serve this event only once.
         */
        if (READ_ONCE(pm->work_pending) &&
            !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)))
                mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

        if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0)
                announce = true;

        msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED);
        spin_unlock_bh(&pm->lock);

        if (announce)
                mptcp_event(MPTCP_EVENT_ESTABLISHED, msk, ssk, GFP_ATOMIC);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
        pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk)
{
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p", msk);

        if (!READ_ONCE(pm->work_pending))
                return;

        spin_lock_bh(&pm->lock);

        if (READ_ONCE(pm->work_pending))
                mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

        spin_unlock_bh(&pm->lock);
}
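
/* A subflow creation attempt just completed, successfully or not: update
 * the kernel PM subflow accounting and, when more work is pending, let
 * the worker try the next candidate addresses.
 */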
void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
                                 const struct mptcp_subflow_context *subflow)
{
        struct mptcp_pm_data *pm = &msk->pm;
        bool update_subflows;

        update_subflows = (subflow->request_join || subflow->mp_join) &&
                          mptcp_pm_is_kernel(msk);
        if (!READ_ONCE(pm->work_pending) && !update_subflows)
                return;

        spin_lock_bh(&pm->lock);
        if (update_subflows)
                __mptcp_pm_close_subflow(msk);

        /* Even if this subflow is not really established, tell the PM to try
         * to pick the next ones, if possible.
         */
        if (mptcp_pm_nl_check_work_pending(msk))
                mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

        spin_unlock_bh(&pm->lock);
}
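
/* Handle an ADD_ADDR option received from the peer: depending on the PM
 * type and the current accept state, echo it right away, queue it for the
 * PM worker, or account it as dropped.
 */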
void mptcp_pm_add_addr_received(const struct sock *ssk,
                                const struct mptcp_addr_info *addr)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
                 READ_ONCE(pm->accept_addr));

        mptcp_event_addr_announced(ssk, addr);

        spin_lock_bh(&pm->lock);

        if (mptcp_pm_is_userspace(msk)) {
                if (mptcp_userspace_pm_active(msk)) {
                        mptcp_pm_announce_addr(msk, addr, true);
                        mptcp_pm_add_addr_send_ack(msk);
                } else {
                        __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
                }
        } else if (!READ_ONCE(pm->accept_addr)) {
                mptcp_pm_announce_addr(msk, addr, true);
                mptcp_pm_add_addr_send_ack(msk);
        } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
                pm->remote = *addr;
        } else {
                __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
        }

        spin_unlock_bh(&pm->lock);
}
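
/* The peer echoed one of our announced addresses: if it is still on the
 * announce list, let the PM worker try to establish the next subflows.
 */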
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
                              const struct mptcp_addr_info *addr)
{
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p", msk);

        spin_lock_bh(&pm->lock);

        if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending))
                mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

        spin_unlock_bh(&pm->lock);
}

void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
{
        if (!mptcp_pm_should_add_signal(msk))
                return;

        mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
}
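
/* Handle an RM_ADDR option received from the peer: emit a netlink event
 * for each removed id and queue the list for the PM worker, or account
 * the drop.
 */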
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
                               const struct mptcp_rm_list *rm_list)
{
        struct mptcp_pm_data *pm = &msk->pm;
        u8 i;

        pr_debug("msk=%p remote_ids_nr=%d", msk, rm_list->nr);

        for (i = 0; i < rm_list->nr; i++)
                mptcp_event_addr_removed(msk, rm_list->ids[i]);

        spin_lock_bh(&pm->lock);
        if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
                pm->rm_list_rx = *rm_list;
        else
                __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
        spin_unlock_bh(&pm->lock);
}
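
/* Handle an MP_PRIO option: update the subflow backup flag, force the
 * packet scheduler to re-evaluate its subflow choice and emit the
 * MPTCP_EVENT_SUB_PRIORITY netlink event.
 */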
void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;
        struct mptcp_sock *msk;

        pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
        msk = mptcp_sk(sk);
        if (subflow->backup != bkup) {
                subflow->backup = bkup;
                mptcp_data_lock(sk);
                if (!sock_owned_by_user(sk))
                        msk->last_snd = NULL;
                else
                        __set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags);
                mptcp_data_unlock(sk);
        }

        mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}
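
/* Handle an MP_FAIL option: either reply with MP_FAIL and start the
 * fallback to an infinite mapping, or, when this is the reply to our own
 * MP_FAIL, clear the pending fail timeout.
 */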
void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);

        pr_debug("fail_seq=%llu", fail_seq);

        if (!READ_ONCE(msk->allow_infinite_fallback))
                return;

        if (!subflow->fail_tout) {
                pr_debug("send MP_FAIL response and infinite map");

                subflow->send_mp_fail = 1;
                subflow->send_infinite_map = 1;
                tcp_send_ack(sk);
        } else {
                pr_debug("MP_FAIL response received");
                WRITE_ONCE(subflow->fail_tout, 0);
        }
}

/* path manager helpers */
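
/* Fill @addr with the pending ADD_ADDR (or its echo), provided the
 * @remaining option space is large enough; for pure ack skbs all other
 * suboptions can be dropped to make room. Returns true on success.
 */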
bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
                              unsigned int opt_size, unsigned int remaining,
                              struct mptcp_addr_info *addr, bool *echo,
                              bool *drop_other_suboptions)
{
        int ret = false;
        u8 add_addr;
        u8 family;
        bool port;

        spin_lock_bh(&msk->pm.lock);

        /* double check after the lock is acquired */
        if (!mptcp_pm_should_add_signal(msk))
                goto out_unlock;

        /* always drop every other option for pure ack ADD_ADDR; this is a
         * plain dup-ack from TCP perspective. The other MPTCP-relevant info,
         * if any, will be carried by the 'original' TCP ack
         */
        if (skb && skb_is_tcp_pure_ack(skb)) {
                remaining += opt_size;
                *drop_other_suboptions = true;
        }

        *echo = mptcp_pm_should_add_signal_echo(msk);
        port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);

        family = *echo ? msk->pm.remote.family : msk->pm.local.family;
        if (remaining < mptcp_add_addr_len(family, *echo, port))
                goto out_unlock;

        if (*echo) {
                *addr = msk->pm.remote;
                add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_ECHO);
        } else {
                *addr = msk->pm.local;
                add_addr = msk->pm.addr_signal & ~BIT(MPTCP_ADD_ADDR_SIGNAL);
        }
        WRITE_ONCE(msk->pm.addr_signal, add_addr);
        ret = true;

out_unlock:
        spin_unlock_bh(&msk->pm.lock);
        return ret;
}
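
/* Fill @rm_list with the pending RM_ADDR ids when the option fits in the
 * @remaining space; the signal bit is cleared on success and also when
 * the list cannot be encoded at all.
 */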
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
                             struct mptcp_rm_list *rm_list)
{
        int ret = false, len;
        u8 rm_addr;

        spin_lock_bh(&msk->pm.lock);

        /* double check after the lock is acquired */
        if (!mptcp_pm_should_rm_signal(msk))
                goto out_unlock;

        rm_addr = msk->pm.addr_signal & ~BIT(MPTCP_RM_ADDR_SIGNAL);
        len = mptcp_rm_addr_len(&msk->pm.rm_list_tx);
        if (len < 0) {
                WRITE_ONCE(msk->pm.addr_signal, rm_addr);
                goto out_unlock;
        }
        if (remaining < len)
                goto out_unlock;

        *rm_list = msk->pm.rm_list_tx;
        WRITE_ONCE(msk->pm.addr_signal, rm_addr);
        ret = true;

out_unlock:
        spin_unlock_bh(&msk->pm.lock);
        return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
        return mptcp_pm_nl_get_local_id(msk, skc);
}
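
/* Track retransmit periods with no progress: consider the subflow stale
 * when no new data has been received since the previous check, and mark
 * it active again as soon as progress resumes.
 */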
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 rcv_tstamp = READ_ONCE(tcp_sk(ssk)->rcv_tstamp);

        /* keep track of rtx periods with no progress */
        if (!subflow->stale_count) {
                subflow->stale_rcv_tstamp = rcv_tstamp;
                subflow->stale_count++;
        } else if (subflow->stale_rcv_tstamp == rcv_tstamp) {
                if (subflow->stale_count < U8_MAX)
                        subflow->stale_count++;
                mptcp_pm_nl_subflow_chk_stale(msk, ssk);
        } else {
                subflow->stale_count = 0;
                mptcp_subflow_set_active(subflow);
        }
}

/* if sk is an ipv4 socket or is ipv6_only, allow only same-family local and
 * remote addresses; otherwise allow any matching local/remote pair
 */
bool mptcp_pm_addr_families_match(const struct sock *sk,
                                  const struct mptcp_addr_info *loc,
                                  const struct mptcp_addr_info *rem)
{
        bool mptcp_is_v4 = sk->sk_family == AF_INET;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
        bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);

        if (mptcp_is_v4)
                return loc_is_v4 && rem_is_v4;

        if (ipv6_only_sock(sk))
                return !loc_is_v4 && !rem_is_v4;

        return loc_is_v4 == rem_is_v4;
#else
        return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
#endif
}
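
/* Reset the PM state of a (re)starting connection, deriving the PM type
 * from the netns default and the initial work/accept flags from the
 * current limits.
 */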
void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
        u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
        struct mptcp_pm_data *pm = &msk->pm;

        pm->add_addr_signaled = 0;
        pm->add_addr_accepted = 0;
        pm->local_addr_used = 0;
        pm->subflows = 0;
        pm->rm_list_tx.nr = 0;
        pm->rm_list_rx.nr = 0;
        WRITE_ONCE(pm->pm_type, pm_type);

        if (pm_type == MPTCP_PM_TYPE_KERNEL) {
                bool subflows_allowed = !!mptcp_pm_get_subflows_max(msk);

                /* pm->work_pending must only be set to 'true' when
                 * pm->pm_type is set to MPTCP_PM_TYPE_KERNEL
                 */
                WRITE_ONCE(pm->work_pending,
                           (!!mptcp_pm_get_local_addr_max(msk) &&
                            subflows_allowed) ||
                           !!mptcp_pm_get_add_addr_signal_max(msk));
                WRITE_ONCE(pm->accept_addr,
                           !!mptcp_pm_get_add_addr_accept_max(msk) &&
                           subflows_allowed);
                WRITE_ONCE(pm->accept_subflow, subflows_allowed);
        } else {
                WRITE_ONCE(pm->work_pending, 0);
                WRITE_ONCE(pm->accept_addr, 0);
                WRITE_ONCE(pm->accept_subflow, 0);
        }

        WRITE_ONCE(pm->addr_signal, 0);
        WRITE_ONCE(pm->remote_deny_join_id0, false);
        pm->status = 0;

        bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
        spin_lock_init(&msk->pm.lock);
        INIT_LIST_HEAD(&msk->pm.anno_list);
        INIT_LIST_HEAD(&msk->pm.userspace_pm_local_addr_list);
        mptcp_pm_data_reset(msk);
}

void __init mptcp_pm_init(void)
{
        mptcp_pm_nl_init();
}