// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif

#include <net/mptcp.h>
#include "protocol.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES
/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
* completed yet or has failed , return the subflow socket .
* Otherwise return NULL .
*/
static struct socket * __mptcp_nmpc_socket ( const struct mptcp_sock * msk )
{
2020-01-22 03:56:18 +03:00
if ( ! msk - > subflow | | mptcp_subflow_ctx ( msk - > subflow - > sk ) - > fourth_ack )
2020-01-22 03:56:17 +03:00
return NULL ;
return msk - > subflow ;
}
2020-01-22 03:56:18 +03:00
/* if msk has a single subflow, and the mp_capable handshake is failed,
* return it .
* Otherwise returns NULL
*/
static struct socket * __mptcp_tcp_fallback ( const struct mptcp_sock * msk )
{
struct socket * ssock = __mptcp_nmpc_socket ( msk ) ;
sock_owned_by_me ( ( const struct sock * ) msk ) ;
if ( ! ssock | | sk_is_mptcp ( ssock - > sk ) )
return NULL ;
return ssock ;
}
2020-01-22 03:56:17 +03:00
static bool __mptcp_can_create_subflow ( const struct mptcp_sock * msk )
{
return ( ( struct sock * ) msk ) - > sk_state = = TCP_CLOSE ;
}
static struct socket * __mptcp_socket_create ( struct mptcp_sock * msk , int state )
{
struct mptcp_subflow_context * subflow ;
struct sock * sk = ( struct sock * ) msk ;
struct socket * ssock ;
int err ;
ssock = __mptcp_nmpc_socket ( msk ) ;
if ( ssock )
goto set_state ;
if ( ! __mptcp_can_create_subflow ( msk ) )
return ERR_PTR ( - EINVAL ) ;
err = mptcp_subflow_create_socket ( sk , & ssock ) ;
if ( err )
return ERR_PTR ( err ) ;
msk - > subflow = ssock ;
subflow = mptcp_subflow_ctx ( ssock - > sk ) ;
2020-01-22 03:56:18 +03:00
list_add ( & subflow - > node , & msk - > conn_list ) ;
2020-01-22 03:56:17 +03:00
subflow - > request_mptcp = 1 ;
set_state :
if ( state ! = MPTCP_SAME_STATE )
inet_sk_state_store ( sk , state ) ;
return ssock ;
}
2020-01-22 03:56:18 +03:00
static struct sock * mptcp_subflow_get ( const struct mptcp_sock * msk )
{
struct mptcp_subflow_context * subflow ;
sock_owned_by_me ( ( const struct sock * ) msk ) ;
mptcp_for_each_subflow ( msk , subflow ) {
return mptcp_subflow_tcp_sock ( subflow ) ;
}
return NULL ;
}
2020-01-22 03:56:15 +03:00
static int mptcp_sendmsg ( struct sock * sk , struct msghdr * msg , size_t len )
{
struct mptcp_sock * msk = mptcp_sk ( sk ) ;
2020-01-22 03:56:18 +03:00
struct socket * ssock ;
struct sock * ssk ;
int ret ;
2020-01-22 03:56:15 +03:00
if ( msg - > msg_flags & ~ ( MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL ) )
return - EOPNOTSUPP ;
2020-01-22 03:56:18 +03:00
lock_sock ( sk ) ;
ssock = __mptcp_tcp_fallback ( msk ) ;
if ( ssock ) {
pr_debug ( " fallback passthrough " ) ;
ret = sock_sendmsg ( ssock , msg ) ;
release_sock ( sk ) ;
return ret ;
}
ssk = mptcp_subflow_get ( msk ) ;
if ( ! ssk ) {
release_sock ( sk ) ;
return - ENOTCONN ;
}
ret = sock_sendmsg ( ssk - > sk_socket , msg ) ;
release_sock ( sk ) ;
return ret ;
2020-01-22 03:56:15 +03:00
}
/* recvmsg entry point: mirror of mptcp_sendmsg() for the receive path */
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	struct sock *ssk;
	int copied;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);

	ssock = __mptcp_tcp_fallback(msk);
	if (ssock) {
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		goto out;
	}

	ssk = mptcp_subflow_get(msk);
	if (!ssk) {
		copied = -ENOTCONN;
		goto out;
	}

	copied = sock_recvmsg(ssk->sk_socket, msg, flags);

out:
	release_sock(sk);
	return copied;
}
/* subflow sockets can be either outgoing (connect) or incoming
* ( accept ) .
*
* Outgoing subflows use in - kernel sockets .
* Incoming subflows do not have their own ' struct socket ' allocated ,
* so we need to use tcp_close ( ) after detaching them from the mptcp
* parent socket .
*/
static void __mptcp_close_ssk ( struct sock * sk , struct sock * ssk ,
struct mptcp_subflow_context * subflow ,
long timeout )
{
struct socket * sock = READ_ONCE ( ssk - > sk_socket ) ;
list_del ( & subflow - > node ) ;
if ( sock & & sock ! = sk - > sk_socket ) {
/* outgoing subflow */
sock_release ( sock ) ;
} else {
/* incoming subflow */
tcp_close ( ssk , timeout ) ;
}
2020-01-22 03:56:15 +03:00
}
static int mptcp_init_sock ( struct sock * sk )
{
2020-01-22 03:56:18 +03:00
struct mptcp_sock * msk = mptcp_sk ( sk ) ;
INIT_LIST_HEAD ( & msk - > conn_list ) ;
2020-01-22 03:56:15 +03:00
return 0 ;
}
static void mptcp_close ( struct sock * sk , long timeout )
{
2020-01-22 03:56:18 +03:00
struct mptcp_subflow_context * subflow , * tmp ;
2020-01-22 03:56:15 +03:00
struct mptcp_sock * msk = mptcp_sk ( sk ) ;
2020-01-22 03:56:20 +03:00
mptcp_token_destroy ( msk - > token ) ;
2020-01-22 03:56:15 +03:00
inet_sk_state_store ( sk , TCP_CLOSE ) ;
2020-01-22 03:56:18 +03:00
lock_sock ( sk ) ;
list_for_each_entry_safe ( subflow , tmp , & msk - > conn_list , node ) {
struct sock * ssk = mptcp_subflow_tcp_sock ( subflow ) ;
__mptcp_close_ssk ( sk , ssk , subflow , timeout ) ;
2020-01-22 03:56:15 +03:00
}
2020-01-22 03:56:18 +03:00
release_sock ( sk ) ;
sk_common_release ( sk ) ;
2020-01-22 03:56:15 +03:00
}
2020-01-22 03:56:19 +03:00
static void mptcp_copy_inaddrs ( struct sock * msk , const struct sock * ssk )
{
# if IS_ENABLED(CONFIG_MPTCP_IPV6)
const struct ipv6_pinfo * ssk6 = inet6_sk ( ssk ) ;
struct ipv6_pinfo * msk6 = inet6_sk ( msk ) ;
msk - > sk_v6_daddr = ssk - > sk_v6_daddr ;
msk - > sk_v6_rcv_saddr = ssk - > sk_v6_rcv_saddr ;
if ( msk6 & & ssk6 ) {
msk6 - > saddr = ssk6 - > saddr ;
msk6 - > flow_label = ssk6 - > flow_label ;
}
# endif
inet_sk ( msk ) - > inet_num = inet_sk ( ssk ) - > inet_num ;
inet_sk ( msk ) - > inet_dport = inet_sk ( ssk ) - > inet_dport ;
inet_sk ( msk ) - > inet_sport = inet_sk ( ssk ) - > inet_sport ;
inet_sk ( msk ) - > inet_daddr = inet_sk ( ssk ) - > inet_daddr ;
inet_sk ( msk ) - > inet_saddr = inet_sk ( ssk ) - > inet_saddr ;
inet_sk ( msk ) - > inet_rcv_saddr = inet_sk ( ssk ) - > inet_rcv_saddr ;
}
/* accept on the listener subflow; when the peer negotiated MP_CAPABLE,
 * clone the listening msk and hand the new subflow over to the clone
 */
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;

		subflow = mptcp_subflow_ctx(newsk);

		lock_sock(sk);

		/* sk_clone_lock() returns the clone bh-locked */
		local_bh_disable();
		new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
		if (!new_mptcp_sock) {
			*err = -ENOBUFS;
			local_bh_enable();
			release_sock(sk);
			tcp_close(newsk, 0);
			return NULL;
		}

		mptcp_init_sock(new_mptcp_sock);

		msk = mptcp_sk(new_mptcp_sock);
		msk->remote_key = subflow->remote_key;
		msk->local_key = subflow->local_key;
		msk->token = subflow->token;
		msk->subflow = NULL;
		mptcp_token_update_accept(newsk, new_mptcp_sock);

		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		/* will be fully established at mptcp_stream_accept()
		 * completion.
		 */
		inet_sk_state_store(new_mptcp_sock, TCP_SYN_RECV);
		bh_unlock_sock(new_mptcp_sock);
		local_bh_enable();
		release_sock(sk);
	}

	return newsk;
}
/* no msk-specific state to release yet */
static void mptcp_destroy(struct sock *sk)
{
}
2020-01-22 03:56:18 +03:00
static int mptcp_get_port ( struct sock * sk , unsigned short snum )
2020-01-22 03:56:15 +03:00
{
struct mptcp_sock * msk = mptcp_sk ( sk ) ;
2020-01-22 03:56:18 +03:00
struct socket * ssock ;
2020-01-22 03:56:15 +03:00
2020-01-22 03:56:18 +03:00
ssock = __mptcp_nmpc_socket ( msk ) ;
pr_debug ( " msk=%p, subflow=%p " , msk , ssock ) ;
if ( WARN_ON_ONCE ( ! ssock ) )
return - EINVAL ;
2020-01-22 03:56:15 +03:00
2020-01-22 03:56:18 +03:00
return inet_csk_get_port ( ssock - > sk , snum ) ;
}
2020-01-22 03:56:15 +03:00
2020-01-22 03:56:18 +03:00
void mptcp_finish_connect ( struct sock * ssk )
{
struct mptcp_subflow_context * subflow ;
struct mptcp_sock * msk ;
struct sock * sk ;
2020-01-22 03:56:15 +03:00
2020-01-22 03:56:18 +03:00
subflow = mptcp_subflow_ctx ( ssk ) ;
2020-01-22 03:56:15 +03:00
2020-01-22 03:56:18 +03:00
if ( ! subflow - > mp_capable )
return ;
sk = subflow - > conn ;
msk = mptcp_sk ( sk ) ;
/* the socket is not connected yet, no msk/subflow ops can access/race
* accessing the field below
*/
WRITE_ONCE ( msk - > remote_key , subflow - > remote_key ) ;
WRITE_ONCE ( msk - > local_key , subflow - > local_key ) ;
2020-01-22 03:56:20 +03:00
WRITE_ONCE ( msk - > token , subflow - > token ) ;
2020-01-22 03:56:15 +03:00
}
2020-01-22 03:56:19 +03:00
static void mptcp_sock_graft ( struct sock * sk , struct socket * parent )
{
write_lock_bh ( & sk - > sk_callback_lock ) ;
rcu_assign_pointer ( sk - > sk_wq , & parent - > wq ) ;
sk_set_socket ( sk , parent ) ;
sk - > sk_uid = SOCK_INODE ( parent ) - > i_uid ;
write_unlock_bh ( & sk - > sk_callback_lock ) ;
}
2020-01-22 03:56:15 +03:00
static struct proto mptcp_prot = {
. name = " MPTCP " ,
. owner = THIS_MODULE ,
. init = mptcp_init_sock ,
. close = mptcp_close ,
2020-01-22 03:56:19 +03:00
. accept = mptcp_accept ,
2020-01-22 03:56:15 +03:00
. shutdown = tcp_shutdown ,
2020-01-22 03:56:20 +03:00
. destroy = mptcp_destroy ,
2020-01-22 03:56:15 +03:00
. sendmsg = mptcp_sendmsg ,
. recvmsg = mptcp_recvmsg ,
. hash = inet_hash ,
. unhash = inet_unhash ,
2020-01-22 03:56:18 +03:00
. get_port = mptcp_get_port ,
2020-01-22 03:56:15 +03:00
. obj_size = sizeof ( struct mptcp_sock ) ,
. no_autobind = true ,
} ;
2020-01-22 03:56:17 +03:00
static int mptcp_bind ( struct socket * sock , struct sockaddr * uaddr , int addr_len )
{
struct mptcp_sock * msk = mptcp_sk ( sock - > sk ) ;
struct socket * ssock ;
2020-01-22 03:56:19 +03:00
int err ;
2020-01-22 03:56:17 +03:00
lock_sock ( sock - > sk ) ;
ssock = __mptcp_socket_create ( msk , MPTCP_SAME_STATE ) ;
if ( IS_ERR ( ssock ) ) {
err = PTR_ERR ( ssock ) ;
goto unlock ;
}
err = ssock - > ops - > bind ( ssock , uaddr , addr_len ) ;
2020-01-22 03:56:19 +03:00
if ( ! err )
mptcp_copy_inaddrs ( sock - > sk , ssock - > sk ) ;
2020-01-22 03:56:17 +03:00
unlock :
release_sock ( sock - > sk ) ;
return err ;
}
static int mptcp_stream_connect ( struct socket * sock , struct sockaddr * uaddr ,
int addr_len , int flags )
{
struct mptcp_sock * msk = mptcp_sk ( sock - > sk ) ;
struct socket * ssock ;
int err ;
lock_sock ( sock - > sk ) ;
ssock = __mptcp_socket_create ( msk , TCP_SYN_SENT ) ;
if ( IS_ERR ( ssock ) ) {
err = PTR_ERR ( ssock ) ;
goto unlock ;
}
2020-01-22 03:56:19 +03:00
# ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
* TCP option space .
*/
if ( rcu_access_pointer ( tcp_sk ( ssock - > sk ) - > md5sig_info ) )
mptcp_subflow_ctx ( ssock - > sk ) - > request_mptcp = 0 ;
# endif
2020-01-22 03:56:17 +03:00
err = ssock - > ops - > connect ( ssock , uaddr , addr_len , flags ) ;
inet_sk_state_store ( sock - > sk , inet_sk_state_load ( ssock - > sk ) ) ;
2020-01-22 03:56:19 +03:00
mptcp_copy_inaddrs ( sock - > sk , ssock - > sk ) ;
2020-01-22 03:56:17 +03:00
unlock :
release_sock ( sock - > sk ) ;
return err ;
}
2020-01-22 03:56:19 +03:00
static int mptcp_v4_getname ( struct socket * sock , struct sockaddr * uaddr ,
int peer )
{
if ( sock - > sk - > sk_prot = = & tcp_prot ) {
/* we are being invoked from __sys_accept4, after
* mptcp_accept ( ) has just accepted a non - mp - capable
* flow : sk is a tcp_sk , not an mptcp one .
*
* Hand the socket over to tcp so all further socket ops
* bypass mptcp .
*/
sock - > ops = & inet_stream_ops ;
}
return inet_getname ( sock , uaddr , peer ) ;
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif
static int mptcp_listen ( struct socket * sock , int backlog )
{
struct mptcp_sock * msk = mptcp_sk ( sock - > sk ) ;
struct socket * ssock ;
int err ;
pr_debug ( " msk=%p " , msk ) ;
lock_sock ( sock - > sk ) ;
ssock = __mptcp_socket_create ( msk , TCP_LISTEN ) ;
if ( IS_ERR ( ssock ) ) {
err = PTR_ERR ( ssock ) ;
goto unlock ;
}
err = ssock - > ops - > listen ( ssock , backlog ) ;
inet_sk_state_store ( sock - > sk , inet_sk_state_load ( ssock - > sk ) ) ;
if ( ! err )
mptcp_copy_inaddrs ( sock - > sk , ssock - > sk ) ;
unlock :
release_sock ( sock - > sk ) ;
return err ;
}
/* true when @p is plain TCP (v4, or v6 when IPv6 MPTCP is enabled) */
static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}
/* stream-level accept: delegate to the listener subflow, then graft the
 * accepted subflows onto the new mptcp socket
 */
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	/* hold the listener sock across the (possibly blocking) accept */
	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		inet_sk_state_store(newsock->sk, TCP_ESTABLISHED);
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}
2020-01-22 03:56:17 +03:00
static __poll_t mptcp_poll ( struct file * file , struct socket * sock ,
struct poll_table_struct * wait )
{
__poll_t mask = 0 ;
return mask ;
}
static struct proto_ops mptcp_stream_ops ;
2020-01-22 03:56:15 +03:00
static struct inet_protosw mptcp_protosw = {
. type = SOCK_STREAM ,
. protocol = IPPROTO_MPTCP ,
. prot = & mptcp_prot ,
2020-01-22 03:56:17 +03:00
. ops = & mptcp_stream_ops ,
. flags = INET_PROTOSW_ICSK ,
2020-01-22 03:56:15 +03:00
} ;
/* register the MPTCP protocol (IPv4 side) at boot */
void __init mptcp_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	/* start from the plain TCP stream ops, override what mptcp handles */
	mptcp_stream_ops = inet_stream_ops;
	mptcp_stream_ops.bind = mptcp_bind;
	mptcp_stream_ops.connect = mptcp_stream_connect;
	mptcp_stream_ops.poll = mptcp_poll;
	mptcp_stream_ops.accept = mptcp_stream_accept;
	mptcp_stream_ops.getname = mptcp_v4_getname;
	mptcp_stream_ops.listen = mptcp_listen;

	mptcp_subflow_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
/* both populated at init time, see mptcpv6_init() */
static struct proto_ops mptcp_v6_stream_ops;
static struct proto mptcp_v6_prot;

/* release IPv6-specific state on top of the common mptcp destroy */
static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}
2020-01-22 03:56:15 +03:00
static struct inet_protosw mptcp_v6_protosw = {
. type = SOCK_STREAM ,
. protocol = IPPROTO_MPTCP ,
. prot = & mptcp_v6_prot ,
2020-01-22 03:56:17 +03:00
. ops = & mptcp_v6_stream_ops ,
2020-01-22 03:56:15 +03:00
. flags = INET_PROTOSW_ICSK ,
} ;
int mptcpv6_init ( void )
{
int err ;
mptcp_v6_prot = mptcp_prot ;
strcpy ( mptcp_v6_prot . name , " MPTCPv6 " ) ;
mptcp_v6_prot . slab = NULL ;
2020-01-22 03:56:20 +03:00
mptcp_v6_prot . destroy = mptcp_v6_destroy ;
2020-01-22 03:56:15 +03:00
mptcp_v6_prot . obj_size = sizeof ( struct mptcp_sock ) +
sizeof ( struct ipv6_pinfo ) ;
err = proto_register ( & mptcp_v6_prot , 1 ) ;
if ( err )
return err ;
2020-01-22 03:56:17 +03:00
mptcp_v6_stream_ops = inet6_stream_ops ;
mptcp_v6_stream_ops . bind = mptcp_bind ;
mptcp_v6_stream_ops . connect = mptcp_stream_connect ;
mptcp_v6_stream_ops . poll = mptcp_poll ;
2020-01-22 03:56:19 +03:00
mptcp_v6_stream_ops . accept = mptcp_stream_accept ;
mptcp_v6_stream_ops . getname = mptcp_v6_getname ;
mptcp_v6_stream_ops . listen = mptcp_listen ;
2020-01-22 03:56:17 +03:00
2020-01-22 03:56:15 +03:00
err = inet6_register_protosw ( & mptcp_v6_protosw ) ;
if ( err )
proto_unregister ( & mptcp_v6_prot ) ;
return err ;
}
#endif