// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sk_buff *);
static void rxrpc_distribute_error(struct rxrpc_peer *, struct sk_buff *,
				   enum rxrpc_call_completion, int);

/*
 * Find the peer associated with a local error.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
						      const struct sk_buff *skb,
						      struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
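			/* The IPv6 stack reports an IPv4-mapped address
			 * (::ffff:a.b.c.d); the IPv4 address itself occupies
			 * the final four bytes, hence the offset of 12.
			 */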
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
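			/* An ICMPv4 error can arrive on an IPv6 socket; recast
			 * the reported address as AF_INET so the peer lookup
			 * below can match it.
			 */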
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
{
	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)
		peer->if_mtu = mtu;

	if (mtu == 0) {
		/* They didn't give us a size; estimate one by halving
		 * anything above Ethernet size (clamping at 1500), or by
		 * shaving 100 bytes off a smaller MTU, but never going below
		 * the header size plus a little.
		 */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}
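
	/* Only ever reduce the path MTU, and recompute maxdata under the peer
	 * lock so that readers see a consistent mtu/maxdata pair.
	 */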
	if (mtu < peer->mtu) {
		spin_lock(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock(&peer->lock);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_input_error(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer = NULL;

	_enter("L=%x", local->debug_id);

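	/* Zero-length messages with a TX-timestamping origin aren't error
	 * reports, so there's nothing for us to do with them.
	 */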
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
		peer = NULL;
	rcu_read_unlock();
	if (!peer)
		return;

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

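	/* An ICMP "fragmentation needed" report carries the next-hop MTU in
	 * ee_info; use it to wind down our record of the path MTU.
	 */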
	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
		rxrpc_adjust_mtu(peer, serr->ee.ee_info);
		goto out;
	}

	rxrpc_store_error(peer, skb);
out:
	rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer, struct sk_buff *skb)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sock_extended_err *ee = &serr->ee;
	int err = ee->ee_errno;

	_enter("");

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
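		/* ICMPv6 reports an administratively-prohibited destination as
		 * EACCES; mapping that to EHOSTUNREACH seems a closer fit for
		 * what callers expect of a network error.
		 */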
		if (err == EACCES)
			err = EHOSTUNREACH;
		fallthrough;
	case SO_EE_ORIGIN_ICMP:
	default:
		break;
	}

	rxrpc_distribute_error(peer, skb, compl, err);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
				   enum rxrpc_call_completion compl, int err)
{
	struct rxrpc_call *call;
	HLIST_HEAD(error_targets);

	spin_lock(&peer->lock);
	hlist_move_list(&peer->error_targets, &error_targets);
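
	/* Drop the peer lock around each notification, as completing a call
	 * and feeding it an event may need to take other locks.
	 */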
	while (!hlist_empty(&error_targets)) {
		call = hlist_entry(error_targets.first,
				   struct rxrpc_call, error_link);
		hlist_del_init(&call->error_link);
		spin_unlock(&peer->lock);

		rxrpc_see_call(call, rxrpc_call_see_distribute_error);
		rxrpc_set_call_completion(call, compl, 0, -err);
		rxrpc_input_call_event(call, skb);

		spin_lock(&peer->lock);
	}

	spin_unlock(&peer->lock);
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	bool use;
	int slot;

	spin_lock(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
			continue;

		use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
		spin_unlock(&rxnet->peer_hash_lock);
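
		/* Only ping the peer if we managed to pin its local endpoint;
		 * if we couldn't, the endpoint is presumably being torn down.
		 */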
		if (use) {
			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer occurred since last we
			 * examined it so put it into the appropriate future
			 * bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			spin_unlock(&rxnet->peer_hash_lock);
			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
		}

		rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
		spin_lock(&rxnet->peer_hash_lock);
	}

	spin_unlock(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this second;
	 * the bucket at cursor + 1 goes at now + 1s and so on...
	 */
	spin_lock(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
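	/* cursor and stop are u8, so the (s8) difference below compares them
	 * correctly even when the cursor has wrapped around the ring.
	 */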
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
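
	/* timer_reduce() only ever brings the expiry forward, so a concurrent
	 * update cannot push the next ping later than it should be.
	 */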
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}