// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
*/

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
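 *
 * The error is delivered via the UDP socket's error queue, so the peer
 * address has to be reconstructed from the sock_exterr_skb before the peer
 * record can be looked up under RCU.  Note that an ICMPv6 report seen on an
 * IPv4 socket is presumably carrying an IPv4-mapped address, so the IPv4
 * address is taken from the last four bytes of the reported IPv6 address.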
*/
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}
/*
 * Handle an MTU/fragmentation problem.
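 *
 * If the ICMP report carries no next-hop MTU (ee_info == 0), a value is
 * estimated from the interface MTU instead: anything over 1500 is halved
 * (floored at 1500), otherwise 100 bytes are knocked off, floored at the
 * peer's header size plus 4.  For example, an if_mtu of 9000 would give
 * 4500, while 1500 would give 1400.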
*/
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}
/*
 * Handle an error received on the local endpoint.
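 *
 * Errors are dequeued from the UDP socket's error queue under RCU.  An ICMP
 * Fragmentation Needed report just updates the path MTU; anything else is
 * turned into an error code and distributed to all calls outstanding on the
 * affected peer.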
*/
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	rcu_read_lock();
	local = rcu_dereference_sk_user_data(sk);
	if (unlikely(!local)) {
		rcu_read_unlock();
		return;
	}
	_enter("%p{%d}", sk, local->debug_id);

	/* Clear the outstanding error value on the socket so that it doesn't
	 * cause kernel_sendmsg() to return it later.
	 */
	sock_error(sk);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		rcu_read_unlock();
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_received);
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		return;
	}

	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	rxrpc_put_peer(peer);

	_leave("");
}
/*
 * Map an error report to error codes on the peer record.
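 *
 * ICMP-sourced errors leave the completion type as RXRPC_CALL_NETWORK_ERROR;
 * locally originated errors are marked RXRPC_CALL_LOCAL_ERROR instead.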
*/
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
		if (err == EACCES)
			err = EHOSTUNREACH;
		/* Fall through */
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}
/*
 * Distribute an error that occurred on a peer.
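 *
 * Each call still listed as an error target of the peer is completed with
 * the given completion type and the error turned into a negative errno.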
*/
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
		rxrpc_set_call_completion(call, compl, 0, -error);
	}
}
/*
 * Perform keep-alive pings.
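 *
 * Each peer on the collector list is pinged if its keepalive time appears to
 * have come due (or lies outside the wheel), then requeued into the bucket in
 * which its next keepalive will fall due.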
*/
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		if (__rxrpc_use_local(peer->local)) {
			spin_unlock_bh(&rxnet->peer_hash_lock);

			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer occurred since last we
			 * examined it so put it into the appropriate future
			 * bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock_bh(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			rxrpc_unuse_local(peer->local);
		}
		rxrpc_put_peer_locked(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}
/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
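 *
 * rxnet->peer_keepalive[] acts as a time wheel with one-second buckets.  The
 * worker splices out every bucket that has expired since it last ran, plus
 * any newly added peers, dispatches them, and then rearms the timer for the
 * next occupied bucket.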
*/
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}