2019-04-05 13:31:34 -04:00
// SPDX-License-Identifier: GPL-2.0
2014-07-04 23:34:38 +02:00
/* Copyright 2011-2014 Autronica Fire and Security AS
2013-10-30 21:10:47 +01:00
*
* Author ( s ) :
2014-07-04 23:34:38 +02:00
* 2011 - 2014 Arvid Brodin , arvid . brodin @ alten . se
2013-10-30 21:10:47 +01:00
*
* The HSR spec says never to forward the same frame twice on the same
* interface . A frame is identified by its source MAC address and its HSR
* sequence number . This code keeps track of senders and their sequence numbers
* to allow filtering of duplicate frames , and to detect HSR ring errors .
2020-07-22 10:40:16 -04:00
* Same code handles filtering of duplicates for PRP as well .
2013-10-30 21:10:47 +01:00
*/
# include <linux/if_ether.h>
# include <linux/etherdevice.h>
# include <linux/slab.h>
# include <linux/rculist.h>
2022-02-05 15:40:38 +00:00
# include <linux/jhash.h>
2013-10-30 21:10:47 +01:00
# include "hsr_main.h"
# include "hsr_framereg.h"
# include "hsr_netlink.h"
2022-02-20 15:32:50 +00:00
# ifdef CONFIG_LOCKDEP
int lockdep_hsr_is_held ( spinlock_t * lock )
{
return lockdep_is_held ( lock ) ;
}
# endif
2022-02-05 15:40:38 +00:00
u32 hsr_mac_hash ( struct hsr_priv * hsr , const unsigned char * addr )
{
u32 hash = jhash ( addr , ETH_ALEN , hsr - > hash_seed ) ;
return reciprocal_scale ( hash , hsr - > hash_buckets ) ;
}
2022-02-20 15:32:50 +00:00
/* Return the first hsr_node on an RCU-protected hlist, or NULL when the list
 * is empty.  Safe both from an RCU-BH read-side section and with 'lock' (the
 * list's update-side spinlock) held; the rcu_dereference_bh_check() below
 * encodes exactly that condition for lockdep.
 */
struct hsr_node *hsr_node_get_first(struct hlist_head *head, spinlock_t *lock)
{
	struct hlist_node *first;

	first = rcu_dereference_bh_check(hlist_first_rcu(head),
					 lockdep_hsr_is_held(lock));
	if (first)
		return hlist_entry(first, struct hsr_node, mac_list);

	return NULL;
}
2013-10-30 21:10:47 +01:00
2014-07-04 23:41:03 +02:00
/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
* false otherwise .
2013-10-30 21:10:47 +01:00
*/
2014-07-04 23:41:03 +02:00
static bool seq_nr_after ( u16 a , u16 b )
2013-10-30 21:10:47 +01:00
{
2014-07-04 23:41:03 +02:00
/* Remove inconsistency where
* seq_nr_after ( a , b ) = = seq_nr_before ( a , b )
*/
2019-04-05 13:31:29 -04:00
if ( ( int ) b - a = = 32768 )
2014-07-04 23:41:03 +02:00
return false ;
2013-10-30 21:10:47 +01:00
2019-04-05 13:31:29 -04:00
return ( ( ( s16 ) ( b - a ) ) < 0 ) ;
2013-10-30 21:10:47 +01:00
}
2019-04-05 13:31:33 -04:00
2014-07-04 23:41:03 +02:00
# define seq_nr_before(a, b) seq_nr_after((b), (a))
# define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b)))
2013-10-30 21:10:47 +01:00
2014-07-04 23:41:03 +02:00
bool hsr_addr_is_self ( struct hsr_priv * hsr , unsigned char * addr )
2013-10-30 21:10:47 +01:00
{
2014-07-04 23:34:38 +02:00
struct hsr_node * node ;
2013-10-30 21:10:47 +01:00
2022-02-20 15:32:50 +00:00
node = hsr_node_get_first ( & hsr - > self_node_db , & hsr - > list_lock ) ;
2014-07-04 23:41:03 +02:00
if ( ! node ) {
WARN_ONCE ( 1 , " HSR: No self node \n " ) ;
return false ;
2013-10-30 21:10:47 +01:00
}
2019-04-05 13:31:32 -04:00
if ( ether_addr_equal ( addr , node - > macaddress_A ) )
2014-07-04 23:41:03 +02:00
return true ;
2019-04-05 13:31:32 -04:00
if ( ether_addr_equal ( addr , node - > macaddress_B ) )
2014-07-04 23:41:03 +02:00
return true ;
2013-10-30 21:10:47 +01:00
2014-07-04 23:41:03 +02:00
return false ;
}
2013-10-30 21:10:47 +01:00
/* Search for mac entry. Caller must hold rcu read lock.
*/
2022-02-05 15:40:38 +00:00
static struct hsr_node * find_node_by_addr_A ( struct hlist_head * node_db ,
2019-04-05 13:31:32 -04:00
const unsigned char addr [ ETH_ALEN ] )
2013-10-30 21:10:47 +01:00
{
2014-07-04 23:34:38 +02:00
struct hsr_node * node ;
2013-10-30 21:10:47 +01:00
2022-02-05 15:40:38 +00:00
hlist_for_each_entry_rcu ( node , node_db , mac_list ) {
2019-04-05 13:31:32 -04:00
if ( ether_addr_equal ( node - > macaddress_A , addr ) )
2013-10-30 21:10:47 +01:00
return node ;
}
return NULL ;
}
/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
 * frames from self that's been looped over the HSR ring.
 *
 * Installs (or atomically replaces) the single self-node entry holding the
 * device's two slave MAC addresses.  Returns 0 or -ENOMEM.
 */
int hsr_create_self_node(struct hsr_priv *hsr,
			 const unsigned char addr_a[ETH_ALEN],
			 const unsigned char addr_b[ETH_ALEN])
{
	struct hlist_head *self_node_db = &hsr->self_node_db;
	struct hsr_node *node, *oldnode;

	/* GFP_KERNEL: device-setup path, not the packet fast path */
	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ether_addr_copy(node->macaddress_A, addr_a);
	ether_addr_copy(node->macaddress_B, addr_b);

	spin_lock_bh(&hsr->list_lock);
	oldnode = hsr_node_get_first(self_node_db, &hsr->list_lock);
	if (oldnode) {
		/* Replace in place so concurrent RCU readers always see
		 * either the old or the new entry, never an empty list.
		 */
		hlist_replace_rcu(&oldnode->mac_list, &node->mac_list);
		spin_unlock_bh(&hsr->list_lock);
		kfree_rcu(oldnode, rcu_head);
	} else {
		hlist_add_tail_rcu(&node->mac_list, self_node_db);
		spin_unlock_bh(&hsr->list_lock);
	}

	return 0;
}
2019-12-22 11:26:54 +00:00
void hsr_del_self_node ( struct hsr_priv * hsr )
2019-03-06 22:45:01 +08:00
{
2022-02-05 15:40:38 +00:00
struct hlist_head * self_node_db = & hsr - > self_node_db ;
2019-03-06 22:45:01 +08:00
struct hsr_node * node ;
2019-12-22 11:26:54 +00:00
spin_lock_bh ( & hsr - > list_lock ) ;
2022-02-20 15:32:50 +00:00
node = hsr_node_get_first ( self_node_db , & hsr - > list_lock ) ;
2019-03-06 22:45:01 +08:00
if ( node ) {
2022-02-05 15:40:38 +00:00
hlist_del_rcu ( & node - > mac_list ) ;
2019-12-22 11:26:54 +00:00
kfree_rcu ( node , rcu_head ) ;
2019-03-06 22:45:01 +08:00
}
2019-12-22 11:26:54 +00:00
spin_unlock_bh ( & hsr - > list_lock ) ;
2019-03-06 22:45:01 +08:00
}
2013-10-30 21:10:47 +01:00
2022-02-05 15:40:38 +00:00
void hsr_del_nodes ( struct hlist_head * node_db )
2019-07-03 17:21:13 -07:00
{
struct hsr_node * node ;
2022-02-05 15:40:38 +00:00
struct hlist_node * tmp ;
2019-07-03 17:21:13 -07:00
2022-02-05 15:40:38 +00:00
hlist_for_each_entry_safe ( node , tmp , node_db , mac_list )
kfree_rcu ( node , rcu_head ) ;
2019-07-03 17:21:13 -07:00
}
2020-07-22 10:40:21 -04:00
void prp_handle_san_frame(bool san, enum hsr_port_type port,
			  struct hsr_node *node)
{
	/* Mark if the SAN node is over LAN_A or LAN_B */
	switch (port) {
	case HSR_PT_SLAVE_A:
		node->san_a = true;
		break;
	case HSR_PT_SLAVE_B:
		node->san_b = true;
		break;
	default:
		break;
	}
}
2019-04-05 13:31:32 -04:00
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 *
 * Returns the newly added node, the already-existing node when another CPU
 * raced us to insert the same address, or NULL on allocation failure.
 */
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
				     struct hlist_head *node_db,
				     unsigned char addr[],
				     u16 seq_out, bool san,
				     enum hsr_port_type rx_port)
{
	struct hsr_node *new_node, *node;
	unsigned long now;
	int i;

	/* GFP_ATOMIC: may be called from the packet receive path */
	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
	if (!new_node)
		return NULL;

	ether_addr_copy(new_node->macaddress_A, addr);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger an spurious ring error warning).
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++) {
		new_node->time_in[i] = now;
		new_node->time_out[i] = now;
	}
	for (i = 0; i < HSR_PT_PORTS; i++)
		new_node->seq_out[i] = seq_out;

	/* PRP: remember which LAN a singly attached node was seen on */
	if (san && hsr->proto_ops->handle_san_frame)
		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);

	spin_lock_bh(&hsr->list_lock);
	/* Re-check under the lock: someone may have inserted this address
	 * since our caller looked it up.  If so, drop our copy and return
	 * the existing entry instead.
	 */
	hlist_for_each_entry_rcu(node, node_db, mac_list,
				 lockdep_hsr_is_held(&hsr->list_lock)) {
		if (ether_addr_equal(node->macaddress_A, addr))
			goto out;
		if (ether_addr_equal(node->macaddress_B, addr))
			goto out;
	}
	hlist_add_tail_rcu(&new_node->mac_list, node_db);
	spin_unlock_bh(&hsr->list_lock);

	return new_node;
out:
	spin_unlock_bh(&hsr->list_lock);
	kfree(new_node);
	return node;
}
2020-07-22 10:40:21 -04:00
/* A supervision frame proves the sender is a DAN, not a SAN: clear any
 * previously recorded single-attached markings.
 */
void prp_update_san_info(struct hsr_node *node, bool is_sup)
{
	if (is_sup) {
		node->san_a = false;
		node->san_b = false;
	}
}
2014-07-04 23:41:03 +02:00
/* Get the hsr_node from which 'skb' was sent.
 *
 * Looks up the frame's source MAC (either address A or B of a node) in
 * 'node_db'; unknown senders get a new entry created.  Caller must be in an
 * RCU read-side section (the list is walked with hlist_for_each_entry_rcu).
 */
struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
			      struct sk_buff *skb, bool is_sup,
			      enum hsr_port_type rx_port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	struct prp_rct *rct;
	bool san = false;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* Known sender: match on either of the node's two MAC addresses */
	hlist_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* Everyone may create a node entry, connected node to a HSR/PRP
	 * device.
	 */
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		/* No HSR/PRP ethertype: look for a PRP trailer instead */
		rct = skb_get_PRP_rct(skb);
		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
			seq_out = prp_get_skb_sequence_nr(rct);
		} else {
			/* Untagged frame: a SAN, unless it came in on our
			 * own master port.
			 */
			if (rx_port != HSR_PT_MASTER)
				san = true;
			seq_out = HSR_SEQNR_START;
		}
	}

	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
			    san, rx_port);
}
2019-04-05 13:31:32 -04:00
/* Use the Supervision frame's info about an eventual macaddress_B for merging
* nodes that has previously had their macaddress_B registered as a separate
2014-07-04 23:41:03 +02:00
* node .
*/
2020-07-22 10:40:21 -04:00
void hsr_handle_sup_frame ( struct hsr_frame_info * frame )
2014-07-04 23:41:03 +02:00
{
2020-07-22 10:40:21 -04:00
struct hsr_node * node_curr = frame - > node_src ;
struct hsr_port * port_rcv = frame - > port_rcv ;
2019-12-22 11:26:54 +00:00
struct hsr_priv * hsr = port_rcv - > hsr ;
2014-07-04 23:41:03 +02:00
struct hsr_sup_payload * hsr_sp ;
2021-10-25 20:56:18 +02:00
struct hsr_sup_tlv * hsr_sup_tlv ;
2019-12-22 11:26:54 +00:00
struct hsr_node * node_real ;
2020-07-22 10:40:21 -04:00
struct sk_buff * skb = NULL ;
2022-02-05 15:40:38 +00:00
struct hlist_head * node_db ;
2019-12-22 11:26:54 +00:00
struct ethhdr * ethhdr ;
2014-07-04 23:41:03 +02:00
int i ;
2021-10-25 20:56:18 +02:00
unsigned int pull_size = 0 ;
unsigned int total_pull_size = 0 ;
2022-02-05 15:40:38 +00:00
u32 hash ;
2014-07-04 23:41:03 +02:00
2020-07-22 10:40:21 -04:00
/* Here either frame->skb_hsr or frame->skb_prp should be
* valid as supervision frame always will have protocol
* header info .
*/
if ( frame - > skb_hsr )
skb = frame - > skb_hsr ;
else if ( frame - > skb_prp )
skb = frame - > skb_prp ;
2021-02-09 19:02:11 -06:00
else if ( frame - > skb_std )
skb = frame - > skb_std ;
2020-07-22 10:40:21 -04:00
if ( ! skb )
return ;
2016-04-13 13:52:22 +02:00
/* Leave the ethernet header. */
2021-10-25 20:56:18 +02:00
pull_size = sizeof ( struct ethhdr ) ;
skb_pull ( skb , pull_size ) ;
total_pull_size + = pull_size ;
ethhdr = ( struct ethhdr * ) skb_mac_header ( skb ) ;
2016-04-13 13:52:22 +02:00
/* And leave the HSR tag. */
2021-10-25 20:56:18 +02:00
if ( ethhdr - > h_proto = = htons ( ETH_P_HSR ) ) {
pull_size = sizeof ( struct ethhdr ) ;
skb_pull ( skb , pull_size ) ;
total_pull_size + = pull_size ;
}
2016-04-13 13:52:22 +02:00
/* And leave the HSR sup tag. */
2021-10-25 20:56:18 +02:00
pull_size = sizeof ( struct hsr_tag ) ;
skb_pull ( skb , pull_size ) ;
total_pull_size + = pull_size ;
2016-04-13 13:52:22 +02:00
2021-10-25 20:56:18 +02:00
/* get HSR sup payload */
2019-04-05 13:31:29 -04:00
hsr_sp = ( struct hsr_sup_payload * ) skb - > data ;
2014-07-04 23:41:03 +02:00
2019-04-05 13:31:32 -04:00
/* Merge node_curr (registered on macaddress_B) into node_real */
2022-02-05 15:40:38 +00:00
node_db = port_rcv - > hsr - > node_db ;
hash = hsr_mac_hash ( hsr , hsr_sp - > macaddress_A ) ;
node_real = find_node_by_addr_A ( & node_db [ hash ] , hsr_sp - > macaddress_A ) ;
2014-07-04 23:41:03 +02:00
if ( ! node_real )
/* No frame received from AddrA of this node yet */
2022-02-05 15:40:38 +00:00
node_real = hsr_add_node ( hsr , & node_db [ hash ] ,
hsr_sp - > macaddress_A ,
2020-07-22 10:40:21 -04:00
HSR_SEQNR_START - 1 , true ,
port_rcv - > type ) ;
2014-07-04 23:41:03 +02:00
if ( ! node_real )
goto done ; /* No mem */
if ( node_real = = node_curr )
/* Node has already been merged */
goto done ;
2021-10-25 20:56:18 +02:00
/* Leave the first HSR sup payload. */
pull_size = sizeof ( struct hsr_sup_payload ) ;
skb_pull ( skb , pull_size ) ;
total_pull_size + = pull_size ;
/* Get second supervision tlv */
hsr_sup_tlv = ( struct hsr_sup_tlv * ) skb - > data ;
/* And check if it is a redbox mac TLV */
if ( hsr_sup_tlv - > HSR_TLV_type = = PRP_TLV_REDBOX_MAC ) {
/* We could stop here after pushing hsr_sup_payload,
* or proceed and allow macaddress_B and for redboxes .
*/
/* Sanity check length */
if ( hsr_sup_tlv - > HSR_TLV_length ! = 6 )
goto done ;
/* Leave the second HSR sup tlv. */
pull_size = sizeof ( struct hsr_sup_tlv ) ;
skb_pull ( skb , pull_size ) ;
total_pull_size + = pull_size ;
/* Get redbox mac address. */
hsr_sp = ( struct hsr_sup_payload * ) skb - > data ;
/* Check if redbox mac and node mac are equal. */
2022-02-05 15:40:38 +00:00
if ( ! ether_addr_equal ( node_real - > macaddress_A ,
hsr_sp - > macaddress_A ) ) {
2021-10-25 20:56:18 +02:00
/* This is a redbox supervision frame for a VDAN! */
goto done ;
}
}
2019-04-05 13:31:32 -04:00
ether_addr_copy ( node_real - > macaddress_B , ethhdr - > h_source ) ;
2014-07-04 23:41:03 +02:00
for ( i = 0 ; i < HSR_PT_PORTS ; i + + ) {
if ( ! node_curr - > time_in_stale [ i ] & &
time_after ( node_curr - > time_in [ i ] , node_real - > time_in [ i ] ) ) {
node_real - > time_in [ i ] = node_curr - > time_in [ i ] ;
2019-04-05 13:31:23 -04:00
node_real - > time_in_stale [ i ] =
node_curr - > time_in_stale [ i ] ;
2014-07-04 23:41:03 +02:00
}
if ( seq_nr_after ( node_curr - > seq_out [ i ] , node_real - > seq_out [ i ] ) )
node_real - > seq_out [ i ] = node_curr - > seq_out [ i ] ;
}
2019-04-05 13:31:32 -04:00
node_real - > addr_B_port = port_rcv - > type ;
2014-07-04 23:41:03 +02:00
2019-12-22 11:26:54 +00:00
spin_lock_bh ( & hsr - > list_lock ) ;
2022-02-05 15:40:38 +00:00
hlist_del_rcu ( & node_curr - > mac_list ) ;
2019-12-22 11:26:54 +00:00
spin_unlock_bh ( & hsr - > list_lock ) ;
2014-07-04 23:41:03 +02:00
kfree_rcu ( node_curr , rcu_head ) ;
done :
2021-10-25 20:56:18 +02:00
/* Push back here */
skb_push ( skb , total_pull_size ) ;
2014-07-04 23:41:03 +02:00
}
2013-10-30 21:10:47 +01:00
/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
*
2014-07-04 23:41:03 +02:00
* If the frame was sent by a node ' s B interface , replace the source
2019-04-05 13:31:32 -04:00
* address with that node ' s " official " address ( macaddress_A ) so that upper
2013-10-30 21:10:47 +01:00
* layers recognize where it came from .
*/
2014-07-04 23:41:03 +02:00
void hsr_addr_subst_source ( struct hsr_node * node , struct sk_buff * skb )
2013-10-30 21:10:47 +01:00
{
if ( ! skb_mac_header_was_set ( skb ) ) {
WARN_ONCE ( 1 , " %s: Mac header not set \n " , __func__ ) ;
return ;
}
2019-04-05 13:31:32 -04:00
memcpy ( & eth_hdr ( skb ) - > h_source , node - > macaddress_A , ETH_ALEN ) ;
2013-10-30 21:10:47 +01:00
}
/* 'skb' is a frame meant for another host.
2014-07-04 23:41:03 +02:00
* ' port ' is the outgoing interface
2013-10-30 21:10:47 +01:00
*
* Substitute the target ( dest ) MAC address if necessary , so the it matches the
* recipient interface MAC address , regardless of whether that is the
* recipient ' s A or B interface .
* This is needed to keep the packets flowing through switches that learn on
* which " side " the different interfaces are .
*/
2014-07-04 23:41:03 +02:00
void hsr_addr_subst_dest ( struct hsr_node * node_src , struct sk_buff * skb ,
2014-07-04 23:38:05 +02:00
struct hsr_port * port )
2013-10-30 21:10:47 +01:00
{
2014-07-04 23:41:03 +02:00
struct hsr_node * node_dst ;
2022-02-05 15:40:38 +00:00
u32 hash ;
2013-10-30 21:10:47 +01:00
2014-07-04 23:41:03 +02:00
if ( ! skb_mac_header_was_set ( skb ) ) {
WARN_ONCE ( 1 , " %s: Mac header not set \n " , __func__ ) ;
return ;
}
2013-10-30 21:10:47 +01:00
2014-07-04 23:41:03 +02:00
if ( ! is_unicast_ether_addr ( eth_hdr ( skb ) - > h_dest ) )
return ;
2013-10-30 21:10:47 +01:00
2022-02-05 15:40:38 +00:00
hash = hsr_mac_hash ( port - > hsr , eth_hdr ( skb ) - > h_dest ) ;
node_dst = find_node_by_addr_A ( & port - > hsr - > node_db [ hash ] ,
2019-04-05 13:31:32 -04:00
eth_hdr ( skb ) - > h_dest ) ;
2014-07-04 23:41:03 +02:00
if ( ! node_dst ) {
2020-02-28 18:01:46 +00:00
if ( net_ratelimit ( ) )
netdev_err ( skb - > dev , " %s: Unknown node \n " , __func__ ) ;
2014-07-04 23:41:03 +02:00
return ;
}
2019-04-05 13:31:32 -04:00
if ( port - > type ! = node_dst - > addr_B_port )
2014-07-04 23:41:03 +02:00
return ;
2013-10-30 21:10:47 +01:00
2020-07-17 10:55:10 -04:00
if ( is_valid_ether_addr ( node_dst - > macaddress_B ) )
ether_addr_copy ( eth_hdr ( skb ) - > h_dest , node_dst - > macaddress_B ) ;
2013-10-30 21:10:47 +01:00
}
2014-07-04 23:41:03 +02:00
/* Record that we just heard from 'node' on 'port'.
 *
 * Don't register incoming frames without a valid sequence number. This
 * ensures entries of restarted nodes gets pruned so that they can
 * re-register and resume communications.  When the hardware strips the HSR
 * tag (NETIF_F_HW_HSR_TAG_RM) no usable sequence number reaches us, so the
 * check is skipped.
 */
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	bool check_seq = !(port->dev->features & NETIF_F_HW_HSR_TAG_RM);

	if (check_seq &&
	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}
/* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
* ethhdr - > h_source address and skb - > mac_header set .
*
* Return :
* 1 if frame can be shown to have been sent recently on this interface ,
* 0 otherwise , or
* negative error code on error
*/
2014-07-04 23:41:03 +02:00
int hsr_register_frame_out ( struct hsr_port * port , struct hsr_node * node ,
u16 sequence_nr )
2013-10-30 21:10:47 +01:00
{
2021-02-24 10:46:49 +01:00
if ( seq_nr_before_or_eq ( sequence_nr , node - > seq_out [ port - > type ] ) & &
time_is_after_jiffies ( node - > time_out [ port - > type ] +
msecs_to_jiffies ( HSR_ENTRY_FORGET_TIME ) ) )
2013-10-30 21:10:47 +01:00
return 1 ;
2021-02-24 10:46:49 +01:00
node - > time_out [ port - > type ] = jiffies ;
2014-07-04 23:38:05 +02:00
node - > seq_out [ port - > type ] = sequence_nr ;
2013-10-30 21:10:47 +01:00
return 0 ;
}
2014-07-04 23:38:05 +02:00
static struct hsr_port * get_late_port ( struct hsr_priv * hsr ,
struct hsr_node * node )
2013-10-30 21:10:47 +01:00
{
2014-07-04 23:38:05 +02:00
if ( node - > time_in_stale [ HSR_PT_SLAVE_A ] )
return hsr_port_get_hsr ( hsr , HSR_PT_SLAVE_A ) ;
if ( node - > time_in_stale [ HSR_PT_SLAVE_B ] )
return hsr_port_get_hsr ( hsr , HSR_PT_SLAVE_B ) ;
if ( time_after ( node - > time_in [ HSR_PT_SLAVE_B ] ,
node - > time_in [ HSR_PT_SLAVE_A ] +
msecs_to_jiffies ( MAX_SLAVE_DIFF ) ) )
return hsr_port_get_hsr ( hsr , HSR_PT_SLAVE_A ) ;
if ( time_after ( node - > time_in [ HSR_PT_SLAVE_A ] ,
node - > time_in [ HSR_PT_SLAVE_B ] +
msecs_to_jiffies ( MAX_SLAVE_DIFF ) ) )
return hsr_port_get_hsr ( hsr , HSR_PT_SLAVE_B ) ;
2013-10-30 21:10:47 +01:00
2014-07-04 23:38:05 +02:00
return NULL ;
2013-10-30 21:10:47 +01:00
}
/* Remove stale sequence_nr records. Called by timer every
 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
 */
void hsr_prune_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
	struct hlist_node *tmp;
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;
	int i;

	spin_lock_bh(&hsr->list_lock);

	/* Walk every hash bucket of the node table */
	for (i = 0; i < hsr->hash_buckets; i++) {
		hlist_for_each_entry_safe(node, tmp, &hsr->node_db[i],
					  mac_list) {
			/* Don't prune own node.
			 * Neither time_in[HSR_PT_SLAVE_A]
			 * nor time_in[HSR_PT_SLAVE_B], will ever be updated
			 * for the master port. Thus the master node will be
			 * repeatedly pruned leading to packet loss.
			 */
			if (hsr_addr_is_self(hsr, node->macaddress_A))
				continue;

			/* Shorthand */
			time_a = node->time_in[HSR_PT_SLAVE_A];
			time_b = node->time_in[HSR_PT_SLAVE_B];

			/* Check for timestamps old enough to
			 * risk wrap-around
			 */
			if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
				node->time_in_stale[HSR_PT_SLAVE_A] = true;
			if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
				node->time_in_stale[HSR_PT_SLAVE_B] = true;

			/* Get age of newest frame from node.
			 * At least one time_in is OK here; nodes get pruned
			 * long before both time_ins can get stale
			 */
			timestamp = time_a;
			if (node->time_in_stale[HSR_PT_SLAVE_A] ||
			    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
			    time_after(time_b, time_a)))
				timestamp = time_b;

			/* Warn of ring error only as long as we get
			 * frames at all
			 */
			if (time_is_after_jiffies(timestamp +
					msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
				rcu_read_lock();
				port = get_late_port(hsr, node);
				if (port)
					hsr_nl_ringerror(hsr,
							 node->macaddress_A,
							 port);
				rcu_read_unlock();
			}

			/* Prune old entries */
			if (time_is_before_jiffies(timestamp +
					msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
				hsr_nl_nodedown(hsr, node->macaddress_A);
				hlist_del_rcu(&node->mac_list);
				/* Note that we need to free this
				 * entry later:
				 */
				kfree_rcu(node, rcu_head);
			}
		}
	}

	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}
2014-07-04 23:34:38 +02:00
/* Iterator for the netlink node-table dump: return the node following '_pos'
 * (or the first node when '_pos' is NULL) and copy its macaddress_A into
 * 'addr'.  Returns NULL at the end of the list.
 *
 * NOTE(review): the bucket is picked by hashing the caller-supplied 'addr',
 * and hlist_for_each_entry_continue_rcu() never advances past the end of
 * that single bucket, so nodes hashed into other buckets appear to be
 * skipped by this iteration - confirm against the dump callers.
 */
void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;
	u32 hash;

	hash = hsr_mac_hash(hsr, addr);

	if (!_pos) {
		node = hsr_node_get_first(&hsr->node_db[hash],
					  &hsr->list_lock);
		if (node)
			ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	node = _pos;
	/* Loop body returns on the first successor, so at most one pass */
	hlist_for_each_entry_continue_rcu(node, mac_list) {
		ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	return NULL;
}
2014-07-04 23:34:38 +02:00
/* Fill in the netlink status report for the node whose macaddress_A is
 * 'addr': its B address, the ifindex the B address was learned on (-1 if
 * none), the per-interface frame ages in milliseconds and the last seen
 * sequence numbers.  Returns 0 on success, -ENOENT when the address is not
 * in the node table.  Caller must hold the RCU read lock (node lookup is
 * RCU-based and the entry may be freed after we return).
 */
int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;
	u32 hash;

	hash = hsr_mac_hash(hsr, addr);

	node = find_node_by_addr_A(&hsr->node_db[hash], addr);
	if (!node)
		return -ENOENT;

	ether_addr_copy(addr_b, node->macaddress_B);

	/* Age on slave A; INT_MAX when stale or not representable as an int */
	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	/* Guard jiffies_to_msecs() against overflowing an int */
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	/* Same for slave B */
	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->addr_B_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->addr_B_port);
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	return 0;
}