/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Forwarding Information Base.
 *
 * Authors:	A.N.Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#ifndef _NET_IP_FIB_H
#define _NET_IP_FIB_H

#include <net/flow.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/fib_notifier.h>
#include <net/fib_rules.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
struct fib_config {
	u8			fc_dst_len;
	u8			fc_tos;
	u8			fc_protocol;
	u8			fc_scope;
	u8			fc_type;
	u8			fc_gw_family;
	/* 2 bytes unused */
	u32			fc_table;
	__be32			fc_dst;
	union {
		__be32		fc_gw4;
		struct in6_addr	fc_gw6;
	};
	int			fc_oif;
	u32			fc_flags;
	u32			fc_priority;
	__be32			fc_prefsrc;
	u32			fc_nh_id;
	struct nlattr		*fc_mx;
	struct rtnexthop	*fc_mp;
	int			fc_mx_len;
	int			fc_mp_len;
	u32			fc_flow;
	u32			fc_nlflags;
	struct nl_info		fc_nlinfo;
	struct nlattr		*fc_encap;
	u16			fc_encap_type;
};
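/* Illustrative sketch, not part of the kernel API: roughly how a fib_config
 * could be filled for a plain unicast route such as
 * "ip route add 192.0.2.0/24 via 198.51.100.1".  The field values below are
 * assumptions for illustration only; the authoritative conversion from
 * netlink attributes is done in fib_frontend.c.
 *
 *	struct fib_config cfg = {
 *		.fc_dst_len	= 24,
 *		.fc_dst		= htonl(0xc0000200),	    192.0.2.0
 *		.fc_gw_family	= AF_INET,
 *		.fc_gw4		= htonl(0xc6336401),	    198.51.100.1
 *		.fc_protocol	= RTPROT_BOOT,
 *		.fc_scope	= RT_SCOPE_UNIVERSE,
 *		.fc_type	= RTN_UNICAST,
 *		.fc_table	= RT_TABLE_MAIN,
 *	};
 */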
struct fib_info;
struct rtable;

struct fib_nh_exception {
	struct fib_nh_exception __rcu	*fnhe_next;
	int				fnhe_genid;
	__be32				fnhe_daddr;
	u32				fnhe_pmtu;
	bool				fnhe_mtu_locked;
	__be32				fnhe_gw;
	unsigned long			fnhe_expires;
	struct rtable __rcu		*fnhe_rth_input;
	struct rtable __rcu		*fnhe_rth_output;
	unsigned long			fnhe_stamp;
	struct rcu_head			rcu;
};

struct fnhe_hash_bucket {
	struct fib_nh_exception __rcu	*chain;
};
#define FNHE_HASH_SHIFT		11
#define FNHE_HASH_SIZE		(1 << FNHE_HASH_SHIFT)
#define FNHE_RECLAIM_DEPTH	5
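/* Note: the exception cache hangs off fib_nh_common::nhc_exceptions as an
 * array of FNHE_HASH_SIZE (2048) buckets of RCU-protected chains keyed by
 * fnhe_daddr; FNHE_RECLAIM_DEPTH bounds how long a chain may grow before
 * the update code in net/ipv4/route.c starts recycling the oldest entry.
 */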
struct fib_nh_common {
	struct net_device	*nhc_dev;
	int			nhc_oif;
	unsigned char		nhc_scope;
	u8			nhc_family;
	u8			nhc_gw_family;
	unsigned char		nhc_flags;
	struct lwtunnel_state	*nhc_lwtstate;

	union {
		__be32		ipv4;
		struct in6_addr	ipv6;
	} nhc_gw;

	int			nhc_weight;
	atomic_t		nhc_upper_bound;

	/* v4 specific, but allows fib6_nh with v4 routes */
	struct rtable __rcu * __percpu *nhc_pcpu_rth_output;
	struct rtable __rcu	*nhc_rth_input;
	struct fnhe_hash_bucket	__rcu *nhc_exceptions;
};
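/* Illustrative sketch: the gateway union is interpreted according to
 * nhc_gw_family, e.g.
 *
 *	if (nhc->nhc_gw_family == AF_INET)
 *		gw4 = nhc->nhc_gw.ipv4;
 *	else if (nhc->nhc_gw_family == AF_INET6)
 *		gw6 = &nhc->nhc_gw.ipv6;
 *	else
 *		; (no gateway: on-link/direct route)
 */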
struct fib_nh {
	struct fib_nh_common	nh_common;
	struct hlist_node	nh_hash;
	struct fib_info		*nh_parent;
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			nh_tclassid;
#endif
	__be32			nh_saddr;
	int			nh_saddr_genid;
#define fib_nh_family		nh_common.nhc_family
#define fib_nh_dev		nh_common.nhc_dev
#define fib_nh_oif		nh_common.nhc_oif
#define fib_nh_flags		nh_common.nhc_flags
#define fib_nh_lws		nh_common.nhc_lwtstate
#define fib_nh_scope		nh_common.nhc_scope
#define fib_nh_gw_family	nh_common.nhc_gw_family
#define fib_nh_gw4		nh_common.nhc_gw.ipv4
#define fib_nh_gw6		nh_common.nhc_gw.ipv6
#define fib_nh_weight		nh_common.nhc_weight
#define fib_nh_upper_bound	nh_common.nhc_upper_bound
};
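/* The fib_nh_* defines above alias straight into the embedded nh_common,
 * so nh->fib_nh_dev and nh->nh_common.nhc_dev name the same field; this
 * lets IPv4-specific code keep its historical names while sharing
 * fib_nh_common with the IPv6 nexthop.
 */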
/*
 * This structure contains data shared by many routes.
 */
struct nexthop;

struct fib_info {
	struct hlist_node	fib_hash;
	struct hlist_node	fib_lhash;
	struct list_head	nh_list;
	struct net		*fib_net;
	int			fib_treeref;
	refcount_t		fib_clntref;
	unsigned int		fib_flags;
	unsigned char		fib_dead;
	unsigned char		fib_protocol;
	unsigned char		fib_scope;
	unsigned char		fib_type;
	__be32			fib_prefsrc;
	u32			fib_tb_id;
	u32			fib_priority;
	struct dst_metrics	*fib_metrics;
#define fib_mtu		fib_metrics->metrics[RTAX_MTU-1]
#define fib_window	fib_metrics->metrics[RTAX_WINDOW-1]
#define fib_rtt		fib_metrics->metrics[RTAX_RTT-1]
#define fib_advmss	fib_metrics->metrics[RTAX_ADVMSS-1]
	int			fib_nhs;
	bool			fib_nh_is_v6;
	bool			nh_updated;
	struct nexthop		*nh;
	struct rcu_head		rcu;
	struct fib_nh		fib_nh[0];
};
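/* Illustrative sketch, assuming the legacy case where fi->nh is NULL: the
 * nexthops of a fib_info are the trailing array, walked as
 *
 *	for (i = 0; i < fi->fib_nhs; i++) {
 *		struct fib_nh *nh = &fi->fib_nh[i];
 *		...
 *	}
 *
 * When the route references a nexthop object instead, fi->nh is set and
 * the trailing fib_nh array is not used.
 */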
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rule;
#endif

struct fib_table;

struct fib_result {
	__be32			prefix;
	unsigned char		prefixlen;
	unsigned char		nh_sel;
	unsigned char		type;
	unsigned char		scope;
	u32			tclassid;
	struct fib_nh_common	*nhc;
	struct fib_info		*fi;
	struct fib_table	*table;
	struct hlist_head	*fa_head;
};
struct fib_result_nl {
	__be32		fl_addr;	/* To be looked up */
	u32		fl_mark;
	unsigned char	fl_tos;
	unsigned char	fl_scope;
	unsigned char	tb_id_in;
	unsigned char	tb_id;		/* Results */
	unsigned char	prefixlen;
	unsigned char	nh_sel;
	unsigned char	type;
	unsigned char	scope;
	int		err;
};
#ifdef CONFIG_IP_MULTIPLE_TABLES
#define FIB_TABLE_HASHSZ 256
#else
#define FIB_TABLE_HASHSZ 2
#endif
__be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
				 unsigned char scope);
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res);

#define FIB_RES_NHC(res)	((res).nhc)
#define FIB_RES_DEV(res)	(FIB_RES_NHC(res)->nhc_dev)
#define FIB_RES_OIF(res)	(FIB_RES_NHC(res)->nhc_oif)
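/* Example: after a successful lookup, callers read the selected nexthop
 * through these accessors, e.g.
 *
 *	struct fib_result res;
 *	...
 *	dev = FIB_RES_DEV(res);
 *	oif = FIB_RES_OIF(res);
 *
 * Note that res is the structure itself, not a pointer: the macros expand
 * to (res).nhc->... .
 */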
struct fib_entry_notifier_info {
	struct fib_notifier_info info; /* must be first */
	u32 dst;
	int dst_len;
	struct fib_info *fi;
	u8 tos;
	u8 type;
	u32 tb_id;
};

struct fib_nh_notifier_info {
	struct fib_notifier_info info; /* must be first */
	struct fib_nh *fib_nh;
};

int call_fib4_notifier(struct notifier_block *nb, struct net *net,
		       enum fib_event_type event_type,
		       struct fib_notifier_info *info);
int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
			struct fib_notifier_info *info);

int __net_init fib4_notifier_init(struct net *net);
void __net_exit fib4_notifier_exit(struct net *net);

void fib_info_notify_update(struct net *net, struct nl_info *info);
void fib_notify(struct net *net, struct notifier_block *nb);
struct fib_table {
	struct hlist_node	tb_hlist;
	u32			tb_id;
	int			tb_num_default;
	struct rcu_head		rcu;
	unsigned long		*tb_data;
	unsigned long		__data[0];
};
struct fib_dump_filter {
	u32			table_id;
	/* filter_set indicates that at least one filter field is set */
	bool			filter_set;
	bool			dump_all_families;
	bool			dump_routes;
	bool			dump_exceptions;
	unsigned char		protocol;
	unsigned char		rt_type;
	unsigned int		flags;
	struct net_device	*dev;
};
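/* Illustrative sketch with assumed values: a strict dump request limited to
 * exception routes in the main table might be described as
 *
 *	struct fib_dump_filter filter = {
 *		.table_id	 = RT_TABLE_MAIN,
 *		.filter_set	 = true,
 *		.dump_exceptions = true,
 *	};
 *
 * ip_valid_fib_dump_req() fills the filter from the request; for non-strict
 * (legacy) requests both dump_routes and dump_exceptions are left set.
 */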
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
		     struct fib_result *res, int fib_flags);
int fib_table_insert(struct net *, struct fib_table *, struct fib_config *,
		     struct netlink_ext_ack *extack);
int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
		     struct netlink_ext_ack *extack);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
		   struct netlink_callback *cb, struct fib_dump_filter *filter);
int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
2005-04-17 02:20:36 +04:00
# ifndef CONFIG_IP_MULTIPLE_TABLES
2014-12-02 21:58:21 +03:00
# define TABLE_LOCAL_INDEX (RT_TABLE_LOCAL & (FIB_TABLE_HASHSZ - 1))
# define TABLE_MAIN_INDEX (RT_TABLE_MAIN & (FIB_TABLE_HASHSZ - 1))
2005-04-17 02:20:36 +04:00
2008-01-10 14:24:11 +03:00
static inline struct fib_table * fib_get_table ( struct net * net , u32 id )
2005-04-17 02:20:36 +04:00
{
2015-03-05 02:02:44 +03:00
struct hlist_node * tb_hlist ;
2008-01-10 14:23:38 +03:00
struct hlist_head * ptr ;
ptr = id = = RT_TABLE_LOCAL ?
2008-01-10 14:28:24 +03:00
& net - > ipv4 . fib_table_hash [ TABLE_LOCAL_INDEX ] :
& net - > ipv4 . fib_table_hash [ TABLE_MAIN_INDEX ] ;
2015-03-05 02:02:44 +03:00
tb_hlist = rcu_dereference_rtnl ( hlist_first_rcu ( ptr ) ) ;
return hlist_entry ( tb_hlist , struct fib_table , tb_hlist ) ;
2005-04-17 02:20:36 +04:00
}
2008-01-10 14:24:11 +03:00
static inline struct fib_table * fib_new_table ( struct net * net , u32 id )
2005-04-17 02:20:36 +04:00
{
2008-01-10 14:24:11 +03:00
return fib_get_table ( net , id ) ;
2005-04-17 02:20:36 +04:00
}
static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
			     struct fib_result *res, unsigned int flags)
{
	struct fib_table *tb;
	int err = -ENETUNREACH;

	rcu_read_lock();

	tb = fib_get_table(net, RT_TABLE_MAIN);
	if (tb)
		err = fib_table_lookup(tb, flp, res, flags | FIB_LOOKUP_NOREF);

	if (err == -EAGAIN)
		err = -ENETUNREACH;

	rcu_read_unlock();

	return err;
}
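/* Illustrative sketch of a caller, assuming only the destination matters:
 *
 *	struct flowi4 fl4 = { .daddr = daddr };
 *	struct fib_result res;
 *	int err;
 *
 *	err = fib_lookup(net, &fl4, &res, 0);
 *	if (err)
 *		return err;	   (-ENETUNREACH when nothing matched)
 *
 * Since FIB_LOOKUP_NOREF is forced on, res.fi is only guaranteed to stay
 * valid under RCU or after taking a reference with fib_info_hold().
 */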
static inline bool fib4_rule_default(const struct fib_rule *rule)
{
	return true;
}

static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}

static inline unsigned int fib4_rules_seq_read(struct net *net)
{
	return 0;
}

static inline bool fib4_rules_early_flow_dissect(struct net *net,
						 struct sk_buff *skb,
						 struct flowi4 *fl4,
						 struct flow_keys *flkeys)
{
	return false;
}
#else /* CONFIG_IP_MULTIPLE_TABLES */
int __net_init fib4_rules_init(struct net *net);
void __net_exit fib4_rules_exit(struct net *net);

struct fib_table *fib_new_table(struct net *net, u32 id);
struct fib_table *fib_get_table(struct net *net, u32 id);

int __fib_lookup(struct net *net, struct flowi4 *flp,
		 struct fib_result *res, unsigned int flags);
static inline int fib_lookup(struct net *net, struct flowi4 *flp,
			     struct fib_result *res, unsigned int flags)
{
	struct fib_table *tb;
	int err = -ENETUNREACH;

	flags |= FIB_LOOKUP_NOREF;
	if (net->ipv4.fib_has_custom_rules)
		return __fib_lookup(net, flp, res, flags);

	rcu_read_lock();

	res->tclassid = 0;

	tb = rcu_dereference_rtnl(net->ipv4.fib_main);
	if (tb)
		err = fib_table_lookup(tb, flp, res, flags);

	if (!err)
		goto out;

	tb = rcu_dereference_rtnl(net->ipv4.fib_default);
	if (tb)
		err = fib_table_lookup(tb, flp, res, flags);

out:
	if (err == -EAGAIN)
		err = -ENETUNREACH;

	rcu_read_unlock();

	return err;
}
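/* With CONFIG_IP_MULTIPLE_TABLES, the fast path above still walks only the
 * main and default tables directly; as soon as any custom policy rule
 * exists (net->ipv4.fib_has_custom_rules), the lookup is delegated to
 * __fib_lookup(), which evaluates the FIB rules in order.
 */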
bool fib4_rule_default(const struct fib_rule *rule);
int fib4_rules_dump(struct net *net, struct notifier_block *nb);
unsigned int fib4_rules_seq_read(struct net *net);
static inline bool fib4_rules_early_flow_dissect(struct net *net,
						 struct sk_buff *skb,
						 struct flowi4 *fl4,
						 struct flow_keys *flkeys)
{
	unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;

	if (!net->ipv4.fib_rules_require_fldissect)
		return false;

	skb_flow_dissect_flow_keys(skb, flkeys, flag);
	fl4->fl4_sport = flkeys->ports.src;
	fl4->fl4_dport = flkeys->ports.dst;
	fl4->flowi4_proto = flkeys->basic.ip_proto;

	return true;
}
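/* Illustrative sketch of the intended call pattern on the input path,
 * simplified from the route input code: dissect once, then reuse the keys
 * for rule matching and multipath hashing:
 *
 *	struct flow_keys _flkeys, *flkeys = NULL;
 *
 *	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys))
 *		flkeys = &_flkeys;
 *	err = fib_lookup(net, &fl4, res, 0);
 */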
#endif /* CONFIG_IP_MULTIPLE_TABLES */

/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
		    struct netlink_ext_ack *extack);
__be32 fib_compute_spec_dst(struct sk_buff *skb);
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
			u8 tos, int oif, struct net_device *dev,
			struct in_device *idev, u32 *itag);
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
	return net->ipv4.fib_num_tclassid_users;
}
#else
static inline int fib_num_tclassid_users(struct net *net)
{
	return 0;
}
#endif
int fib_unmerge(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig);

#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys);
#endif
int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope,
		 struct netlink_ext_ack *extack);
void fib_select_multipath(struct fib_result *res, int hash);
void fib_select_path(struct net *net, struct fib_result *res,
		     struct flowi4 *fl4, const struct sk_buff *skb);

int fib_nh_init(struct net *net, struct fib_nh *fib_nh,
		struct fib_config *cfg, int nh_weight,
		struct netlink_ext_ack *extack);
void fib_nh_release(struct net *net, struct fib_nh *fib_nh);
int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *fc_encap,
		       u16 fc_encap_type, void *cfg, gfp_t gfp_flags,
		       struct netlink_ext_ack *extack);
void fib_nh_common_release(struct fib_nh_common *nhc);
/* Exported by fib_trie.c */
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	struct fib_nh_common *nhc = res->nhc;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	u32 rtag;
#endif
	if (nhc->nhc_family == AF_INET) {
		struct fib_nh *nh;

		nh = container_of(nhc, struct fib_nh, nh_common);
		*itag = nh->nh_tclassid << 16;
	} else {
		*itag = 0;
	}

#ifdef CONFIG_IP_MULTIPLE_TABLES
	rtag = res->tclassid;
	if (*itag == 0)
		*itag = (rtag << 16);
	*itag |= (rtag >> 16);
#endif
#endif
}
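/* Note: the resulting itag carries the nexthop's tclassid in its upper
 * 16 bits; with multiple tables the route/rule realms (res->tclassid) are
 * folded in with their two 16-bit halves swapped, which exchanges source
 * and destination realms for the incoming direction.
 */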
void fib_flush(struct net *net);
void free_fib_info(struct fib_info *fi);

static inline void fib_info_hold(struct fib_info *fi)
{
	refcount_inc(&fi->fib_clntref);
}

static inline void fib_info_put(struct fib_info *fi)
{
	if (refcount_dec_and_test(&fi->fib_clntref))
		free_fib_info(fi);
}
#ifdef CONFIG_PROC_FS
int __net_init fib_proc_init(struct net *net);
void __net_exit fib_proc_exit(struct net *net);
#else
static inline int fib_proc_init(struct net *net)
{
	return 0;
}
static inline void fib_proc_exit(struct net *net)
{
}
#endif
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);

int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
			  struct fib_dump_filter *filter,
			  struct netlink_callback *cb);

int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
		     u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
		    int nh_weight, u8 rt_family);
#endif	/* _NET_IP_FIB_H */