/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque	<roque@di.fc.ul.pt>
 */
# ifndef _IP6_FIB_H
# define _IP6_FIB_H
# include <linux/ipv6_route.h>
# include <linux/rtnetlink.h>
# include <linux/spinlock.h>
2017-08-03 14:28:16 +03:00
# include <linux/notifier.h>
2006-08-22 11:01:08 +04:00
# include <net/dst.h>
# include <net/flow.h>
2019-03-28 06:53:57 +03:00
# include <net/ip_fib.h>
2006-08-22 11:01:08 +04:00
# include <net/netlink.h>
2010-11-30 23:27:11 +03:00
# include <net/inetpeer.h>
2017-08-03 14:28:16 +03:00
# include <net/fib_notifier.h>
2020-06-23 19:42:32 +03:00
# include <linux/indirect_call_wrapper.h>
2005-04-17 02:20:36 +04:00
2009-07-31 05:52:15 +04:00
/* Number of buckets in the per-netns fib6 table hash; a single bucket
 * suffices when policy routing (multiple tables) is compiled out.
 */
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_HASHSZ 256
#else
#define FIB6_TABLE_HASHSZ 1
#endif

#define RT6_DEBUG 2

/* RT6_TRACE() compiles to pr_debug() only at debug level >= 3;
 * otherwise it is a no-op that still swallows its arguments safely.
 */
#if RT6_DEBUG >= 3
#define RT6_TRACE(x...) pr_debug(x)
#else
#define RT6_TRACE(x...) do { ; } while (0)
#endif
2005-04-17 02:20:36 +04:00
struct rt6_info ;
2018-04-18 03:33:24 +03:00
struct fib6_info ;
2005-04-17 02:20:36 +04:00
2009-11-03 06:26:03 +03:00
/* User-supplied route configuration, decoded from an RTM_NEWROUTE /
 * RTM_DELROUTE netlink request before it is turned into a fib6_info.
 */
struct fib6_config {
	u32		fc_table;	/* table id the route belongs to */
	u32		fc_metric;	/* route priority */
	int		fc_dst_len;	/* destination prefix length */
	int		fc_src_len;	/* source prefix length (subtrees) */
	int		fc_ifindex;	/* output device index */
	u32		fc_flags;	/* RTF_* flags */
	u32		fc_protocol;	/* routing protocol (RTPROT_*) */
	u16		fc_type;	/* only 8 bits are used */
	u16		fc_delete_all_nh : 1,
			fc_ignore_dev_down : 1,
			__unused : 14;
	u32		fc_nh_id;	/* external nexthop object id */

	struct in6_addr	fc_dst;		/* destination prefix */
	struct in6_addr	fc_src;		/* source prefix */
	struct in6_addr	fc_prefsrc;	/* preferred source address */
	struct in6_addr	fc_gateway;	/* gateway address */

	unsigned long	fc_expires;	/* lifetime, 0 == permanent */
	struct nlattr	*fc_mx;		/* RTA_METRICS attribute */
	int		fc_mx_len;
	int		fc_mp_len;	/* multipath attribute length */
	struct nlattr	*fc_mp;		/* RTA_MULTIPATH attribute */

	struct nl_info	fc_nlinfo;	/* netlink request context */
	struct nlattr	*fc_encap;	/* lwtunnel encap attribute */
	u16		fc_encap_type;
	bool		fc_is_fdb;	/* route created via FDB (EVPN) path */
};
2009-11-03 06:26:03 +03:00
/* Node of the IPv6 FIB radix tree.
 *
 * All fib6_node pointers are RCU protected: readers walk the tree under
 * rcu_read_lock(), writers serialize on the per-table tb6_lock spinlock,
 * and freed nodes are released via call_rcu() (see the rcu member).
 */
struct fib6_node {
	struct fib6_node __rcu	*parent;
	struct fib6_node __rcu	*left;
	struct fib6_node __rcu	*right;
#ifdef CONFIG_IPV6_SUBTREES
	/* root of the source-address subtree hanging off this node */
	struct fib6_node __rcu	*subtree;
#endif
	/* head of the RCU-protected list of routes stored at this node */
	struct fib6_info __rcu	*leaf;

	__u16			fn_bit;		/* bit key */
	__u16			fn_flags;
	/* serial number used as the dst cookie; bumped on tree changes */
	int			fn_sernum;
	/* round-robin pointer for ECMP route selection; may be updated
	 * from rt6_select() under tb6_lock even though that is a read path
	 */
	struct fib6_info __rcu	*rr_ptr;
	struct rcu_head		rcu;	/* deferred free after grace period */
};
2017-10-06 22:06:01 +03:00
struct fib6_gc_args {
int timeout ;
int more ;
} ;
2006-08-24 04:22:24 +04:00
#ifndef CONFIG_IPV6_SUBTREES
#define FIB6_SUBTREE(fn)	NULL

/* Without subtree support source-based routing can never be required,
 * so these helpers collapse to constants / no-ops.
 */
static inline bool fib6_routes_require_src(const struct net *net)
{
	return false;
}
static inline void fib6_routes_require_src_inc(struct net *net) {}
static inline void fib6_routes_require_src_dec(struct net *net) {}

#else

/* True while at least one route in this netns carries a source prefix,
 * i.e. lookups must consider the source address.
 */
static inline bool fib6_routes_require_src(const struct net *net)
{
	return net->ipv6.fib6_routes_require_src > 0;
}

static inline void fib6_routes_require_src_inc(struct net *net)
{
	net->ipv6.fib6_routes_require_src++;
}

static inline void fib6_routes_require_src_dec(struct net *net)
{
	net->ipv6.fib6_routes_require_src--;
}

/* Callers hold tb6_lock, hence the unconditional protected dereference. */
#define FIB6_SUBTREE(fn)	(rcu_dereference_protected((fn)->subtree, 1))
#endif
2005-04-17 02:20:36 +04:00
/*
* routing information
*
*/
2009-11-03 06:26:03 +03:00
/* Address + prefix length pair used as a radix-tree key. */
struct rt6key {
	struct in6_addr	addr;
	int		plen;	/* prefix length in bits */
};
2006-08-05 10:20:06 +04:00
struct fib6_table ;
2017-10-06 22:05:57 +03:00
/* One hash bucket of cached route exceptions (e.g. PMTU/redirect clones). */
struct rt6_exception_bucket {
	struct hlist_head	chain;
	int			depth;	/* current chain length */
};

/* A single cached exception route hanging off a fib6_nh bucket. */
struct rt6_exception {
	struct hlist_node	hlist;
	struct rt6_info		*rt6i;
	unsigned long		stamp;	/* last-used time for aging */
	struct rcu_head		rcu;	/* freed after an RCU grace period */
};

#define FIB6_EXCEPTION_BUCKET_SIZE_SHIFT 10
#define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT)
#define FIB6_MAX_DEPTH 5	/* max exceptions per bucket chain */
2018-04-18 03:33:14 +03:00
/* IPv6 nexthop: common nexthop state plus the per-cpu dst cache and
 * the route-exception hash that hang off each nexthop.
 */
struct fib6_nh {
	struct fib_nh_common	nh_common;

#ifdef CONFIG_IPV6_ROUTER_PREF
	unsigned long		last_probe;	/* last neigh probe time */
#endif

	/* per-cpu cache of rt6_info dsts created from this nexthop */
	struct rt6_info * __percpu *rt6i_pcpu;
	struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
};
2005-04-17 02:20:36 +04:00
2018-04-18 03:33:24 +03:00
/* An IPv6 FIB entry (one route). Linked into a fib6_node's leaf list
 * via fib6_next; reference counted with fib6_ref and freed through RCU.
 */
struct fib6_info {
	struct fib6_table		*fib6_table;
	struct fib6_info __rcu		*fib6_next;	/* next route on the node */
	struct fib6_node __rcu		*fib6_node;	/* owning tree node */

	/* Multipath routes:
	 * siblings is a list of fib6_info that have the same metric/weight,
	 * destination, but not the same gateway. nsiblings is just a cache
	 * to speed up lookup.
	 */
	union {
		struct list_head	fib6_siblings;
		struct list_head	nh_list;	/* when nh is external */
	};
	unsigned int			fib6_nsiblings;

	refcount_t			fib6_ref;
	unsigned long			expires;	/* jiffies expiry stamp */
	struct dst_metrics		*fib6_metrics;
#define fib6_pmtu		fib6_metrics->metrics[RTAX_MTU-1]

	struct rt6key			fib6_dst;
	u32				fib6_flags;	/* RTF_* flags */
	struct rt6key			fib6_src;
	struct rt6key			fib6_prefsrc;

	u32				fib6_metric;
	u8				fib6_protocol;
	u8				fib6_type;

	u8				should_flush:1,
					dst_nocount:1,
					dst_nopolicy:1,
					fib6_destroying:1,
					offload:1,
					trap:1,
					offload_failed:1,
					unused:1;

	struct rcu_head			rcu;
	struct nexthop			*nh;	/* external nexthop object, or NULL */
	struct fib6_nh			fib6_nh[];	/* inline nexthop(s) */
};
2009-11-03 06:26:03 +03:00
/* IPv6 dst cache entry. Embeds dst_entry as its FIRST member (relied on
 * by ip6_rt_put()) and points back to the originating fib6_info via 'from'.
 */
struct rt6_info {
	struct dst_entry		dst;
	struct fib6_info __rcu		*from;	/* fib entry this was cloned from */
	/* When non-zero this dst is validated against the per-netns
	 * generation id instead of the per-fib6_node sernum; set for pcpu
	 * dsts built from external nexthop objects, which may be shared
	 * across disparate fib6_nodes.
	 */
	int				sernum;

	struct rt6key			rt6i_dst;
	struct rt6key			rt6i_src;
	struct in6_addr			rt6i_gateway;
	struct inet6_dev		*rt6i_idev;
	u32				rt6i_flags;

	/* membership in the uncached-routes list (dsts with no fib entry) */
	struct list_head		rt6i_uncached;
	struct uncached_list		*rt6i_uncached_list;

	/* more non-fragment space at head required */
	unsigned short			rt6i_nfheader_len;
};
2019-04-17 00:35:59 +03:00
/* Output of a fib6 lookup: the selected fib entry, the chosen nexthop,
 * and (optionally) the rt6_info dst built from them. fib6_flags/fib6_type
 * may differ from f6i's own values (e.g. for default-route handling).
 */
struct fib6_result {
	struct fib6_nh		*nh;
	struct fib6_info	*f6i;
	u32			fib6_flags;
	u8			fib6_type;
	struct rt6_info		*rt6;
};
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-06 22:06:10 +03:00
/* Iterate over every route on node @fn; caller must be in an RCU
 * read-side critical section. Expects a 'struct fib6_info *rt' in scope.
 */
#define for_each_fib6_node_rt_rcu(fn)					\
	for (rt = rcu_dereference((fn)->leaf); rt;			\
	     rt = rcu_dereference(rt->fib6_next))
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-06 22:06:10 +03:00
/* Iterate over every route at the walker's current leaf; caller holds
 * tb6_lock (hence the protected dereference). Expects 'rt' in scope.
 */
#define for_each_fib6_walker_rt(w)					\
	for (rt = (w)->leaf; rt;					\
	     rt = rcu_dereference_protected(rt->fib6_next, 1))
ipv6: replace rwlock with rcu and spinlock in fib6_table
With all the preparation work before, we are now ready to replace rwlock
with rcu and spinlock in fib6_table.
That means now all fib6_node in fib6_table are protected by rcu. And
when freeing fib6_node, call_rcu() is used to wait for the rcu grace
period before releasing the memory.
When accessing fib6_node, corresponding rcu APIs need to be used.
And all previous sessions protected by the write lock will now be
protected by the spin lock per table.
All previous sessions protected by read lock will now be protected by
rcu_read_lock().
A couple of things to note here:
1. As part of the work of replacing rwlock with rcu, the linked list of
fn->leaf now has to be rcu protected as well. So both fn->leaf and
rt->dst.rt6_next are now __rcu tagged and corresponding rcu APIs are
used when manipulating them.
2. For fn->rr_ptr, first of all, it also needs to be rcu protected now
and is tagged with __rcu and rcu APIs are used in corresponding places.
Secondly, fn->rr_ptr is changed in rt6_select() which is a reader
thread. This makes the issue a bit complicated. We think a valid
solution for it is to let rt6_select() grab the tb6_lock if it decides
to change it. As it is not in the normal operation and only happens when
there is no valid neighbor cache for the route, we think the performance
impact should be low.
3. fib6_walk_continue() has to be called with tb6_lock held even in the
route dumping related functions, e.g. inet6_dump_fib(),
fib6_tables_dump() and ipv6_route_seq_ops. It is because
fib6_walk_continue() makes modifications to the walker structure, and so
are fib6_repair_tree() and fib6_del_route(). In order to do proper
syncing between them, we need to let fib6_walk_continue() hold the lock.
We may be able to do further improvement on the way we do the tree walk
to get rid of the need for holding the spin lock. But not for now.
4. When fib6_del_route() removes a route from the tree, we no longer
mark rt->dst.rt6_next to NULL to make simultaneous reader be able to
further traverse the list with rcu. However, rt->dst.rt6_next is only
valid within this same rcu period. No one should access it later.
5. All the operation of atomic_inc(rt->rt6i_ref) is changed to be
performed before we publish this route (either by linking it to fn->leaf
or insert it in the list pointed by fn->leaf) just to be safe because as
soon as we publish the route, some read thread will be able to access it.
Signed-off-by: Wei Wang <weiwan@google.com>
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-06 22:06:10 +03:00
2006-10-13 11:17:25 +04:00
static inline struct inet6_dev * ip6_dst_idev ( struct dst_entry * dst )
{
return ( ( struct rt6_info * ) dst ) - > rt6i_idev ;
}
2019-11-20 15:47:34 +03:00
static inline bool fib6_requires_src ( const struct fib6_info * rt )
{
return rt - > fib6_src . plen > 0 ;
}
2018-04-18 03:33:26 +03:00
/* Make a route permanent: clear RTF_EXPIRES first so concurrent readers
 * checking the flag stop consulting the (about to be zeroed) stamp.
 */
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
	f6i->fib6_flags &= ~RTF_EXPIRES;
	f6i->expires = 0;
}
2018-04-18 03:33:26 +03:00
/* Arm a route's expiry: store the jiffies stamp before setting
 * RTF_EXPIRES so readers that see the flag also see a valid stamp.
 */
static inline void fib6_set_expires(struct fib6_info *f6i,
				    unsigned long expires)
{
	f6i->expires = expires;
	f6i->fib6_flags |= RTF_EXPIRES;
}
2018-04-18 03:33:26 +03:00
static inline bool fib6_check_expired ( const struct fib6_info * f6i )
2012-04-06 04:13:10 +04:00
{
2018-04-19 01:38:59 +03:00
if ( f6i - > fib6_flags & RTF_EXPIRES )
2018-04-18 03:33:17 +03:00
return time_after ( jiffies , f6i - > expires ) ;
return false ;
2012-04-06 04:13:10 +04:00
}
2015-05-23 06:56:01 +03:00
2017-08-21 19:47:10 +03:00
/* Function to safely get fn->sernum for passed in rt
* and store result in passed in cookie .
* Return true if we can get cookie safely
* Return false if not
*/
2018-04-21 01:37:58 +03:00
static inline bool fib6_get_cookie_safe ( const struct fib6_info * f6i ,
u32 * cookie )
2017-08-21 19:47:10 +03:00
{
struct fib6_node * fn ;
bool status = false ;
2018-04-19 01:38:59 +03:00
fn = rcu_dereference ( f6i - > fib6_node ) ;
2017-08-21 19:47:10 +03:00
if ( fn ) {
* cookie = fn - > fn_sernum ;
2017-10-06 22:06:07 +03:00
/* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
smp_rmb ( ) ;
2017-08-21 19:47:10 +03:00
status = true ;
}
return status ;
}
2015-05-23 06:56:01 +03:00
/* Compute the dst-validation cookie for an rt6_info.
 *
 * Fast path: a non-zero rt->sernum means this dst was built from an
 * external nexthop object and is validated against the per-netns
 * generation id, so return it directly. Otherwise walk back through
 * 'from' (under RCU, since the fib entry may be unlinked concurrently)
 * to the owning fib6_node's sernum; 0 if the entry is already gone.
 */
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
	struct fib6_info *from;
	u32 cookie = 0;

	if (rt->sernum)
		return rt->sernum;

	rcu_read_lock();

	from = rcu_dereference(rt->from);
	if (from)
		fib6_get_cookie_safe(from, &cookie);

	rcu_read_unlock();

	return cookie;
}
2012-04-06 04:13:10 +04:00
2012-10-29 04:13:19 +04:00
static inline void ip6_rt_put ( struct rt6_info * rt )
{
/* dst_release() accepts a NULL parameter.
* We rely on dst being first structure in struct rt6_info
*/
BUILD_BUG_ON ( offsetof ( struct rt6_info , dst ) ! = 0 ) ;
dst_release ( & rt - > dst ) ;
}
2019-05-23 06:27:59 +03:00
struct fib6_info * fib6_info_alloc ( gfp_t gfp_flags , bool with_fib6_nh ) ;
2018-06-18 15:24:31 +03:00
void fib6_info_destroy_rcu ( struct rcu_head * head ) ;
2017-08-03 14:28:25 +03:00
2018-04-18 03:33:26 +03:00
/* Take an additional reference on @f6i.  Plain refcount_inc(): the caller
 * must already hold a reference; use fib6_info_hold_safe() otherwise.
 */
static inline void fib6_info_hold(struct fib6_info *f6i)
{
	refcount_inc(&f6i->fib6_ref);
}
2018-07-22 06:56:32 +03:00
/* Try to take a reference on @f6i.  Returns false if the refcount has
 * already dropped to zero (entry is being torn down).
 */
static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
{
	return refcount_inc_not_zero(&f6i->fib6_ref);
}
2018-04-18 03:33:26 +03:00
/* Drop a reference on @f6i (NULL-safe).  The final put frees the entry
 * after an RCU grace period via fib6_info_destroy_rcu().
 */
static inline void fib6_info_release(struct fib6_info *f6i)
{
	if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
		call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
2014-10-06 21:58:34 +04:00
enum fib6_walk_state {
# ifdef CONFIG_IPV6_SUBTREES
FWS_S ,
# endif
FWS_L ,
FWS_R ,
FWS_C ,
FWS_U
} ;
/* Context for an iterative walk over a fib6 tree.  A walk can be
 * suspended and resumed (e.g. multi-message netlink dumps), which is
 * what the skip counters below support.
 */
struct fib6_walker {
	struct list_head lh;			/* linkage on a walker list */
	struct fib6_node *root, *node;		/* tree root / current position */
	struct fib6_info *leaf;			/* current route entry */
	enum fib6_walk_state state;		/* what to visit next */
	unsigned int skip;			/* entries to skip when resuming */
	unsigned int count;			/* entries handled this pass */
	/* Entries (incl. exceptions) already dumped from the current node.
	 * Zeroed when a resumed dump restarts from a different node, since
	 * a sernum change may move the starting node.
	 */
	unsigned int skip_in_node;
	int (*func)(struct fib6_walker *);	/* per-node callback */
	void *args;				/* opaque argument for func */
};
/* Per-table IPv6 routing statistics. */
struct rt6_statistics {
	__u32 fib_nodes;		/* all fib6 nodes */
	__u32 fib_route_nodes;		/* intermediate nodes */
	__u32 fib_rt_entries;		/* rt entries in fib table */
	__u32 fib_rt_cache;		/* cached rt entries in exception table */
	__u32 fib_discarded_routes;	/* total number of routes deleted */

	/* The following stats are not protected by any lock */
	atomic_t fib_rt_alloc;		/* total number of routes alloced */
	atomic_t fib_rt_uncache;	/* rt entries in uncached list */
};
# define RTN_TL_ROOT 0x0001
# define RTN_ROOT 0x0002 /* tree root node */
# define RTN_RTINFO 0x0004 /* node with valid routing info */
/*
* priority levels ( or metrics )
*
*/
2006-08-05 10:20:06 +04:00
/* An IPv6 routing table.  Writers serialize on tb6_lock; readers walk
 * the tree under rcu_read_lock() (fn->leaf and the route lists are RCU
 * protected).
 */
struct fib6_table {
	struct hlist_node tb6_hlist;	/* hash-chain linkage for table lookup */
	u32 tb6_id;			/* table identifier (RT_TABLE_*) */
	spinlock_t tb6_lock;		/* serializes all tree modifications */
	struct fib6_node tb6_root;	/* root node of the tree */
	struct inet_peer_base tb6_peers;
	unsigned int flags;		/* RT6_TABLE_* bits below */
	unsigned int fib_seq;		/* read via fib6_tables_seq_read() */
#define RT6_TABLE_HAS_DFLT_ROUTER	BIT(0)
};
# define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
# define RT6_TABLE_MAIN RT_TABLE_MAIN
# define RT6_TABLE_DFLT RT6_TABLE_MAIN
# define RT6_TABLE_INFO RT6_TABLE_MAIN
# define RT6_TABLE_PREFIX RT6_TABLE_MAIN
# ifdef CONFIG_IPV6_MULTIPLE_TABLES
# define FIB6_TABLE_MIN 1
# define FIB6_TABLE_MAX RT_TABLE_MAX
2006-08-04 14:39:02 +04:00
# define RT6_TABLE_LOCAL RT_TABLE_LOCAL
2006-08-05 10:20:06 +04:00
# else
# define FIB6_TABLE_MIN RT_TABLE_MAIN
# define FIB6_TABLE_MAX FIB6_TABLE_MIN
2006-08-04 14:39:02 +04:00
# define RT6_TABLE_LOCAL RT6_TABLE_MAIN
2006-08-05 10:20:06 +04:00
# endif
2008-03-05 00:48:30 +03:00
/* Signature of a policy-routing lookup function; dispatched through
 * pol_lookup_func() further down in this header.
 */
typedef struct rt6_info *(*pol_lookup_t)(struct net *,
					 struct fib6_table *,
					 struct flowi6 *,
					 const struct sk_buff *, int);
2005-04-17 02:20:36 +04:00
2017-08-03 14:28:17 +03:00
/* Payload handed to fib notifier chains for IPv6 route entry events. */
struct fib6_entry_notifier_info {
	struct fib_notifier_info info; /* must be first */
	struct fib6_info *rt;		/* the route the event is about */
	unsigned int nsiblings;		/* sibling count for multipath events,
					 * see call_fib6_multipath_entry_notifiers()
					 */
};
2005-04-17 02:20:36 +04:00
/*
* exported functions
*/
2013-09-21 21:22:42 +04:00
struct fib6_table * fib6_get_table ( struct net * net , u32 id ) ;
struct fib6_table * fib6_new_table ( struct net * net , u32 id ) ;
struct dst_entry * fib6_rule_lookup ( struct net * net , struct flowi6 * fl6 ,
2018-03-02 19:32:17 +03:00
const struct sk_buff * skb ,
2013-09-21 21:22:42 +04:00
int flags , pol_lookup_t lookup ) ;
2006-08-05 10:20:06 +04:00
2018-05-10 06:34:23 +03:00
/* called with rcu lock held; can return error pointer
* caller needs to select path
*/
2019-04-17 00:36:10 +03:00
int fib6_lookup ( struct net * net , int oif , struct flowi6 * fl6 ,
struct fib6_result * res , int flags ) ;
2018-05-10 06:34:23 +03:00
2018-05-10 06:34:21 +03:00
/* called with rcu lock held; caller needs to select path */
2019-04-17 00:36:10 +03:00
int fib6_table_lookup ( struct net * net , struct fib6_table * table ,
int oif , struct flowi6 * fl6 , struct fib6_result * res ,
int strict ) ;
2018-05-10 06:34:21 +03:00
2019-04-17 00:35:59 +03:00
void fib6_select_path ( const struct net * net , struct fib6_result * res ,
struct flowi6 * fl6 , int oif , bool have_oif_match ,
const struct sk_buff * skb , int strict ) ;
2018-05-10 06:34:19 +03:00
struct fib6_node * fib6_node_lookup ( struct fib6_node * root ,
const struct in6_addr * daddr ,
const struct in6_addr * saddr ) ;
2005-04-17 02:20:36 +04:00
2013-09-21 21:22:42 +04:00
struct fib6_node * fib6_locate ( struct fib6_node * root ,
const struct in6_addr * daddr , int dst_len ,
2017-10-06 22:06:02 +03:00
const struct in6_addr * saddr , int src_len ,
bool exact_match ) ;
2005-04-17 02:20:36 +04:00
2018-04-18 03:33:26 +03:00
void fib6_clean_all ( struct net * net , int ( * func ) ( struct fib6_info * , void * arg ) ,
2013-12-27 12:32:38 +04:00
void * arg ) ;
2018-10-12 06:17:21 +03:00
void fib6_clean_all_skip_notify ( struct net * net ,
int ( * func ) ( struct fib6_info * , void * arg ) ,
void * arg ) ;
2006-08-05 10:20:06 +04:00
2018-04-18 03:33:26 +03:00
int fib6_add ( struct fib6_node * root , struct fib6_info * rt ,
2018-04-18 03:33:16 +03:00
struct nl_info * info , struct netlink_ext_ack * extack ) ;
2018-04-18 03:33:26 +03:00
int fib6_del ( struct fib6_info * rt , struct nl_info * info ) ;
2005-04-17 02:20:36 +04:00
2018-09-11 03:21:42 +03:00
static inline
void rt6_get_prefsrc ( const struct rt6_info * rt , struct in6_addr * addr )
{
const struct fib6_info * from ;
rcu_read_lock ( ) ;
from = rcu_dereference ( rt - > from ) ;
if ( from ) {
* addr = from - > fib6_prefsrc . addr ;
} else {
struct in6_addr in6_zero = { } ;
* addr = in6_zero ;
}
rcu_read_unlock ( ) ;
}
2018-04-19 01:39:01 +03:00
2019-03-28 06:53:50 +03:00
int fib6_nh_init ( struct net * net , struct fib6_nh * fib6_nh ,
struct fib6_config * cfg , gfp_t gfp_flags ,
struct netlink_ext_ack * extack ) ;
2019-03-28 06:53:51 +03:00
void fib6_nh_release ( struct fib6_nh * fib6_nh ) ;
2019-03-28 06:53:50 +03:00
2019-05-22 22:04:41 +03:00
int call_fib6_entry_notifiers ( struct net * net ,
enum fib_event_type event_type ,
struct fib6_info * rt ,
struct netlink_ext_ack * extack ) ;
2019-06-18 18:12:45 +03:00
int call_fib6_multipath_entry_notifiers ( struct net * net ,
enum fib_event_type event_type ,
struct fib6_info * rt ,
unsigned int nsiblings ,
struct netlink_ext_ack * extack ) ;
2019-12-23 16:28:17 +03:00
int call_fib6_entry_notifiers_replace ( struct net * net , struct fib6_info * rt ) ;
2019-05-22 22:04:41 +03:00
void fib6_rt_update ( struct net * net , struct fib6_info * rt ,
struct nl_info * info ) ;
2018-04-18 03:33:26 +03:00
void inet6_rt_notify ( int event , struct fib6_info * rt , struct nl_info * info ,
2015-09-13 20:18:33 +03:00
unsigned int flags ) ;
2005-04-17 02:20:36 +04:00
2013-09-21 21:22:42 +04:00
void fib6_run_gc ( unsigned long expires , struct net * net , bool force ) ;
2005-04-17 02:20:36 +04:00
2013-09-21 21:22:42 +04:00
void fib6_gc_cleanup ( void ) ;
2005-04-17 02:20:36 +04:00
2013-09-21 21:22:42 +04:00
int fib6_init ( void ) ;
2006-08-04 14:39:02 +04:00
2018-04-10 20:42:55 +03:00
struct ipv6_route_iter {
struct seq_net_private p ;
struct fib6_walker w ;
loff_t skip ;
struct fib6_table * tbl ;
int sernum ;
} ;
extern const struct seq_operations ipv6_route_seq_ops ;
2013-09-21 18:55:59 +04:00
2019-10-03 12:49:27 +03:00
int call_fib6_notifier ( struct notifier_block * nb ,
2017-08-03 14:28:16 +03:00
enum fib_event_type event_type ,
struct fib_notifier_info * info ) ;
int call_fib6_notifiers ( struct net * net , enum fib_event_type event_type ,
struct fib_notifier_info * info ) ;
int __net_init fib6_notifier_init ( struct net * net ) ;
void __net_exit fib6_notifier_exit ( struct net * net ) ;
2017-08-03 14:28:19 +03:00
unsigned int fib6_tables_seq_read ( struct net * net ) ;
2019-10-03 12:49:30 +03:00
int fib6_tables_dump ( struct net * net , struct notifier_block * nb ,
struct netlink_ext_ack * extack ) ;
2017-08-03 14:28:19 +03:00
2018-04-18 03:33:26 +03:00
void fib6_update_sernum ( struct net * net , struct fib6_info * rt ) ;
void fib6_update_sernum_upto_root ( struct net * net , struct fib6_info * rt ) ;
2019-05-22 22:04:40 +03:00
void fib6_update_sernum_stub ( struct net * net , struct fib6_info * f6i ) ;
2017-10-06 22:05:56 +03:00
2018-04-18 03:33:26 +03:00
void fib6_metric_set ( struct fib6_info * f6i , int metric , u32 val ) ;
static inline bool fib6_metric_locked ( struct fib6_info * f6i , int metric )
2018-04-18 03:33:16 +03:00
{
return ! ! ( f6i - > fib6_metrics - > metrics [ RTAX_LOCK - 1 ] & ( 1 < < metric ) ) ;
}
2021-02-01 22:47:55 +03:00
void fib6_info_hw_flags_set ( struct net * net , struct fib6_info * f6i ,
2021-02-07 11:22:52 +03:00
bool offload , bool trap , bool offload_failed ) ;
2017-10-06 22:05:56 +03:00
2020-05-13 21:02:21 +03:00
#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
/* Context passed to BPF iterator programs walking IPv6 routes. */
struct bpf_iter__ipv6_route {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct fib6_info *, rt);	/* current route entry */
};
#endif
2020-06-23 19:42:32 +03:00
INDIRECT_CALLABLE_DECLARE ( struct rt6_info * ip6_pol_route_output ( struct net * net ,
struct fib6_table * table ,
struct flowi6 * fl6 ,
const struct sk_buff * skb ,
int flags ) ) ;
INDIRECT_CALLABLE_DECLARE ( struct rt6_info * ip6_pol_route_input ( struct net * net ,
struct fib6_table * table ,
struct flowi6 * fl6 ,
const struct sk_buff * skb ,
int flags ) ) ;
INDIRECT_CALLABLE_DECLARE ( struct rt6_info * __ip6_route_redirect ( struct net * net ,
struct fib6_table * table ,
struct flowi6 * fl6 ,
const struct sk_buff * skb ,
int flags ) ) ;
INDIRECT_CALLABLE_DECLARE ( struct rt6_info * ip6_pol_route_lookup ( struct net * net ,
struct fib6_table * table ,
struct flowi6 * fl6 ,
const struct sk_buff * skb ,
int flags ) ) ;
/* Invoke a pol_lookup_t callback.  INDIRECT_CALL_4() lets the compiler
 * emit direct calls when @lookup matches one of the four listed
 * implementations, falling back to the indirect call otherwise.
 */
static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup,
					       struct net *net,
					       struct fib6_table *table,
					       struct flowi6 *fl6,
					       const struct sk_buff *skb,
					       int flags)
{
	return INDIRECT_CALL_4(lookup,
			       ip6_pol_route_output,
			       ip6_pol_route_input,
			       ip6_pol_route_lookup,
			       __ip6_route_redirect,
			       net, table, fl6, skb, flags);
}
2007-12-08 11:14:54 +03:00
# ifdef CONFIG_IPV6_MULTIPLE_TABLES
2019-11-20 15:47:33 +03:00
/* True if this netns has non-default IPv6 policy rules installed. */
static inline bool fib6_has_custom_rules(const struct net *net)
{
	return net->ipv6.fib6_has_custom_rules;
}
2013-09-21 21:22:42 +04:00
int fib6_rules_init ( void ) ;
void fib6_rules_cleanup ( void ) ;
2017-08-03 14:28:15 +03:00
bool fib6_rule_default ( const struct fib_rule * rule ) ;
2019-10-03 12:49:30 +03:00
int fib6_rules_dump ( struct net * net , struct notifier_block * nb ,
struct netlink_ext_ack * extack ) ;
2017-08-03 14:28:18 +03:00
unsigned int fib6_rules_seq_read ( struct net * net ) ;
2018-03-01 06:43:22 +03:00
static inline bool fib6_rules_early_flow_dissect ( struct net * net ,
struct sk_buff * skb ,
struct flowi6 * fl6 ,
struct flow_keys * flkeys )
{
unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP ;
if ( ! net - > ipv6 . fib6_rules_require_fldissect )
return false ;
skb_flow_dissect_flow_keys ( skb , flkeys , flag ) ;
fl6 - > fl6_sport = flkeys - > ports . src ;
fl6 - > fl6_dport = flkeys - > ports . dst ;
fl6 - > flowi6_proto = flkeys - > basic . ip_proto ;
return true ;
}
2007-12-08 11:14:54 +03:00
# else
2019-11-20 15:47:33 +03:00
/* Without CONFIG_IPV6_MULTIPLE_TABLES there are no policy rules. */
static inline bool fib6_has_custom_rules(const struct net *net)
{
	return false;
}
2007-12-08 11:14:54 +03:00
static inline int fib6_rules_init ( void )
{
return 0 ;
}
/* Stub: nothing to tear down without CONFIG_IPV6_MULTIPLE_TABLES. */
static inline void fib6_rules_cleanup(void)
{
}
2017-08-03 14:28:15 +03:00
/* Without policy routing every rule is considered "default". */
static inline bool fib6_rule_default(const struct fib_rule *rule)
{
	return true;
}
2019-10-03 12:49:30 +03:00
/* Stub: no rules to replay to @nb without CONFIG_IPV6_MULTIPLE_TABLES. */
static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb,
				  struct netlink_ext_ack *extack)
{
	return 0;
}
static inline unsigned int fib6_rules_seq_read ( struct net * net )
{
return 0 ;
}
2018-03-01 06:43:22 +03:00
/* Stub: no fib rules exist, so early flow dissection is never needed. */
static inline bool fib6_rules_early_flow_dissect(struct net *net,
						 struct sk_buff *skb,
						 struct flowi6 *fl6,
						 struct flow_keys *flkeys)
{
	return false;
}
2007-12-08 11:14:54 +03:00
# endif
2005-04-17 02:20:36 +04:00
# endif