2005-04-17 02:20:36 +04:00
/*
* Linux INET6 implementation
*
* Authors :
* Pedro Roque < roque @ di . fc . ul . pt >
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation ; either version
* 2 of the License , or ( at your option ) any later version .
*/
# ifndef _IP6_FIB_H
# define _IP6_FIB_H
# include <linux/ipv6_route.h>
# include <linux/rtnetlink.h>
# include <linux/spinlock.h>
2006-08-22 11:01:08 +04:00
# include <net/dst.h>
# include <net/flow.h>
# include <net/netlink.h>
2010-11-30 23:27:11 +03:00
# include <net/inetpeer.h>
2005-04-17 02:20:36 +04:00
2009-07-31 05:52:15 +04:00
# ifdef CONFIG_IPV6_MULTIPLE_TABLES
# define FIB6_TABLE_HASHSZ 256
# else
# define FIB6_TABLE_HASHSZ 1
# endif
2005-04-17 02:20:36 +04:00
struct rt6_info ;
2009-11-03 06:26:03 +03:00
/*
 * Parsed form of a route add/delete request (rtnetlink RTM_NEWROUTE /
 * RTM_DELROUTE or the ioctl interface), handed to the fib6 code.
 */
struct fib6_config {
	u32		fc_table;	/* table id the route belongs to */
	u32		fc_metric;	/* route priority / metric */
	int		fc_dst_len;	/* destination prefix length */
	int		fc_src_len;	/* source prefix length (subtrees) */
	int		fc_ifindex;	/* output interface index */
	u32		fc_flags;	/* RTF_* route flags */
	u32		fc_protocol;	/* originator of the route (RTPROT_*) */
	u32		fc_type;	/* only 8 bits are used */

	struct in6_addr	fc_dst;		/* destination prefix */
	struct in6_addr	fc_src;		/* source prefix */
	struct in6_addr	fc_prefsrc;	/* preferred source address */
	struct in6_addr	fc_gateway;	/* next-hop gateway */

	unsigned long	fc_expires;	/* requested lifetime (0 = permanent) */
	struct nlattr	*fc_mx;		/* nested metrics attribute, if any */
	int		fc_mx_len;	/* byte length of fc_mx */
	struct nl_info	fc_nlinfo;	/* netlink request context */
};
2009-11-03 06:26:03 +03:00
/*
 * Node of the fib6 radix tree.  Nodes flagged RTN_RTINFO carry the
 * list of routes sharing a prefix hanging off ->leaf.
 */
struct fib6_node {
	struct fib6_node	*parent;
	struct fib6_node	*left;
	struct fib6_node	*right;
#ifdef CONFIG_IPV6_SUBTREES
	struct fib6_node	*subtree;	/* source-address subtree */
#endif
	struct rt6_info		*leaf;		/* routes at this prefix */

	__u16			fn_bit;		/* bit key */
	__u16			fn_flags;	/* RTN_* node flags */
	__u32			fn_sernum;	/* serial number, bumped on tree changes */

	/*
	 * Round-robin pointer for default router selection (RFC 2461,
	 * section 6.3.6, item #2).  Keeping the rotation state here,
	 * instead of rotating the ->leaf list itself, means readers
	 * holding the table rwlock never see the list change; the
	 * pointer only needs to be reset to NULL when the route it
	 * points at is removed.
	 */
	struct rt6_info		*rr_ptr;
};
2006-08-24 04:22:24 +04:00
# ifndef CONFIG_IPV6_SUBTREES
# define FIB6_SUBTREE(fn) NULL
# else
# define FIB6_SUBTREE(fn) ((fn)->subtree)
# endif
2005-04-17 02:20:36 +04:00
/*
* routing information
*
*/
2009-11-03 06:26:03 +03:00
/* An IPv6 prefix: address plus significant bit count. */
struct rt6key {
	struct in6_addr	addr;	/* prefix bits */
	int		plen;	/* prefix length, in bits */
};
2006-08-05 10:20:06 +04:00
struct fib6_table ;
2009-11-03 06:26:03 +03:00
/*
 * An IPv6 route.  dst MUST stay the first member: ip6_dst_idev()
 * and friends cast struct dst_entry * directly to struct rt6_info *.
 */
struct rt6_info {
	struct dst_entry		dst;	/* must be first member */

	struct neighbour		*n;	/* cached next-hop neighbour */

	/*
	 * Tail elements of dst_entry (__refcnt etc.)
	 * and these elements (rarely used in hot path) are in
	 * the same cache line.
	 */
	struct fib6_table		*rt6i_table;	/* owning table */
	struct fib6_node		*rt6i_node;	/* tree node holding this route */

	struct in6_addr			rt6i_gateway;

	atomic_t			rt6i_ref;	/* fib tree reference count */

	/* These are in a separate cache line. */
	struct rt6key			rt6i_dst ____cacheline_aligned_in_smp;
	u32				rt6i_flags;	/* RTF_* flags */
	struct rt6key			rt6i_src;	/* source prefix (subtrees) */
	struct rt6key			rt6i_prefsrc;	/* preferred source address */
	u32				rt6i_metric;

	/*
	 * inetpeer generation id observed when this cached route last
	 * attached to an inet_peer.  When new path information (PMTU,
	 * redirect) force-creates a peer entry, the global id is
	 * bumped; dst_ops->check() compares the two so the route can
	 * pick up the new peer without a routing-cache walk.
	 */
	u32				rt6i_peer_genid;

	struct inet6_dev		*rt6i_idev;
	unsigned long			_rt6i_peer;	/* encoded inet_peer ref; use rt6_peer_ptr() */

	u32				rt6i_genid;	/* NOTE(review): presumably a dst validity generation id — confirm against dst_ops->check() users */

	/* more non-fragment space at head required */
	unsigned short			rt6i_nfheader_len;

	u8				rt6i_protocol;	/* originator (RTPROT_*) */
};
2012-06-10 09:36:36 +04:00
/* Decode _rt6i_peer into the attached inet_peer (see inetpeer_ptr()). */
static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
{
	return inetpeer_ptr(rt->_rt6i_peer);
}
/* True once the route has been attached to an inet_peer entry. */
static inline bool rt6_has_peer(struct rt6_info *rt)
{
	return inetpeer_ptr_is_peer(rt->_rt6i_peer);
}
/* Unchecked variant of rt6_set_peer() (see __inetpeer_ptr_set_peer()). */
static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
{
	__inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
}
/* Attach @peer to the route; result semantics per inetpeer_ptr_set_peer(). */
static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
{
	return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
}
/* Initialize the peer slot against @base, with no peer attached yet. */
static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
{
	inetpeer_init_ptr(&rt->_rt6i_peer, base);
}
/* Carry the peer attachment over from @ort to @rt (see inetpeer_transfer_peer()). */
static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
{
	inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
}
2006-10-13 11:17:25 +04:00
static inline struct inet6_dev * ip6_dst_idev ( struct dst_entry * dst )
{
return ( ( struct rt6_info * ) dst ) - > rt6i_idev ;
}
2012-04-06 04:13:10 +04:00
static inline void rt6_clean_expires ( struct rt6_info * rt )
{
if ( ! ( rt - > rt6i_flags & RTF_EXPIRES ) & & rt - > dst . from )
dst_release ( rt - > dst . from ) ;
rt - > rt6i_flags & = ~ RTF_EXPIRES ;
2012-04-16 07:35:41 +04:00
rt - > dst . from = NULL ;
2012-04-06 04:13:10 +04:00
}
static inline void rt6_set_expires ( struct rt6_info * rt , unsigned long expires )
{
if ( ! ( rt - > rt6i_flags & RTF_EXPIRES ) & & rt - > dst . from )
dst_release ( rt - > dst . from ) ;
rt - > rt6i_flags | = RTF_EXPIRES ;
rt - > dst . expires = expires ;
}
static inline void rt6_update_expires ( struct rt6_info * rt , int timeout )
{
2012-04-16 07:34:39 +04:00
if ( ! ( rt - > rt6i_flags & RTF_EXPIRES ) ) {
if ( rt - > dst . from )
dst_release ( rt - > dst . from ) ;
/* dst_set_expires relies on expires == 0
* if it has not been set previously .
*/
rt - > dst . expires = 0 ;
}
2012-04-06 04:13:10 +04:00
dst_set_expires ( & rt - > dst , timeout ) ;
rt - > rt6i_flags | = RTF_EXPIRES ;
}
static inline void rt6_set_from ( struct rt6_info * rt , struct rt6_info * from )
{
struct dst_entry * new = ( struct dst_entry * ) from ;
if ( ! ( rt - > rt6i_flags & RTF_EXPIRES ) & & rt - > dst . from ) {
if ( new = = rt - > dst . from )
return ;
dst_release ( rt - > dst . from ) ;
}
rt - > rt6i_flags & = ~ RTF_EXPIRES ;
rt - > dst . from = new ;
dst_hold ( new ) ;
}
2009-11-03 06:26:03 +03:00
/*
 * State of an in-progress walk over a fib6 tree, so that long dumps
 * can be suspended and resumed.
 */
struct fib6_walker_t {
	struct list_head	lh;		/* walker list linkage */
	struct fib6_node	*root, *node;	/* tree root / current node */
	struct rt6_info		*leaf;		/* current route within node */
	unsigned char		state;		/* walk state machine position */
	unsigned char		prune;		/* depth at which to stop descending */
	unsigned int		skip;		/* entries to skip on resume */
	unsigned int		count;		/* entries visited so far */
	int			(*func)(struct fib6_walker_t *);	/* per-node callback */
	void			*args;		/* opaque data for func */
};
/* Counters describing the state of the fib6 trees. */
struct rt6_statistics {
	__u32		fib_nodes;		/* tree nodes allocated */
	__u32		fib_route_nodes;	/* nodes carrying routes */
	__u32		fib_rt_alloc;		/* permanent routes */
	__u32		fib_rt_entries;		/* rt entries in table */
	__u32		fib_rt_cache;		/* cache routes */
	__u32		fib_discarded_routes;	/* routes dropped during insert */
};
# define RTN_TL_ROOT 0x0001
# define RTN_ROOT 0x0002 /* tree root node */
# define RTN_RTINFO 0x0004 /* node with valid routing info */
/*
* priority levels ( or metrics )
*
*/
2006-08-05 10:20:06 +04:00
/*
 * One IPv6 routing table: a radix tree plus the rwlock that protects
 * it.  Tables are chained into a hash by tb6_id.
 */
struct fib6_table {
	struct hlist_node	tb6_hlist;	/* hash chain linkage */
	u32			tb6_id;		/* table identifier */
	rwlock_t		tb6_lock;	/* protects the tree below */
	struct fib6_node	tb6_root;	/* root node of the tree */
	struct inet_peer_base	tb6_peers;	/* per-table inet_peer storage */
};
/* Well-known table ids.  Without CONFIG_IPV6_MULTIPLE_TABLES every
 * purpose collapses onto the single MAIN table.
 */
#define RT6_TABLE_UNSPEC	RT_TABLE_UNSPEC
#define RT6_TABLE_MAIN		RT_TABLE_MAIN
#define RT6_TABLE_DFLT		RT6_TABLE_MAIN
#define RT6_TABLE_INFO		RT6_TABLE_MAIN
#define RT6_TABLE_PREFIX	RT6_TABLE_MAIN

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_MIN		1
#define FIB6_TABLE_MAX		RT_TABLE_MAX
#define RT6_TABLE_LOCAL		RT_TABLE_LOCAL
#else
#define FIB6_TABLE_MIN		RT_TABLE_MAIN
#define FIB6_TABLE_MAX		FIB6_TABLE_MIN
#define RT6_TABLE_LOCAL		RT6_TABLE_MAIN
#endif
2008-03-05 00:48:30 +03:00
/* Per-table lookup callback passed to fib6_rule_lookup(). */
typedef struct rt6_info *(*pol_lookup_t)(struct net *,
					 struct fib6_table *,
					 struct flowi6 *, int);
2005-04-17 02:20:36 +04:00
/*
* exported functions
*/
2008-03-04 10:25:27 +03:00
/* Find a table by id; fib6_new_table() presumably also creates it on
 * demand — confirm against net/ipv6/ip6_fib.c. */
extern struct fib6_table	*fib6_get_table(struct net *net, u32 id);
extern struct fib6_table	*fib6_new_table(struct net *net, u32 id);

/* Policy-rules aware lookup; @lookup performs the per-table work. */
extern struct dst_entry		*fib6_rule_lookup(struct net *net,
						  struct flowi6 *fl6, int flags,
						  pol_lookup_t lookup);
2006-08-05 10:20:06 +04:00
2005-04-17 02:20:36 +04:00
/* Tree lookup by destination (and, with CONFIG_IPV6_SUBTREES, source)
 * address. */
extern struct fib6_node		*fib6_lookup(struct fib6_node *root,
					     const struct in6_addr *daddr,
					     const struct in6_addr *saddr);

/* Locate the node for an exact prefix/length pair. */
struct fib6_node		*fib6_locate(struct fib6_node *root,
					     const struct in6_addr *daddr, int dst_len,
					     const struct in6_addr *saddr, int src_len);
2005-04-17 02:20:36 +04:00
IPv6: Avoid taking write lock for /proc/net/ipv6_route
During some debugging I needed to look into how /proc/net/ipv6_route
operated and in my digging I found it's calling fib6_clean_all() which uses
"write_lock_bh(&table->tb6_lock)" before doing the walk of the table. I
found this on 2.6.32, but reading the code I believe the same basic idea
exists currently. Looking at the rtnetlink code they are only calling
"read_lock_bh(&table->tb6_lock);" via fib6_dump_table(). While I realize
reading from proc isn't the recommended way of fetching the ipv6 route
table; taking a write lock seems unnecessary and would probably cause
network performance issues.
To verify this I loaded up the ipv6 route table and then ran iperf in 3
cases:
* doing nothing
* reading ipv6 route table via proc
(while :; do cat /proc/net/ipv6_route > /dev/null; done)
* reading ipv6 route table via rtnetlink
(while :; do ip -6 route show table all > /dev/null; done)
* Load the ipv6 route table up with:
* for ((i = 0;i < 4000;i++)); do ip route add unreachable 2000::$i; done
* iperf commands:
* client: iperf -i 1 -V -c <ipv6 addr>
* server: iperf -V -s
* iperf results - 3 runs each (in Mbits/sec)
* nothing: client: 927,927,927 server: 927,927,927
* proc: client: 179,97,96,113 server: 142,112,133
* iproute: client: 928,927,928 server: 927,927,927
lock_stat shows taking the write lock is causing the slowdown. Using this
info I decided to write a version of fib6_clean_all() which replaces
write_lock_bh(&table->tb6_lock) with read_lock_bh(&table->tb6_lock). With
this new function I see the same results as with my rtnetlink iperf test.
Signed-off-by: Josh Hunt <joshhunt00@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2011-12-28 17:23:07 +04:00
/* Walk every table calling @func on each route, holding tb6_lock only
 * as a reader: @func must not modify the tree. */
extern void			fib6_clean_all_ro(struct net *net,
						  int (*func)(struct rt6_info *, void *arg),
						  int prune, void *arg);

/* As above, but with tb6_lock held for writing, so @func may modify
 * the tree. */
extern void			fib6_clean_all(struct net *net,
					       int (*func)(struct rt6_info *, void *arg),
					       int prune, void *arg);

/* Insert/remove a route; @info carries the netlink request context
 * used for notifications. */
extern int			fib6_add(struct fib6_node *root,
					 struct rt6_info *rt,
					 struct nl_info *info);

extern int			fib6_del(struct rt6_info *rt,
					 struct nl_info *info);

/* Send an rtnetlink notification about @rt for @event. */
extern void			inet6_rt_notify(int event, struct rt6_info *rt,
						struct nl_info *info);

/* Garbage collection of the fib6 trees; exact use of @expires is
 * defined in net/ipv6/ip6_fib.c — confirm there. */
extern void			fib6_run_gc(unsigned long expires,
					    struct net *net);

extern void			fib6_gc_cleanup(void);

extern int			fib6_init(void);
2006-08-04 14:39:02 +04:00
2007-12-08 11:14:54 +03:00
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
/* Policy-routing rules, only built when multiple tables are enabled. */
extern int fib6_rules_init(void);
extern void fib6_rules_cleanup(void);
#else
/* Single-table build: rules support compiles away to no-ops. */
static inline int fib6_rules_init(void)
{
	return 0;
}
static inline void fib6_rules_cleanup(void)
{
}
#endif
2005-04-17 02:20:36 +04:00
# endif