/*
 * INETPEER - A storage for permanent information about peers
 *
 * Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#ifndef _NET_INETPEER_H
#define _NET_INETPEER_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
struct inet_peer {
	/* group together avl_left, avl_right, v4daddr to speed up lookups */
	struct inet_peer	*avl_left, *avl_right;
	__be32			v4daddr;	/* peer's address */
	__u32			avl_height;
	struct list_head	unused;
	__u32			dtime;		/* the time of last use of not
						 * referenced entries */
	atomic_t		refcnt;
	atomic_t		rid;		/* Frag reception counter */
	atomic_t		ip_id_count;	/* IP ID for the next packet */
	__u32			tcp_ts;
	__u32			tcp_ts_stamp;
inetpeer: RCU conversion

inetpeer currently uses an AVL tree protected by an rwlock.
It's possible to make most lookups use RCU:

1) Add a struct rcu_head to struct inet_peer.

2) Add a lookup_rcu_bh() helper to perform a lockless and opportunistic
lookup. This is a normal function, not a macro like lookup().

3) Add a limit on the number of links followed by lookup_rcu_bh(). This
is needed in case we fall into a loop.

4) Add an smp_wmb() in link_to_pool() right before the node insert.

5) Make unlink_from_pool() use atomic_cmpxchg() to make sure it can take
the last reference to an inet_peer, since lockless readers could
increase the refcount even while we hold peers.lock (sketched after the
inet_putpeer() declaration below).

6) Delay freeing of struct inet_peer until after an RCU grace period so
that lookup_rcu_bh() cannot crash (see the freeing sketch after the
structure definition below).

7) Make inet_getpeer() first attempt a lockless lookup. Note this lookup
can fail even if the target is in the AVL tree, because a concurrent
writer can leave the tree in a transiently inconsistent form. If this
attempt fails, the lock is taken and a regular lookup is performed
again (see the two-phase sketch after the inet_getpeer() declaration
below).

8) Convert peers.lock from an rwlock to a spinlock.

9) Remove SLAB_HWCACHE_ALIGN when peer_cachep is created, because
rcu_head adds 16 bytes on 64-bit arches, doubling the effective size
(64 -> 128 bytes). A future patch can probably revert this part by
putting the rcu field in a union sharing space with rid, ip_id_count,
tcp_ts and tcp_ts_stamp, since these fields are manipulated only while
refcnt > 0.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
	struct rcu_head		rcu;
};
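/*
 * A sketch (not the real implementation) of the RCU-deferred freeing
 * described in point 6 of the commit message above. peer_cachep is the
 * cache named in point 9; the helper names and the unlink caller are
 * assumptions for illustration, and <linux/slab.h> would be needed for
 * kmem_cache_free().
 */
static void inetpeer_free_rcu_sketch(struct rcu_head *head)
{
	/* recover the peer from its embedded rcu field; after the grace
	 * period no lookup_rcu_bh() walker can still dereference it */
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void unlink_free_sketch(struct inet_peer *p)
{
	/* defer the actual free past an RCU (BH flavor) grace period */
	call_rcu_bh(&p->rcu, inetpeer_free_rcu_sketch);
}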
void			inet_initpeers(void) __init;

/* can be called with or without local BH being disabled */
struct inet_peer	*inet_getpeer(__be32 daddr, int create);
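/*
 * A two-phase sketch of the behaviour points 2, 3 and 7 of the commit
 * message describe for inet_getpeer(), omitting the create path. All
 * names besides inet_getpeer() itself are taken from the commit message
 * or assumed: lookup_rcu_bh() is the bounded lockless walker (it takes
 * the reference itself and gives up after a fixed number of links,
 * since the tree may look transiently looped to a lockless observer),
 * lookup() is the locked walker, and peers is the pool descriptor
 * carrying the spinlock.
 */
static struct inet_peer *inet_getpeer_sketch(__be32 daddr)
{
	struct inet_peer *p;

	/* Phase 1: opportunistic lockless lookup */
	rcu_read_lock_bh();
	p = lookup_rcu_bh(daddr);
	rcu_read_unlock_bh();
	if (p)
		return p;

	/* Phase 2: the lockless walk can miss a node that is present,
	 * because a concurrent writer may rebalance the tree; retry
	 * under the pool spinlock. */
	spin_lock_bh(&peers.lock);
	p = lookup(daddr);
	if (p)
		atomic_inc(&p->refcnt);
	spin_unlock_bh(&peers.lock);
	return p;
}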
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
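/*
 * A sketch of the refcount handoff behind point 5 of the commit
 * message (helper names are illustrative, not declared by this
 * header). A writer holding peers.lock still races with lockless
 * readers, so it may only reclaim a node when it can atomically claim
 * the last reference; marking refcnt as -1 lets readers recognize and
 * refuse a dying node.
 */
static inline int peer_take_ref_sketch(struct inet_peer *p)
{
	/* reader side: grab a reference unless the node is marked dead */
	return atomic_add_unless(&p->refcnt, 1, -1);
}

static inline int peer_claim_last_ref_sketch(struct inet_peer *p)
{
	/* writer side: succeed only if we moved refcnt from 1 to -1 */
	return atomic_cmpxchg(&p->refcnt, 1, -1) == 1;
}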
/* can be called with or without local BH being disabled */
static inline __u16 inet_getid(struct inet_peer *p, int more)
{
	more++;
	return atomic_add_return(more, &p->ip_id_count) - more;
}
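/*
 * A usage note, not a call site from this header: inet_getid() reserves
 * "more + 1" consecutive IP IDs in one atomic step, because
 * atomic_add_return() yields the counter value after the addition, and
 * subtracting the increment recovers the first ID of the reserved
 * range. A hypothetical sender fragmenting a datagram into three
 * pieces (more == 2) would thus own ids id, id + 1 and id + 2:
 *
 *	__u16 id = inet_getid(peer, 2);
 */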
#endif /* _NET_INETPEER_H */