[DECnet]: Use RCU locking in dn_rules.c
As per Robert Olsson's patch for ipv4, this is the DECnet version to keep the code "in step". It changes the list of rules to use RCU rather than an rwlock.

Inspired-by: Robert Olsson <robert.olsson@its.uu.se>
Signed-off-by: Steven Whitehouse <steve@chygwyn.com>
Signed-off-by: Patrick Caulfield <patrick@tykepenguin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ecba320f2e
parent c60992db46
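Before the diff itself, a condensed sketch of the read-side pattern the patch adopts may help: lookups now walk the rule hlist under rcu_read_lock()/rcu_read_unlock() instead of read_lock(&dn_fib_rules_lock). The sketch below is illustrative only and is not code from the patch; it assumes the post-patch struct dn_fib_rule (r_hlist, r_src/r_srcmask, r_dst/r_dstmask, r_clntref) and the dn_fib_rules hlist head, and dn_find_rule() is a hypothetical helper name that condenses the matching done in dn_fib_lookup().

/*
 * Illustrative sketch (not part of the patch): RCU read side for the
 * DECnet rule list. Assumes the post-patch struct dn_fib_rule and the
 * dn_fib_rules hlist head from dn_rules.c; dn_find_rule() is a
 * hypothetical helper standing in for the matching in dn_fib_lookup().
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

static struct dn_fib_rule *dn_find_rule(__le16 saddr, __le16 daddr)
{
        struct dn_fib_rule *r, *found = NULL;
        struct hlist_node *node;

        rcu_read_lock();
        /* _rcu iterator: safe against concurrent hlist_del_rcu()/hlist_add_*_rcu() */
        hlist_for_each_entry_rcu(r, node, &dn_fib_rules, r_hlist) {
                if (((saddr ^ r->r_src) & r->r_srcmask) ||
                    ((daddr ^ r->r_dst) & r->r_dstmask))
                        continue;
                atomic_inc(&r->r_clntref);      /* pin the rule before leaving the read section */
                found = r;
                break;
        }
        rcu_read_unlock();
        return found;
}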
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
@@ -27,6 +27,8 @@
 #include <linux/timer.h>
 #include <linux/spinlock.h>
 #include <linux/in_route.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 #include <net/neighbour.h>
@@ -39,7 +41,7 @@

 struct dn_fib_rule
 {
-        struct dn_fib_rule *r_next;
+        struct hlist_node r_hlist;
         atomic_t r_clntref;
         u32 r_preference;
         unsigned char r_table;
@@ -58,6 +60,7 @@ struct dn_fib_rule
         int r_ifindex;
         char r_ifname[IFNAMSIZ];
         int r_dead;
+        struct rcu_head rcu;
 };

 static struct dn_fib_rule default_rule = {
@@ -67,18 +70,17 @@ static struct dn_fib_rule default_rule = {
         .r_action = RTN_UNICAST
 };

-static struct dn_fib_rule *dn_fib_rules = &default_rule;
-static DEFINE_RWLOCK(dn_fib_rules_lock);
-
+static struct hlist_head dn_fib_rules;

 int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
         struct rtattr **rta = arg;
         struct rtmsg *rtm = NLMSG_DATA(nlh);
-        struct dn_fib_rule *r, **rp;
+        struct dn_fib_rule *r;
+        struct hlist_node *node;
         int err = -ESRCH;

-        for(rp=&dn_fib_rules; (r=*rp) != NULL; rp = &r->r_next) {
+        hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
                 if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 2) == 0) &&
                     rtm->rtm_src_len == r->r_src_len &&
                     rtm->rtm_dst_len == r->r_dst_len &&
@@ -95,10 +97,8 @@ int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                         if (r == &default_rule)
                                 break;

-                        write_lock_bh(&dn_fib_rules_lock);
-                        *rp = r->r_next;
+                        hlist_del_rcu(&r->r_hlist);
                         r->r_dead = 1;
-                        write_unlock_bh(&dn_fib_rules_lock);
                         dn_fib_rule_put(r);
                         err = 0;
                         break;
@@ -108,11 +108,17 @@ int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
         return err;
 }

+static inline void dn_fib_rule_put_rcu(struct rcu_head *head)
+{
+        struct dn_fib_rule *r = container_of(head, struct dn_fib_rule, rcu);
+        kfree(r);
+}
+
 void dn_fib_rule_put(struct dn_fib_rule *r)
 {
         if (atomic_dec_and_test(&r->r_clntref)) {
                 if (r->r_dead)
-                        kfree(r);
+                        call_rcu(&r->rcu, dn_fib_rule_put_rcu);
                 else
                         printk(KERN_DEBUG "Attempt to free alive dn_fib_rule\n");
         }
@@ -123,7 +129,8 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
         struct rtattr **rta = arg;
         struct rtmsg *rtm = NLMSG_DATA(nlh);
-        struct dn_fib_rule *r, *new_r, **rp;
+        struct dn_fib_rule *r, *new_r, *last = NULL;
+        struct hlist_node *node = NULL;
         unsigned char table_id;

         if (rtm->rtm_src_len > 16 || rtm->rtm_dst_len > 16)
@@ -149,6 +156,7 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
         if (!new_r)
                 return -ENOMEM;
         memset(new_r, 0, sizeof(*new_r));
+
         if (rta[RTA_SRC-1])
                 memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
         if (rta[RTA_DST-1])
@@ -179,27 +187,26 @@ int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                 }
         }

-        rp = &dn_fib_rules;
+        r = container_of(dn_fib_rules.first, struct dn_fib_rule, r_hlist);
         if (!new_r->r_preference) {
-                r = dn_fib_rules;
-                if (r && (r = r->r_next) != NULL) {
-                        rp = &dn_fib_rules->r_next;
+                if (r && r->r_hlist.next != NULL) {
+                        r = container_of(r->r_hlist.next, struct dn_fib_rule, r_hlist);
                         if (r->r_preference)
                                 new_r->r_preference = r->r_preference - 1;
                 }
         }

-        while((r=*rp) != NULL) {
+        hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
                 if (r->r_preference > new_r->r_preference)
                         break;
-                rp = &r->r_next;
+                last = r;
         }

-        new_r->r_next = r;
         atomic_inc(&new_r->r_clntref);
-        write_lock_bh(&dn_fib_rules_lock);
-        *rp = new_r;
-        write_unlock_bh(&dn_fib_rules_lock);
+        if (last)
+                hlist_add_after_rcu(&last->r_hlist, &new_r->r_hlist);
+        else
+                hlist_add_before_rcu(&new_r->r_hlist, &r->r_hlist);
         return 0;
 }

@@ -210,10 +217,12 @@ int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
         struct dn_fib_table *tb;
         __le16 saddr = flp->fld_src;
         __le16 daddr = flp->fld_dst;
+        struct hlist_node *node;
         int err;

-        read_lock(&dn_fib_rules_lock);
-        for(r = dn_fib_rules; r; r = r->r_next) {
+        rcu_read_lock();
+
+        hlist_for_each_entry_rcu(r, node, &dn_fib_rules, r_hlist) {
                 if (((saddr^r->r_src) & r->r_srcmask) ||
                     ((daddr^r->r_dst) & r->r_dstmask) ||
 #ifdef CONFIG_DECNET_ROUTE_FWMARK
@@ -228,14 +237,14 @@ int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
                         policy = r;
                         break;
                 case RTN_UNREACHABLE:
-                        read_unlock(&dn_fib_rules_lock);
+                        rcu_read_unlock();
                         return -ENETUNREACH;
                 default:
                 case RTN_BLACKHOLE:
-                        read_unlock(&dn_fib_rules_lock);
+                        rcu_read_unlock();
                         return -EINVAL;
                 case RTN_PROHIBIT:
-                        read_unlock(&dn_fib_rules_lock);
+                        rcu_read_unlock();
                         return -EACCES;
                 }

@@ -246,16 +255,16 @@ int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
                         res->r = policy;
                         if (policy)
                                 atomic_inc(&policy->r_clntref);
-                        read_unlock(&dn_fib_rules_lock);
+                        rcu_read_unlock();
                         return 0;
                 }
                 if (err < 0 && err != -EAGAIN) {
-                        read_unlock(&dn_fib_rules_lock);
+                        rcu_read_unlock();
                         return err;
                 }
         }

-        read_unlock(&dn_fib_rules_lock);
+        rcu_read_unlock();
         return -ESRCH;
 }

@@ -297,27 +306,23 @@ __le16 dn_fib_rules_policy(__le16 saddr, struct dn_fib_res *res, unsigned *flags

 static void dn_fib_rules_detach(struct net_device *dev)
 {
+        struct hlist_node *node;
         struct dn_fib_rule *r;

-        for(r = dn_fib_rules; r; r = r->r_next) {
-                if (r->r_ifindex == dev->ifindex) {
-                        write_lock_bh(&dn_fib_rules_lock);
+        hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
+                if (r->r_ifindex == dev->ifindex)
                         r->r_ifindex = -1;
-                        write_unlock_bh(&dn_fib_rules_lock);
-                }
         }
 }

 static void dn_fib_rules_attach(struct net_device *dev)
 {
+        struct hlist_node *node;
         struct dn_fib_rule *r;

-        for(r = dn_fib_rules; r; r = r->r_next) {
-                if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0) {
-                        write_lock_bh(&dn_fib_rules_lock);
+        hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
+                if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
                         r->r_ifindex = dev->ifindex;
-                        write_unlock_bh(&dn_fib_rules_lock);
-                }
         }
 }

@@ -387,18 +392,20 @@ rtattr_failure:

 int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
 {
-        int idx;
+        int idx = 0;
         int s_idx = cb->args[0];
         struct dn_fib_rule *r;
+        struct hlist_node *node;

-        read_lock(&dn_fib_rules_lock);
-        for(r = dn_fib_rules, idx = 0; r; r = r->r_next, idx++) {
+        rcu_read_lock();
+        hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
                 if (idx < s_idx)
                         continue;
                 if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
                         break;
+                idx++;
         }
-        read_unlock(&dn_fib_rules_lock);
+        rcu_read_unlock();
         cb->args[0] = idx;

         return skb->len;
@@ -406,6 +413,8 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)

 void __init dn_fib_rules_init(void)
 {
+        INIT_HLIST_HEAD(&dn_fib_rules);
+        hlist_add_head(&default_rule.r_hlist, &dn_fib_rules);
         register_netdevice_notifier(&dn_fib_rules_notifier);
 }
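On the update side, the patch pairs hlist_del_rcu() with call_rcu() so that a removed rule is only kfree()d once every reader that might still hold a pointer to it has left its RCU read-side critical section. Below is a minimal sketch of that unlink-and-defer sequence, again assuming the post-patch struct dn_fib_rule; it intentionally omits the r_clntref reference counting that the real dn_fib_rule_put()/dn_fib_rule_put_rcu() pair performs, and dn_rule_unlink()/dn_rule_free_rcu() are hypothetical names, not functions from the patch.

/*
 * Sketch only: unlink a rule and defer the free past an RCU grace period.
 * Mirrors the hlist_del_rcu()/call_rcu() pairing the patch introduces, but
 * omits the reference counting done by dn_fib_rule_put().
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static void dn_rule_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct dn_fib_rule, rcu));
}

static void dn_rule_unlink(struct dn_fib_rule *r)
{
        hlist_del_rcu(&r->r_hlist);             /* readers already traversing still see r */
        r->r_dead = 1;
        call_rcu(&r->rcu, dn_rule_free_rcu);    /* kfree() runs after the grace period */
}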