Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly;	/* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+					 struct net_device *dev,
+					 struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1214,7 +1217,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
 	if (dev->flags & IFF_UP) {
-		call_netdevice_notifiers(NETDEV_CHANGE, dev);
+		struct netdev_notifier_change_info change_info;
+
+		change_info.flags_changed = 0;
+		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+					      &change_info.info);
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
 	}
 }
@@ -4234,9 +4241,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
 	napi->weight = weight_p;
 	local_irq_disable();
-	while (work < quota) {
+	while (1) {
 		struct sk_buff *skb;
-		unsigned int qlen;
 
 		while ((skb = __skb_dequeue(&sd->process_queue))) {
 			local_irq_enable();
@@ -4250,24 +4256,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
 		}
 
 		rps_lock(sd);
-		qlen = skb_queue_len(&sd->input_pkt_queue);
-		if (qlen)
-			skb_queue_splice_tail_init(&sd->input_pkt_queue,
-						   &sd->process_queue);
-
-		if (qlen < quota - work) {
+		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
 			 * only current cpu owns and manipulates this napi,
-			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-			 * we can use a plain write instead of clear_bit(),
+			 * and NAPI_STATE_SCHED is the only possible flag set
+			 * on backlog.
+			 * We can use a plain write instead of clear_bit(),
 			 * and we dont need an smp_mb() memory barrier.
 			 */
 			list_del(&napi->poll_list);
 			napi->state = 0;
+			rps_unlock(sd);
 
-			quota = work + qlen;
+			break;
 		}
+
+		skb_queue_splice_tail_init(&sd->input_pkt_queue,
+					   &sd->process_queue);
 		rps_unlock(sd);
 	}
 	local_irq_enable();
@@ -74,61 +74,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 	return err;
 }
 
-/*
- *	Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-		      int offset, int len)
-{
-	int copy;
-	for (; len > 0; ++iov) {
-		/* Skip over the finished iovecs */
-		if (unlikely(offset >= iov->iov_len)) {
-			offset -= iov->iov_len;
-			continue;
-		}
-		copy = min_t(unsigned int, iov->iov_len - offset, len);
-		if (copy_to_user(iov->iov_base + offset, kdata, copy))
-			return -EFAULT;
-		offset = 0;
-		kdata += copy;
-		len -= copy;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- *	Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-			int offset, int len)
-{
-	/* Skip over the finished iovecs */
-	while (offset >= iov->iov_len) {
-		offset -= iov->iov_len;
-		iov++;
-	}
-
-	while (len > 0) {
-		u8 __user *base = iov->iov_base + offset;
-		int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-		offset = 0;
-		if (copy_from_user(kdata, base, copy))
-			return -EFAULT;
-		len -= copy;
-		kdata += copy;
-		iov++;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
-
 /*
  *	And now for the all-in-one: copy and checksum from a user iovec
  *	directly to a datagram
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
 		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
 		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
 	} else {
+		struct neigh_table *tbl = p->tbl;
 		dev_name_source = "default";
-		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
 	}
 
 	if (handler) {