Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Just a couple more stragglers, I really hope this is it.

  1) Don't let frags slip down into the GRO segmentation handlers, from
     Steffen Klassert.

  2) Truesize under-estimation triggers warnings in TCP over loopback
     with socket filters, 2 part fix from Eric Dumazet.

  3) Fix undesirable reset of bonding MTU to ETH_HLEN on slave removal,
     from Paolo Abeni.

  4) If we flush the XFRM policy after garbage collection, it doesn't
     work because stray entries can be created afterwards. Fix from
     Xin Long.

  5) Hung socket connection fixes in TIPC from Parthasarathy
     Bhuvaragan.

  6) Fix GRO regression with IPSEC when netfilter is disabled, from
     Sabrina Dubroca.

  7) Fix cpsw driver Kconfig dependency regression, from Arnd Bergmann"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: hso: register netdev later to avoid a race condition
  net: adjust skb->truesize in ___pskb_trim()
  tcp: do not underestimate skb->truesize in tcp_trim_head()
  bonding: avoid defaulting hard_header_len to ETH_HLEN on slave removal
  ipv4: Don't pass IP fragments to upper layer GRO handlers.
  cpsw/netcp: refine cpts dependency
  tipc: close the connection if protocol messages contain errors
  tipc: improve error validations for sockets in CONNECTING state
  tipc: Fix missing connection request handling
  xfrm: fix GRO for !CONFIG_NETFILTER
  xfrm: do the garbage collection after flushing policy
commit 0e9117882d
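As background for fix 2 above (the skb->truesize changes to tcp_trim_head() in the diff below), here is a small, hypothetical userspace model rather than kernel code; struct toy_skb and toy_trim_head() are made-up names used only to illustrate why the socket must be uncharged by the truesize that was actually freed, not by the number of trimmed bytes: dropping a whole page fragment can release far more memory than the payload it carried.

/* Hypothetical toy model (not kernel code) of the truesize accounting
 * issue addressed by the tcp_trim_head() change below: trimming `len`
 * bytes of payload may free an entire page fragment, so the memory to
 * uncharge from the socket is the freed truesize, not `len`.
 */
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

struct toy_skb {
	unsigned int len;       /* payload bytes carried */
	unsigned int truesize;  /* memory actually consumed */
};

/* Trim `len` payload bytes; return how much truesize was really freed.
 * Here we assume the trimmed bytes sat alone in one page fragment. */
static unsigned int toy_trim_head(struct toy_skb *skb, unsigned int len)
{
	unsigned int freed = TOY_PAGE_SIZE;

	skb->len -= len;
	skb->truesize -= freed;
	return freed;
}

int main(void)
{
	struct toy_skb skb = { .len = 100, .truesize = TOY_PAGE_SIZE + 256 };
	unsigned int wmem_queued = skb.truesize;
	unsigned int delta = toy_trim_head(&skb, 100);

	/* Uncharging only the 100 trimmed bytes would leave wmem_queued
	 * over-stating real usage by TOY_PAGE_SIZE - 100 bytes. */
	wmem_queued -= delta;
	printf("freed truesize %u, wmem_queued now %u (skb truesize %u)\n",
	       delta, wmem_queued, skb.truesize);
	return 0;
}

With these assumed numbers, charging the socket for only the trimmed length would leave the send-queue accounting short by almost a full page, which is the under-estimation the patch avoids by having __pskb_trim_head() return the real delta_truesize.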
@@ -1104,11 +1104,11 @@ static void bond_compute_features(struct bonding *bond)
 		gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
 		gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
 	}
+	bond_dev->hard_header_len = max_hard_header_len;
 
 done:
 	bond_dev->vlan_features = vlan_features;
 	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
-	bond_dev->hard_header_len = max_hard_header_len;
 	bond_dev->gso_max_segs = gso_max_segs;
 	netif_set_gso_max_size(bond_dev, gso_max_size);
 
@@ -76,7 +76,7 @@ config TI_CPSW
 config TI_CPTS
 	bool "TI Common Platform Time Sync (CPTS) Support"
 	depends on TI_CPSW || TI_KEYSTONE_NETCP
-	depends on PTP_1588_CLOCK
+	depends on POSIX_TIMERS
 	---help---
 	  This driver supports the Common Platform Time Sync unit of
 	  the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
@@ -87,6 +87,8 @@ config TI_CPTS_MOD
 	tristate
 	depends on TI_CPTS
+	default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y
 	select NET_PTP_CLASSIFY
+	imply PTP_1588_CLOCK
 	default m
 
 config TI_KEYSTONE_NETCP
@@ -2534,13 +2534,6 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 	SET_NETDEV_DEV(net, &interface->dev);
 	SET_NETDEV_DEVTYPE(net, &hso_type);
 
-	/* registering our net device */
-	result = register_netdev(net);
-	if (result) {
-		dev_err(&interface->dev, "Failed to register device\n");
-		goto exit;
-	}
-
 	/* start allocating */
 	for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
 		hso_net->mux_bulk_rx_urb_pool[i] = usb_alloc_urb(0, GFP_KERNEL);
@@ -2560,6 +2553,13 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 
 	add_net_device(hso_dev);
 
+	/* registering our net device */
+	result = register_netdev(net);
+	if (result) {
+		dev_err(&interface->dev, "Failed to register device\n");
+		goto exit;
+	}
+
 	hso_log_port(hso_dev);
 
 	hso_create_rfkill(hso_dev, interface);
@@ -1576,6 +1576,8 @@ done:
 		skb_set_tail_pointer(skb, len);
 	}
 
+	if (!skb->sk || skb->destructor == sock_edemux)
+		skb_condense(skb);
 	return 0;
 }
 EXPORT_SYMBOL(___pskb_trim);
@@ -1343,6 +1343,9 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	if (*(u8 *)iph != 0x45)
 		goto out_unlock;
 
+	if (ip_is_fragment(iph))
+		goto out_unlock;
+
 	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
 		goto out_unlock;
 
@@ -1267,7 +1267,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
  * eventually). The difference is that pulled data not copied, but
  * immediately discarded.
  */
-static void __pskb_trim_head(struct sk_buff *skb, int len)
+static int __pskb_trim_head(struct sk_buff *skb, int len)
 {
 	struct skb_shared_info *shinfo;
 	int i, k, eat;
@@ -1277,7 +1277,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 		__skb_pull(skb, eat);
 		len -= eat;
 		if (!len)
-			return;
+			return 0;
 	}
 	eat = len;
 	k = 0;
@@ -1303,23 +1303,28 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
 	skb_reset_tail_pointer(skb);
 	skb->data_len -= len;
 	skb->len = skb->data_len;
+	return len;
 }
 
 /* Remove acked data from a packet in the transmit queue. */
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 {
+	u32 delta_truesize;
+
 	if (skb_unclone(skb, GFP_ATOMIC))
 		return -ENOMEM;
 
-	__pskb_trim_head(skb, len);
+	delta_truesize = __pskb_trim_head(skb, len);
 
 	TCP_SKB_CB(skb)->seq += len;
 	skb->ip_summed = CHECKSUM_PARTIAL;
 
-	skb->truesize -= len;
-	sk->sk_wmem_queued -= len;
-	sk_mem_uncharge(sk, len);
-	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	if (delta_truesize) {
+		skb->truesize -= delta_truesize;
+		sk->sk_wmem_queued -= delta_truesize;
+		sk_mem_uncharge(sk, delta_truesize);
+		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	}
 
 	/* Any change of skb->len requires recalculation of tso factor. */
 	if (tcp_skb_pcount(skb) > 1)
@@ -866,6 +866,14 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
 	if (!tsk_peer_msg(tsk, hdr))
 		goto exit;
 
+	if (unlikely(msg_errcode(hdr))) {
+		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+				      tsk_peer_port(tsk));
+		sk->sk_state_change(sk);
+		goto exit;
+	}
+
 	tsk->probe_unacked = false;
 
 	if (mtyp == CONN_PROBE) {
@@ -1259,7 +1267,10 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 	struct sock *sk = sock->sk;
 	DEFINE_WAIT(wait);
 	long timeo = *timeop;
-	int err;
+	int err = sock_error(sk);
+
+	if (err)
+		return err;
 
 	for (;;) {
 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
@@ -1281,6 +1292,10 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
 		err = sock_intr_errno(timeo);
 		if (signal_pending(current))
 			break;
+
+		err = sock_error(sk);
+		if (err)
+			break;
 	}
 	finish_wait(sk_sleep(sk), &wait);
 	*timeop = timeo;
@@ -1551,6 +1566,8 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 	struct sock *sk = &tsk->sk;
 	struct net *net = sock_net(sk);
 	struct tipc_msg *hdr = buf_msg(skb);
+	u32 pport = msg_origport(hdr);
+	u32 pnode = msg_orignode(hdr);
 
 	if (unlikely(msg_mcast(hdr)))
 		return false;
@@ -1558,18 +1575,28 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 	switch (sk->sk_state) {
 	case TIPC_CONNECTING:
 		/* Accept only ACK or NACK message */
-		if (unlikely(!msg_connected(hdr)))
-			return false;
+		if (unlikely(!msg_connected(hdr))) {
+			if (pport != tsk_peer_port(tsk) ||
+			    pnode != tsk_peer_node(tsk))
+				return false;
+
+			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+			sk->sk_err = ECONNREFUSED;
+			sk->sk_state_change(sk);
+			return true;
+		}
 
 		if (unlikely(msg_errcode(hdr))) {
 			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
 			sk->sk_err = ECONNREFUSED;
+			sk->sk_state_change(sk);
 			return true;
 		}
 
 		if (unlikely(!msg_isdata(hdr))) {
 			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
 			sk->sk_err = EINVAL;
+			sk->sk_state_change(sk);
 			return true;
 		}
 
@@ -1581,8 +1608,7 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 		return true;
 
 		/* If empty 'ACK-' message, wake up sleeping connect() */
-		if (waitqueue_active(sk_sleep(sk)))
-			wake_up_interruptible(sk_sleep(sk));
+		sk->sk_data_ready(sk);
 
 		/* 'ACK-' message is neither accepted nor rejected: */
 		msg_set_dest_droppable(hdr, 1);
@@ -395,7 +395,7 @@ resume:
 	if (xo)
 		xfrm_gro = xo->flags & XFRM_GRO;
 
-	err = x->inner_mode->afinfo->transport_finish(skb, async);
+	err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
 	if (xfrm_gro) {
 		skb_dst_drop(skb);
 		gro_cells_receive(&gro_cells, skb);
@@ -1006,6 +1006,10 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 	err = -ESRCH;
 out:
 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+
+	if (cnt)
+		xfrm_garbage_collect(net);
+
 	return err;
 }
 EXPORT_SYMBOL(xfrm_policy_flush);