net: Clone skb before setting peeked flag
Shared skbs must not be modified and this is crucial for broadcast and/or multicast paths where we use it as an optimisation to avoid unnecessary cloning. The function skb_recv_datagram breaks this rule by setting peeked without cloning the skb first. This causes funky races which lead to double-frees. This patch fixes this by cloning the skb and replacing the skb in the list when setting skb->peeked. Fixes: a59322be07c9 ("[UDP]: Only increment counter on first peek/recv") Reported-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
035d210f92
commit
738ac1ebb9
@ -131,6 +131,35 @@ out_noerr:
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * skb_set_peeked - mark an skb as having been peeked, unsharing it first
 * @skb: the buffer sitting on a receive queue
 *
 * A shared skb must not be modified, so when @skb is shared it is
 * replaced in its queue by a private clone before skb->peeked is set:
 * the clone is spliced into the list in @skb's place and the original
 * reference is dropped via consume_skb().
 *
 * Returns 0 on success, -ENOMEM if the clone allocation fails (in which
 * case the queue is left untouched).
 *
 * NOTE(review): when the skb is replaced, the reassignment "skb = nskb"
 * only updates the local parameter — the caller's pointer still refers
 * to the consumed original. A caller that keeps using that pointer
 * afterwards (e.g. to bump skb->users) risks a use-after-free; verify
 * callers are handed the replacement skb (upstream later changed this
 * helper to return the new skb for exactly this reason).
 */
static int skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	/* Already marked on a previous peek; nothing to do. */
	if (skb->peeked)
		return 0;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	/*
	 * Splice the private clone into the queue in place of the
	 * shared skb (queue lock is expected to be held by the caller).
	 */
	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	/* Drop our reference to the shared original. */
	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return 0;
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* __skb_recv_datagram - Receive a datagram skbuff
|
* __skb_recv_datagram - Receive a datagram skbuff
|
||||||
* @sk: socket
|
* @sk: socket
|
||||||
@ -165,7 +194,9 @@ out_noerr:
|
|||||||
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
||||||
int *peeked, int *off, int *err)
|
int *peeked, int *off, int *err)
|
||||||
{
|
{
|
||||||
|
struct sk_buff_head *queue = &sk->sk_receive_queue;
|
||||||
struct sk_buff *skb, *last;
|
struct sk_buff *skb, *last;
|
||||||
|
unsigned long cpu_flags;
|
||||||
long timeo;
|
long timeo;
|
||||||
/*
|
/*
|
||||||
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
|
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
|
||||||
@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
|||||||
* Look at current nfs client by the way...
|
* Look at current nfs client by the way...
|
||||||
* However, this function was correct in any case. 8)
|
* However, this function was correct in any case. 8)
|
||||||
*/
|
*/
|
||||||
unsigned long cpu_flags;
|
|
||||||
struct sk_buff_head *queue = &sk->sk_receive_queue;
|
|
||||||
int _off = *off;
|
int _off = *off;
|
||||||
|
|
||||||
last = (struct sk_buff *)queue;
|
last = (struct sk_buff *)queue;
|
||||||
@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
|||||||
_off -= skb->len;
|
_off -= skb->len;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
skb->peeked = 1;
|
|
||||||
|
error = skb_set_peeked(skb);
|
||||||
|
if (error)
|
||||||
|
goto unlock_err;
|
||||||
|
|
||||||
atomic_inc(&skb->users);
|
atomic_inc(&skb->users);
|
||||||
} else
|
} else
|
||||||
__skb_unlink(skb, queue);
|
__skb_unlink(skb, queue);
|
||||||
@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
|
|||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
unlock_err:
|
||||||
|
spin_unlock_irqrestore(&queue->lock, cpu_flags);
|
||||||
no_packet:
|
no_packet:
|
||||||
*err = error;
|
*err = error;
|
||||||
return NULL;
|
return NULL;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user