[SCTP]: Allow spillover of receive buffer to avoid deadlock.
This patch fixes a deadlock situation in the receive path by allowing
temporary spillover of the receive buffer.

- If the chunk we receive has a TSN that immediately follows the CTSN,
  accept it even if we run out of receive buffer space, and renege data
  with higher TSNs.
- Once we accept one chunk in a packet, accept all the remaining chunks
  even if we run out of receive buffer space.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Mark Butler <butlerm@middle.net>
Acked-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7c3ceb4fb9
parent 91ef5d2d6e
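
For orientation before the diff, here is a minimal standalone sketch of the
acceptance rule the patch introduces. It is illustrative only: the function
and parameter names (accept_under_pressure, over_rcvbuf, and so on) are
stand-ins, not kernel identifiers.

#include <stdint.h>

/*
 * Illustrative helper (not from the patch): decide whether a DATA chunk
 * may still be accepted once the receive buffer budget is exhausted.
 */
static int accept_under_pressure(uint32_t tsn, uint32_t ctsn,
				 int over_rcvbuf, int data_accepted)
{
	if (!over_rcvbuf)
		return 1;	/* buffer has room: normal accept path */
	if (data_accepted)
		return 1;	/* a chunk from this packet was already
				 * accepted, so its skbuff is pinned anyway */
	return (ctsn + 1) == tsn;	/* only the next in-order TSN spills over */
}

So with ctsn == 1000 and an exhausted buffer, a chunk with TSN 1001 is still
accepted (data with higher TSNs can be reneged to make room for it), while a
chunk with TSN 1005 is ignored until the queue drains.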
@@ -712,6 +712,7 @@ struct sctp_chunk {
 	__u8 tsn_gap_acked;	 /* Is this chunk acked by a GAP ACK? */
 	__s8 fast_retransmit;	 /* Is this chunk fast retransmitted? */
 	__u8 tsn_missing_report; /* Data chunk missing counter. */
+	__u8 data_accepted;	 /* At least 1 chunk in this packet accepted */
 };
 
 void sctp_chunk_hold(struct sctp_chunk *);
@@ -149,6 +149,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
 		/* This is the first chunk in the packet. */
 		chunk->singleton = 1;
 		ch = (sctp_chunkhdr_t *) chunk->skb->data;
+		chunk->data_accepted = 0;
 	}
 
 	chunk->chunk_hdr = ch;
@@ -5151,7 +5151,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	int tmp;
 	__u32 tsn;
 	int account_value;
+	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
 	struct sock *sk = asoc->base.sk;
+	int rcvbuf_over = 0;
 
 	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
 	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5162,10 +5164,16 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	/* ASSERT:  Now skb->data is really the user data.  */
 
 	/*
-	 * if we are established, and we have used up our receive
-	 * buffer memory, drop the frame
-	 */
-	if (asoc->state == SCTP_STATE_ESTABLISHED) {
+	 * If we are established, and we have used up our receive buffer
+	 * memory, think about dropping the frame.
+	 * Note that we have an opportunity to improve performance here.
+	 * If we accept one chunk from an skbuff, we have to keep all the
+	 * memory of that skbuff around until the chunk is read into user
+	 * space. Therefore, once we accept 1 chunk we may as well accept all
+	 * remaining chunks in the skbuff. The data_accepted flag helps us do
+	 * that.
+	 */
+	if ((asoc->state == SCTP_STATE_ESTABLISHED) && (!chunk->data_accepted)) {
 		/*
 		 * If the receive buffer policy is 1, then each
 		 * association can allocate up to sk_rcvbuf bytes
@@ -5176,9 +5184,25 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 			account_value = atomic_read(&asoc->rmem_alloc);
 		else
 			account_value = atomic_read(&sk->sk_rmem_alloc);
-
-		if (account_value > sk->sk_rcvbuf)
-			return SCTP_IERROR_IGNORE_TSN;
+		if (account_value > sk->sk_rcvbuf) {
+			/*
+			 * We need to make forward progress, even when we are
+			 * under memory pressure, so we always allow the
+			 * next tsn after the ctsn ack point to be accepted.
+			 * This lets us avoid deadlocks in which we have to
+			 * drop frames that would otherwise let us drain the
+			 * receive queue.
+			 */
+			if ((sctp_tsnmap_get_ctsn(map) + 1) != tsn)
+				return SCTP_IERROR_IGNORE_TSN;
+
+			/*
+			 * We're going to accept the frame but we should renege
+			 * to make space for it. This will send us down that
+			 * path later in this function.
+			 */
+			rcvbuf_over = 1;
+		}
 	}
 
 	/* Process ECN based congestion.
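
Aside: the account_value selection above depends on the receive buffer
policy, per-association accounting when the policy is 1, per-socket
otherwise. A hedged restatement in standalone C follows; the names are
stand-ins, not the kernel's.

#include <stdatomic.h>

/* Illustrative only: mirrors the account_value selection above. */
static int over_budget(int per_assoc_policy, atomic_int *asoc_rmem_alloc,
		       atomic_int *sk_rmem_alloc, int sk_rcvbuf)
{
	int used = per_assoc_policy
		? atomic_load(asoc_rmem_alloc)	/* this association only */
		: atomic_load(sk_rmem_alloc);	/* everything on the socket */

	return used > sk_rcvbuf;
}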
@@ -5226,6 +5250,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	datalen -= sizeof(sctp_data_chunk_t);
 
 	deliver = SCTP_CMD_CHUNK_ULP;
+	chunk->data_accepted = 1;
 
 	/* Think about partial delivery. */
 	if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
@@ -5242,7 +5267,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * large spill over.
 	 */
 	if (!asoc->rwnd || asoc->rwnd_over ||
-	    (datalen > asoc->rwnd + asoc->frag_point)) {
+	    (datalen > asoc->rwnd + asoc->frag_point) ||
+	    rcvbuf_over) {
 
 		/* If this is the next TSN, consider reneging to make
 		 * room.   Note: Playing nice with a confused sender.  A
@@ -5250,8 +5276,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * space and in the future we may want to detect and
 	 * do more drastic reneging.
 	 */
-	if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) &&
-	    (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) {
+	if (sctp_tsnmap_has_gap(map) &&
+	    (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
 		SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn);
 		deliver = SCTP_CMD_RENEGE;
 	} else {
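
Taken together, a spilled-over frame (rcvbuf_over set) is now handled like a
receive-window overflow: delivery proceeds by reneging when the frame is the
next in-order TSN and the TSN map has a gap to renege from. A simplified
standalone sketch of that decision, with stand-in names rather than the
kernel API:

#include <stdint.h>

enum deliver_cmd { DELIVER_ULP, DELIVER_RENEGE, DROP_FRAME };

/* Illustrative restatement of the patched condition (stand-in names). */
static enum deliver_cmd choose_delivery(uint32_t datalen, uint32_t rwnd,
					uint32_t rwnd_over, uint32_t frag_point,
					int rcvbuf_over, int map_has_gap,
					int is_next_tsn)
{
	if (!rwnd || rwnd_over ||
	    datalen > rwnd + frag_point || rcvbuf_over) {
		if (map_has_gap && is_next_tsn)
			return DELIVER_RENEGE;	/* renege higher TSNs to
						 * make room for this one */
		return DROP_FRAME;		/* cannot safely make room */
	}
	return DELIVER_ULP;			/* normal delivery */
}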