xen-netback: require fewer guest Rx slots when not using GSO

Commit f48da8b14d (xen-netback: fix
unlimited guest Rx internal queue and carrier flapping) introduced a
regression.

The PV frontend in iPXE places only 4 requests on the guest Rx ring.
Since netback required at least (MAX_SKB_FRAGS + 1) slots, iPXE could
not receive any packets.
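
As a concrete sketch of the old arithmetic (assuming MAX_SKB_FRAGS is
17, its usual value with 4 KiB pages at the time; that figure is not
stated in this patch):

  /* Old fixed requirement, independent of the actual packet size: */
  #define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)   /* == 18 */
  /* netback wanted 18 free slots; iPXE only ever posts 4. */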

a) If GSO is not enabled on the VIF, fewer guest Rx slots are required
   for the largest possible packet.  Calculate the required slots
   based on the maximum GSO size or the MTU.

   This calculation of the number of required slots relies on
   1650d5455b (xen-netback: always fully coalesce guest Rx packets),
   which is present in 4.0-rc1 and later.
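
   As a rough worked example of the new calculation (assuming
   PAGE_SIZE == 4096, gso_max_size == 65536 and an MTU of 1500;
   illustrative values, not taken from this patch):

     /* GSO enabled:  DIV_ROUND_UP(65536, 4096) + 1 == 17 slots
      *               (the +1 is the GSO meta-data slot)
      * GSO disabled: DIV_ROUND_UP(1500, 4096)       ==  1 slot
      * so a frontend posting only 4 Rx requests can now make
      * progress whenever GSO is not in use.
      */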

b) Reduce the Rx stall detection to checking for at least one
   available Rx request.  This is fine since we're predominantly
   concerned with detecting interfaces which are down and thus have
   zero available Rx requests.
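
   In ring-index terms, a sketch of the relaxed predicate (prod and
   cons are the guest Rx ring's request producer and consumer; the
   comments are editorial, not from the patch):

     available = prod - cons;  /* requests the frontend has posted */
     /* Old: stalled when available < XEN_NETBK_RX_SLOTS_MAX (18).
      * New: stalled only when available < 1, i.e. the frontend has
      * posted no Rx requests at all -- the signature of a downed
      * interface.
      */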

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -200,11 +200,6 @@ struct xenvif_queue { /* Per-queue data for xenvif */
         struct xenvif_stats stats;
 };
 
-/* Maximum number of Rx slots a to-guest packet may use, including the
- * slot needed for GSO meta-data.
- */
-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
-
 enum state_bit_shift {
         /* This bit marks that the vif is connected */
         VIF_STATUS_CONNECTED,
@@ -317,11 +312,6 @@ int xenvif_dealloc_kthread(void *data);
 
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
-/* Determine whether the needed number of slots (req) are available,
- * and set req_event if not.
- */
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
-
 void xenvif_carrier_on(struct xenvif *vif);
 
 /* Callback from stack when TX packet can be released */

--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
         return i & (MAX_PENDING_REQS-1);
 }
 
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+        if (vif->gso_mask)
+                return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+        else
+                return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
         RING_IDX prod, cons;
+        int needed;
+
+        needed = xenvif_rx_ring_slots_needed(queue->vif);
 
         do {
                 prod = queue->rx.sring->req_prod;
@@ -513,7 +524,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
         skb_queue_head_init(&rxq);
 
-        while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+        while (xenvif_rx_ring_slots_available(queue)
                && (skb = xenvif_rx_dequeue(queue)) != NULL) {
                 queue->last_rx_time = jiffies;
 
@@ -1938,8 +1949,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
         prod = queue->rx.sring->req_prod;
         cons = queue->rx.req_cons;
 
-        return !queue->stalled
-                && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+        return !queue->stalled && prod - cons < 1
                 && time_after(jiffies,
                               queue->last_rx_time + queue->vif->stall_timeout);
 }
@@ -1951,14 +1961,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
         prod = queue->rx.sring->req_prod;
         cons = queue->rx.req_cons;
 
-        return queue->stalled
-                && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+        return queue->stalled && prod - cons >= 1;
 }
 
 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
         return (!skb_queue_empty(&queue->rx_queue)
-                && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+                && xenvif_rx_ring_slots_available(queue))
                 || (queue->vif->stall_timeout &&
                     (xenvif_rx_queue_stalled(queue)
                      || xenvif_rx_queue_ready(queue)))