/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>
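
/* Check whether the shared rx ring has enough request slots for the
 * skb at the head of the queue, counting an extra slot each for a GSO
 * and a hash extra-info segment.  If not, arm req_event so the
 * frontend notifies us when more requests are posted.
 */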
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	struct sk_buff *skb;
	int needed;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return false;

	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb_is_gso(skb))
		needed++;
	if (skb->sw_hash)
		needed++;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}
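
/* Queue an skb for transmission to the frontend, pausing the matching
 * netdev tx queue if the internal rx queue has grown past its limit.
 */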
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}
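
/* Take the skb at the head of the internal rx queue, waking the
 * matching netdev tx queue once the backlog drops below the limit.
 */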
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}
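
/* Free every skb still left on the internal rx queue. */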
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}
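
/* Drop skbs from the head of the queue once their expiry time has
 * passed.
 */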
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}
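
/* Issue the batched grant copies, patch the status of any response
 * whose copy failed, push the responses to the frontend (notifying it
 * if necessary) and free the completed skbs.
 */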
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;
	int notify;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	/* Push responses for all completed packets. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
}
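
/* Add one grant copy operation for a chunk of skb data, flushing the
 * batch first if it is already full.  The source page may be local or
 * foreign (granted by another domain).
 */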
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid  = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref    = req->gref;
	op->dest.domid    = queue->vif->domid;
	op->dest.offset   = offset;
	op->len           = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	struct sk_buff *frag_iter;
	int frag; /* frag == -1 => frag_iter->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};
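
/* Dequeue the next skb and initialise the packet state, including any
 * GSO and hash extra-info segments that need to accompany it.
 */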
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}
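
/* Advance the private response producer over all slots used by this
 * packet and park the skb until the grant copies have been flushed.
 */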
static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}
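
/* Step to the next fragment of the current skb, or on to the next skb
 * on the frag_list once all fragments have been consumed.
 */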
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

	pkt->frag++;
	pkt->frag_offset = 0;

	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
		else
			pkt->frag_iter = frag_iter->next;

		pkt->frag = -1;
	}
}
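
/* Return the next chunk of data for the packet, sized so that it
 * neither crosses a source page boundary nor overflows the space left
 * in the destination slot.
 */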
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	void *frag_data;
	size_t frag_len, chunk_len;

	BUG_ON(!frag_iter);

	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		frag_len = skb_headlen(frag_iter);
	} else {
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
					     xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len)
		xenvif_rx_next_frag(pkt);

	*data = frag_data;
	*len = chunk_len;
}
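
/* Fill one data slot: queue grant copies until the slot is full or the
 * packet is exhausted, then build the matching response with the
 * appropriate flags.
 */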
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = 0;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}
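
/* Emit one pending extra-info segment in place of a normal response,
 * flagging it with XEN_NETIF_EXTRA_FLAG_MORE if further extras remain.
 */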
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}
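
/* Transmit a single skb to the frontend, consuming one ring slot per
 * data chunk or extra-info segment until the packet is complete.
 */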
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	queue->last_rx_time = jiffies;

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64
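
/* Transmit up to RX_BATCH_SIZE packets while ring slots are available,
 * then flush any outstanding grant copies and responses.
 */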
void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct sk_buff_head completed_skbs;
	unsigned int work_done = 0;

	__skb_queue_head_init(&completed_skbs);
	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}

	/* Flush any pending copies and complete all skbs. */
	xenvif_rx_copy_flush(queue);
}
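
/* A queue counts as stalled if the frontend has no unconsumed requests
 * and no guest Rx work has been done for the configured stall timeout.
 */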
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled &&
		prod - cons < 1 &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}
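
/* A stalled queue becomes ready again as soon as the frontend posts a
 * new request.
 */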
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}
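
/* The Rx thread has work if ring slots are available for queued skbs,
 * a stall state change is due, the thread should stop, or the vif has
 * been disabled.
 */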
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		kthread_should_stop() ||
		queue->vif->disabled;
}
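
/* Time until the skb at the head of the queue expires, or an infinite
 * timeout if the queue is empty.
 */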
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
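
/* Mark this queue as stalled; the carrier is taken down when the first
 * queue stalls.
 */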
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}
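
/* Mark this queue as ready again; the carrier comes back up once every
 * stalled queue has recovered.
 */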
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}
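
/* Per-queue kernel thread: pushes queued skbs to the frontend, manages
 * carrier state based on stall detection and drops packets that have
 * been queued for too long.
 */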
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains.  These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}