/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>
/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots a skb can have. If a guest sends a
 * skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
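
/* Illustrative example only (assuming this file is built as the xen-netback
 * module): the parameters above can be set at load time, e.g.
 *
 *   modprobe xen-netback separate_tx_rx_irq=0 max_queues=16
 *
 * Parameters declared with mode 0444, such as rx_drain_timeout_msecs, are
 * read-only in sysfs and can only be set on the module command line.
 */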
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in the
 * pending_tx_info array.
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}
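
/* Note on the pointer arithmetic above: pending_tx_info[] is a flat array
 * embedded in struct xenvif_queue and ubuf->desc holds the slot's own index,
 * so "temp - pending_idx" steps back to &pending_tx_info[0], from which
 * container_of() recovers the owning queue.
 */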
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}
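
/* While a slot is still being mapped the frag does not yet point at a real
 * page, so the page_offset field is borrowed to stash the pending ring index.
 * It is overwritten with the true offset once the grant map completes and
 * xenvif_fill_frags() attaches the page.
 */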
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS - 1);
}
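
/* The mask above is only correct because MAX_PENDING_REQS is a power of two,
 * making "i & (MAX_PENDING_REQS - 1)" equivalent to "i % MAX_PENDING_REQS".
 * For example, if MAX_PENDING_REQS is 256, pending_index(258) == 2.
 */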
void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
}

static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = max(131072UL, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
	queue->rate_limited = false;
}

void xenvif_tx_credit_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;
	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp,
			  unsigned int extra_count, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		RING_COPY_REQUEST(&queue->tx, cons++, txp);
		extra_count = 0; /* only the first frag can have extras */
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->num_queues)
		xenvif_kick_thread(&vif->queues[0]);
}
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 unsigned int extra_count,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * is considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, extra_count, cons + slots);
		return drop_err;
	}

	return slots;
}
struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
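
/* skb->cb is the generic per-skb scratch area (48 bytes); netback only needs
 * it to remember which pending ring slot backs the packet's linear header
 * while the grant operations are in flight.
 */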
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   unsigned int extra_count,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop - queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
	queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop,
							unsigned int frag_overflow,
							struct sk_buff *nskb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots;

	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
						gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);

		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}
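
/* Summary of the contract above: xenvif_tx_check_gop() returns 0 only if the
 * header grant copy and every fragment grant map succeeded. On any failure it
 * has already unmapped and/or released every slot it touched, so the caller
 * must not free those frags again (xenvif_tx_submit() zeroes nr_frags before
 * kfree_skb for exactly that reason).
 */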
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}
static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     unsigned int *extra_count,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		RING_COPY_REQUEST(&queue->tx, cons, &extra);

		queue->tx.req_cons = ++cons;
		(*extra_count)++;

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout)) {
		queue->rate_limited = true;
		return true;
	}

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		queue->credit_timeout.data =
			(unsigned long)queue;
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;
		queue->rate_limited = true;

		return true;
	}

	return false;
}
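
/* Worked example of the credit scheduler (illustrative numbers only): with
 * credit_bytes = 1000000 and credit_usec = 1000000, each one-second window
 * grants roughly 1 MB of transmit credit. A request larger than the credit
 * remaining in the current window arms credit_timeout for the start of the
 * next window, and the queue stops consuming Tx requests until the timer's
 * callback (xenvif_tx_credit_callback) tops the credit back up.
 */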
/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Too many multicast addresses\n");
		return -ENOSPC;
	}

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return -ENOMEM;

	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
	vif->fe_mcast_count++;

	return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			--vif->fe_mcast_count;
			list_del_rcu(&mcast->entry);
			kfree_rcu(mcast, rcu);
			break;
		}
	}
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	rcu_read_lock();
	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
	/* No need for locking or RCU here. NAPI poll and TX queue
	 * are stopped.
	 */
	while (!list_empty(&vif->fe_mcast_addr)) {
		struct xenvif_mcast_addr *mcast;

		mcast = list_first_entry(&vif->fe_mcast_addr,
					 struct xenvif_mcast_addr,
					 entry);
		--vif->fe_mcast_count;
		list_del(&mcast->entry);
		kfree(mcast);
	}
}
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
		unsigned int extra_count;
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		RING_COPY_REQUEST(&queue->tx, idx, &txreq);

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		extra_count = 0;
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       &extra_count,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 (ret == 0) ?
					 XEN_NETIF_RSP_OKAY :
					 XEN_NETIF_RSP_ERROR);
			push_tx_responses(queue);
			continue;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 XEN_NETIF_RSP_OKAY);
			push_tx_responses(queue);
			continue;
		}

		ret = xenvif_count_requests(queue, &txreq, extra_count,
					    txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %u, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (unsigned long)(txreq.offset & ~XEN_PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size)
			skb_shinfo(skb)->nr_frags++;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, extra_count, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
			struct xen_netif_extra_info *extra;
			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

			switch (extra->u.hash.type) {
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
				type = PKT_HASH_TYPE_L3;
				break;

			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
				type = PKT_HASH_TYPE_L4;
				break;

			default:
				break;
			}

			if (type != PKT_HASH_TYPE_NONE)
				skb_set_hash(skb,
					     *(u32 *)extra->u.hash.value,
					     type);
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_gfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data) & ~XEN_PAGE_MASK;

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		if (data_len < txreq.size) {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
						extra_count, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req,
			       &txreq, sizeof(txreq));
			queue->pending_tx_info[pending_idx].extra_count =
				extra_count;
		}

		queue->pending_cons++;

		gop = xenvif_get_requests(queue, skb, txfrags, gop,
					  frag_overflow, nskb);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop - queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}
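
/* Note on the split above: the first XEN_NETBACK_TX_COPY_LEN bytes of each
 * packet are grant-copied into the skb's linear area (tx_copy_ops) so the
 * headers can be parsed locally, while any remaining payload is grant-mapped
 * in place (tx_map_ops). xenvif_tx_action() batches both arrays before
 * calling xenvif_tx_submit().
 */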
/* Consolidate an skb with a frag_list into a brand new one with local pages
 * on frags. Returns 0, or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}
2014-06-04 13:30:42 +04:00
static int xenvif_tx_submit ( struct xenvif_queue * queue )
2011-03-15 03:06:18 +03:00
{
2014-06-04 13:30:42 +04:00
struct gnttab_map_grant_ref * gop_map = queue - > tx_map_ops ;
struct gnttab_copy * gop_copy = queue - > tx_copy_ops ;
2011-03-15 03:06:18 +03:00
struct sk_buff * skb ;
2013-08-26 15:59:38 +04:00
int work_done = 0 ;
2011-03-15 03:06:18 +03:00
2014-06-04 13:30:42 +04:00
while ( ( skb = __skb_dequeue ( & queue - > tx_queue ) ) ! = NULL ) {
2011-03-15 03:06:18 +03:00
struct xen_netif_tx_request * txp ;
u16 pending_idx ;
unsigned data_len ;
2014-03-07 01:48:23 +04:00
pending_idx = XENVIF_TX_CB ( skb ) - > pending_idx ;
2014-06-04 13:30:42 +04:00
txp = & queue - > pending_tx_info [ pending_idx ] . req ;
2011-03-15 03:06:18 +03:00
/* Check the remap error code. */
2014-06-04 13:30:42 +04:00
if ( unlikely ( xenvif_tx_check_gop ( queue , skb , & gop_map , & gop_copy ) ) ) {
2014-07-18 22:08:03 +04:00
/* If there was an error, xenvif_tx_check_gop is
* expected to release all the frags which were mapped ,
* so kfree_skb shouldn ' t do it again
*/
2011-03-15 03:06:18 +03:00
skb_shinfo ( skb ) - > nr_frags = 0 ;
2014-07-18 22:08:03 +04:00
if ( skb_has_frag_list ( skb ) ) {
struct sk_buff * nskb =
skb_shinfo ( skb ) - > frag_list ;
skb_shinfo ( nskb ) - > nr_frags = 0 ;
}
2011-03-15 03:06:18 +03:00
kfree_skb ( skb ) ;
continue ;
}
data_len = skb - > len ;
2014-06-04 13:30:42 +04:00
callback_param ( queue , pending_idx ) . ctx = NULL ;
2011-03-15 03:06:18 +03:00
if ( data_len < txp - > size ) {
/* Append the packet payload as a fragment. */
txp - > offset + = data_len ;
txp - > size - = data_len ;
} else {
/* Schedule a response immediately. */
2014-06-04 13:30:42 +04:00
xenvif_idx_release ( queue , pending_idx ,
2014-04-02 21:04:58 +04:00
XEN_NETIF_RSP_OKAY ) ;
2011-03-15 03:06:18 +03:00
}
if ( txp - > flags & XEN_NETTXF_csum_blank )
skb - > ip_summed = CHECKSUM_PARTIAL ;
else if ( txp - > flags & XEN_NETTXF_data_validated )
skb - > ip_summed = CHECKSUM_UNNECESSARY ;
2014-06-04 13:30:42 +04:00
xenvif_fill_frags ( queue , skb ) ;
2011-03-15 03:06:18 +03:00
2014-03-07 01:48:29 +04:00
if ( unlikely ( skb_has_frag_list ( skb ) ) ) {
2014-06-04 13:30:42 +04:00
if ( xenvif_handle_frag_list ( queue , skb ) ) {
2014-03-07 01:48:29 +04:00
if ( net_ratelimit ( ) )
2014-06-04 13:30:42 +04:00
netdev_err ( queue - > vif - > dev ,
2014-03-07 01:48:29 +04:00
" Not enough memory to consolidate frag_list! \n " ) ;
2014-08-12 14:48:07 +04:00
xenvif_skb_zerocopy_prepare ( queue , skb ) ;
2014-03-07 01:48:29 +04:00
kfree_skb ( skb ) ;
continue ;
}
}
2014-06-04 13:30:42 +04:00
skb - > dev = queue - > vif - > dev ;
2011-03-15 03:06:18 +03:00
skb - > protocol = eth_type_trans ( skb , skb - > dev ) ;
2013-03-26 00:19:58 +04:00
skb_reset_network_header ( skb ) ;
2011-03-15 03:06:18 +03:00
2014-06-04 13:30:42 +04:00
if ( checksum_setup ( queue , skb ) ) {
netdev_dbg ( queue - > vif - > dev ,
2011-03-15 03:06:18 +03:00
" Can't setup checksum in net_tx_action \n " ) ;
2014-03-07 01:48:26 +04:00
/* We have to set this flag to trigger the callback */
if ( skb_shinfo ( skb ) - > destructor_arg )
2014-08-12 14:48:07 +04:00
xenvif_skb_zerocopy_prepare ( queue , skb ) ;
2011-03-15 03:06:18 +03:00
kfree_skb ( skb ) ;
continue ;
}
2013-03-27 03:11:22 +04:00
skb_probe_transport_header ( skb , 0 ) ;
2013-03-26 00:19:58 +04:00
2013-12-17 15:44:35 +04:00
/* If the packet is GSO then we will have just set up the
* transport header offset in checksum_setup so it ' s now
* straightforward to calculate gso_segs .
*/
if ( skb_is_gso ( skb ) ) {
int mss = skb_shinfo ( skb ) - > gso_size ;
int hdrlen = skb_transport_header ( skb ) -
skb_mac_header ( skb ) +
tcp_hdrlen ( skb ) ;
skb_shinfo ( skb ) - > gso_segs =
DIV_ROUND_UP ( skb - > len - hdrlen , mss ) ;
}
2014-06-04 13:30:42 +04:00
queue - > stats . rx_bytes + = skb - > len ;
queue - > stats . rx_packets + + ;
2011-03-15 03:06:18 +03:00
2013-08-26 15:59:38 +04:00
work_done + + ;
2014-03-07 01:48:26 +04:00
/* Set this flag right before netif_receive_skb, otherwise
* someone might think this packet already left netback , and
* do a skb_copy_ubufs while we are still in control of the
* skb . E . g . the __pskb_pull_tail earlier can do such thing .
*/
2014-03-07 01:48:28 +04:00
if ( skb_shinfo ( skb ) - > destructor_arg ) {
2014-08-12 14:48:07 +04:00
xenvif_skb_zerocopy_prepare ( queue , skb ) ;
2014-06-04 13:30:42 +04:00
queue - > stats . tx_zerocopy_sent + + ;
2014-03-07 01:48:28 +04:00
}
2014-03-07 01:48:26 +04:00
2013-08-26 15:59:38 +04:00
netif_receive_skb ( skb ) ;
2011-03-15 03:06:18 +03:00
}
2013-08-26 15:59:38 +04:00
return work_done ;
2011-03-15 03:06:18 +03:00
}
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *)ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}
2011-03-15 03:06:18 +03:00
/* Called after netfront has transmitted */
2014-06-04 13:30:42 +04:00
int xenvif_tx_action ( struct xenvif_queue * queue , int budget )
2011-03-15 03:06:18 +03:00
{
2014-04-02 21:04:58 +04:00
unsigned nr_mops , nr_cops = 0 ;
2014-03-07 01:48:26 +04:00
int work_done , ret ;
2011-03-15 03:06:18 +03:00
2014-06-04 13:30:42 +04:00
if ( unlikely ( ! tx_work_todo ( queue ) ) )
2013-08-26 15:59:38 +04:00
return 0 ;
2014-06-04 13:30:42 +04:00
xenvif_tx_build_gops ( queue , budget , & nr_cops , & nr_mops ) ;
2011-03-15 03:06:18 +03:00
2014-04-02 21:04:58 +04:00
if ( nr_cops = = 0 )
2013-08-26 15:59:38 +04:00
return 0 ;
2014-06-04 13:30:42 +04:00
gnttab_batch_copy ( queue - > tx_copy_ops , nr_cops ) ;
2014-04-02 21:04:58 +04:00
if ( nr_mops ! = 0 ) {
2014-06-04 13:30:42 +04:00
ret = gnttab_map_refs ( queue - > tx_map_ops ,
2014-04-02 21:04:58 +04:00
NULL ,
2014-06-04 13:30:42 +04:00
queue - > pages_to_map ,
2014-04-02 21:04:58 +04:00
nr_mops ) ;
BUG_ON ( ret ) ;
}
2011-03-15 03:06:18 +03:00
2014-06-04 13:30:42 +04:00
work_done = xenvif_tx_submit ( queue ) ;
2011-03-15 03:06:18 +03:00
2013-08-26 15:59:38 +04:00
return work_done ;
2011-03-15 03:06:18 +03:00
}
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req,
			 pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void make_tx_response(struct xenvif_queue *queue,
                             struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
                             s8 st)
{
        RING_IDX i = queue->tx.rsp_prod_pvt;
        struct xen_netif_tx_response *resp;

        resp = RING_GET_RESPONSE(&queue->tx, i);
        resp->id = txp->id;
        resp->status = st;

        while (extra_count-- != 0)
                RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

        queue->tx.rsp_prod_pvt = ++i;
}

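/* Publish any queued Tx responses to the frontend and kick the Tx event
 * channel if the ring macro says a notification is needed.
 */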
static void push_tx_responses(struct xenvif_queue *queue)
{
        int notify;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
}

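/* Synchronously unmap the grant backing a single pending Tx slot and clear
 * the stored grant handle. An unmap failure is treated as fatal (BUG()),
 * since the foreign mapping could not be removed.
 */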
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
        int ret;
        struct gnttab_unmap_grant_ref tx_unmap_op;

        gnttab_set_unmap_op(&tx_unmap_op,
                            idx_to_kaddr(queue, pending_idx),
                            GNTMAP_host_map,
                            queue->grant_tx_handle[pending_idx]);
        xenvif_grant_handle_reset(queue, pending_idx);

        ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
                                &queue->mmap_pages[pending_idx], 1);
        if (ret) {
                netdev_err(queue->vif->dev,
                           "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
                           ret,
                           pending_idx,
                           tx_unmap_op.host_addr,
                           tx_unmap_op.handle,
                           tx_unmap_op.status);
                BUG();
        }
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
                return 1;

        return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
        return queue->dealloc_cons != queue->dealloc_prod;
}

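/* Unmap whichever of the Tx/Rx shared rings are currently mapped from the
 * frontend. Safe on a partially set-up queue, so it also serves as the
 * error path of xenvif_map_frontend_data_rings().
 */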
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
        if (queue->tx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
                                        queue->tx.sring);
        if (queue->rx.sring)
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
                                        queue->rx.sring);
}

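/* Map the Tx and Rx shared rings granted by the frontend (one grant ref
 * each) into the backend's address space and initialise the back-ring
 * bookkeeping. On failure, anything already mapped is torn down again.
 */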
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
                                   grant_ref_t tx_ring_ref,
                                   grant_ref_t rx_ring_ref)
{
        void *addr;
        struct xen_netif_tx_sring *txs;
        struct xen_netif_rx_sring *rxs;
        int err = -ENOMEM;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                     &tx_ring_ref, 1, &addr);
        if (err)
                goto err;

        txs = (struct xen_netif_tx_sring *)addr;
        BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
                                     &rx_ring_ref, 1, &addr);
        if (err)
                goto err;

        rxs = (struct xen_netif_rx_sring *)addr;
        BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

        return 0;

err:
        xenvif_unmap_frontend_data_rings(queue);
        return err;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
        /* Dealloc thread must remain running until all inflight
         * packets complete.
         */
        return kthread_should_stop() &&
                !atomic_read(&queue->inflight_packets);
}

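/* Per-queue kernel thread that performs deferred unmapping of Tx grants.
 * It sleeps until dealloc work arrives or it is asked to stop, and it only
 * exits once all in-flight packets have completed.
 */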
int xenvif_dealloc_kthread(void *data)
{
        struct xenvif_queue *queue = data;

        for (;;) {
                wait_event_interruptible(queue->dealloc_wq,
                                         tx_dealloc_work_todo(queue) ||
                                         xenvif_dealloc_kthread_should_stop(queue));
                if (xenvif_dealloc_kthread_should_stop(queue))
                        break;

                xenvif_tx_dealloc_action(queue);
                cond_resched();
        }

        /* Unmap anything remaining */
        if (tx_dealloc_work_todo(queue))
                xenvif_tx_dealloc_action(queue);

        return 0;
}

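/* Fill the next control-ring response slot, echoing the request's id and
 * type and carrying the given status and data.
 */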
static void make_ctrl_response(struct xenvif *vif,
                               const struct xen_netif_ctrl_request *req,
                               u32 status, u32 data)
{
        RING_IDX idx = vif->ctrl.rsp_prod_pvt;
        struct xen_netif_ctrl_response rsp = {
                .id = req->id,
                .type = req->type,
                .status = status,
                .data = data,
        };

        *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
        vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
        int notify;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
        if (notify)
                notify_remote_via_irq(vif->ctrl_irq);
}

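/* Handle a single control-ring request. All supported request types
 * configure receive-side hashing (algorithm, flags, key, mapping size and
 * mapping); anything else is answered with
 * XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED. Every request gets exactly one
 * response, which is pushed to the frontend immediately.
 */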
static void process_ctrl_request(struct xenvif *vif,
                                 const struct xen_netif_ctrl_request *req)
{
        u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
        u32 data = 0;

        switch (req->type) {
        case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
                status = xenvif_set_hash_alg(vif, req->data[0]);
                break;

        case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
                status = xenvif_get_hash_flags(vif, &data);
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
                status = xenvif_set_hash_flags(vif, req->data[0]);
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
                status = xenvif_set_hash_key(vif, req->data[0],
                                             req->data[1]);
                break;

        case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
                status = XEN_NETIF_CTRL_STATUS_SUCCESS;
                data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
                status = xenvif_set_hash_mapping_size(vif,
                                                      req->data[0]);
                break;

        case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
                status = xenvif_set_hash_mapping(vif, req->data[0],
                                                 req->data[1],
                                                 req->data[2]);
                break;

        default:
                break;
        }

        make_ctrl_response(vif, req, status, data);
        push_ctrl_response(vif);
}

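/* Drain the control ring: snapshot the request producer, copy each
 * outstanding request out of the shared ring and process it, then publish
 * the new consumer index and re-arm the event pointer. The outer loop
 * repeats in case more requests arrived while the batch was being handled.
 */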
static void xenvif_ctrl_action(struct xenvif *vif)
{
        for (;;) {
                RING_IDX req_prod, req_cons;

                req_prod = vif->ctrl.sring->req_prod;
                req_cons = vif->ctrl.req_cons;

                /* Make sure we can see requests before we process them. */
                rmb();

                if (req_cons == req_prod)
                        break;

                while (req_cons != req_prod) {
                        struct xen_netif_ctrl_request req;

                        RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
                        req_cons++;

                        process_ctrl_request(vif, &req);
                }

                vif->ctrl.req_cons = req_cons;
                vif->ctrl.sring->req_event = req_cons + 1;
        }
}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
        if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
                return 1;

        return 0;
}

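/* Interrupt handler for the control ring's event channel: keep running the
 * control action while unconsumed requests remain.
 */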
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
        struct xenvif *vif = data;

        while (xenvif_ctrl_work_todo(vif))
                xenvif_ctrl_action(vif);

        return IRQ_HANDLED;
}

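/* Module init: bail out when not running in a Xen domain, choose a default
 * number of queues per interface, clamp fatal_skb_slots to at least the
 * legacy slot count, register the xenbus backend and create the optional
 * debugfs root.
 */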
static int __init netback_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        /* Allow as many queues as there are CPUs but max. 8 if user has not
         * specified a value.
         */
        if (xenvif_max_queues == 0)
                xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
                                          num_online_cpus());

        if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
                pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
                        fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
                fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
        }

        rc = xenvif_xenbus_init();
        if (rc)
                goto failed_init;

#ifdef CONFIG_DEBUG_FS
        xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
        if (IS_ERR_OR_NULL(xen_netback_dbg_root))
                pr_warn("Init of debugfs returned %ld!\n",
                        PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

        return 0;

failed_init:
        return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
        if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
                debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */

        xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");