// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
#include <linux/module.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

static LIST_HEAD(vport_ops_list);

/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 * ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
	dev_table = kcalloc(VPORT_HASH_BUCKETS, sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	return 0;
}

/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shut down the vport subsystem.
 */
void ovs_vport_exit(void)
{
	kfree(dev_table);
}

static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);

	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

int __ovs_vport_ops_register(struct vport_ops *ops)
{
	int err = -EEXIST;
	struct vport_ops *o;

	ovs_lock();
	list_for_each_entry(o, &vport_ops_list, list)
		if (ops->type == o->type)
			goto errout;

	list_add_tail(&ops->list, &vport_ops_list);
	err = 0;
errout:
	ovs_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(__ovs_vport_ops_register);

void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
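
/*
 * Illustrative sketch (not part of this file): a tunnel vport implementation
 * fills in a struct vport_ops and registers it from its module init,
 * unregistering on exit.  The "foo" names below are hypothetical; see
 * vport-geneve.c or vport-gre.c for real users.
 *
 *	static struct vport_ops ovs_foo_vport_ops = {
 *		.type		= OVS_VPORT_TYPE_GENEVE,
 *		.create		= foo_create,
 *		.destroy	= ovs_netdev_tunnel_destroy,
 *		.send		= dev_queue_xmit,
 *	};
 *
 *	static int __init ovs_foo_tnl_init(void)
 *	{
 *		return ovs_vport_ops_register(&ovs_foo_vport_ops);
 *	}
 *
 *	static void __exit ovs_foo_tnl_exit(void)
 *	{
 *		ovs_vport_ops_unregister(&ovs_foo_vport_ops);
 *	}
 *
 * ovs_vport_ops_register() in vport.h is a wrapper that sets ops->owner to
 * THIS_MODULE and then calls __ovs_vport_ops_register() above.
 */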

/**
 * ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace
 * @name: name of port to find
 *
 * Must be called with ovs or RCU read lock.
 */
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node,
				 lockdep_ovsl_is_held())
		if (!strcmp(name, ovs_vport_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}
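
/*
 * Illustrative sketch (not part of this file): a reader that does not hold
 * ovs_mutex looks ports up under the RCU read lock.  The port name below is
 * only an example.
 *
 *	rcu_read_lock();
 *	vport = ovs_vport_locate(net, "vxlan_sys_4789");
 *	if (vport)
 *		... use vport while still inside the read-side section ...
 *	rcu_read_unlock();
 */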

/**
 * ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: information about new vport.
 *
 * Allocate and initialize a new vport defined by @ops.  The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv().  Some parameters of the vport will be initialized from @parms.
 * Vports that are no longer needed should be released with
 * ovs_vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
	if (!vport->upcall_stats) {
		err = -ENOMEM;
		goto err_kfree_vport;
	}

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
		err = -EINVAL;
		goto err_free_percpu;
	}

	return vport;

err_free_percpu:
	free_percpu(vport->upcall_stats);
err_kfree_vport:
	kfree(vport);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
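
/*
 * Illustrative sketch (not part of this file): a vport implementation that
 * needs per-port state passes its size as @priv_size and then reaches the
 * private area through vport_priv() from vport.h.  "struct foo_port",
 * foo_create() and ovs_foo_vport_ops are hypothetical.
 *
 *	struct foo_port {
 *		u16 dst_port;
 *	};
 *
 *	static struct vport *foo_create(const struct vport_parms *parms)
 *	{
 *		struct vport *vport;
 *		struct foo_port *foo;
 *
 *		vport = ovs_vport_alloc(sizeof(struct foo_port),
 *					&ovs_foo_vport_ops, parms);
 *		if (IS_ERR(vport))
 *			return vport;
 *
 *		foo = vport_priv(vport);
 *		foo->dst_port = 6081;
 *		return vport;
 *	}
 */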

/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with ovs_vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from RCU callback or error path, therefore
	 * it is safe to use raw dereference.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	free_percpu(vport->upcall_stats);
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);

static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
{
	struct vport_ops *ops;

	list_for_each_entry(ops, &vport_ops_list, list)
		if (ops->type == parms->type)
			return ops;

	return NULL;
}

/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type).  ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     ovs_vport_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
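
/*
 * Illustrative sketch (not part of this file): a kernel caller such as the
 * datapath netlink code builds a struct vport_parms and retries when
 * ovs_vport_add() returns -EAGAIN, which signals that the matching vport
 * module was just loaded.  The field values below are only an example.
 *
 *	struct vport_parms parms = {
 *		.name		= name,
 *		.type		= OVS_VPORT_TYPE_INTERNAL,
 *		.options	= NULL,
 *		.dp		= dp,
 *		.port_no	= port_no,
 *		.upcall_portids	= upcall_pids,
 *	};
 *	struct vport *vport;
 *
 *	ovs_lock();
 *	do {
 *		vport = ovs_vport_add(&parms);
 *	} while (IS_ERR(vport) && PTR_ERR(vport) == -EAGAIN);
 *	ovs_unlock();
 */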

/**
 * ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type).  ovs_mutex must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  ovs_mutex must
 * be held.
 */
void ovs_vport_del(struct vport *vport)
{
	hlist_del_rcu(&vport->hash_node);
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}

/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	const struct rtnl_link_stats64 *dev_stats;
	struct rtnl_link_stats64 temp;

	dev_stats = dev_get_stats(vport->dev, &temp);
	stats->rx_errors = dev_stats->rx_errors;
	stats->tx_errors = dev_stats->tx_errors;
	stats->tx_dropped = dev_stats->tx_dropped;
	stats->rx_dropped = dev_stats->rx_dropped;

	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_packets;
	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_packets;
}

/**
 * ovs_vport_get_upcall_stats - retrieve upcall stats
 *
 * @vport: vport from which to retrieve the stats.
 * @skb: sk_buff where upcall stats should be appended.
 *
 * Retrieves upcall stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_stats(struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;
	int i;
	__u64 tx_success = 0;
	__u64 tx_fail = 0;

	for_each_possible_cpu(i) {
		const struct vport_upcall_stats_percpu *stats;
		unsigned int start;

		stats = per_cpu_ptr(vport->upcall_stats, i);
		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tx_success += u64_stats_read(&stats->n_success);
			tx_fail += u64_stats_read(&stats->n_fail);
		} while (u64_stats_fetch_retry(&stats->syncp, start));
	}

	nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_UPCALL_STATS);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_ATTR_SUCCESS, tx_success,
			      OVS_VPORT_ATTR_PAD)) {
		nla_nest_cancel(skb, nla);
		return -EMSGSIZE;
	}

	if (nla_put_u64_64bit(skb, OVS_VPORT_UPCALL_ATTR_FAIL, tx_fail,
			      OVS_VPORT_ATTR_PAD)) {
		nla_nest_cancel(skb, nla);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nla);

	return 0;
}

/**
 * ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred.  If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	if (!vport->ops->get_options)
		return 0;

	nla = nla_nest_start_noflag(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	err = vport->ops->get_options(vport, skb);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	nla_nest_end(skb, nla);
	return 0;
}

/**
 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
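
/*
 * Layout note (paraphrased from struct vport_portids in vport.h): the struct
 * ends in a flexible u32 ids[] array, which is why the allocation above is
 * sized as sizeof(*vport_portids) + nla_len(ids); the netlink attribute
 * payload is copied verbatim into ids[] and n_ids counts its u32 entries.
 * The new table is published with rcu_assign_pointer() and the old one is
 * freed with kfree_rcu(), so lockless readers such as
 * ovs_vport_find_upcall_portid() always see a consistent array.
 */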

/**
 * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
 *
 * @vport: vport from which to retrieve the portids.
 * @skb: sk_buff where portids should be appended.
 *
 * Retrieves the configuration of the given vport, appending the
 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
 * portids to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
 * If an error occurs, @skb is left unmodified.  Must be called with
 * ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_portids(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;

	ids = rcu_dereference_ovsl(vport->upcall_portids);

	if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
			       ids->n_ids * sizeof(u32), (void *)ids->ids);
	else
		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}

/**
 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb on which the missed packet was received.
 *
 * Uses skb_get_hash() to select the upcall portid to send the
 * upcall.
 *
 * Returns the portid of the target socket.  Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 ids_index;
	u32 hash;

	ids = rcu_dereference(vport->upcall_portids);

	/* If there is only one portid, select it in the fast-path. */
	if (ids->n_ids == 1)
		return ids->ids[0];
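
	/* reciprocal_divide(hash, ids->rn_ids) is hash / n_ids computed via
	 * reciprocal multiplication, so the subtraction below is simply
	 * hash % n_ids: the flow hash picks one portid from ids[].
	 */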
	hash = skb_get_hash(skb);
	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
	return ids->ids[ids_index];
}

/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel (if any) that carried packet
 *
 * Must be called with rcu_read_lock.  The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		      const struct ip_tunnel_info *tun_info)
{
	struct sw_flow_key key;
	int error;

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->mru = 0;
	OVS_CB(skb)->cutlen = 0;
	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
		u32 mark;

		mark = skb->mark;
		skb_scrub_packet(skb, true);
		skb->mark = mark;
		tun_info = NULL;
	}

	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return error;
	}
	ovs_dp_process_packet(skb, &key);
	return 0;
}

static int packet_length(const struct sk_buff *skb,
			 struct net_device *dev)
{
	int length = skb->len - dev->hard_header_len;

	if (!skb_vlan_tag_present(skb) &&
	    eth_type_vlan(skb->protocol))
		length -= VLAN_HLEN;

	/* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
	 * (ETH_LEN + VLAN_HLEN) in addition to the mtu value, but almost none
	 * account for 802.1ad. e.g. is_skb_forwardable().
	 */
	return length > 0 ? length : 0;
}

void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
	int mtu = vport->dev->mtu;

	switch (vport->dev->type) {
	case ARPHRD_NONE:
		if (mac_proto == MAC_PROTO_ETHERNET) {
			skb_reset_network_header(skb);
			skb_reset_mac_len(skb);
			skb->protocol = htons(ETH_P_TEB);
		} else if (mac_proto != MAC_PROTO_NONE) {
			WARN_ON_ONCE(1);
			goto drop;
		}
		break;
	case ARPHRD_ETHER:
		if (mac_proto != MAC_PROTO_ETHERNET)
			goto drop;
		break;
	default:
		goto drop;
	}

	if (unlikely(packet_length(skb, vport->dev) > mtu &&
		     !skb_is_gso(skb))) {
		vport->dev->stats.tx_errors++;
		if (vport->dev->flags & IFF_UP)
			net_warn_ratelimited("%s: dropped over-mtu packet: "
					     "%d > %d\n", vport->dev->name,
					     packet_length(skb, vport->dev),
					     mtu);
		goto drop;
	}

	skb->dev = vport->dev;
	skb_clear_tstamp(skb);
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}