2019-05-28 19:57:21 +03:00
// SPDX-License-Identifier: GPL-2.0-only
2010-03-30 17:56:25 +04:00
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */
2010-09-06 01:31:11 +04:00
# define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
2010-03-30 17:56:25 +04:00
# include <linux/kernel.h>
# include <linux/if_arp.h>
# include <linux/net.h>
# include <linux/netdevice.h>
2011-05-13 06:44:00 +04:00
# include <linux/mutex.h>
2011-05-27 17:12:25 +04:00
# include <linux/module.h>
2011-12-04 15:22:54 +04:00
# include <linux/spinlock.h>
2010-03-30 17:56:25 +04:00
# include <net/netns/generic.h>
# include <net/net_namespace.h>
# include <net/pkt_sched.h>
# include <net/caif/caif_device.h>
# include <net/caif/caif_layer.h>
2014-02-09 18:29:04 +04:00
# include <net/caif/caif_dev.h>
2010-03-30 17:56:25 +04:00
# include <net/caif/cfpkt.h>
# include <net/caif/cfcnfg.h>
2011-11-30 13:22:47 +04:00
# include <net/caif/cfserl.h>
2010-03-30 17:56:25 +04:00
MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;		/* CAIF stack layer for this device */
	struct list_head list;		/* linkage in per-namespace caifdevs list (RCU) */
	struct net_device *netdev;	/* underlying net device (held via dev_hold) */
	int __percpu *pcpu_refcnt;	/* per-CPU refcount, see caifd_hold()/caifd_put() */
	spinlock_t flow_lock;		/* protects the xoff* fields below */
	struct sk_buff *xoff_skb;	/* skb whose destructor was hijacked for flow-on */
	void (*xoff_skb_dtor)(struct sk_buff *skb); /* original destructor of xoff_skb */
	bool xoff;			/* true while flow-off has been signalled upwards */
};
/* Per-network-namespace list of enrolled CAIF devices */
struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};
/* Per-network-namespace CAIF state, registered via pernet_operations */
struct caif_net {
	struct cfcnfg *cfg;			/* the CAIF configuration object */
	struct caif_device_entry_list caifdevs;	/* devices enrolled in this netns */
};
netns: make struct pernet_operations::id unsigned int
Make struct pernet_operations::id unsigned.
There are 2 reasons to do so:
1)
This field is really an index into an zero based array and
thus is unsigned entity. Using negative value is out-of-bound
access by definition.
2)
On x86_64 unsigned 32-bit data which are mixed with pointers
via array indexing or offsets added or subtracted to pointers
are preferred to signed 32-bit data.
"int" being used as an array index needs to be sign-extended
to 64-bit before being used.
void f(long *p, int i)
{
g(p[i]);
}
roughly translates to
movsx rsi, esi
mov rdi, [rsi+...]
call g
MOVSX is 3 byte instruction which isn't necessary if the variable is
unsigned because x86_64 is zero extending by default.
Now, there is net_generic() function which, you guessed it right, uses
"int" as an array index:
static inline void *net_generic(const struct net *net, int id)
{
...
ptr = ng->ptr[id - 1];
...
}
And this function is used a lot, so those sign extensions add up.
Patch snipes ~1730 bytes on allyesconfig kernel (without all junk
messing with code generation):
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
Unfortunately some functions actually grow bigger.
This is a seemingly random artifact of code generation with register
allocator being used differently. gcc decides that some variable
needs to live in new r8+ registers and every access now requires REX
prefix. Or it is shifted into r12, so [r12+0] addressing mode has to be
used which is longer than [r8]
However, overall balance is in negative direction:
add/remove: 0/0 grow/shrink: 70/598 up/down: 396/-2126 (-1730)
function old new delta
nfsd4_lock 3886 3959 +73
tipc_link_build_proto_msg 1096 1140 +44
mac80211_hwsim_new_radio 2776 2808 +32
tipc_mon_rcv 1032 1058 +26
svcauth_gss_legacy_init 1413 1429 +16
tipc_bcbase_select_primary 379 392 +13
nfsd4_exchange_id 1247 1260 +13
nfsd4_setclientid_confirm 782 793 +11
...
put_client_renew_locked 494 480 -14
ip_set_sockfn_get 730 716 -14
geneve_sock_add 829 813 -16
nfsd4_sequence_done 721 703 -18
nlmclnt_lookup_host 708 686 -22
nfsd4_lockt 1085 1063 -22
nfs_get_client 1077 1050 -27
tcf_bpf_init 1106 1076 -30
nfsd4_encode_fattr 5997 5930 -67
Total: Before=154856051, After=154854321, chg -0.00%
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-11-17 04:58:21 +03:00
/* Net-namespace generic id, assigned by register_pernet_subsys() */
static unsigned int caif_net_id;
/* TX-queue fill watermark for signalling flow-off, in percent of tx_queue_len */
static int q_high = 50; /* Percent */
2011-05-13 06:44:05 +04:00
struct cfcnfg * get_cfcnfg ( struct net * net )
{
struct caif_net * caifn ;
caifn = net_generic ( net , caif_net_id ) ;
return caifn - > cfg ;
}
EXPORT_SYMBOL ( get_cfcnfg ) ;
2010-03-30 17:56:25 +04:00
static struct caif_device_entry_list * caif_device_list ( struct net * net )
{
struct caif_net * caifn ;
caifn = net_generic ( net , caif_net_id ) ;
return & caifn - > caifdevs ;
}
2011-05-13 06:44:00 +04:00
static void caifd_put ( struct caif_device_entry * e )
{
2011-12-22 21:58:51 +04:00
this_cpu_dec ( * e - > pcpu_refcnt ) ;
2011-05-13 06:44:00 +04:00
}
static void caifd_hold ( struct caif_device_entry * e )
{
2011-12-22 21:58:51 +04:00
this_cpu_inc ( * e - > pcpu_refcnt ) ;
2011-05-13 06:44:00 +04:00
}
static int caifd_refcnt_read ( struct caif_device_entry * e )
{
int i , refcnt = 0 ;
for_each_possible_cpu ( i )
refcnt + = * per_cpu_ptr ( e - > pcpu_refcnt , i ) ;
return refcnt ;
}
2010-03-30 17:56:25 +04:00
/* Allocate new CAIF device. */
static struct caif_device_entry * caif_device_alloc ( struct net_device * dev )
{
struct caif_device_entry * caifd ;
2011-05-13 06:44:00 +04:00
2011-09-02 06:19:23 +04:00
caifd = kzalloc ( sizeof ( * caifd ) , GFP_KERNEL ) ;
2010-03-30 17:56:25 +04:00
if ( ! caifd )
return NULL ;
2011-05-13 06:44:00 +04:00
caifd - > pcpu_refcnt = alloc_percpu ( int ) ;
2011-09-02 06:19:23 +04:00
if ( ! caifd - > pcpu_refcnt ) {
kfree ( caifd ) ;
return NULL ;
}
2010-03-30 17:56:25 +04:00
caifd - > netdev = dev ;
2011-05-13 06:44:00 +04:00
dev_hold ( dev ) ;
2010-03-30 17:56:25 +04:00
return caifd ;
}
/*
 * Look up the CAIF device entry for @dev in the per-namespace list.
 * Caller must hold rcu_read_lock() or the RTNL lock (the
 * lockdep_rtnl_is_held() argument lets lockdep accept either).
 * Returns NULL if the device is not enrolled.
 */
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
				lockdep_rtnl_is_held()) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}
2013-03-02 13:45:19 +04:00
/*
 * skb destructor installed by transmit() when flow was turned off.
 * Runs when the device releases the skb: restores and invokes the
 * original destructor, then signals flow-on towards the CAIF stack.
 */
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	/* Pin the entry so it survives rcu_read_unlock() below */
	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = 0;
	dtor = caifd->xoff_skb_dtor;

	/* NETDEV_DOWN may have restored the original destructor already */
	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	/* Call the original destructor outside the lock */
	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}
2010-03-30 17:56:25 +04:00
/*
 * Transmit a CAIF packet on the underlying net device.
 * Implements flow control: when the device TX queue is stopped or its
 * qdisc has built up past the q_high watermark, flow-off is signalled
 * upwards and the skb's destructor is hijacked (caif_flow_cb) so that
 * flow-on is signalled once the device releases the packet.
 * Returns 0 on success or a negative error code.
 */
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
		container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	/* Already in flow-off state: nothing more to signal */
	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long*/
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* can check for explicit qdisc len value only !NOLOCK,
		 * always set flow off otherwise
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		/* Another path already turned flow off; drop the lock and go */
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off, we do this by temporary hi-jacking this
	 * skb's destructor function, and replace it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = 1;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	/* dev_queue_xmit may return positive NET_XMIT codes; map to -EIO */
	if (err > 0)
		err = -EIO;

	return err;
}
/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	/* Drop if the device is not enrolled, has no receiver, or is down */
	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}
/* Packet handler registered with dev_add_pack() for ETH_P_CAIF frames */
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};
static void dev_flowctrl ( struct net_device * dev , int on )
{
2011-05-13 06:44:00 +04:00
struct caif_device_entry * caifd ;
rcu_read_lock ( ) ;
caifd = caif_get ( dev ) ;
if ( ! caifd | | ! caifd - > layer . up | | ! caifd - > layer . up - > ctrlcmd ) {
rcu_read_unlock ( ) ;
2010-03-30 17:56:25 +04:00
return ;
2011-05-13 06:44:00 +04:00
}
caifd_hold ( caifd ) ;
rcu_read_unlock ( ) ;
2010-03-30 17:56:25 +04:00
caifd - > layer . up - > ctrlcmd ( caifd - > layer . up ,
on ?
_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND ,
caifd - > layer . id ) ;
2011-05-13 06:44:00 +04:00
caifd_put ( caifd ) ;
2010-03-30 17:56:25 +04:00
}
2011-11-30 13:22:47 +04:00
/*
 * Enroll a CAIF-capable net device with the CAIF stack.
 * @dev:          the net device to enroll
 * @caifdev:      device parameters (link selection, framing options)
 * @link_support: optional serial/framing layer, or NULL
 * @head_room:    extra header space required below the CAIF stack
 * @layer:        out: the CAIF physical layer created for this device
 * @rcv_func:     out, optional: set to the packet receive function
 *
 * NOTE(review): returns silently on allocation failure, leaving *layer
 * unset — callers cannot detect this; confirm callers tolerate it.
 */
void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
				      struct packet_type *,
				      struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	/* Map the device's link selection onto a PHY preference */
	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strlcpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
			     dev,
			     &caifd->layer,
			     pref,
			     link_support,
			     caifdev->use_fcs,
			     head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
2011-11-30 13:22:47 +04:00
2010-03-30 17:56:25 +04:00
/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	/* Ignore devices that are neither enrolled nor of CAIF type */
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		/* Already enrolled — nothing to do */
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		/* Optionally stack a serial framing layer below CAIF */
		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		/* Device coming up: clear any stale flow-off state */
		caifd->xoff = 0;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock so manipulating the skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = 0;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for
		 * the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Enrole device if CAIF Stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		/* Ensure no RCU readers still see the entry before freeing */
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}
/* Netdevice notifier hooking caif_device_notify() into device events */
static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};
/* Per-namespace Caif devices handling */
static int caif_init_net ( struct net * net )
{
struct caif_net * caifn = net_generic ( net , caif_net_id ) ;
INIT_LIST_HEAD ( & caifn - > caifdevs . list ) ;
2011-05-13 06:44:00 +04:00
mutex_init ( & caifn - > caifdevs . lock ) ;
2011-05-13 06:44:05 +04:00
caifn - > cfg = cfcnfg_create ( ) ;
2011-12-06 16:15:44 +04:00
if ( ! caifn - > cfg )
2011-05-13 06:44:05 +04:00
return - ENOMEM ;
2010-03-30 17:56:25 +04:00
return 0 ;
}
/*
 * Per-namespace teardown: unenroll every device, waiting (bounded)
 * for in-flight references to drain, then remove the CAIF config.
 */
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		/* Retry up to 10 times (~2.5s) while references remain */
		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		/* Ensure no RCU readers still see the entry before freeing */
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}
/* Per-network-namespace init/exit hooks; .size allocates struct caif_net */
static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};
/*
 * Initialize Caif devices list.
 * Registers the pernet subsystem, the netdevice notifier and the
 * ETH_P_CAIF packet handler. Returns 0 on success or a negative errno,
 * unwinding any partially completed registration.
 */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);
	if (result)
		return result;

	/*
	 * Unlike the original code, check the notifier registration:
	 * on failure the module must not load half-initialized.
	 */
	result = register_netdevice_notifier(&caif_device_notifier);
	if (result) {
		unregister_pernet_subsys(&caif_net_ops);
		return result;
	}

	dev_add_pack(&caif_packet_type);

	return 0;
}
/* Module unload: tear down registrations in reverse order of init. */
static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);