2013-12-11 19:05:37 +04:00
/*
2014-06-18 17:37:08 +04:00
Copyright ( c ) 2013 - 2014 Intel Corp .
2013-12-11 19:05:37 +04:00
This program is free software ; you can redistribute it and / or modify
it under the terms of the GNU General Public License version 2 and
only version 2 as published by the Free Software Foundation .
This program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details .
*/
# include <linux/if_arp.h>
# include <linux/netdevice.h>
# include <linux/etherdevice.h>
2014-06-18 17:37:09 +04:00
# include <linux/module.h>
2014-06-18 17:37:08 +04:00
# include <linux/debugfs.h>
2013-12-11 19:05:37 +04:00
# include <net/ipv6.h>
# include <net/ip6_route.h>
# include <net/addrconf.h>
2017-04-11 22:21:02 +03:00
# include <net/pkt_sched.h>
2013-12-11 19:05:37 +04:00
# include <net/bluetooth/bluetooth.h>
# include <net/bluetooth/hci_core.h>
# include <net/bluetooth/l2cap.h>
2014-03-05 17:29:05 +04:00
# include <net/6lowpan.h> /* for the compression support */
2013-12-11 19:05:37 +04:00
2014-06-18 17:37:08 +04:00
#define VERSION "0.1"

/* debugfs handles: .../bluetooth/6lowpan_enable and 6lowpan_control */
static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

/* Template for the virtual interface name (bt0, bt1, ...). */
#define IFACE_NAME_TEMPLATE "bt%d"
/* Per-skb private state, carried in skb->cb across the TX path. */
struct skb_cb {
	struct in6_addr addr;	/* peer IPv6 address */
	struct in6_addr gw;	/* next-hop cached by peer_lookup_dst() */
	struct l2cap_chan *chan;	/* channel to send this skb on */
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))
/* The devices list contains those devices that we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to
 * BT device is done via l2cap layer. There exists one
 * virtual device / one BT 6LoWPAN network (=hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
/* Serializes writers of bt_6lowpan_devices and the per-device peer
 * lists; readers traverse them under RCU.
 */
static DEFINE_SPINLOCK(devices_lock);

/* Runtime on/off switch, toggled through the 6lowpan_enable debugfs file. */
static bool enable_6lowpan;

/* We are listening incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;
2013-12-11 19:05:37 +04:00
/* One remote LE device we exchange 6lowpan traffic with. */
struct lowpan_peer {
	struct list_head list;
	struct rcu_head rcu;		/* deferred free via kfree_rcu() */
	struct l2cap_chan *chan;	/* IPSP data channel to the peer */

	/* peer addresses in various formats */
	unsigned char lladdr[ETH_ALEN];	/* EUI-48 link-layer address */
	struct in6_addr peer_addr;	/* IPv6 address derived from lladdr */
};

/* Private state of one virtual bt%d interface (one per hci device). */
struct lowpan_btle_dev {
	struct list_head list;		/* entry in bt_6lowpan_devices */

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count;		/* number of items in peers list */

	struct work_struct delete_netdev;	/* deferred unregister */
	struct delayed_work notify_peers;	/* neighbour-adv at startup */
};
2016-04-11 12:04:18 +03:00
/* Fetch our private state from the generic 6lowpan priv area. */
static inline struct lowpan_btle_dev *
lowpan_btle_dev(const struct net_device *netdev)
{
	return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
}
2016-04-11 12:04:18 +03:00
/* Link @peer into @dev's peer list; caller holds devices_lock. */
static inline void peer_add(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}
2016-04-11 12:04:18 +03:00
/* Unlink @peer from @dev; caller holds devices_lock. The peer memory
 * is reclaimed after an RCU grace period. Returns true when this was
 * the device's last peer.
 */
static inline bool peer_del(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_del_rcu(&peer->list);
	kfree_rcu(peer, rcu);

	/* Drop the module reference taken when the peer was added. */
	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}
2016-04-11 12:04:18 +03:00
static inline struct lowpan_peer * peer_lookup_ba ( struct lowpan_btle_dev * dev ,
2013-12-11 19:05:37 +04:00
bdaddr_t * ba , __u8 type )
{
2014-10-28 18:16:47 +03:00
struct lowpan_peer * peer ;
2013-12-11 19:05:37 +04:00
BT_DBG ( " peers %d addr %pMR type %d " , atomic_read ( & dev - > peer_count ) ,
ba , type ) ;
2014-10-28 18:16:47 +03:00
rcu_read_lock ( ) ;
list_for_each_entry_rcu ( peer , & dev - > peers , list ) {
2014-06-18 17:37:08 +04:00
BT_DBG ( " dst addr %pMR dst type %d " ,
& peer - > chan - > dst , peer - > chan - > dst_type ) ;
2013-12-11 19:05:37 +04:00
2014-06-18 17:37:08 +04:00
if ( bacmp ( & peer - > chan - > dst , ba ) )
2013-12-11 19:05:37 +04:00
continue ;
2014-10-28 18:16:47 +03:00
if ( type = = peer - > chan - > dst_type ) {
rcu_read_unlock ( ) ;
2014-06-18 17:37:08 +04:00
return peer ;
2014-10-28 18:16:47 +03:00
}
2014-06-18 17:37:08 +04:00
}
2014-10-28 18:16:47 +03:00
rcu_read_unlock ( ) ;
2014-06-18 17:37:08 +04:00
return NULL ;
}
2016-04-11 12:04:18 +03:00
static inline struct lowpan_peer *
__peer_lookup_chan ( struct lowpan_btle_dev * dev , struct l2cap_chan * chan )
2014-06-18 17:37:08 +04:00
{
2014-10-28 18:16:47 +03:00
struct lowpan_peer * peer ;
2014-06-18 17:37:08 +04:00
2014-10-28 18:16:47 +03:00
list_for_each_entry_rcu ( peer , & dev - > peers , list ) {
2014-06-18 17:37:08 +04:00
if ( peer - > chan = = chan )
2013-12-11 19:05:37 +04:00
return peer ;
}
return NULL ;
}
2016-04-11 12:04:18 +03:00
static inline struct lowpan_peer *
__peer_lookup_conn ( struct lowpan_btle_dev * dev , struct l2cap_conn * conn )
2013-12-11 19:05:37 +04:00
{
2014-10-28 18:16:47 +03:00
struct lowpan_peer * peer ;
2013-12-11 19:05:37 +04:00
2014-10-28 18:16:47 +03:00
list_for_each_entry_rcu ( peer , & dev - > peers , list ) {
2014-06-18 17:37:08 +04:00
if ( peer - > chan - > conn = = conn )
2013-12-11 19:05:37 +04:00
return peer ;
}
return NULL ;
}
2016-04-11 12:04:18 +03:00
/* Pick the peer that should receive @skb, using the IPv6 routing
 * decision when available, otherwise the gateway cached in the skb
 * control block. Returns NULL when no suitable peer exists.
 */
static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct lowpan_peer *peer;
	struct in6_addr *nexthop;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	int count = atomic_read(&dev->peer_count);

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	/* If we have multiple 6lowpan peers, then check where we should
	 * send the packet. If only one peer exists, then we can send the
	 * packet right away.
	 */
	if (count == 1) {
		rcu_read_lock();
		peer = list_first_or_null_rcu(&dev->peers, struct lowpan_peer,
					      list);
		rcu_read_unlock();
		return peer;
	}

	if (!rt) {
		/* No dst attached: fall back to the gateway remembered by
		 * an earlier call (see memcpy below), if any.
		 */
		nexthop = &lowpan_cb(skb)->gw;

		if (ipv6_addr_any(nexthop))
			return NULL;
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %d ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	rcu_read_unlock();

	return NULL;
}
2013-12-11 19:05:37 +04:00
/* Search every registered 6lowpan device for a peer on @conn. */
static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		peer = __peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	rcu_read_unlock();

	return peer;
}
2016-04-11 12:04:18 +03:00
/* Map the HCI device behind @conn to its 6lowpan wrapper, or NULL. */
static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	rcu_read_unlock();

	return dev;
}
/* Hand a copy of @skb to the network stack; the caller keeps
 * ownership of the original skb.
 */
static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return NET_RX_DROP;

	return netif_rx_ni(skb_cp);
}
2014-10-23 18:40:56 +04:00
static int iphc_decompress ( struct sk_buff * skb , struct net_device * netdev ,
2017-04-03 17:48:55 +03:00
struct lowpan_peer * peer )
2013-12-11 19:05:37 +04:00
{
2017-03-12 11:19:33 +03:00
const u8 * saddr ;
2016-04-11 12:04:18 +03:00
struct lowpan_btle_dev * dev ;
2013-12-11 19:05:37 +04:00
2016-04-11 12:04:18 +03:00
dev = lowpan_btle_dev ( netdev ) ;
2013-12-11 19:05:37 +04:00
2017-03-12 11:19:37 +03:00
saddr = peer - > lladdr ;
2013-12-11 19:05:37 +04:00
2017-03-12 11:19:37 +03:00
return lowpan_header_decompress ( skb , netdev , netdev - > dev_addr , saddr ) ;
2013-12-11 19:05:37 +04:00
}
/* Deliver one inbound 6lowpan frame to the IPv6 stack. The frame is
 * either raw IPv6 behind a 1-byte dispatch, or IPHC-compressed, in
 * which case it is decompressed first using @peer's address context.
 * Returns NET_RX_SUCCESS or NET_RX_DROP; @skb is consumed on success
 * (the caller still owns it on drop).
 */
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct lowpan_peer *peer)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(skb, 1);

		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->dev = dev;

		ret = iphc_decompress(local_skb, dev, peer);
		if (ret < 0) {
			BT_DBG("iphc_decompress failed: %d", ret);
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		if (give_skb_to_upper(local_skb, dev)
				!= NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		BT_DBG("unknown packet type");
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}
/* Packet from BT LE device */
2014-06-18 17:37:08 +04:00
static int chan_recv_cb ( struct l2cap_chan * chan , struct sk_buff * skb )
2013-12-11 19:05:37 +04:00
{
2016-04-11 12:04:18 +03:00
struct lowpan_btle_dev * dev ;
2013-12-11 19:05:37 +04:00
struct lowpan_peer * peer ;
int err ;
2014-06-18 17:37:08 +04:00
peer = lookup_peer ( chan - > conn ) ;
2013-12-11 19:05:37 +04:00
if ( ! peer )
return - ENOENT ;
2014-06-18 17:37:08 +04:00
dev = lookup_dev ( chan - > conn ) ;
2013-12-12 11:53:21 +04:00
if ( ! dev | | ! dev - > netdev )
2013-12-11 19:05:37 +04:00
return - ENOENT ;
2017-04-03 17:48:55 +03:00
err = recv_pkt ( skb , dev - > netdev , peer ) ;
2014-06-18 17:37:08 +04:00
if ( err ) {
BT_DBG ( " recv pkt %d " , err ) ;
err = - EAGAIN ;
2013-12-11 19:05:37 +04:00
}
2014-06-18 17:37:08 +04:00
return err ;
2013-12-11 19:05:37 +04:00
}
2014-09-29 17:37:25 +04:00
/* Compress the outgoing IPv6 header of @skb and resolve its peer.
 * Returns < 0 on error, 0 for a multicast destination, 1 for unicast;
 * for unicast, the peer's bdaddr and type are stored through
 * @peer_addr/@peer_addr_type and its channel cached in the skb cb.
 */
static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct ipv6hdr *hdr;
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	u8 *daddr;
	int err, status = 0;

	hdr = ipv6_hdr(skb);

	dev = lowpan_btle_dev(netdev);

	/* Copy out the destination before compression rewrites the header. */
	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		lowpan_cb(skb)->chan = NULL;
		daddr = NULL;
	} else {
		BT_DBG("dest IP %pI6c", &ipv6_daddr);

		/* The packet might be sent to 6lowpan interface
		 * because of routing (either via default route
		 * or user set route) so get peer according to
		 * the destination address.
		 */
		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
		if (!peer) {
			BT_DBG("no such peer");
			return -ENOENT;
		}

		daddr = peer->lladdr;
		*peer_addr = peer->chan->dst;
		*peer_addr_type = peer->chan->dst_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}
static int header_create ( struct sk_buff * skb , struct net_device * netdev ,
unsigned short type , const void * _daddr ,
const void * _saddr , unsigned int len )
{
if ( type ! = ETH_P_IPV6 )
return - EINVAL ;
return 0 ;
2013-12-11 19:05:37 +04:00
}
/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);

	err = l2cap_chan_send(chan, &msg, skb->len);
	if (err > 0) {
		/* Positive return is the number of bytes queued. */
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (err < 0)
		netdev->stats.tx_errors++;

	return err;
}
2014-10-01 16:59:15 +04:00
static int send_mcast_pkt ( struct sk_buff * skb , struct net_device * netdev )
2013-12-11 19:05:37 +04:00
{
struct sk_buff * local_skb ;
2016-04-11 12:04:18 +03:00
struct lowpan_btle_dev * entry ;
2014-10-01 16:59:15 +04:00
int err = 0 ;
2013-12-11 19:05:37 +04:00
2014-10-28 18:16:47 +03:00
rcu_read_lock ( ) ;
2013-12-11 19:05:37 +04:00
2014-10-28 18:16:47 +03:00
list_for_each_entry_rcu ( entry , & bt_6lowpan_devices , list ) {
struct lowpan_peer * pentry ;
2016-04-11 12:04:18 +03:00
struct lowpan_btle_dev * dev ;
2013-12-11 19:05:37 +04:00
if ( entry - > netdev ! = netdev )
continue ;
2016-04-11 12:04:18 +03:00
dev = lowpan_btle_dev ( entry - > netdev ) ;
2013-12-11 19:05:37 +04:00
2014-10-28 18:16:47 +03:00
list_for_each_entry_rcu ( pentry , & dev - > peers , list ) {
2014-10-01 16:59:15 +04:00
int ret ;
2013-12-11 19:05:37 +04:00
local_skb = skb_clone ( skb , GFP_ATOMIC ) ;
2014-09-29 17:37:25 +04:00
BT_DBG ( " xmit %s to %pMR type %d IP %pI6c chan %p " ,
netdev - > name ,
& pentry - > chan - > dst , pentry - > chan - > dst_type ,
& pentry - > peer_addr , pentry - > chan ) ;
2014-10-01 16:59:15 +04:00
ret = send_pkt ( pentry - > chan , local_skb , netdev ) ;
if ( ret < 0 )
err = ret ;
2013-12-11 19:05:37 +04:00
kfree_skb ( local_skb ) ;
}
}
2014-10-28 18:16:47 +03:00
rcu_read_unlock ( ) ;
2014-10-01 16:59:15 +04:00
return err ;
2013-12-11 19:05:37 +04:00
}
/* netdev transmit hook: build the compressed header, then either
 * unicast to the resolved peer or fan out to all peers for multicast.
 */
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the
	 * ipv6 header as the header could be used elsewhere
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 *  < 0 - error, packet is dropped
	 *    0 - this is a multicast packet
	 *    1 - this is unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}
2014-10-28 18:16:48 +03:00
/* ndo_init: give the queues their own lockdep classes so they are not
 * confused with those of other netdev types.
 */
static int bt_dev_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);

	return 0;
}
2013-12-11 19:05:37 +04:00
static const struct net_device_ops netdev_ops = {
2014-10-28 18:16:48 +03:00
. ndo_init = bt_dev_init ,
2013-12-11 19:05:37 +04:00
. ndo_start_xmit = bt_xmit ,
} ;
static struct header_ops header_ops = {
. create = header_create ,
} ;
/* alloc_netdev() setup callback for the bt%d interface. */
static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	/* Point-to-point link; IFF_RUNNING is set statically because
	 * carrier state follows the L2CAP connection, not a PHY.
	 */
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT |
				  IFF_MULTICAST;
	dev->watchdog_timeo	= 0;
	dev->tx_queue_len	= DEFAULT_TX_QUEUE_LEN;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}
/* Device type exported to userspace via sysfs. */
static struct device_type bt_type = {
	.name	= "bluetooth",
};
/* Administratively bring @netdev up (takes RTNL). */
static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}
2014-06-18 17:37:11 +04:00
/* Administratively take @netdev down (takes RTNL). */
static void ifdown(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_close(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be closed (%d)", netdev->name, err);
	rtnl_unlock();
}
2013-12-11 19:05:37 +04:00
/* Deferred work scheduled by add_peer_chan(): announce the interface
 * to neighbours once it is usable.
 */
static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
						   notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}
static bool is_bt_6lowpan ( struct hci_conn * hcon )
{
if ( hcon - > type ! = LE_LINK )
return false ;
2015-01-08 18:00:55 +03:00
if ( ! enable_6lowpan )
2014-06-18 17:37:08 +04:00
return false ;
return true ;
2013-12-11 19:05:37 +04:00
}
2014-06-18 17:37:08 +04:00
/* Allocate an L2CAP channel preconfigured for IPSP: connection
 * oriented, LE flow control, 1280-byte MTU (the IPv6 minimum).
 */
static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
	chan->imtu = 1280;

	return chan;
}
/* Create a lowpan_peer for @chan and attach it to @dev. When
 * @new_netdev is set, the notify_peers work is initialised before it
 * is first scheduled. Returns the peer's channel, or NULL if the
 * peer could not be allocated.
 */
static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_btle_dev *dev,
					bool new_netdev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* Link-layer address is the byte-swapped peer bdaddr. */
	baswap((void *)peer->lladdr, &chan->dst);

	lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	if (new_netdev)
		INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}
2016-04-11 12:04:18 +03:00
/* Allocate and register the bt%d 6lowpan interface for the HCI device
 * behind @chan; on success *dev points at its private state.
 */
static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
	struct net_device *netdev;
	int err = 0;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	/* Interface hardware address is our byte-swapped bdaddr. */
	netdev->addr_assign_type = NET_ADDR_PERM;
	baswap((void *)netdev->dev_addr, &chan->src);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_btle_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		/* Back out the list insertion before freeing. */
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}
2014-06-18 17:37:08 +04:00
/* L2CAP channel connected: create the netdev on first use, register
 * the remote device as a peer, and bring the interface up.
 */
static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;
	bool new_netdev = false;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
		new_netdev = true;
	}

	/* Hold a module reference per peer; released in peer_del(). */
	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev, new_netdev);
	ifup(dev->netdev);
}
2014-08-07 11:03:32 +04:00
static inline struct l2cap_chan * chan_new_conn_cb ( struct l2cap_chan * pchan )
2014-06-18 17:37:08 +04:00
{
2014-08-07 11:03:32 +04:00
struct l2cap_chan * chan ;
2014-06-18 17:37:08 +04:00
2015-10-06 13:03:22 +03:00
chan = chan_create ( ) ;
if ( ! chan )
return NULL ;
2014-08-07 11:03:32 +04:00
chan - > ops = pchan - > ops ;
2014-06-18 17:37:08 +04:00
BT_DBG ( " chan %p pchan %p " , chan , pchan ) ;
2014-08-07 11:03:32 +04:00
return chan ;
2014-06-18 17:37:08 +04:00
}
2013-12-11 19:05:37 +04:00
/* Work item scheduled from chan_close_cb(): unregister the netdev
 * outside of the locked/atomic context.
 */
static void delete_netdev(struct work_struct *work)
{
	struct lowpan_btle_dev *entry = container_of(work,
						     struct lowpan_btle_dev,
						     delete_netdev);

	lowpan_unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}
2014-06-18 17:37:08 +04:00
/* L2CAP channel went down: drop the matching peer; if it was the last
 * one, take the interface down and, when the connection itself is
 * gone, schedule the netdev for deletion.
 */
static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_btle_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %d", chan,
			       kref_read(&chan->kref));

			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		spin_unlock(&devices_lock);

		/* cancel_delayed_work_sync() may sleep, so the lock must
		 * be dropped first.
		 */
		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}

	return;
}
/* Debug-only trace of L2CAP channel state transitions. */
static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}
/* Allocate an skb large enough for an SDU of hdr_len + len bytes. */
static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is called originally from netdev hard xmit
	 * function in atomic context.
	 */
	return bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
}
/* Out of L2CAP credits: pause the netdev queue until resume. */
static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p suspend", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_stop_queue(dev->netdev);
}
/* Credits available again: restart the netdev queue. */
static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p resume", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_wake_queue(dev->netdev);
}
/* Send timeout reported to l2cap_chan_send() for this channel. */
static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}
/* L2CAP callbacks shared by listener children and outgoing channels. */
static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};
/* Translate a HCI LE address type into the bdaddr_t type constant. */
static inline __u8 bdaddr_type(__u8 type)
{
	return type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC
					  : BDADDR_LE_RANDOM;
}
/* Initiate an outgoing IPSP connection to @addr. The channel ref is
 * dropped on connect failure; on success it is owned by the L2CAP core.
 */
static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}
2014-06-18 17:37:08 +04:00
/* Close the 6LoWPAN channel of the peer associated with @conn.
 * Returns -ENOENT when no such peer is known.
 */
static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *p;

	BT_DBG("conn %p dst type %d", conn, dst_type);

	p = lookup_peer(conn);
	if (!p)
		return -ENOENT;

	BT_DBG("peer %p chan %p", p, p->chan);

	l2cap_chan_close(p->chan, ENOENT);

	return 0;
}
static struct l2cap_chan * bt_6lowpan_listen ( void )
{
bdaddr_t * addr = BDADDR_ANY ;
2015-10-06 13:03:23 +03:00
struct l2cap_chan * chan ;
2014-06-18 17:37:08 +04:00
int err ;
2015-01-08 18:00:55 +03:00
if ( ! enable_6lowpan )
2014-06-18 17:37:08 +04:00
return NULL ;
2015-10-06 13:03:24 +03:00
chan = chan_create ( ) ;
2015-10-06 13:03:23 +03:00
if ( ! chan )
2014-06-18 17:37:08 +04:00
return NULL ;
2015-10-06 13:03:24 +03:00
chan - > ops = & bt_6lowpan_chan_ops ;
2015-10-06 13:03:23 +03:00
chan - > state = BT_LISTEN ;
chan - > src_type = BDADDR_LE_PUBLIC ;
2014-06-18 17:37:08 +04:00
2015-10-06 13:03:23 +03:00
atomic_set ( & chan - > nesting , L2CAP_NESTING_PARENT ) ;
2014-11-13 10:46:05 +03:00
2015-10-06 13:03:23 +03:00
BT_DBG ( " chan %p src type %d " , chan , chan - > src_type ) ;
2014-06-18 17:37:08 +04:00
2015-10-06 13:03:23 +03:00
err = l2cap_add_psm ( chan , addr , cpu_to_le16 ( L2CAP_PSM_IPSP ) ) ;
2014-06-18 17:37:08 +04:00
if ( err ) {
2015-10-06 13:03:23 +03:00
l2cap_chan_put ( chan ) ;
2014-06-18 17:37:08 +04:00
BT_ERR ( " psm cannot be added err %d " , err ) ;
return NULL ;
}
2015-10-06 13:03:23 +03:00
return chan ;
2014-06-18 17:37:08 +04:00
}
/* Parse a debugfs command argument of the form
 * "xx:xx:xx:xx:xx:xx <addr_type>" into @addr/@addr_type and look up the
 * matching LE connection, storing its l2cap_conn in *@conn (taken from
 * hcon->l2cap_data; NOTE(review): may be NULL — callers check).
 *
 * Returns 0 on success, -EINVAL on parse error, -ENOENT when no device
 * or connection matches.
 */
static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int n;

	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	/* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
	hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
	hci_dev_unlock(hdev);

	/* hci_get_route() holds a reference on the returned hdev; drop it
	 * here on every path so the device refcount is not leaked.
	 */
	hci_dev_put(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %d", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}
/* Close every known 6LoWPAN peer channel on every device.
 *
 * The peers are first copied into a private list under RCU, because
 * closing a channel triggers close_cb() which removes the peer from the
 * per-device list we would otherwise be iterating.
 */
static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers as the close_cb() will
	 * modify the device peers list so it is better not to mess
	 * with the same list at the same time.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			/* GFP_ATOMIC: inside an RCU read-side section. */
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			/* Only the channel pointer is needed for closing. */
			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		/* The copies are freed via RCU after unlinking. */
		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}
2015-01-08 18:00:55 +03:00
/* Deferred-work request used to change the global enable_6lowpan state
 * from the debugfs write handler; freed by the work function.
 */
struct set_enable {
	struct work_struct work;	/* scheduled on the system workqueue */
	bool flag;			/* requested 6lowpan enable state */
};
2014-06-18 17:37:08 +04:00
2015-01-08 18:00:55 +03:00
static void do_enable_set ( struct work_struct * work )
2014-10-28 18:16:47 +03:00
{
2015-01-08 18:00:55 +03:00
struct set_enable * set_enable = container_of ( work ,
struct set_enable , work ) ;
2014-10-28 18:16:47 +03:00
2015-01-08 18:00:55 +03:00
if ( ! set_enable - > flag | | enable_6lowpan ! = set_enable - > flag )
2014-06-18 17:37:08 +04:00
/* Disconnect existing connections if 6lowpan is
2015-01-08 18:00:55 +03:00
* disabled
2014-06-18 17:37:08 +04:00
*/
disconnect_all_peers ( ) ;
2015-01-08 18:00:55 +03:00
enable_6lowpan = set_enable - > flag ;
2014-06-18 17:37:08 +04:00
if ( listen_chan ) {
l2cap_chan_close ( listen_chan , 0 ) ;
l2cap_chan_put ( listen_chan ) ;
}
listen_chan = bt_6lowpan_listen ( ) ;
2015-01-08 18:00:55 +03:00
kfree ( set_enable ) ;
2014-10-28 18:16:47 +03:00
}
2015-01-08 18:00:55 +03:00
/* debugfs "6lowpan_enable" write handler.  The real state change is
 * deferred to a workqueue because it closes channels and may sleep.
 */
static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;

	req->flag = !!val;
	INIT_WORK(&req->work, do_enable_set);
	schedule_work(&req->work);

	return 0;
}
2015-01-08 18:00:55 +03:00
/* debugfs "6lowpan_enable" read handler: report the current state. */
static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}
2015-01-08 18:00:55 +03:00
/* "6lowpan_enable" debugfs attribute: read/write a single integer. */
DEFINE_SIMPLE_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			lowpan_enable_set, "%llu\n");
2014-06-18 17:37:08 +04:00
/* debugfs "6lowpan_control" write handler.  Accepts two commands:
 *   "connect <bdaddr> <type>"    - open a 6LoWPAN channel to the peer
 *   "disconnect <bdaddr> <type>" - close the channel to the peer
 * Returns @count on success (including unknown commands, which are
 * silently ignored) or a negative errno.
 */
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	/* Leave room for the terminating NUL added below. */
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		/* Only a parse error aborts; a missing connection (-ENOENT)
		 * still falls through to the connect attempt below.
		 */
		if (ret == -EINVAL)
			return ret;

		/* Stop listening while an outgoing connect is made. */
		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %d", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}
static int lowpan_control_show ( struct seq_file * f , void * ptr )
{
2016-04-11 12:04:18 +03:00
struct lowpan_btle_dev * entry ;
2014-10-28 18:16:47 +03:00
struct lowpan_peer * peer ;
2014-06-18 17:37:08 +04:00
2014-10-28 18:16:47 +03:00
spin_lock ( & devices_lock ) ;
2014-06-18 17:37:08 +04:00
2014-10-28 18:16:47 +03:00
list_for_each_entry ( entry , & bt_6lowpan_devices , list ) {
list_for_each_entry ( peer , & entry - > peers , list )
2014-06-18 17:37:08 +04:00
seq_printf ( f , " %pMR (type %u) \n " ,
& peer - > chan - > dst , peer - > chan - > dst_type ) ;
}
2014-10-28 18:16:47 +03:00
spin_unlock ( & devices_lock ) ;
2014-06-18 17:37:08 +04:00
return 0 ;
}
/* seq_file open hook for the "6lowpan_control" debugfs file. */
static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}
/* File operations for the "6lowpan_control" debugfs entry: reads list
 * current peers, writes issue connect/disconnect commands.
 */
static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2014-06-18 17:37:11 +04:00
/* Bring down and unregister every BT 6LoWPAN network device.
 *
 * The devices are first copied into a private list under RCU because
 * unregister_netdev() fires device_event(), which also modifies
 * bt_6lowpan_devices.
 */
static void disconnect_devices(void)
{
	struct lowpan_btle_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because the unregister_netdev()
	 * will call device_event() which will also want to modify the same
	 * devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		/* GFP_ATOMIC: inside an RCU read-side section. */
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		/* Only the netdev pointer is needed for the teardown. */
		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}
2013-12-11 19:05:37 +04:00
/* Netdevice notifier callback: when a 6LoWPAN interface is being
 * unregistered, unlink its entry from bt_6lowpan_devices so nothing
 * keeps referencing the dying netdev.
 */
static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_btle_dev *entry;

	/* Ignore events for interfaces that are not 6LoWPAN. */
	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				/* Entry is only unlinked here;
				 * NOTE(review): presumably its storage is
				 * netdev private data freed with the netdev
				 * — confirm against the device setup code.
				 */
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}
/* Watches for unregistration of our 6LoWPAN network interfaces. */
static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};
2014-06-18 17:37:09 +04:00
/* Module init: create the debugfs control files and register the
 * netdevice notifier.  debugfs failures are deliberately ignored, per
 * kernel convention.
 */
static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file("6lowpan_enable", 0644,
						    bt_debugfs, NULL,
						    &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}
2014-06-18 17:37:09 +04:00
/* Module exit: remove the debugfs files, stop listening, tear down all
 * devices, then drop the netdevice notifier.
 */
static void __exit bt_6lowpan_exit(void)
{
	/* Remove user-facing control files first so no new commands race
	 * with the teardown below.
	 */
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}
2014-06-18 17:37:09 +04:00
/* Standard module entry/exit hooks and metadata. */
module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");