/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI sockets. */
2019-03-14 16:10:33 +03:00
# include <linux/compat.h>
2012-05-23 11:04:22 +04:00
# include <linux/export.h>
2016-01-06 16:38:40 +03:00
# include <linux/utsname.h>
2016-08-27 21:23:38 +03:00
# include <linux/sched.h>
2005-04-17 02:20:36 +04:00
# include <asm/unaligned.h>
# include <net/bluetooth/bluetooth.h>
# include <net/bluetooth/hci_core.h>
2012-02-20 23:34:38 +04:00
# include <net/bluetooth/hci_mon.h>
2015-03-17 14:48:50 +03:00
# include <net/bluetooth/mgmt.h>
# include "mgmt_util.h"
2005-04-17 02:20:36 +04:00
/* List of registered mgmt channels and the mutex serializing access to it */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for the per-socket monitor tracing cookies */
static DEFINE_IDA(sock_cookie_ida);

/* Count of monitor sockets currently in promiscuous mode; packets are only
 * mirrored to the monitor channel while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* bound controller, NULL when unbound */
	struct hci_filter filter;	/* per-socket RAW packet filter */
	__u8              cmsg_mask;	/* which ancillary data items to emit */
	unsigned short    channel;	/* HCI_CHANNEL_* this socket is bound to */
	unsigned long     flags;	/* HCI_SOCK_* flag bits */
	__u32             cookie;	/* monitor tracing id, 0 when unset */
	char              comm[TASK_COMM_LEN];	/* task name captured when the
						 * cookie was generated */
	__u16             mtu;		/* maximum accepted packet size */
};
2021-08-04 13:26:56 +03:00
static struct hci_dev * hci_hdev_from_sock ( struct sock * sk )
{
struct hci_dev * hdev = hci_pi ( sk ) - > hdev ;
if ( ! hdev )
return ERR_PTR ( - EBADFD ) ;
if ( hci_dev_test_flag ( hdev , HCI_UNREGISTER ) )
return ERR_PTR ( - EPIPE ) ;
return hdev ;
}
2015-03-15 05:27:53 +03:00
void hci_sock_set_flag ( struct sock * sk , int nr )
{
set_bit ( nr , & hci_pi ( sk ) - > flags ) ;
}
void hci_sock_clear_flag ( struct sock * sk , int nr )
{
clear_bit ( nr , & hci_pi ( sk ) - > flags ) ;
}
2015-03-15 05:28:00 +03:00
int hci_sock_test_flag ( struct sock * sk , int nr )
{
return test_bit ( nr , & hci_pi ( sk ) - > flags ) ;
}
2015-03-17 14:48:46 +03:00
unsigned short hci_sock_get_channel ( struct sock * sk )
{
return hci_pi ( sk ) - > channel ;
}
2016-08-27 21:23:38 +03:00
u32 hci_sock_get_cookie ( struct sock * sk )
{
return hci_pi ( sk ) - > cookie ;
}
2016-08-30 06:00:34 +03:00
static bool hci_sock_gen_cookie ( struct sock * sk )
{
int id = hci_pi ( sk ) - > cookie ;
if ( ! id ) {
id = ida_simple_get ( & sock_cookie_ida , 1 , 0 , GFP_KERNEL ) ;
if ( id < 0 )
id = 0xffffffff ;
hci_pi ( sk ) - > cookie = id ;
get_task_comm ( hci_pi ( sk ) - > comm , current ) ;
return true ;
}
return false ;
}
static void hci_sock_free_cookie ( struct sock * sk )
{
int id = hci_pi ( sk ) - > cookie ;
if ( id ) {
hci_pi ( sk ) - > cookie = 0xffffffff ;
ida_simple_remove ( & sock_cookie_ida , id ) ;
}
}
2015-02-19 17:20:43 +03:00
static inline int hci_test_bit ( int nr , const void * addr )
2005-04-17 02:20:36 +04:00
{
2015-02-19 17:20:43 +03:00
return * ( ( const __u32 * ) addr + ( nr > > 5 ) ) & ( ( __u32 ) 1 < < ( nr & 31 ) ) ;
2005-04-17 02:20:36 +04:00
}
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types, one bit per type */
	__u32 event_mask[2];	/* allowed HCI events, one bit per event code */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};
/* Default filter applied to unprivileged RAW sockets: only a safe subset
 * of packet types, events and commands may pass.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
/* Global list of HCI sockets, protected by its embedded rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
2013-08-26 10:25:15 +04:00
static bool is_filtered_packet ( struct sock * sk , struct sk_buff * skb )
{
struct hci_filter * flt ;
int flt_type , flt_event ;
/* Apply filter */
flt = & hci_pi ( sk ) - > filter ;
2015-11-05 09:10:00 +03:00
flt_type = hci_skb_pkt_type ( skb ) & HCI_FLT_TYPE_BITS ;
2013-08-26 10:25:15 +04:00
if ( ! test_bit ( flt_type , & flt - > type_mask ) )
return true ;
/* Extra filter for event packets only */
2015-11-05 09:10:00 +03:00
if ( hci_skb_pkt_type ( skb ) ! = HCI_EVENT_PKT )
2013-08-26 10:25:15 +04:00
return false ;
flt_event = ( * ( __u8 * ) skb - > data & HCI_FLT_EVENT_BITS ) ;
if ( ! hci_test_bit ( flt_event , & flt - > event_mask ) )
return true ;
/* Check filter only when opcode is set */
if ( ! flt - > opcode )
return false ;
if ( flt_event = = HCI_EV_CMD_COMPLETE & &
flt - > opcode ! = get_unaligned ( ( __le16 * ) ( skb - > data + 3 ) ) )
return true ;
if ( flt_event = = HCI_EV_CMD_STATUS & &
flt - > opcode ! = get_unaligned ( ( __le16 * ) ( skb - > data + 4 ) ) )
return true ;
return false ;
}
/* Send frame to RAW socket.
 *
 * Mirrors @skb to every bound socket on @hdev's RAW and USER channels,
 * subject to per-channel packet-type rules and (for RAW) the socket's
 * filter. A single private copy with the packet-type byte pushed in
 * front is created lazily and then cloned per recipient.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel sockets only see incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
2023-05-26 02:46:43 +03:00
static void hci_sock_copy_creds ( struct sock * sk , struct sk_buff * skb )
{
struct scm_creds * creds ;
if ( ! sk | | WARN_ON ( ! skb ) )
return ;
creds = & bt_cb ( skb ) - > creds ;
/* Check if peer credentials is set */
if ( ! sk - > sk_peer_pid ) {
/* Check if parent peer credentials is set */
if ( bt_sk ( sk ) - > parent & & bt_sk ( sk ) - > parent - > sk_peer_pid )
sk = bt_sk ( sk ) - > parent ;
else
return ;
}
/* Check if scm_creds already set */
if ( creds - > pid = = pid_vnr ( sk - > sk_peer_pid ) )
return ;
memset ( creds , 0 , sizeof ( * creds ) ) ;
creds - > pid = pid_vnr ( sk - > sk_peer_pid ) ;
if ( sk - > sk_peer_cred ) {
creds - > uid = sk - > sk_peer_cred - > uid ;
creds - > gid = sk - > sk_peer_cred - > gid ;
}
}
static struct sk_buff * hci_skb_clone ( struct sk_buff * skb )
{
struct sk_buff * nskb ;
if ( ! skb )
return NULL ;
nskb = skb_clone ( skb , GFP_ATOMIC ) ;
if ( ! nskb )
return NULL ;
hci_sock_copy_creds ( skb - > sk , nskb ) ;
return nskb ;
}
/* Send frame to sockets with specific channel.
 *
 * Delivers a clone of @skb to every bound socket on @channel that has
 * flag @flag set, except @skip_sk. Caller must hold hci_sk_list.lock
 * (see hci_send_to_channel() for the locking wrapper).
 */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}
/* Locked wrapper around __hci_send_to_channel(): takes the socket-list
 * read lock for the duration of the broadcast.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}
/* Send frame to monitor socket.
 *
 * Mirrors @skb to the monitor channel with an hci_mon_hdr prepended.
 * The monitor opcode encodes both the packet type and its direction
 * (RX vs TX). Returns early when no monitor socket is in promiscuous
 * mode or the packet type is not one that is traced.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
/* Broadcast a management control event to the monitor channel.
 *
 * For every control-channel socket with @flag set (except @skip_sk), a
 * per-socket HCI_MON_CTRL_EVENT frame is built carrying that socket's
 * cookie, the @event code and optional @data, then sent to all monitor
 * sockets. @hdev may be NULL, in which case MGMT_INDEX_NONE is used.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		/* 6 bytes: 4-byte cookie + 2-byte event code */
		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		/* hci_sk_list.lock is already held, use the unlocked variant */
		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Build the monitor frame describing a controller life-cycle @event
 * (register, unregister, setup/up, open, close) for @hdev.
 *
 * Returns a freshly allocated, timestamped skb with the hci_mon_hdr
 * already pushed, or NULL when the event is not traced (including the
 * HCI_DEV_SETUP case where no manufacturer information is available)
 * or allocation fails.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* Without manufacturer info there is nothing to report */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
2016-08-27 21:23:40 +03:00
static struct sk_buff * create_monitor_ctrl_open ( struct sock * sk )
{
struct hci_mon_hdr * hdr ;
struct sk_buff * skb ;
2016-08-30 06:00:38 +03:00
u16 format ;
2016-08-27 21:23:40 +03:00
u8 ver [ 3 ] ;
u32 flags ;
2016-08-30 06:00:36 +03:00
/* No message needed when cookie is not present */
if ( ! hci_pi ( sk ) - > cookie )
return NULL ;
2016-08-30 06:00:38 +03:00
switch ( hci_pi ( sk ) - > channel ) {
2016-08-30 06:00:39 +03:00
case HCI_CHANNEL_RAW :
format = 0x0000 ;
ver [ 0 ] = BT_SUBSYS_VERSION ;
put_unaligned_le16 ( BT_SUBSYS_REVISION , ver + 1 ) ;
break ;
2016-09-01 20:48:28 +03:00
case HCI_CHANNEL_USER :
format = 0x0001 ;
ver [ 0 ] = BT_SUBSYS_VERSION ;
put_unaligned_le16 ( BT_SUBSYS_REVISION , ver + 1 ) ;
break ;
2016-08-30 06:00:38 +03:00
case HCI_CHANNEL_CONTROL :
format = 0x0002 ;
mgmt_fill_version_info ( ver ) ;
break ;
default :
/* No message for unsupported format */
return NULL ;
}
2023-05-26 02:46:43 +03:00
skb = bt_skb_alloc ( 14 + TASK_COMM_LEN , GFP_ATOMIC ) ;
2016-08-27 21:23:40 +03:00
if ( ! skb )
return NULL ;
2023-05-26 02:46:43 +03:00
hci_sock_copy_creds ( sk , skb ) ;
2016-08-27 21:23:40 +03:00
flags = hci_sock_test_flag ( sk , HCI_SOCK_TRUSTED ) ? 0x1 : 0x0 ;
put_unaligned_le32 ( hci_pi ( sk ) - > cookie , skb_put ( skb , 4 ) ) ;
put_unaligned_le16 ( format , skb_put ( skb , 2 ) ) ;
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:20 +03:00
skb_put_data ( skb , ver , sizeof ( ver ) ) ;
2016-08-27 21:23:40 +03:00
put_unaligned_le32 ( flags , skb_put ( skb , 4 ) ) ;
networking: add and use skb_put_u8()
Joe and Bjørn suggested that it'd be nicer to not have the
cast in the fairly common case of doing
*(u8 *)skb_put(skb, 1) = c;
Add skb_put_u8() for this case, and use it across the code,
using the following spatch:
@@
expression SKB, C, S;
typedef u8;
identifier fn = {skb_put};
fresh identifier fn2 = fn ## "_u8";
@@
- *(u8 *)fn(SKB, S) = C;
+ fn2(SKB, C);
Note that due to the "S", the spatch isn't perfect, it should
have checked that S is 1, but there's also places that use a
sizeof expression like sizeof(var) or sizeof(u8) etc. Turns
out that nobody ever did something like
*(u8 *)skb_put(skb, 2) = c;
which would be wrong anyway since the second byte wouldn't be
initialized.
Suggested-by: Joe Perches <joe@perches.com>
Suggested-by: Bjørn Mork <bjorn@mork.no>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:24 +03:00
skb_put_u8 ( skb , TASK_COMM_LEN ) ;
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:20 +03:00
skb_put_data ( skb , hci_pi ( sk ) - > comm , TASK_COMM_LEN ) ;
2016-08-27 21:23:40 +03:00
__net_timestamp ( skb ) ;
networking: make skb_push & __skb_push return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions return void * and remove all the casts across
the tree, adding a (u8 *) cast only where the unsigned char pointer
was used directly, all done with the following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
@@
expression SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- fn(SKB, LEN)[0]
+ *(u8 *)fn(SKB, LEN)
Note that the last part there converts from push(...)[0] to the
more idiomatic *(u8 *)push(...).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:23 +03:00
hdr = skb_push ( skb , HCI_MON_HDR_SIZE ) ;
2016-08-27 21:23:40 +03:00
hdr - > opcode = cpu_to_le16 ( HCI_MON_CTRL_OPEN ) ;
2016-08-30 06:00:36 +03:00
if ( hci_pi ( sk ) - > hdev )
hdr - > index = cpu_to_le16 ( hci_pi ( sk ) - > hdev - > id ) ;
else
hdr - > index = cpu_to_le16 ( HCI_DEV_NONE ) ;
2016-08-27 21:23:40 +03:00
hdr - > len = cpu_to_le16 ( skb - > len - HCI_MON_HDR_SIZE ) ;
return skb ;
}
static struct sk_buff * create_monitor_ctrl_close ( struct sock * sk )
{
struct hci_mon_hdr * hdr ;
struct sk_buff * skb ;
2016-08-30 06:00:36 +03:00
/* No message needed when cookie is not present */
if ( ! hci_pi ( sk ) - > cookie )
return NULL ;
2016-08-30 06:00:38 +03:00
switch ( hci_pi ( sk ) - > channel ) {
2016-08-30 06:00:39 +03:00
case HCI_CHANNEL_RAW :
2016-09-01 20:48:28 +03:00
case HCI_CHANNEL_USER :
2016-08-30 06:00:38 +03:00
case HCI_CHANNEL_CONTROL :
break ;
default :
/* No message for unsupported format */
return NULL ;
}
2016-08-27 21:23:40 +03:00
skb = bt_skb_alloc ( 4 , GFP_ATOMIC ) ;
if ( ! skb )
return NULL ;
2023-05-26 02:46:43 +03:00
hci_sock_copy_creds ( sk , skb ) ;
2016-08-27 21:23:40 +03:00
put_unaligned_le32 ( hci_pi ( sk ) - > cookie , skb_put ( skb , 4 ) ) ;
__net_timestamp ( skb ) ;
networking: make skb_push & __skb_push return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions return void * and remove all the casts across
the tree, adding a (u8 *) cast only where the unsigned char pointer
was used directly, all done with the following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
@@
expression SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- fn(SKB, LEN)[0]
+ *(u8 *)fn(SKB, LEN)
Note that the last part there converts from push(...)[0] to the
more idiomatic *(u8 *)push(...).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:23 +03:00
hdr = skb_push ( skb , HCI_MON_HDR_SIZE ) ;
2016-08-27 21:23:40 +03:00
hdr - > opcode = cpu_to_le16 ( HCI_MON_CTRL_CLOSE ) ;
2016-08-30 06:00:36 +03:00
if ( hci_pi ( sk ) - > hdev )
hdr - > index = cpu_to_le16 ( hci_pi ( sk ) - > hdev - > id ) ;
else
hdr - > index = cpu_to_le16 ( HCI_DEV_NONE ) ;
2016-08-27 21:23:40 +03:00
hdr - > len = cpu_to_le16 ( skb - > len - HCI_MON_HDR_SIZE ) ;
return skb ;
}
2016-08-27 21:23:41 +03:00
/* Build a HCI_MON_CTRL_COMMAND monitor message carrying a management
 * command issued on @sk against controller @index.
 *
 * @opcode/@len/@buf describe the mgmt command; @buf may be NULL when
 * the command has no parameters. Returns a new skb owned by the
 * caller, or NULL on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* 4-byte cookie + 2-byte opcode + command parameters */
	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
2016-01-06 16:38:40 +03:00
/* Queue a HCI_MON_SYSTEM_NOTE (printf-formatted text) on monitor
 * socket @sk. Best effort: silently returns on allocation failure,
 * and drops the skb if the receive queue rejects it.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	/* First pass: measure the formatted string */
	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	/* One extra byte for the NUL terminator */
	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	/* Second pass: format directly into the skb. Reserve len + 1
	 * bytes up front and use the bounded vsnprintf so the NUL
	 * terminator lands inside the reserved area, instead of the
	 * old unbounded vsprintf into skb_put(skb, len) which wrote
	 * the terminator one byte past the reservation (into tailroom
	 * that was only claimed by a later skb_put). Final skb length
	 * and contents are unchanged: len text bytes plus NUL.
	 */
	va_start(args, fmt);
	vsnprintf(skb_put(skb, len + 1), len + 1, fmt, args);
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
2012-02-20 23:34:38 +04:00
/* Replay the current controller state to a freshly opened monitor
 * socket @sk: for every registered hdev emit HCI_DEV_REG, then (if the
 * device is running) HCI_DEV_OPEN, then HCI_DEV_UP or HCI_DEV_SETUP
 * depending on its state.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
2016-08-27 21:23:40 +03:00
/* Replay a CTRL_OPEN notification for every existing HCI socket to a
 * freshly opened monitor socket @mon_sk, so it learns about sockets
 * opened before it attached.
 */
static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		/* Sockets without a cookie yield NULL and are skipped */
		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
2012-02-20 17:50:37 +04:00
/* Generate internal stack event */
static void hci_si_event ( struct hci_dev * hdev , int type , int dlen , void * data )
{
struct hci_event_hdr * hdr ;
struct hci_ev_stack_internal * ev ;
struct sk_buff * skb ;
skb = bt_skb_alloc ( HCI_EVENT_HDR_SIZE + sizeof ( * ev ) + dlen , GFP_ATOMIC ) ;
if ( ! skb )
return ;
networking: make skb_put & friends return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions (skb_put, __skb_put and pskb_put) return void *
and remove all the casts across the tree, adding a (u8 *) cast only
where the unsigned char pointer was used directly, all done with the
following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_put, __skb_put };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_put, __skb_put };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
which actually doesn't cover pskb_put since there are only three
users overall.
A handful of stragglers were converted manually, notably a macro in
drivers/isdn/i4l/isdn_bsdcomp.c and, oddly enough, one of the many
instances in net/bluetooth/hci_sock.c. In the former file, I also
had to fix one whitespace problem spatch introduced.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:21 +03:00
hdr = skb_put ( skb , HCI_EVENT_HDR_SIZE ) ;
2012-02-20 17:50:37 +04:00
hdr - > evt = HCI_EV_STACK_INTERNAL ;
hdr - > plen = sizeof ( * ev ) + dlen ;
networking: make skb_put & friends return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions (skb_put, __skb_put and pskb_put) return void *
and remove all the casts across the tree, adding a (u8 *) cast only
where the unsigned char pointer was used directly, all done with the
following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_put, __skb_put };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_put, __skb_put };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
which actually doesn't cover pskb_put since there are only three
users overall.
A handful of stragglers were converted manually, notably a macro in
drivers/isdn/i4l/isdn_bsdcomp.c and, oddly enough, one of the many
instances in net/bluetooth/hci_sock.c. In the former file, I also
had to fix one whitespace problem spatch introduced.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:21 +03:00
ev = skb_put ( skb , sizeof ( * ev ) + dlen ) ;
2012-02-20 17:50:37 +04:00
ev - > type = type ;
memcpy ( ev - > data , data , dlen ) ;
bt_cb ( skb ) - > incoming = 1 ;
__net_timestamp ( skb ) ;
2015-11-05 09:10:00 +03:00
hci_skb_pkt_type ( skb ) = HCI_EVENT_PKT ;
2012-02-20 17:50:37 +04:00
hci_send_to_sock ( hdev , skb ) ;
kfree_skb ( skb ) ;
}
void hci_sock_dev_event ( struct hci_dev * hdev , int event )
{
BT_DBG ( " hdev %s event %d " , hdev - > name , event ) ;
2012-02-20 23:34:38 +04:00
if ( atomic_read ( & monitor_promisc ) ) {
struct sk_buff * skb ;
2015-10-05 00:33:59 +03:00
/* Send event to monitor */
2012-02-20 23:34:38 +04:00
skb = create_monitor_event ( hdev , event ) ;
if ( skb ) {
2015-03-15 05:27:59 +03:00
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
2012-02-20 23:34:38 +04:00
kfree_skb ( skb ) ;
}
}
2015-10-05 00:33:59 +03:00
if ( event < = HCI_DEV_DOWN ) {
struct hci_ev_si_device ev ;
/* Send event to sockets */
ev . event = event ;
ev . dev_id = hdev - > id ;
hci_si_event ( NULL , HCI_EV_SI_DEVICE , sizeof ( ev ) , & ev ) ;
}
2012-02-20 17:50:37 +04:00
if ( event = = HCI_DEV_UNREG ) {
struct sock * sk ;
2021-08-04 13:26:56 +03:00
/* Wake up sockets using this dead device */
2012-02-20 17:50:37 +04:00
read_lock ( & hci_sk_list . lock ) ;
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived
list_for_each_entry(pos, head, member)
The hlist ones were greedy and wanted an extra parameter:
hlist_for_each_entry(tpos, pos, head, member)
Why did they need an extra pos parameter? I'm not quite sure. Not only
they don't really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
Besides the semantic patch, there was some manual work required:
- Fix up the actual hlist iterators in linux/list.h
- Fix up the declaration of other iterators based on the hlist ones.
- A very small amount of places were using the 'node' parameter, this
was modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
properly, so those had to be fixed up manually.
The semantic patch which is mostly the work of Peter Senna Tschudin is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foudnation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 05:06:00 +04:00
sk_for_each ( sk , & hci_sk_list . head ) {
2012-02-20 17:50:37 +04:00
if ( hci_pi ( sk ) - > hdev = = hdev ) {
sk - > sk_err = EPIPE ;
sk - > sk_state_change ( sk ) ;
}
}
read_unlock ( & hci_sk_list . lock ) ;
}
}
2015-03-06 22:08:50 +03:00
static struct hci_mgmt_chan * __hci_mgmt_chan_find ( unsigned short channel )
{
struct hci_mgmt_chan * c ;
list_for_each_entry ( c , & mgmt_chan_list , list ) {
if ( c - > channel = = channel )
return c ;
}
return NULL ;
}
/* Locked wrapper around __hci_mgmt_chan_find(). */
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *chan;

	mutex_lock(&mgmt_chan_list_lock);
	chan = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return chan;
}
int hci_mgmt_chan_register ( struct hci_mgmt_chan * c )
{
if ( c - > channel < HCI_CHANNEL_CONTROL )
return - EINVAL ;
mutex_lock ( & mgmt_chan_list_lock ) ;
if ( __hci_mgmt_chan_find ( c - > channel ) ) {
mutex_unlock ( & mgmt_chan_list_lock ) ;
return - EALREADY ;
}
list_add_tail ( & c - > list , & mgmt_chan_list ) ;
mutex_unlock ( & mgmt_chan_list_lock ) ;
return 0 ;
}
EXPORT_SYMBOL ( hci_mgmt_chan_register ) ;
void hci_mgmt_chan_unregister ( struct hci_mgmt_chan * c )
{
mutex_lock ( & mgmt_chan_list_lock ) ;
list_del ( & c - > list ) ;
mutex_unlock ( & mgmt_chan_list_lock ) ;
}
EXPORT_SYMBOL ( hci_mgmt_chan_unregister ) ;
2005-04-17 02:20:36 +04:00
static int hci_sock_release ( struct socket * sock )
{
struct sock * sk = sock - > sk ;
2006-02-13 13:40:03 +03:00
struct hci_dev * hdev ;
2016-08-27 21:23:40 +03:00
struct sk_buff * skb ;
2005-04-17 02:20:36 +04:00
BT_DBG ( " sock %p sk %p " , sock , sk ) ;
if ( ! sk )
return 0 ;
2020-01-15 20:49:04 +03:00
lock_sock ( sk ) ;
2016-08-27 21:23:38 +03:00
switch ( hci_pi ( sk ) - > channel ) {
case HCI_CHANNEL_MONITOR :
2012-02-20 23:34:38 +04:00
atomic_dec ( & monitor_promisc ) ;
2016-08-27 21:23:38 +03:00
break ;
2016-08-30 06:00:39 +03:00
case HCI_CHANNEL_RAW :
2016-09-01 20:48:28 +03:00
case HCI_CHANNEL_USER :
2016-08-27 21:23:38 +03:00
case HCI_CHANNEL_CONTROL :
2016-08-27 21:23:40 +03:00
/* Send event to monitor */
skb = create_monitor_ctrl_close ( sk ) ;
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
2016-08-30 06:00:34 +03:00
hci_sock_free_cookie ( sk ) ;
2016-08-27 21:23:38 +03:00
break ;
}
2012-02-20 23:34:38 +04:00
2005-04-17 02:20:36 +04:00
bt_sock_unlink ( & hci_sk_list , sk ) ;
2019-02-03 03:56:36 +03:00
hdev = hci_pi ( sk ) - > hdev ;
2005-04-17 02:20:36 +04:00
if ( hdev ) {
2022-06-02 19:46:49 +03:00
if ( hci_pi ( sk ) - > channel = = HCI_CHANNEL_USER & &
! hci_dev_test_flag ( hdev , HCI_UNREGISTER ) ) {
2017-02-28 01:28:47 +03:00
/* When releasing a user channel exclusive access,
2015-09-02 13:10:12 +03:00
* call hci_dev_do_close directly instead of calling
* hci_dev_close to ensure the exclusive access will
* be released and the controller brought back down .
*
* The checking of HCI_AUTO_OFF is not needed in this
* case since it will have been cleared already when
* opening the user channel .
2022-06-02 19:46:49 +03:00
*
* Make sure to also check that we haven ' t already
* unregistered since all the cleanup will have already
* been complete and hdev will get released when we put
* below .
2015-09-02 13:10:12 +03:00
*/
hci_dev_do_close ( hdev ) ;
2015-05-21 17:46:41 +03:00
hci_dev_clear_flag ( hdev , HCI_USER_CHANNEL ) ;
mgmt_index_added ( hdev ) ;
2013-08-27 08:40:52 +04:00
}
2005-04-17 02:20:36 +04:00
atomic_dec ( & hdev - > promisc ) ;
hci_dev_put ( hdev ) ;
}
sock_orphan ( sk ) ;
2020-01-15 20:49:04 +03:00
release_sock ( sk ) ;
2005-04-17 02:20:36 +04:00
sock_put ( sk ) ;
return 0 ;
}
2021-06-04 11:26:27 +03:00
/* HCIBLOCKADDR helper: copy a bdaddr from userspace and add it to the
 * controller's reject list (as a BR/EDR address). Returns 0 or a
 * negative errno.
 */
static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}
2021-06-04 11:26:27 +03:00
/* HCIUNBLOCKADDR helper: copy a bdaddr from userspace and remove it
 * from the controller's reject list. Returns 0 or a negative errno.
 */
static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);
	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}
2007-02-09 17:24:33 +03:00
/* Ioctls that require bound socket */
2012-05-23 11:04:18 +04:00
static int hci_sock_bound_ioctl ( struct sock * sk , unsigned int cmd ,
unsigned long arg )
2005-04-17 02:20:36 +04:00
{
2021-08-04 13:26:56 +03:00
struct hci_dev * hdev = hci_hdev_from_sock ( sk ) ;
2005-04-17 02:20:36 +04:00
2021-08-04 13:26:56 +03:00
if ( IS_ERR ( hdev ) )
return PTR_ERR ( hdev ) ;
2005-04-17 02:20:36 +04:00
2015-03-13 12:11:00 +03:00
if ( hci_dev_test_flag ( hdev , HCI_USER_CHANNEL ) )
2013-08-27 08:40:51 +04:00
return - EBUSY ;
2015-03-13 12:11:00 +03:00
if ( hci_dev_test_flag ( hdev , HCI_UNCONFIGURED ) )
2014-06-29 14:13:05 +04:00
return - EOPNOTSUPP ;
2016-07-05 15:30:14 +03:00
if ( hdev - > dev_type ! = HCI_PRIMARY )
2013-10-10 21:02:08 +04:00
return - EOPNOTSUPP ;
2005-04-17 02:20:36 +04:00
switch ( cmd ) {
case HCISETRAW :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2014-04-17 07:04:38 +04:00
return - EOPNOTSUPP ;
2005-04-17 02:20:36 +04:00
case HCIGETCONNINFO :
2015-11-08 09:47:11 +03:00
return hci_get_conn_info ( hdev , ( void __user * ) arg ) ;
2008-07-14 22:13:50 +04:00
case HCIGETAUTHINFO :
2015-11-08 09:47:11 +03:00
return hci_get_auth_info ( hdev , ( void __user * ) arg ) ;
2005-04-17 02:20:36 +04:00
2010-05-18 15:20:32 +04:00
case HCIBLOCKADDR :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2021-06-04 11:26:27 +03:00
return hci_sock_reject_list_add ( hdev , ( void __user * ) arg ) ;
2010-05-18 15:20:32 +04:00
case HCIUNBLOCKADDR :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2021-06-04 11:26:27 +03:00
return hci_sock_reject_list_del ( hdev , ( void __user * ) arg ) ;
2005-04-17 02:20:36 +04:00
}
2013-08-27 08:40:51 +04:00
2013-10-10 21:50:06 +04:00
return - ENOIOCTLCMD ;
2005-04-17 02:20:36 +04:00
}
2012-05-23 11:04:21 +04:00
static int hci_sock_ioctl ( struct socket * sock , unsigned int cmd ,
unsigned long arg )
2005-04-17 02:20:36 +04:00
{
2015-11-08 09:47:11 +03:00
void __user * argp = ( void __user * ) arg ;
2013-08-27 08:40:51 +04:00
struct sock * sk = sock - > sk ;
2005-04-17 02:20:36 +04:00
int err ;
BT_DBG ( " cmd %x arg %lx " , cmd , arg ) ;
2023-04-16 11:02:51 +03:00
/* Make sure the cmd is valid before doing anything */
switch ( cmd ) {
case HCIGETDEVLIST :
case HCIGETDEVINFO :
case HCIGETCONNLIST :
case HCIDEVUP :
case HCIDEVDOWN :
case HCIDEVRESET :
case HCIDEVRESTAT :
case HCISETSCAN :
case HCISETAUTH :
case HCISETENCRYPT :
case HCISETPTYPE :
case HCISETLINKPOL :
case HCISETLINKMODE :
case HCISETACLMTU :
case HCISETSCOMTU :
case HCIINQUIRY :
case HCISETRAW :
case HCIGETCONNINFO :
case HCIGETAUTHINFO :
case HCIBLOCKADDR :
case HCIUNBLOCKADDR :
break ;
default :
return - ENOIOCTLCMD ;
}
2013-08-26 20:39:55 +04:00
lock_sock ( sk ) ;
if ( hci_pi ( sk ) - > channel ! = HCI_CHANNEL_RAW ) {
err = - EBADFD ;
goto done ;
}
2016-08-30 06:00:39 +03:00
/* When calling an ioctl on an unbound raw socket, then ensure
* that the monitor gets informed . Ensure that the resulting event
* is only send once by checking if the cookie exists or not . The
* socket cookie will be only ever generated once for the lifetime
* of a given socket .
*/
if ( hci_sock_gen_cookie ( sk ) ) {
struct sk_buff * skb ;
2023-04-16 11:14:04 +03:00
/* Perform careful checks before setting the HCI_SOCK_TRUSTED
* flag . Make sure that not only the current task but also
* the socket opener has the required capability , since
* privileged programs can be tricked into making ioctl calls
* on HCI sockets , and the socket should not be marked as
* trusted simply because the ioctl caller is privileged .
*/
if ( sk_capable ( sk , CAP_NET_ADMIN ) )
2016-08-30 06:00:39 +03:00
hci_sock_set_flag ( sk , HCI_SOCK_TRUSTED ) ;
/* Send event to monitor */
skb = create_monitor_ctrl_open ( sk ) ;
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
}
2013-08-26 20:39:55 +04:00
release_sock ( sk ) ;
2005-04-17 02:20:36 +04:00
switch ( cmd ) {
case HCIGETDEVLIST :
return hci_get_dev_list ( argp ) ;
case HCIGETDEVINFO :
return hci_get_dev_info ( argp ) ;
case HCIGETCONNLIST :
return hci_get_conn_list ( argp ) ;
case HCIDEVUP :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2005-04-17 02:20:36 +04:00
return hci_dev_open ( arg ) ;
case HCIDEVDOWN :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2005-04-17 02:20:36 +04:00
return hci_dev_close ( arg ) ;
case HCIDEVRESET :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2005-04-17 02:20:36 +04:00
return hci_dev_reset ( arg ) ;
case HCIDEVRESTAT :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2005-04-17 02:20:36 +04:00
return hci_dev_reset_stat ( arg ) ;
case HCISETSCAN :
case HCISETAUTH :
case HCISETENCRYPT :
case HCISETPTYPE :
case HCISETLINKPOL :
case HCISETLINKMODE :
case HCISETACLMTU :
case HCISETSCOMTU :
if ( ! capable ( CAP_NET_ADMIN ) )
2012-09-21 02:37:25 +04:00
return - EPERM ;
2005-04-17 02:20:36 +04:00
return hci_dev_cmd ( cmd , argp ) ;
case HCIINQUIRY :
return hci_inquiry ( argp ) ;
}
2013-08-26 20:39:55 +04:00
lock_sock ( sk ) ;
err = hci_sock_bound_ioctl ( sk , cmd , arg ) ;
done :
release_sock ( sk ) ;
return err ;
2005-04-17 02:20:36 +04:00
}
2019-03-14 16:10:33 +03:00
#ifdef CONFIG_COMPAT
/* Compat (32-bit on 64-bit kernel) ioctl entry: device index commands
 * pass the argument through unchanged; everything else carries a
 * userspace pointer that must go through compat_ptr().
 */
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
2012-05-23 11:04:21 +04:00
static int hci_sock_bind ( struct socket * sock , struct sockaddr * addr ,
int addr_len )
2005-04-17 02:20:36 +04:00
{
2010-12-08 01:21:06 +03:00
struct sockaddr_hci haddr ;
2005-04-17 02:20:36 +04:00
struct sock * sk = sock - > sk ;
struct hci_dev * hdev = NULL ;
2016-08-30 06:00:40 +03:00
struct sk_buff * skb ;
2010-12-08 01:21:06 +03:00
int len , err = 0 ;
2005-04-17 02:20:36 +04:00
BT_DBG ( " sock %p sk %p " , sock , sk ) ;
2010-12-08 01:21:06 +03:00
if ( ! addr )
return - EINVAL ;
memset ( & haddr , 0 , sizeof ( haddr ) ) ;
len = min_t ( unsigned int , sizeof ( haddr ) , addr_len ) ;
memcpy ( & haddr , addr , len ) ;
if ( haddr . hci_family ! = AF_BLUETOOTH )
return - EINVAL ;
2005-04-17 02:20:36 +04:00
lock_sock ( sk ) ;
2021-08-04 13:26:56 +03:00
/* Allow detaching from dead device and attaching to alive device, if
* the caller wants to re - bind ( instead of close ) this socket in
* response to hci_sock_dev_event ( HCI_DEV_UNREG ) notification .
*/
hdev = hci_pi ( sk ) - > hdev ;
if ( hdev & & hci_dev_test_flag ( hdev , HCI_UNREGISTER ) ) {
hci_pi ( sk ) - > hdev = NULL ;
sk - > sk_state = BT_OPEN ;
hci_dev_put ( hdev ) ;
}
hdev = NULL ;
2012-02-20 17:50:35 +04:00
if ( sk - > sk_state = = BT_BOUND ) {
2005-04-17 02:20:36 +04:00
err = - EALREADY ;
goto done ;
}
2012-02-20 17:50:35 +04:00
switch ( haddr . hci_channel ) {
case HCI_CHANNEL_RAW :
if ( hci_pi ( sk ) - > hdev ) {
err = - EALREADY ;
2005-04-17 02:20:36 +04:00
goto done ;
}
2012-02-20 17:50:35 +04:00
if ( haddr . hci_dev ! = HCI_DEV_NONE ) {
hdev = hci_dev_get ( haddr . hci_dev ) ;
if ( ! hdev ) {
err = - ENODEV ;
goto done ;
}
atomic_inc ( & hdev - > promisc ) ;
}
2016-08-30 06:00:37 +03:00
hci_pi ( sk ) - > channel = haddr . hci_channel ;
2016-08-30 06:00:39 +03:00
2016-08-30 06:00:40 +03:00
if ( ! hci_sock_gen_cookie ( sk ) ) {
/* In the case when a cookie has already been assigned,
* then there has been already an ioctl issued against
2021-06-02 09:54:58 +03:00
* an unbound socket and with that triggered an open
2016-08-30 06:00:40 +03:00
* notification . Send a close notification first to
* allow the state transition to bounded .
*/
skb = create_monitor_ctrl_close ( sk ) ;
2016-08-30 06:00:39 +03:00
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
}
2016-08-30 06:00:40 +03:00
if ( capable ( CAP_NET_ADMIN ) )
hci_sock_set_flag ( sk , HCI_SOCK_TRUSTED ) ;
hci_pi ( sk ) - > hdev = hdev ;
/* Send event to monitor */
skb = create_monitor_ctrl_open ( sk ) ;
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
2012-02-20 17:50:35 +04:00
break ;
2013-08-27 08:40:52 +04:00
case HCI_CHANNEL_USER :
if ( hci_pi ( sk ) - > hdev ) {
err = - EALREADY ;
goto done ;
}
if ( haddr . hci_dev = = HCI_DEV_NONE ) {
err = - EINVAL ;
goto done ;
}
2013-10-02 09:59:24 +04:00
if ( ! capable ( CAP_NET_ADMIN ) ) {
2013-08-27 08:40:52 +04:00
err = - EPERM ;
goto done ;
}
hdev = hci_dev_get ( haddr . hci_dev ) ;
if ( ! hdev ) {
err = - ENODEV ;
goto done ;
}
2015-06-06 07:06:49 +03:00
if ( test_bit ( HCI_INIT , & hdev - > flags ) | |
2015-03-13 12:11:00 +03:00
hci_dev_test_flag ( hdev , HCI_SETUP ) | |
2015-06-06 07:06:49 +03:00
hci_dev_test_flag ( hdev , HCI_CONFIG ) | |
( ! hci_dev_test_flag ( hdev , HCI_AUTO_OFF ) & &
test_bit ( HCI_UP , & hdev - > flags ) ) ) {
2013-08-27 08:40:52 +04:00
err = - EBUSY ;
hci_dev_put ( hdev ) ;
goto done ;
}
2015-03-13 12:11:06 +03:00
if ( hci_dev_test_and_set_flag ( hdev , HCI_USER_CHANNEL ) ) {
2013-08-27 08:40:52 +04:00
err = - EUSERS ;
hci_dev_put ( hdev ) ;
goto done ;
}
2014-07-02 23:30:54 +04:00
mgmt_index_removed ( hdev ) ;
2013-08-27 08:40:52 +04:00
err = hci_dev_open ( hdev - > id ) ;
if ( err ) {
2015-06-06 07:06:49 +03:00
if ( err = = - EALREADY ) {
/* In case the transport is already up and
* running , clear the error here .
*
2017-02-28 01:28:47 +03:00
* This can happen when opening a user
2015-06-06 07:06:49 +03:00
* channel and HCI_AUTO_OFF grace period
* is still active .
*/
err = 0 ;
} else {
hci_dev_clear_flag ( hdev , HCI_USER_CHANNEL ) ;
mgmt_index_added ( hdev ) ;
hci_dev_put ( hdev ) ;
goto done ;
}
2013-08-27 08:40:52 +04:00
}
2016-08-30 06:00:37 +03:00
hci_pi ( sk ) - > channel = haddr . hci_channel ;
2016-09-01 20:48:28 +03:00
if ( ! hci_sock_gen_cookie ( sk ) ) {
/* In the case when a cookie has already been assigned,
* this socket will transition from a raw socket into
2017-02-28 01:28:47 +03:00
* a user channel socket . For a clean transition , send
2016-09-01 20:48:28 +03:00
* the close notification first .
*/
skb = create_monitor_ctrl_close ( sk ) ;
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
}
/* The user channel is restricted to CAP_NET_ADMIN
* capabilities and with that implicitly trusted .
*/
hci_sock_set_flag ( sk , HCI_SOCK_TRUSTED ) ;
2013-08-27 08:40:52 +04:00
hci_pi ( sk ) - > hdev = hdev ;
2016-08-30 06:00:37 +03:00
2016-09-01 20:48:28 +03:00
/* Send event to monitor */
skb = create_monitor_ctrl_open ( sk ) ;
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
2016-08-30 06:00:37 +03:00
atomic_inc ( & hdev - > promisc ) ;
2013-08-27 08:40:52 +04:00
break ;
2012-02-20 23:34:38 +04:00
case HCI_CHANNEL_MONITOR :
if ( haddr . hci_dev ! = HCI_DEV_NONE ) {
err = - EINVAL ;
goto done ;
}
if ( ! capable ( CAP_NET_RAW ) ) {
err = - EPERM ;
goto done ;
}
2016-08-30 06:00:37 +03:00
hci_pi ( sk ) - > channel = haddr . hci_channel ;
2015-03-15 05:27:58 +03:00
/* The monitor interface is restricted to CAP_NET_RAW
* capabilities and with that implicitly trusted .
*/
hci_sock_set_flag ( sk , HCI_SOCK_TRUSTED ) ;
2016-01-06 16:38:40 +03:00
send_monitor_note ( sk , " Linux version %s (%s) " ,
init_utsname ( ) - > release ,
init_utsname ( ) - > machine ) ;
2016-08-30 06:00:35 +03:00
send_monitor_note ( sk , " Bluetooth subsystem version %u.%u " ,
BT_SUBSYS_VERSION , BT_SUBSYS_REVISION ) ;
2012-02-20 23:34:38 +04:00
send_monitor_replay ( sk ) ;
2016-08-27 21:23:40 +03:00
send_monitor_control_replay ( sk ) ;
2012-02-20 23:34:38 +04:00
atomic_inc ( & monitor_promisc ) ;
break ;
2015-11-08 09:47:13 +03:00
case HCI_CHANNEL_LOGGING :
if ( haddr . hci_dev ! = HCI_DEV_NONE ) {
err = - EINVAL ;
goto done ;
}
if ( ! capable ( CAP_NET_ADMIN ) ) {
err = - EPERM ;
goto done ;
}
2016-08-30 06:00:37 +03:00
hci_pi ( sk ) - > channel = haddr . hci_channel ;
2015-11-08 09:47:13 +03:00
break ;
2012-02-20 17:50:35 +04:00
default :
2015-03-06 22:08:50 +03:00
if ( ! hci_mgmt_chan_find ( haddr . hci_channel ) ) {
err = - EINVAL ;
goto done ;
}
if ( haddr . hci_dev ! = HCI_DEV_NONE ) {
err = - EINVAL ;
goto done ;
}
2015-03-15 05:28:04 +03:00
/* Users with CAP_NET_ADMIN capabilities are allowed
* access to all management commands and events . For
* untrusted users the interface is restricted and
* also only untrusted events are sent .
2015-03-15 05:27:58 +03:00
*/
2015-03-15 05:28:04 +03:00
if ( capable ( CAP_NET_ADMIN ) )
hci_sock_set_flag ( sk , HCI_SOCK_TRUSTED ) ;
2015-03-15 05:27:58 +03:00
2016-08-30 06:00:37 +03:00
hci_pi ( sk ) - > channel = haddr . hci_channel ;
2015-03-15 05:27:55 +03:00
/* At the moment the index and unconfigured index events
* are enabled unconditionally . Setting them on each
* socket when binding keeps this functionality . They
* however might be cleared later and then sending of these
* events will be disabled , but that is then intentional .
2015-03-15 05:28:05 +03:00
*
* This also enables generic events that are safe to be
* received by untrusted users . Example for such events
* are changes to settings , class of device , name etc .
2015-03-15 05:27:55 +03:00
*/
2016-08-30 06:00:37 +03:00
if ( hci_pi ( sk ) - > channel = = HCI_CHANNEL_CONTROL ) {
2016-08-30 06:00:40 +03:00
if ( ! hci_sock_gen_cookie ( sk ) ) {
/* In the case when a cookie has already been
2021-06-02 09:54:58 +03:00
* assigned , this socket will transition from
2016-08-30 06:00:40 +03:00
* a raw socket into a control socket . To
2021-06-02 09:54:58 +03:00
* allow for a clean transition , send the
2016-08-30 06:00:40 +03:00
* close notification first .
*/
skb = create_monitor_ctrl_close ( sk ) ;
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
}
2016-08-27 21:23:38 +03:00
2016-08-27 21:23:40 +03:00
/* Send event to monitor */
skb = create_monitor_ctrl_open ( sk ) ;
if ( skb ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb ,
HCI_SOCK_TRUSTED , NULL ) ;
kfree_skb ( skb ) ;
}
2015-03-15 05:27:55 +03:00
hci_sock_set_flag ( sk , HCI_MGMT_INDEX_EVENTS ) ;
hci_sock_set_flag ( sk , HCI_MGMT_UNCONF_INDEX_EVENTS ) ;
2016-08-29 07:19:46 +03:00
hci_sock_set_flag ( sk , HCI_MGMT_OPTION_EVENTS ) ;
hci_sock_set_flag ( sk , HCI_MGMT_SETTING_EVENTS ) ;
hci_sock_set_flag ( sk , HCI_MGMT_DEV_CLASS_EVENTS ) ;
hci_sock_set_flag ( sk , HCI_MGMT_LOCAL_NAME_EVENTS ) ;
2015-03-15 05:27:55 +03:00
}
2015-03-06 22:08:50 +03:00
break ;
2005-04-17 02:20:36 +04:00
}
2021-09-16 23:10:46 +03:00
/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
if ( ! hci_pi ( sk ) - > mtu )
hci_pi ( sk ) - > mtu = HCI_MAX_FRAME_SIZE ;
2005-04-17 02:20:36 +04:00
sk - > sk_state = BT_BOUND ;
done :
release_sock ( sk ) ;
return err ;
}
2012-05-23 11:04:21 +04:00
static int hci_sock_getname ( struct socket * sock , struct sockaddr * addr ,
2018-02-12 22:00:20 +03:00
int peer )
2005-04-17 02:20:36 +04:00
{
2015-11-08 09:47:11 +03:00
struct sockaddr_hci * haddr = ( struct sockaddr_hci * ) addr ;
2005-04-17 02:20:36 +04:00
struct sock * sk = sock - > sk ;
2013-08-26 11:20:37 +04:00
struct hci_dev * hdev ;
int err = 0 ;
2005-04-17 02:20:36 +04:00
BT_DBG ( " sock %p sk %p " , sock , sk ) ;
2013-08-26 11:06:30 +04:00
if ( peer )
return - EOPNOTSUPP ;
2005-04-17 02:20:36 +04:00
lock_sock ( sk ) ;
2021-08-04 13:26:56 +03:00
hdev = hci_hdev_from_sock ( sk ) ;
if ( IS_ERR ( hdev ) ) {
err = PTR_ERR ( hdev ) ;
2013-08-26 11:20:37 +04:00
goto done ;
}
2005-04-17 02:20:36 +04:00
haddr - > hci_family = AF_BLUETOOTH ;
2006-02-13 13:40:03 +03:00
haddr - > hci_dev = hdev - > id ;
2013-08-26 11:20:37 +04:00
haddr - > hci_channel = hci_pi ( sk ) - > channel ;
2018-02-12 22:00:20 +03:00
err = sizeof ( * haddr ) ;
2005-04-17 02:20:36 +04:00
2013-08-26 11:20:37 +04:00
done :
2005-04-17 02:20:36 +04:00
release_sock ( sk ) ;
2013-08-26 11:20:37 +04:00
return err ;
2005-04-17 02:20:36 +04:00
}
2012-05-23 11:04:18 +04:00
static void hci_sock_cmsg ( struct sock * sk , struct msghdr * msg ,
struct sk_buff * skb )
2005-04-17 02:20:36 +04:00
{
2020-06-11 17:26:10 +03:00
__u8 mask = hci_pi ( sk ) - > cmsg_mask ;
2005-04-17 02:20:36 +04:00
2005-08-10 07:30:28 +04:00
if ( mask & HCI_CMSG_DIR ) {
int incoming = bt_cb ( skb ) - > incoming ;
2012-05-23 11:04:21 +04:00
put_cmsg ( msg , SOL_HCI , HCI_CMSG_DIR , sizeof ( incoming ) ,
& incoming ) ;
2005-08-10 07:30:28 +04:00
}
2005-04-17 02:20:36 +04:00
2005-08-15 04:24:31 +04:00
if ( mask & HCI_CMSG_TSTAMP ) {
2010-02-16 00:23:48 +03:00
# ifdef CONFIG_COMPAT
2019-02-02 18:34:48 +03:00
struct old_timeval32 ctv ;
2010-02-16 00:23:48 +03:00
# endif
2019-02-02 18:34:48 +03:00
struct __kernel_old_timeval tv ;
2007-09-09 10:39:34 +04:00
void * data ;
int len ;
2005-08-15 04:24:31 +04:00
skb_get_timestamp ( skb , & tv ) ;
2007-09-09 10:39:34 +04:00
2007-09-12 16:10:58 +04:00
data = & tv ;
len = sizeof ( tv ) ;
# ifdef CONFIG_COMPAT
2012-02-11 02:12:15 +04:00
if ( ! COMPAT_USE_64BIT_TIME & &
( msg - > msg_flags & MSG_CMSG_COMPAT ) ) {
2007-09-09 10:39:34 +04:00
ctv . tv_sec = tv . tv_sec ;
ctv . tv_usec = tv . tv_usec ;
data = & ctv ;
len = sizeof ( ctv ) ;
}
2007-09-12 16:10:58 +04:00
# endif
2007-09-09 10:39:34 +04:00
put_cmsg ( msg , SOL_HCI , HCI_CMSG_TSTAMP , len , data ) ;
2005-08-15 04:24:31 +04:00
}
2005-04-17 02:20:36 +04:00
}
2007-02-09 17:24:33 +03:00
2015-11-08 09:47:11 +03:00
/* Receive one queued frame from an HCI socket.
 *
 * Per-channel ancillary data is added (cmsg for RAW, timestamps for
 * USER/MONITOR/mgmt channels) and SCM credentials are passed along.
 * Returns the number of bytes copied (full frame length when
 * MSG_TRUNC is set) or a negative errno.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel is write-only */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}
2021-09-30 23:33:52 +03:00
static int hci_mgmt_cmd ( struct hci_mgmt_chan * chan , struct sock * sk ,
struct sk_buff * skb )
2015-03-17 14:48:50 +03:00
{
u8 * cp ;
struct mgmt_hdr * hdr ;
u16 opcode , index , len ;
struct hci_dev * hdev = NULL ;
const struct hci_mgmt_handler * handler ;
bool var_len , no_hdev ;
int err ;
2021-09-30 23:33:52 +03:00
BT_DBG ( " got %d bytes " , skb - > len ) ;
2015-03-17 14:48:50 +03:00
2021-09-30 23:33:52 +03:00
if ( skb - > len < sizeof ( * hdr ) )
2015-03-17 14:48:50 +03:00
return - EINVAL ;
2021-09-30 23:33:52 +03:00
hdr = ( void * ) skb - > data ;
2015-03-17 14:48:50 +03:00
opcode = __le16_to_cpu ( hdr - > opcode ) ;
index = __le16_to_cpu ( hdr - > index ) ;
len = __le16_to_cpu ( hdr - > len ) ;
2021-09-30 23:33:52 +03:00
if ( len ! = skb - > len - sizeof ( * hdr ) ) {
2015-03-17 14:48:50 +03:00
err = - EINVAL ;
goto done ;
}
2016-08-27 21:23:41 +03:00
if ( chan - > channel = = HCI_CHANNEL_CONTROL ) {
2021-09-30 23:33:52 +03:00
struct sk_buff * cmd ;
2016-08-27 21:23:41 +03:00
/* Send event to monitor */
2021-09-30 23:33:52 +03:00
cmd = create_monitor_ctrl_command ( sk , index , opcode , len ,
skb - > data + sizeof ( * hdr ) ) ;
if ( cmd ) {
hci_send_to_channel ( HCI_CHANNEL_MONITOR , cmd ,
2016-08-27 21:23:41 +03:00
HCI_SOCK_TRUSTED , NULL ) ;
2021-09-30 23:33:52 +03:00
kfree_skb ( cmd ) ;
2016-08-27 21:23:41 +03:00
}
}
2015-03-17 14:48:50 +03:00
if ( opcode > = chan - > handler_count | |
chan - > handlers [ opcode ] . func = = NULL ) {
BT_DBG ( " Unknown op %u " , opcode ) ;
err = mgmt_cmd_status ( sk , index , opcode ,
MGMT_STATUS_UNKNOWN_COMMAND ) ;
goto done ;
}
handler = & chan - > handlers [ opcode ] ;
if ( ! hci_sock_test_flag ( sk , HCI_SOCK_TRUSTED ) & &
! ( handler - > flags & HCI_MGMT_UNTRUSTED ) ) {
err = mgmt_cmd_status ( sk , index , opcode ,
MGMT_STATUS_PERMISSION_DENIED ) ;
goto done ;
}
if ( index ! = MGMT_INDEX_NONE ) {
hdev = hci_dev_get ( index ) ;
if ( ! hdev ) {
err = mgmt_cmd_status ( sk , index , opcode ,
MGMT_STATUS_INVALID_INDEX ) ;
goto done ;
}
if ( hci_dev_test_flag ( hdev , HCI_SETUP ) | |
hci_dev_test_flag ( hdev , HCI_CONFIG ) | |
hci_dev_test_flag ( hdev , HCI_USER_CHANNEL ) ) {
err = mgmt_cmd_status ( sk , index , opcode ,
MGMT_STATUS_INVALID_INDEX ) ;
goto done ;
}
if ( hci_dev_test_flag ( hdev , HCI_UNCONFIGURED ) & &
! ( handler - > flags & HCI_MGMT_UNCONFIGURED ) ) {
err = mgmt_cmd_status ( sk , index , opcode ,
MGMT_STATUS_INVALID_INDEX ) ;
goto done ;
}
}
2020-05-06 10:57:49 +03:00
if ( ! ( handler - > flags & HCI_MGMT_HDEV_OPTIONAL ) ) {
no_hdev = ( handler - > flags & HCI_MGMT_NO_HDEV ) ;
if ( no_hdev ! = ! hdev ) {
err = mgmt_cmd_status ( sk , index , opcode ,
MGMT_STATUS_INVALID_INDEX ) ;
goto done ;
}
2015-03-17 14:48:50 +03:00
}
var_len = ( handler - > flags & HCI_MGMT_VAR_LEN ) ;
if ( ( var_len & & len < handler - > data_len ) | |
( ! var_len & & len ! = handler - > data_len ) ) {
err = mgmt_cmd_status ( sk , index , opcode ,
MGMT_STATUS_INVALID_PARAMS ) ;
goto done ;
}
if ( hdev & & chan - > hdev_init )
chan - > hdev_init ( sk , hdev ) ;
2021-09-30 23:33:52 +03:00
cp = skb - > data + sizeof ( * hdr ) ;
2015-03-17 14:48:50 +03:00
err = handler - > func ( sk , hdev , cp , len ) ;
if ( err < 0 )
goto done ;
2021-09-30 23:33:52 +03:00
err = skb - > len ;
2015-03-17 14:48:50 +03:00
done :
if ( hdev )
hci_dev_put ( hdev ) ;
return err ;
}
2021-09-30 23:33:52 +03:00
static int hci_logging_frame ( struct sock * sk , struct sk_buff * skb ,
unsigned int flags )
2015-11-08 09:47:13 +03:00
{
struct hci_mon_hdr * hdr ;
struct hci_dev * hdev ;
u16 index ;
int err ;
/* The logging frame consists at minimum of the standard header,
* the priority byte , the ident length byte and at least one string
* terminator NUL byte . Anything shorter are invalid packets .
*/
2021-09-30 23:33:52 +03:00
if ( skb - > len < sizeof ( * hdr ) + 3 )
2015-11-08 09:47:13 +03:00
return - EINVAL ;
hdr = ( void * ) skb - > data ;
2021-09-30 23:33:52 +03:00
if ( __le16_to_cpu ( hdr - > len ) ! = skb - > len - sizeof ( * hdr ) )
return - EINVAL ;
2015-11-08 09:47:13 +03:00
if ( __le16_to_cpu ( hdr - > opcode ) = = 0x0000 ) {
__u8 priority = skb - > data [ sizeof ( * hdr ) ] ;
__u8 ident_len = skb - > data [ sizeof ( * hdr ) + 1 ] ;
/* Only the priorities 0-7 are valid and with that any other
* value results in an invalid packet .
*
* The priority byte is followed by an ident length byte and
* the NUL terminated ident string . Check that the ident
* length is not overflowing the packet and also that the
* ident string itself is NUL terminated . In case the ident
* length is zero , the length value actually doubles as NUL
* terminator identifier .
*
* The message follows the ident string ( if present ) and
* must be NUL terminated . Otherwise it is not a valid packet .
*/
2021-09-30 23:33:52 +03:00
if ( priority > 7 | | skb - > data [ skb - > len - 1 ] ! = 0x00 | |
ident_len > skb - > len - sizeof ( * hdr ) - 3 | |
skb - > data [ sizeof ( * hdr ) + ident_len + 1 ] ! = 0x00 )
return - EINVAL ;
2015-11-08 09:47:13 +03:00
} else {
2021-09-30 23:33:52 +03:00
return - EINVAL ;
2015-11-08 09:47:13 +03:00
}
index = __le16_to_cpu ( hdr - > index ) ;
if ( index ! = MGMT_INDEX_NONE ) {
hdev = hci_dev_get ( index ) ;
2021-09-30 23:33:52 +03:00
if ( ! hdev )
return - ENODEV ;
2015-11-08 09:47:13 +03:00
} else {
hdev = NULL ;
}
hdr - > opcode = cpu_to_le16 ( HCI_MON_USER_LOGGING ) ;
hci_send_to_channel ( HCI_CHANNEL_MONITOR , skb , HCI_SOCK_TRUSTED , NULL ) ;
2021-09-30 23:33:52 +03:00
err = skb - > len ;
2015-11-08 09:47:13 +03:00
if ( hdev )
hci_dev_put ( hdev ) ;
return err ;
}
2015-03-02 10:37:48 +03:00
/* Send one frame through an HCI socket.
 *
 * RAW/USER frames are queued toward the controller; LOGGING frames go
 * to the monitor channel; management-channel frames are dispatched to
 * the registered channel handler. Returns the number of bytes consumed
 * or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum frame: packet-type byte plus a 3-byte header; upper
	 * bound comes from the per-socket MTU.
	 */
	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	/* First byte is the H:4 packet type indicator */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter require
		 * CAP_NET_RAW on a raw socket.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* Vendor-specific commands bypass the command queue */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
2021-09-16 23:10:46 +03:00
/* Legacy SOL_HCI socket options (data direction, timestamps, event
 * filter). Only valid on raw-channel sockets; returns 0 or a negative
 * errno.
 */
static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			/* Seed with the current filter so a short copy
			 * from userspace leaves the rest unchanged.
			 */
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *)f->event_mask + 0);
			uf.event_mask[1] = *((u32 *)f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged users cannot widen the filter beyond the
		 * security filter.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *)hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *)hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *)f->event_mask + 0) = uf.event_mask[0];
			*((u32 *)f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
2021-09-16 23:10:46 +03:00
/* Top-level setsockopt: SOL_HCI options are delegated to the legacy
 * handler; SOL_BLUETOOTH supports BT_SNDMTU/BT_RCVMTU on channels that
 * carry non-HCI traffic.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       len);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
/* Legacy SOL_HCI getsockopt: report data-direction / timestamp cmsg
 * state and the current event filter. Raw-channel sockets only.
 */
static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			/* Zero first so no kernel stack leaks to userspace */
			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *)f->event_mask + 0);
			uf.event_mask[1] = *((u32 *)f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
2021-09-16 23:10:46 +03:00
/* Top-level getsockopt: SOL_HCI goes to the legacy handler;
 * SOL_BLUETOOTH exposes the per-socket MTU for BT_SNDMTU/BT_RCVMTU.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2021-10-07 22:04:24 +03:00
static void hci_sock_destruct ( struct sock * sk )
{
2022-09-01 22:19:13 +03:00
mgmt_cleanup ( sk ) ;
2021-10-07 22:04:24 +03:00
skb_queue_purge ( & sk - > sk_receive_queue ) ;
skb_queue_purge ( & sk - > sk_write_queue ) ;
}
2005-12-22 23:49:22 +03:00
static const struct proto_ops hci_sock_ops = {
2005-04-17 02:20:36 +04:00
. family = PF_BLUETOOTH ,
. owner = THIS_MODULE ,
. release = hci_sock_release ,
. bind = hci_sock_bind ,
. getname = hci_sock_getname ,
. sendmsg = hci_sock_sendmsg ,
. recvmsg = hci_sock_recvmsg ,
. ioctl = hci_sock_ioctl ,
2019-03-14 16:10:33 +03:00
# ifdef CONFIG_COMPAT
. compat_ioctl = hci_sock_compat_ioctl ,
# endif
2018-06-28 19:43:44 +03:00
. poll = datagram_poll ,
2005-04-17 02:20:36 +04:00
. listen = sock_no_listen ,
. shutdown = sock_no_shutdown ,
. setsockopt = hci_sock_setsockopt ,
. getsockopt = hci_sock_getsockopt ,
. connect = sock_no_connect ,
. socketpair = sock_no_socketpair ,
. accept = sock_no_accept ,
. mmap = sock_no_mmap
} ;
/* Protocol descriptor; obj_size sizes sock allocations for hci_pinfo */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
2009-11-06 09:18:14 +03:00
static int hci_sock_create ( struct net * net , struct socket * sock , int protocol ,
int kern )
2005-04-17 02:20:36 +04:00
{
struct sock * sk ;
BT_DBG ( " sock %p " , sock ) ;
if ( sock - > type ! = SOCK_RAW )
return - ESOCKTNOSUPPORT ;
sock - > ops = & hci_sock_ops ;
2023-05-26 02:46:41 +03:00
sk = bt_sock_alloc ( net , sock , & hci_sk_proto , protocol , GFP_ATOMIC ,
kern ) ;
2005-04-17 02:20:36 +04:00
if ( ! sk )
return - ENOMEM ;
sock - > state = SS_UNCONNECTED ;
2021-10-07 22:04:24 +03:00
sk - > sk_destruct = hci_sock_destruct ;
2005-04-17 02:20:36 +04:00
bt_sock_link ( & hci_sk_list , sk ) ;
return 0 ;
}
2009-10-05 09:58:39 +04:00
static const struct net_proto_family hci_sock_family_ops = {
2005-04-17 02:20:36 +04:00
. family = PF_BLUETOOTH ,
. owner = THIS_MODULE ,
. create = hci_sock_create ,
} ;
/* Register the HCI socket protocol, family and procfs entry.
 * On any failure all registrations done so far are rolled back.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit in the generic sockaddr storage */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
2011-02-22 10:13:09 +03:00
void hci_sock_cleanup ( void )
2005-04-17 02:20:36 +04:00
{
2012-07-25 20:28:36 +04:00
bt_procfs_cleanup ( & init_net , " hci " ) ;
2013-02-24 22:36:51 +04:00
bt_sock_unregister ( BTPROTO_HCI ) ;
2005-04-17 02:20:36 +04:00
proto_unregister ( & hci_sk_proto ) ;
}