/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */
2012-05-23 11:04:22 +04:00
# include <linux/export.h>
# include <linux/rfkill.h>
2013-10-16 14:28:55 +04:00
# include <linux/debugfs.h>
2014-02-18 12:40:07 +04:00
# include <linux/crypto.h>
2022-07-14 13:48:14 +03:00
# include <linux/kcov.h>
2019-02-19 23:05:57 +03:00
# include <linux/property.h>
2020-03-11 18:54:00 +03:00
# include <linux/suspend.h>
# include <linux/wait.h>
2013-10-18 04:24:15 +04:00
# include <asm/unaligned.h>
2005-04-17 02:20:36 +04:00
# include <net/bluetooth/bluetooth.h>
# include <net/bluetooth/hci_core.h>
2014-05-20 10:45:47 +04:00
# include <net/bluetooth/l2cap.h>
2014-07-01 16:11:20 +04:00
# include <net/bluetooth/mgmt.h>
2005-04-17 02:20:36 +04:00
2014-12-19 14:40:20 +03:00
# include "hci_request.h"
2014-12-20 18:05:13 +03:00
# include "hci_debugfs.h"
2014-02-18 12:19:33 +04:00
# include "smp.h"
2016-01-08 21:28:58 +03:00
# include "leds.h"
2020-04-03 22:44:01 +03:00
# include "msft.h"
2021-04-06 22:55:52 +03:00
# include "aosp.h"
2021-09-07 13:12:37 +03:00
# include "hci_codec.h"
2014-02-18 12:19:33 +04:00
2010-08-09 07:06:53 +04:00
/* Work handlers for the RX, command and TX paths (defined below). */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID numbering: allocates the hciN index for new controllers */
static DEFINE_IDA(hci_index_ida);
2015-11-11 09:11:25 +03:00
static int hci_scan_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
__u8 scan = opt ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , scan ) ;
2005-04-17 02:20:36 +04:00
/* Inquiry and Page scans */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_SCAN_ENABLE , 1 , & scan ) ;
2015-11-11 09:11:25 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2015-11-11 09:11:25 +03:00
static int hci_auth_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
__u8 auth = opt ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , auth ) ;
2005-04-17 02:20:36 +04:00
/* Authentication */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_AUTH_ENABLE , 1 , & auth ) ;
2015-11-11 09:11:25 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2015-11-11 09:11:25 +03:00
static int hci_encrypt_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
__u8 encrypt = opt ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , encrypt ) ;
2005-04-17 02:20:36 +04:00
2008-07-14 22:13:47 +04:00
/* Encryption */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_ENCRYPT_MODE , 1 , & encrypt ) ;
2015-11-11 09:11:25 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2015-11-11 09:11:25 +03:00
static int hci_linkpol_req ( struct hci_request * req , unsigned long opt )
2008-07-14 22:13:47 +04:00
{
__le16 policy = cpu_to_le16 ( opt ) ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , policy ) ;
2008-07-14 22:13:47 +04:00
/* Default link policy */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_DEF_LINK_POLICY , 2 , & policy ) ;
2015-11-11 09:11:25 +03:00
return 0 ;
2008-07-14 22:13:47 +04:00
}
2007-02-09 17:24:33 +03:00
/* Get HCI device by index.
2005-04-17 02:20:36 +04:00
* Device is held on return . */
struct hci_dev * hci_dev_get ( int index )
{
2011-11-01 12:58:56 +04:00
struct hci_dev * hdev = NULL , * d ;
2005-04-17 02:20:36 +04:00
BT_DBG ( " %d " , index ) ;
if ( index < 0 )
return NULL ;
read_lock ( & hci_dev_list_lock ) ;
2011-11-01 12:58:56 +04:00
list_for_each_entry ( d , & hci_dev_list , list ) {
2005-04-17 02:20:36 +04:00
if ( d - > id = = index ) {
hdev = hci_dev_hold ( d ) ;
break ;
}
}
read_unlock ( & hci_dev_list_lock ) ;
return hdev ;
}
/* ---- Inquiry support ---- */
2012-01-04 16:23:45 +04:00
2012-01-04 17:44:20 +04:00
bool hci_discovery_active ( struct hci_dev * hdev )
{
struct discovery_state * discov = & hdev - > discovery ;
2012-02-04 00:47:58 +04:00
switch ( discov - > state ) {
2012-02-18 03:39:37 +04:00
case DISCOVERY_FINDING :
2012-02-04 00:47:58 +04:00
case DISCOVERY_RESOLVING :
2012-01-04 17:44:20 +04:00
return true ;
2012-02-04 00:47:58 +04:00
default :
return false ;
}
2012-01-04 17:44:20 +04:00
}
2012-01-04 16:23:45 +04:00
void hci_discovery_set_state ( struct hci_dev * hdev , int state )
{
2014-07-07 14:24:58 +04:00
int old_state = hdev - > discovery . state ;
2012-01-04 16:23:45 +04:00
BT_DBG ( " %s state %u -> %u " , hdev - > name , hdev - > discovery . state , state ) ;
2014-07-07 14:24:58 +04:00
if ( old_state = = state )
2012-01-04 16:23:45 +04:00
return ;
2014-07-07 14:24:58 +04:00
hdev - > discovery . state = state ;
2012-01-04 16:23:45 +04:00
switch ( state ) {
case DISCOVERY_STOPPED :
2021-10-28 02:58:43 +03:00
hci_update_passive_scan ( hdev ) ;
2014-02-27 03:21:50 +04:00
2014-07-07 14:24:58 +04:00
if ( old_state ! = DISCOVERY_STARTING )
2012-02-13 22:41:02 +04:00
mgmt_discovering ( hdev , 0 ) ;
2012-01-04 16:23:45 +04:00
break ;
case DISCOVERY_STARTING :
break ;
2012-02-18 03:39:37 +04:00
case DISCOVERY_FINDING :
2012-01-04 16:23:45 +04:00
mgmt_discovering ( hdev , 1 ) ;
break ;
2012-01-04 17:44:20 +04:00
case DISCOVERY_RESOLVING :
break ;
2012-01-04 16:23:45 +04:00
case DISCOVERY_STOPPING :
break ;
}
}
2013-04-30 22:29:27 +04:00
void hci_inquiry_cache_flush ( struct hci_dev * hdev )
2005-04-17 02:20:36 +04:00
{
2012-01-04 16:16:21 +04:00
struct discovery_state * cache = & hdev - > discovery ;
2012-01-03 18:03:00 +04:00
struct inquiry_entry * p , * n ;
2005-04-17 02:20:36 +04:00
2012-01-04 15:31:59 +04:00
list_for_each_entry_safe ( p , n , & cache - > all , all ) {
list_del ( & p - > all ) ;
2012-01-03 18:03:00 +04:00
kfree ( p ) ;
2005-04-17 02:20:36 +04:00
}
2012-01-04 15:31:59 +04:00
INIT_LIST_HEAD ( & cache - > unknown ) ;
INIT_LIST_HEAD ( & cache - > resolve ) ;
2005-04-17 02:20:36 +04:00
}
2012-05-17 07:36:26 +04:00
/* Look up @bdaddr in the full inquiry cache ("all" list).
 * Returns the matching entry or NULL.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ent;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(ent, &cache->all, all)
		if (!bacmp(&ent->data.bdaddr, bdaddr))
			return ent;

	return NULL;
}
struct inquiry_entry * hci_inquiry_cache_lookup_unknown ( struct hci_dev * hdev ,
2012-03-08 08:25:00 +04:00
bdaddr_t * bdaddr )
2012-01-04 15:31:59 +04:00
{
2012-01-04 16:16:21 +04:00
struct discovery_state * cache = & hdev - > discovery ;
2012-01-04 15:31:59 +04:00
struct inquiry_entry * e ;
2012-09-25 13:49:43 +04:00
BT_DBG ( " cache %p, %pMR " , cache , bdaddr ) ;
2012-01-04 15:31:59 +04:00
list_for_each_entry ( e , & cache - > unknown , list ) {
2005-04-17 02:20:36 +04:00
if ( ! bacmp ( & e - > data . bdaddr , bdaddr ) )
2012-01-03 18:03:00 +04:00
return e ;
}
return NULL ;
2005-04-17 02:20:36 +04:00
}
2012-01-04 17:44:20 +04:00
struct inquiry_entry * hci_inquiry_cache_lookup_resolve ( struct hci_dev * hdev ,
2012-03-08 08:25:00 +04:00
bdaddr_t * bdaddr ,
int state )
2012-01-04 17:44:20 +04:00
{
struct discovery_state * cache = & hdev - > discovery ;
struct inquiry_entry * e ;
2012-09-25 13:49:43 +04:00
BT_DBG ( " cache %p bdaddr %pMR state %d " , cache , bdaddr , state ) ;
2012-01-04 17:44:20 +04:00
list_for_each_entry ( e , & cache - > resolve , list ) {
if ( ! bacmp ( bdaddr , BDADDR_ANY ) & & e - > name_state = = state )
return e ;
if ( ! bacmp ( & e - > data . bdaddr , bdaddr ) )
return e ;
}
return NULL ;
}
2012-01-09 02:53:02 +04:00
void hci_inquiry_cache_update_resolve ( struct hci_dev * hdev ,
2012-03-08 08:25:00 +04:00
struct inquiry_entry * ie )
2012-01-09 02:53:02 +04:00
{
struct discovery_state * cache = & hdev - > discovery ;
struct list_head * pos = & cache - > resolve ;
struct inquiry_entry * p ;
list_del ( & ie - > list ) ;
list_for_each_entry ( p , & cache - > resolve , list ) {
if ( p - > name_state ! = NAME_PENDING & &
2012-05-17 07:36:26 +04:00
abs ( p - > data . rssi ) > = abs ( ie - > data . rssi ) )
2012-01-09 02:53:02 +04:00
break ;
pos = & p - > list ;
}
list_add ( & ie - > list , pos ) ;
}
2014-07-01 16:11:20 +04:00
/* Insert or refresh an inquiry result in the cache.
 *
 * Stale OOB data for the address is dropped first. For a known entry
 * whose name is still needed, an RSSI change triggers a resolve-list
 * re-sort. A fresh entry is allocated otherwise; on allocation failure
 * only the CONFIRM_NAME flag is reported. Returns MGMT_DEV_FOUND_*
 * flags describing the result (legacy pairing, confirm-name).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ent;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	/* No SSP support implies legacy (PIN based) pairing */
	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ent = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ent) {
		if (!ent->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ent->name_state == NAME_NEEDED &&
		    data->rssi != ent->data.rssi) {
			ent->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ent);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ent->all, &cache->all);

	if (name_known) {
		ent->name_state = NAME_KNOWN;
	} else {
		ent->name_state = NAME_NOT_KNOWN;
		list_add(&ent->list, &cache->unknown);
	}

update:
	/* A newly learned name moves the entry off its sublist, unless a
	 * name resolution is already pending for it.
	 */
	if (name_known && ent->name_state != NAME_KNOWN &&
	    ent->name_state != NAME_PENDING) {
		ent->name_state = NAME_KNOWN;
		list_del(&ent->list);
	}

	memcpy(&ent->data, data, sizeof(*data));
	ent->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ent->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of records written. Must not
 * sleep; the caller holds the hdev lock.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *dest = (struct inquiry_info *)buf;
	struct inquiry_entry *ent;
	int count = 0;

	list_for_each_entry(ent, &cache->all, all) {
		struct inquiry_data *data = &ent->data;

		if (count >= num)
			break;

		bacpy(&dest->bdaddr, &data->bdaddr);
		dest->pscan_rep_mode    = data->pscan_rep_mode;
		dest->pscan_period_mode = data->pscan_period_mode;
		dest->pscan_mode        = data->pscan_mode;
		memcpy(dest->dev_class, data->dev_class, 3);
		dest->clock_offset      = data->clock_offset;

		dest++;
		count++;
	}

	BT_DBG("cache %p, copied %d", cache, count);

	return count;
}
2015-11-11 09:11:25 +03:00
static int hci_inq_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
struct hci_inquiry_req * ir = ( struct hci_inquiry_req * ) opt ;
2013-03-05 22:37:49 +04:00
struct hci_dev * hdev = req - > hdev ;
2005-04-17 02:20:36 +04:00
struct hci_cp_inquiry cp ;
BT_DBG ( " %s " , hdev - > name ) ;
if ( test_bit ( HCI_INQUIRY , & hdev - > flags ) )
2015-11-11 09:11:25 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
/* Start Inquiry */
memcpy ( & cp . lap , & ir - > lap , 3 ) ;
cp . length = ir - > length ;
cp . num_rsp = ir - > num_rsp ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_INQUIRY , sizeof ( cp ) , & cp ) ;
2015-11-11 09:11:25 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
/* HCIINQUIRY ioctl handler.
 *
 * Validates the request, optionally kicks off a fresh inquiry when the
 * cache is stale/empty or a flush was requested, then copies the cached
 * results back to user space. Returns 0 on success or a negative errno
 * (-EFAULT, -ENODEV, -EBUSY, -EOPNOTSUPP, -EINVAL, -EINTR, -ENOMEM).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, run_inquiry = 0, rsp_cap;
	long timeout;
	__u8 *entries;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		run_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeout = ir.length * msecs_to_jiffies(2000);

	if (run_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long)&ir,
				   timeout, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	rsp_cap = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	entries = kmalloc_array(rsp_cap, sizeof(struct inquiry_info),
				GFP_KERNEL);
	if (!entries) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, rsp_cap, entries);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the results. */
	if (copy_to_user(ptr, &ir, sizeof(ir)) ||
	    copy_to_user(ptr + sizeof(ir), entries,
			 sizeof(struct inquiry_info) * ir.num_rsp))
		err = -EFAULT;

	kfree(entries);

done:
	hci_dev_put(hdev);
	return err;
}
2021-10-28 02:58:44 +03:00
static int hci_dev_do_open ( struct hci_dev * hdev )
{
int ret = 0 ;
BT_DBG ( " %s %p " , hdev - > name , hdev ) ;
hci_req_sync_lock ( hdev ) ;
ret = hci_dev_open_sync ( hdev ) ;
2015-11-10 10:44:55 +03:00
hci_req_sync_unlock ( hdev ) ;
2005-04-17 02:20:36 +04:00
return ret ;
}
2013-10-01 23:44:49 +04:00
/* ---- HCI ioctl helpers ---- */
/* ---- HCI ioctl helpers ---- */

/* HCIDEVUP ioctl: bring up the controller with the given index.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2021-10-28 02:58:44 +03:00
int hci_dev_do_close ( struct hci_dev * hdev )
{
int err ;
BT_DBG ( " %s %p " , hdev - > name , hdev ) ;
hci_req_sync_lock ( hdev ) ;
err = hci_dev_close_sync ( hdev ) ;
2015-11-10 10:44:55 +03:00
hci_req_sync_unlock ( hdev ) ;
2005-04-17 02:20:36 +04:00
2021-08-19 18:27:18 +03:00
return err ;
2005-04-17 02:20:36 +04:00
}
/* HCIDEVDOWN ioctl: shut down the controller with the given index.
 * Refused with -EBUSY while the device is owned by a user channel.
 * Pending power-on/off work is cancelled before closing so that it
 * cannot race with the shutdown.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2015-01-28 22:53:05 +03:00
static int hci_dev_do_reset ( struct hci_dev * hdev )
2005-04-17 02:20:36 +04:00
{
2015-01-28 22:53:05 +03:00
int ret ;
2005-04-17 02:20:36 +04:00
2015-01-28 22:53:05 +03:00
BT_DBG ( " %s %p " , hdev - > name , hdev ) ;
2005-04-17 02:20:36 +04:00
2015-11-10 10:44:55 +03:00
hci_req_sync_lock ( hdev ) ;
2005-04-17 02:20:36 +04:00
/* Drop queues */
skb_queue_purge ( & hdev - > rx_q ) ;
skb_queue_purge ( & hdev - > cmd_q ) ;
2022-06-03 11:19:14 +03:00
/* Cancel these to avoid queueing non-chained pending work */
hci_dev_set_flag ( hdev , HCI_CMD_DRAIN_WORKQUEUE ) ;
2022-09-02 14:23:48 +03:00
/* Wait for
*
* if ( ! hci_dev_test_flag ( hdev , HCI_CMD_DRAIN_WORKQUEUE ) )
* queue_delayed_work ( & hdev - > { cmd , ncmd } _timer )
*
* inside RCU section to see the flag or complete scheduling .
*/
synchronize_rcu ( ) ;
/* Explicitly cancel works in case scheduled after setting the flag. */
2022-06-03 11:19:14 +03:00
cancel_delayed_work ( & hdev - > cmd_timer ) ;
cancel_delayed_work ( & hdev - > ncmd_timer ) ;
2014-11-18 10:00:14 +03:00
/* Avoid potential lockdep warnings from the *_flush() calls by
* ensuring the workqueue is empty up front .
*/
drain_workqueue ( hdev - > workqueue ) ;
2011-06-17 20:03:21 +04:00
hci_dev_lock ( hdev ) ;
2013-04-30 22:29:27 +04:00
hci_inquiry_cache_flush ( hdev ) ;
2005-04-17 02:20:36 +04:00
hci_conn_hash_flush ( hdev ) ;
2011-06-17 20:03:21 +04:00
hci_dev_unlock ( hdev ) ;
2005-04-17 02:20:36 +04:00
if ( hdev - > flush )
hdev - > flush ( hdev ) ;
2022-06-03 11:19:14 +03:00
hci_dev_clear_flag ( hdev , HCI_CMD_DRAIN_WORKQUEUE ) ;
2007-02-09 17:24:33 +03:00
atomic_set ( & hdev - > cmd_cnt , 1 ) ;
2019-07-29 18:15:43 +03:00
hdev - > acl_cnt = 0 ;
hdev - > sco_cnt = 0 ;
hdev - > le_cnt = 0 ;
hdev - > iso_cnt = 0 ;
2005-04-17 02:20:36 +04:00
2021-10-28 02:58:59 +03:00
ret = hci_reset_sync ( hdev ) ;
2005-04-17 02:20:36 +04:00
2015-11-10 10:44:55 +03:00
hci_req_sync_unlock ( hdev ) ;
2005-04-17 02:20:36 +04:00
return ret ;
}
2015-01-28 22:53:05 +03:00
/* Handle the HCIDEVRESET ioctl: reset the given controller if it is
 * up, not exclusively owned by a user channel and configured.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev = hci_dev_get(dev);
	int err;

	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags))
		err = -ENETDOWN;
	else if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		err = -EBUSY;
	else if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = -EOPNOTSUPP;
	else
		err = hci_dev_do_reset(hdev);

	hci_dev_put(hdev);
	return err;
}
2005-04-17 02:20:36 +04:00
/* Handle the HCIDEVRESTAT ioctl: zero a controller's statistics. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev = hci_dev_get(dev);
	int ret = 0;

	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		ret = -EBUSY;
	else if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		ret = -EOPNOTSUPP;
	else
		memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
2021-10-28 02:58:43 +03:00
/* Sync the CONNECTABLE/DISCOVERABLE flags with a scan-enable value set
 * outside of mgmt (raw HCISETSCAN), and notify mgmt if anything changed.
 */
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_update, discov_update;

	BT_DBG("%s scan 0x%02x", hdev->name, scan)	;

	if (scan & SCAN_PAGE)
		conn_update = !hci_dev_test_and_set_flag(hdev,
							 HCI_CONNECTABLE);
	else
		conn_update = hci_dev_test_and_clear_flag(hdev,
							  HCI_CONNECTABLE);

	if (scan & SCAN_INQUIRY) {
		discov_update = !hci_dev_test_and_set_flag(hdev,
							   HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_update = hci_dev_test_and_clear_flag(hdev,
							    HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (!conn_update && !discov_update)
		return;

	/* In case this was disabled through mgmt */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		hci_update_adv_data(hdev, hdev->cur_adv_instance);

	mgmt_new_settings(hdev);
}
2005-04-17 02:20:36 +04:00
/* Handle the legacy HCISET* ioctls on a controller.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETSCAN, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Only valid for primary controllers with BR/EDR enabled that are not
 * claimed by a user channel.  Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user channel owns the device exclusively. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		/* Skip the notification when nothing changes. */
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		/* NOTE(review): legacy ABI packs two __u16 halves into
		 * dev_opt (pkts first in memory, then MTU); this read is
		 * byte-order dependent — confirm against hci.h users.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
/* Handle HCIGETDEVLIST: copy (id, flags) pairs for up to dev_num
 * registered controllers to userspace.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to something sane. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
/* Handle HCIGETDEVINFO: fill a struct hci_dev_info for one controller
 * and copy it to userspace.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	/* NOTE(review): plain strcpy relies on hdev->name being
	 * NUL-terminated and no longer than di.name — confirm, or
	 * consider strscpy().
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controllers report LE buffer info in the ACL
		 * fields and no SCO capability.
		 */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */
2009-06-08 16:41:38 +04:00
static int hci_rfkill_set_block ( void * data , bool blocked )
{
struct hci_dev * hdev = data ;
BT_DBG ( " %p name %s blocked %d " , hdev , hdev - > name , blocked ) ;
2015-03-13 12:11:00 +03:00
if ( hci_dev_test_flag ( hdev , HCI_USER_CHANNEL ) )
2013-08-27 08:40:51 +04:00
return - EBUSY ;
2013-09-13 09:58:17 +04:00
if ( blocked ) {
2015-03-13 12:11:01 +03:00
hci_dev_set_flag ( hdev , HCI_RFKILLED ) ;
2015-03-13 12:11:00 +03:00
if ( ! hci_dev_test_flag ( hdev , HCI_SETUP ) & &
! hci_dev_test_flag ( hdev , HCI_CONFIG ) )
2013-09-13 09:58:18 +04:00
hci_dev_do_close ( hdev ) ;
2013-09-13 09:58:17 +04:00
} else {
2015-03-13 12:11:02 +03:00
hci_dev_clear_flag ( hdev , HCI_RFKILLED ) ;
2013-09-27 18:56:14 +04:00
}
2009-06-08 16:41:38 +04:00
return 0 ;
}
/* rfkill callbacks registered for every Bluetooth controller. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2010-12-15 14:53:18 +03:00
/* Deferred power-on worker (hdev->power_on).
 *
 * Brings the controller up, re-checks error conditions that were
 * ignored during setup, and emits the appropriate mgmt index events
 * on the SETUP->ready and CONFIG->configured transitions.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* Already up, managed and auto-off armed: just refresh the
	 * powered state instead of doing a full open.
	 */
	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		/* Auto-off stays armed: power down again after the
		 * timeout unless userspace takes over.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
static void hci_power_off ( struct work_struct * work )
{
2011-11-08 00:16:04 +04:00
struct hci_dev * hdev = container_of ( work , struct hci_dev ,
2012-05-17 07:36:26 +04:00
power_off . work ) ;
2010-12-15 14:53:18 +03:00
BT_DBG ( " %s " , hdev - > name ) ;
2012-02-21 15:33:48 +04:00
hci_dev_do_close ( hdev ) ;
2010-12-15 14:53:18 +03:00
}
2015-01-28 22:09:55 +03:00
static void hci_error_reset ( struct work_struct * work )
{
struct hci_dev * hdev = container_of ( work , struct hci_dev , error_reset ) ;
BT_DBG ( " %s " , hdev - > name ) ;
if ( hdev - > hw_error )
hdev - > hw_error ( hdev , hdev - > hw_error_code ) ;
else
2017-10-30 12:42:59 +03:00
bt_dev_err ( hdev , " hardware error 0x%2.2x " , hdev - > hw_error_code ) ;
2015-01-28 22:09:55 +03:00
if ( hci_dev_do_close ( hdev ) )
return ;
hci_dev_do_open ( hdev ) ;
}
2014-02-18 19:14:32 +04:00
void hci_uuids_clear ( struct hci_dev * hdev )
2011-01-04 13:08:51 +03:00
{
2013-01-27 02:31:28 +04:00
struct bt_uuid * uuid , * tmp ;
2011-01-04 13:08:51 +03:00
2013-01-27 02:31:28 +04:00
list_for_each_entry_safe ( uuid , tmp , & hdev - > uuids , list ) {
list_del ( & uuid - > list ) ;
2011-01-04 13:08:51 +03:00
kfree ( uuid ) ;
}
}
2014-02-18 19:14:32 +04:00
void hci_link_keys_clear ( struct hci_dev * hdev )
2011-01-17 15:41:05 +03:00
{
2014-11-19 16:22:22 +03:00
struct link_key * key ;
2011-01-17 15:41:05 +03:00
2020-02-25 16:08:09 +03:00
list_for_each_entry ( key , & hdev - > link_keys , list ) {
2014-11-19 16:22:22 +03:00
list_del_rcu ( & key - > list ) ;
kfree_rcu ( key , rcu ) ;
2011-01-17 15:41:05 +03:00
}
}
2014-02-18 19:14:32 +04:00
void hci_smp_ltks_clear ( struct hci_dev * hdev )
2012-02-03 04:08:00 +04:00
{
2014-11-13 15:37:47 +03:00
struct smp_ltk * k ;
2012-02-03 04:08:00 +04:00
2020-02-25 16:08:09 +03:00
list_for_each_entry ( k , & hdev - > long_term_keys , list ) {
2014-11-13 15:37:47 +03:00
list_del_rcu ( & k - > list ) ;
kfree_rcu ( k , rcu ) ;
2012-02-03 04:08:00 +04:00
}
}
2014-02-18 12:19:33 +04:00
void hci_smp_irks_clear ( struct hci_dev * hdev )
{
2014-11-13 15:37:48 +03:00
struct smp_irk * k ;
2014-02-18 12:19:33 +04:00
2020-02-25 16:08:09 +03:00
list_for_each_entry ( k , & hdev - > identity_resolving_keys , list ) {
2014-11-13 15:37:48 +03:00
list_del_rcu ( & k - > list ) ;
kfree_rcu ( k , rcu ) ;
2014-02-18 12:19:33 +04:00
}
}
2020-01-07 03:43:17 +03:00
void hci_blocked_keys_clear ( struct hci_dev * hdev )
{
struct blocked_key * b ;
2020-02-25 16:08:09 +03:00
list_for_each_entry ( b , & hdev - > blocked_keys , list ) {
2020-01-07 03:43:17 +03:00
list_del_rcu ( & b - > list ) ;
kfree_rcu ( b , rcu ) ;
}
}
/* Check whether a key value of the given type is on the blocked list. */
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	struct blocked_key *entry;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(entry, &hdev->blocked_keys, list) {
		if (entry->type != type)
			continue;

		if (!memcmp(entry->val, val, sizeof(entry->val))) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
2011-01-17 15:41:05 +03:00
/* Look up the stored BR/EDR link key for @bdaddr.
 *
 * Returns the key, or NULL if none is stored or the key is on the
 * blocked-key list.  NOTE(review): the pointer is returned after
 * rcu_read_unlock(); callers appear to rely on keys only being freed
 * via kfree_rcu() — confirm the expected lifetime.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			/* Blocked keys must never be handed out. */
			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2012-04-13 16:13:22 +04:00
/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and the bonding requirements
 * negotiated on @conn.  @conn may be NULL (security mode 3).  Rules
 * are evaluated in order; the first match wins.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2014-07-16 12:42:28 +04:00
/* Map an LTK type to the HCI role it belongs to. */
static u8 ltk_role(u8 type)
{
	return (type == SMP_LTK) ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
}
2014-05-29 16:02:59 +04:00
/* Look up a Long Term Key by identity address, address type and role.
 *
 * Secure Connections LTKs match either role; legacy LTKs must match
 * the requested role.  Blocked keys are reported and not returned.
 * NOTE(review): as with hci_find_link_key(), the pointer is returned
 * after rcu_read_unlock() — confirm lifetime expectations.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
2014-02-18 12:19:33 +04:00
/* Resolve a Resolvable Private Address to its IRK.
 *
 * Fast path: compare against the cached RPA of each IRK.  Slow path:
 * cryptographically test the RPA against every stored IRK and cache
 * the match.  Blocked keys are reported and not returned.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above. */
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
/* Look up an IRK by its identity address and address type.
 *
 * Random identity addresses must be static (top two bits set).
 * Blocked keys are reported and not returned.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
2014-06-24 14:15:48 +04:00
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * If @persistent is non-NULL it is set to whether the key should be
 * stored permanently (see hci_persistent_key()).  Returns the stored
 * entry or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Update the existing entry in place. */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
2014-02-19 16:57:44 +04:00
/* Store (or update) an LE Long Term Key for @bdaddr/@addr_type.
 *
 * Returns the stored entry or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	/* Reuse an existing entry for the same address/type/role. */
	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
2014-02-19 16:57:44 +04:00
/* Store (or update) an Identity Resolving Key for @bdaddr/@addr_type,
 * together with its last known RPA.  Returns the entry or NULL on
 * allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);

	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;
		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
2011-01-17 15:41:05 +03:00
/* Remove the stored BR/EDR link key for @bdaddr, if any.
 * Returns 0 on success or -ENOENT when no key is stored.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key = hci_find_link_key(hdev, bdaddr);

	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}
2014-02-18 19:14:31 +04:00
/* Remove all LTKs matching @bdaddr/@bdaddr_type.
 *
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 * Uses the _safe iterator because nodes are unlinked while walking.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
2014-02-18 19:14:35 +04:00
/* Remove all IRKs matching @bdaddr/@addr_type.
 *
 * Uses the _safe iterator because nodes are unlinked while walking.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
2015-03-10 23:34:40 +03:00
/* Report whether @bdaddr (of mgmt address @type) has a stored pairing:
 * a link key for BR/EDR, or an LTK for LE (resolving the identity
 * address through the IRK store first).
 */
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_irk *irk;
	struct smp_ltk *k;
	u8 addr_type;
	bool paired = false;

	if (type == BDADDR_BREDR)
		return hci_find_link_key(hdev, bdaddr) != NULL;

	/* Convert to HCI addr type which struct smp_ltk uses */
	addr_type = (type == BDADDR_LE_PUBLIC) ? ADDR_LE_DEV_PUBLIC :
						 ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			paired = true;
			break;
		}
	}
	rcu_read_unlock();

	return paired;
}
2011-02-16 17:32:41 +03:00
/* HCI command timer function: fires when a sent command received no
 * completion within the timeout.  Logs the stuck opcode, lets the
 * driver attempt recovery through its cmd_timeout hook and kicks the
 * command queue again.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	/* Give the driver a chance to reset the device. */
	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	/* Allow the next command out despite the missing response. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2021-04-29 20:24:22 +03:00
/* HCI ncmd timer function */
static void hci_ncmd_timeout ( struct work_struct * work )
{
struct hci_dev * hdev = container_of ( work , struct hci_dev ,
ncmd_timer . work ) ;
bt_dev_err ( hdev , " Controller not accepting commands anymore: ncmd = 0 " ) ;
/* During HCI_INIT phase no events can be injected if the ncmd timer
* triggers since the procedure has its own timeout handling .
*/
if ( test_bit ( HCI_INIT , & hdev - > flags ) )
return ;
/* This is an irrecoverable state, inject hardware error event */
hci_reset_dev ( hdev ) ;
}
2011-03-22 15:12:22 +03:00
struct oob_data * hci_find_remote_oob_data ( struct hci_dev * hdev ,
2014-10-26 22:46:09 +03:00
bdaddr_t * bdaddr , u8 bdaddr_type )
2011-03-22 15:12:22 +03:00
{
struct oob_data * data ;
2014-10-26 22:46:09 +03:00
list_for_each_entry ( data , & hdev - > remote_oob_data , list ) {
if ( bacmp ( bdaddr , & data - > bdaddr ) ! = 0 )
continue ;
if ( data - > bdaddr_type ! = bdaddr_type )
continue ;
return data ;
}
2011-03-22 15:12:22 +03:00
return NULL ;
}
2014-10-26 22:46:09 +03:00
/* Remove and free the stored remote OOB data for @bdaddr/@bdaddr_type.
 * Returns 0 on success or -ENOENT when no matching entry exists.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *entry;

	entry = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!entry)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2014-02-18 19:14:32 +04:00
void hci_remote_oob_data_clear ( struct hci_dev * hdev )
2011-03-22 15:12:22 +03:00
{
struct oob_data * data , * n ;
list_for_each_entry_safe ( data , n , & hdev - > remote_oob_data , list ) {
list_del ( & data - > list ) ;
kfree ( data ) ;
}
}
2014-01-10 14:07:29 +04:00
/* Store remote Out Of Band pairing data for @bdaddr/@bdaddr_type,
 * creating a new list entry when none exists yet.
 *
 * Either the hash192/rand192 pair or the hash256/rand256 pair (or both)
 * may be NULL.  data->present encodes which pairs were supplied:
 * 0x00 none, 0x01 192-bit only, 0x02 256-bit only, 0x03 both.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		/* The 192-only case (0x01) is finalized in the 256 branch
		 * below, once we know hash256/rand256 are absent.
		 */
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2015-06-18 04:16:34 +03:00
/* Find the advertising instance with identifier @instance, or NULL.
 *
 * This function requires the caller holds hdev->lock.
 */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	list_for_each_entry(adv, &hdev->adv_instances, list) {
		if (adv->instance == instance)
			return adv;
	}

	return NULL;
}
/* This function requires the caller holds hdev->lock */
2015-11-18 15:38:41 +03:00
struct adv_info * hci_get_next_instance ( struct hci_dev * hdev , u8 instance )
{
2015-06-18 04:16:34 +03:00
struct adv_info * cur_instance ;
cur_instance = hci_find_adv_instance ( hdev , instance ) ;
if ( ! cur_instance )
return NULL ;
if ( cur_instance = = list_last_entry ( & hdev - > adv_instances ,
struct adv_info , list ) )
return list_first_entry ( & hdev - > adv_instances ,
struct adv_info , list ) ;
else
return list_next_entry ( cur_instance , list ) ;
}
/* Remove advertising instance @instance and free its resources.
 *
 * If the instance being removed is the currently advertised one, its
 * expiry timer is stopped and the current instance is reset to 0x00.
 *
 * Returns 0 on success or -ENOENT when the instance does not exist.
 *
 * This function requires the caller holds hdev->lock.
 */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* Fix: drop the stray "MR" suffix left over from a %pMR
	 * copy-paste; instance is a plain integer identifier.
	 */
	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	/* Make sure no RPA-expiry work is left running on this instance */
	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}
2018-07-19 14:39:45 +03:00
/* Set the rpa_expired flag on every advertising instance. */
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv, *tmp;

	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
		adv->rpa_expired = rpa_expired;
}
2015-06-18 04:16:34 +03:00
/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear ( struct hci_dev * hdev )
{
struct adv_info * adv_instance , * n ;
2015-06-18 04:16:35 +03:00
if ( hdev - > adv_instance_timeout ) {
cancel_delayed_work ( & hdev - > adv_instance_expire ) ;
hdev - > adv_instance_timeout = 0 ;
}
2015-06-18 04:16:34 +03:00
list_for_each_entry_safe ( adv_instance , n , & hdev - > adv_instances , list ) {
2018-07-19 14:39:45 +03:00
cancel_delayed_work_sync ( & adv_instance - > rpa_expired_cb ) ;
2015-06-18 04:16:34 +03:00
list_del ( & adv_instance - > list ) ;
kfree ( adv_instance ) ;
}
hdev - > adv_instance_cnt = 0 ;
2015-11-30 12:21:45 +03:00
hdev - > cur_adv_instance = 0x00 ;
2015-06-18 04:16:34 +03:00
}
2018-07-19 14:39:45 +03:00
static void adv_instance_rpa_expired ( struct work_struct * work )
{
struct adv_info * adv_instance = container_of ( work , struct adv_info ,
rpa_expired_cb . work ) ;
BT_DBG ( " " ) ;
adv_instance - > rpa_expired = true ;
}
2015-06-18 04:16:34 +03:00
/* Create or update advertising instance @instance.
 *
 * When the instance already exists, its payload buffers are wiped and
 * its parameters overwritten.  Otherwise a new adv_info is allocated
 * and linked into hdev->adv_instances (subject to the controller's
 * advertising-set limits).  A @duration of 0 selects the default
 * multi-advertising rotation duration.
 *
 * Returns the instance on success, or ERR_PTR(-EOVERFLOW) when no more
 * instances are allowed / @instance is out of range, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 *
 * This function requires the caller holds hdev->lock.
 */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		/* Existing instance: wipe stale payload before reuse */
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		/* NOTE(review): the "+ 1" presumably leaves room for the
		 * extra mesh instance - confirm against the mesh users.
		 */
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		/* New instances start out flagged as pending */
		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return adv;
}
/* Create (or reuse) an advertising instance configured for periodic
 * advertising and attach @data as its periodic advertising payload.
 *
 * This function requires the caller holds hdev->lock.
 */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}
2020-12-03 23:12:48 +03:00
/* Update the advertising and/or scan response payload of an existing
 * advertising instance.
 *
 * A non-empty payload is only copied in when it differs from the stored
 * one (ADV_DATA_CMP/SCAN_RSP_CMP - presumably memcmp-style comparison
 * macros; confirm in the header), and the matching *_changed flag is
 * raised so the controller gets re-programmed.
 *
 * Returns 0 on success or -ENOENT when the instance does not exist.
 *
 * This function requires the caller holds hdev->lock.
 */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}
2021-09-21 01:59:37 +03:00
/* Return the mgmt advertising flags in effect for @instance.
 *
 * This function requires the caller holds hdev->lock.
 */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;
	u32 flags;

	if (instance != 0x00) {
		adv = hci_find_adv_instance(hdev, instance);

		/* Return 0 when we got an invalid instance identifier. */
		return adv ? adv->flags : 0;
	}

	/* Instance 0 always manages the "Tx Power" and "Flags" fields */
	flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

	/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
	 * corresponds to the "connectable" instance flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
		flags |= MGMT_ADV_FLAG_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		flags |= MGMT_ADV_FLAG_DISCOV;

	return flags;
}
/* Whether advertising instance @instance carries scan response data. */
bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & (MGMT_ADV_FLAG_APPEARANCE | MGMT_ADV_FLAG_LOCAL_NAME))
		return true;

	return adv->scan_rsp_len > 0;
}
2020-06-17 17:39:13 +03:00
/* Free every registered advertising monitor, then destroy the idr that
 * indexed them.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	/* hci_free_adv_monitor also drops the entry from the idr */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}
2021-01-22 11:36:13 +03:00
/* Frees the monitor structure and do some bookkeepings.
* This function requires the caller holds hdev - > lock .
*/
void hci_free_adv_monitor ( struct hci_dev * hdev , struct adv_monitor * monitor )
2020-06-17 17:39:14 +03:00
{
struct adv_pattern * pattern ;
struct adv_pattern * tmp ;
if ( ! monitor )
return ;
2021-01-22 11:36:13 +03:00
list_for_each_entry_safe ( pattern , tmp , & monitor - > patterns , list ) {
list_del ( & pattern - > list ) ;
2020-06-17 17:39:14 +03:00
kfree ( pattern ) ;
2021-01-22 11:36:13 +03:00
}
if ( monitor - > handle )
idr_remove ( & hdev - > adv_monitors_idr , monitor - > handle ) ;
if ( monitor - > state ! = ADV_MONITOR_STATE_NOT_REGISTERED ) {
hdev - > adv_monitors_cnt - - ;
mgmt_adv_monitor_removed ( hdev , monitor - > handle ) ;
}
2020-06-17 17:39:14 +03:00
kfree ( monitor ) ;
}
2021-01-22 11:36:12 +03:00
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 *
 * Returns 0 on success (including when forwarding is deferred because
 * the controller is powered off or offloading is unavailable), a
 * negative idr_alloc error, or the offload status.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	/* hdev->lock is held only around the idr allocation */
	hci_dev_lock(hdev);

	/* Allocate a handle in the range reserved for adv monitors */
	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	/* Forwarding to the controller only happens once powered on */
	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
			   monitor->handle, status);
		break;
	}

	return status;
}
2021-01-22 11:36:13 +03:00
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		/* Cache the handle before the call: NOTE(review) the copy
		 * suggests msft_remove_monitor may invalidate monitor -
		 * confirm before dereferencing it afterwards.
		 */
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
			   hdev->name, handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}
2022-07-21 02:21:14 +03:00
/* Remove the monitor registered under @handle, if any.
 *
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor;

	monitor = idr_find(&hdev->adv_monitors_idr, handle);
	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}
2022-07-21 02:21:14 +03:00
/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor ( struct hci_dev * hdev )
2020-06-17 17:39:15 +03:00
{
struct adv_monitor * monitor ;
2021-01-22 11:36:13 +03:00
int idr_next_id = 0 ;
2022-07-21 02:21:14 +03:00
int status = 0 ;
2021-01-22 11:36:13 +03:00
2022-07-21 02:21:14 +03:00
while ( 1 ) {
2021-01-22 11:36:13 +03:00
monitor = idr_get_next ( & hdev - > adv_monitors_idr , & idr_next_id ) ;
2020-06-17 17:39:15 +03:00
if ( ! monitor )
2021-01-22 11:36:13 +03:00
break ;
2020-06-17 17:39:15 +03:00
2022-07-21 02:21:14 +03:00
status = hci_remove_adv_monitor ( hdev , monitor ) ;
if ( status )
return status ;
2021-01-22 11:36:13 +03:00
2022-07-21 02:21:14 +03:00
idr_next_id + + ;
2020-06-17 17:39:15 +03:00
}
2022-07-21 02:21:14 +03:00
return status ;
2020-06-17 17:39:15 +03:00
}
2020-06-17 17:39:18 +03:00
/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring ( struct hci_dev * hdev )
{
return ! idr_is_empty ( & hdev - > adv_monitors_idr ) ;
}
2021-01-22 11:36:12 +03:00
/* Report which advertisement-monitor offload mechanism is available. */
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	return msft_monitor_supported(hdev) ? HCI_ADV_MONITOR_EXT_MSFT :
					      HCI_ADV_MONITOR_EXT_NONE;
}
2014-07-09 13:59:13 +04:00
struct bdaddr_list * hci_bdaddr_list_lookup ( struct list_head * bdaddr_list ,
2013-10-18 04:24:13 +04:00
bdaddr_t * bdaddr , u8 type )
2011-06-15 13:01:14 +04:00
{
2011-11-01 12:58:56 +04:00
struct bdaddr_list * b ;
2011-06-15 13:01:14 +04:00
2014-07-09 13:59:13 +04:00
list_for_each_entry ( b , bdaddr_list , list ) {
2013-10-18 04:24:13 +04:00
if ( ! bacmp ( & b - > bdaddr , bdaddr ) & & b - > bdaddr_type = = type )
2011-06-15 13:01:14 +04:00
return b ;
2013-10-18 04:24:13 +04:00
}
2011-06-15 13:01:14 +04:00
return NULL ;
}
2018-08-17 04:59:19 +03:00
/* IRK-carrying variant of hci_bdaddr_list_lookup(). */
struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *entry;

	list_for_each_entry(entry, bdaddr_list, list) {
		if (bacmp(&entry->bdaddr, bdaddr))
			continue;
		if (entry->bdaddr_type != type)
			continue;
		return entry;
	}

	return NULL;
}
2020-06-17 17:39:08 +03:00
/* Flag-carrying variant of hci_bdaddr_list_lookup(). */
struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *entry;

	list_for_each_entry(entry, bdaddr_list, list) {
		if (bacmp(&entry->bdaddr, bdaddr))
			continue;
		if (entry->bdaddr_type != type)
			continue;
		return entry;
	}

	return NULL;
}
2014-07-09 13:59:13 +04:00
void hci_bdaddr_list_clear ( struct list_head * bdaddr_list )
2011-06-15 13:01:14 +04:00
{
2015-12-18 18:33:25 +03:00
struct bdaddr_list * b , * n ;
2011-06-15 13:01:14 +04:00
2015-12-18 18:33:25 +03:00
list_for_each_entry_safe ( b , n , bdaddr_list , list ) {
list_del ( & b - > list ) ;
2011-06-15 13:01:14 +04:00
kfree ( b ) ;
}
}
2014-07-09 13:59:13 +04:00
/* Append a new @bdaddr/@type entry to @list.
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST when already present and
 * -ENOMEM on allocation failure.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}
2018-08-17 04:59:19 +03:00
/* Add @bdaddr/@type together with optional peer and local IRKs to @list.
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST for a duplicate entry and
 * -ENOMEM on allocation failure.
 */
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	/* NOTE(review): duplicate check uses the plain bdaddr_list lookup
	 * on a list of bdaddr_list_with_irk entries; presumably both
	 * structs share the same leading layout - confirm against the
	 * struct definitions.
	 */
	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	/* IRKs are optional; 16 bytes each when supplied */
	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}
2020-06-17 17:39:08 +03:00
/* Append a new @bdaddr/@type entry carrying connection @flags to @list.
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST when already present and
 * -ENOMEM on allocation failure.
 */
int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;
}
2014-07-09 13:59:13 +04:00
/* Delete the @bdaddr/@type entry from @list; BDADDR_ANY clears the
 * whole list.  Returns 0 on success or -ENOENT when not found.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2018-08-17 04:59:19 +03:00
/* IRK-carrying variant of hci_bdaddr_list_del(). */
int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2020-06-17 17:39:08 +03:00
/* Flag-carrying variant of hci_bdaddr_list_del(). */
int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
2014-02-03 20:56:18 +04:00
/* Find the LE connection parameters stored for @addr/@addr_type.
 *
 * This function requires the caller holds hdev->lock.
 */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (!bacmp(&params->addr, addr) &&
		    params->addr_type == addr_type)
			return params;
	}

	return NULL;
}
Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
hci_update_accept_list_sync iterates over hdev->pend_le_conns and
hdev->pend_le_reports, and waits for controller events in the loop body,
without holding hdev lock.
Meanwhile, these lists and the items may be modified, e.g. by
le_scan_cleanup. This can invalidate the list cursor or any other item
in the list, resulting in invalid behavior (e.g. use-after-free).
Use RCU for the hci_conn_params action lists. Since the loop bodies in
hci_sync block and we cannot use RCU or hdev->lock for the whole loop,
copy list items first and then iterate on the copy. Only the flags field
is written from elsewhere, so READ_ONCE/WRITE_ONCE should guarantee we
read valid values.
Free params everywhere with hci_conn_params_free so the cleanup is
guaranteed to be done properly.
This fixes the following, which can be triggered e.g. by BlueZ new
mgmt-tester case "Add + Remove Device Nowait - Success", or by changing
hci_le_set_cig_params to always return false, and running iso-tester:
==================================================================
BUG: KASAN: slab-use-after-free in hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
Read of size 8 at addr ffff888001265018 by task kworker/u3:0/32
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc38 04/01/2014
Workqueue: hci0 hci_cmd_sync_work
Call Trace:
<TASK>
dump_stack_lvl (./arch/x86/include/asm/irqflags.h:134 lib/dump_stack.c:107)
print_report (mm/kasan/report.c:320 mm/kasan/report.c:430)
? __virt_addr_valid (./include/linux/mmzone.h:1915 ./include/linux/mmzone.h:2011 arch/x86/mm/physaddr.c:65)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
kasan_report (mm/kasan/report.c:538)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
? __pfx_hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2780)
? mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_unlock (kernel/locking/mutex.c:538)
? __pfx_update_passive_scan_sync (net/bluetooth/hci_sync.c:2861)
hci_cmd_sync_work (net/bluetooth/hci_sync.c:306)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
? __pfx_worker_thread (kernel/workqueue.c:2480)
kthread (kernel/kthread.c:376)
? __pfx_kthread (kernel/kthread.c:331)
ret_from_fork (arch/x86/entry/entry_64.S:314)
</TASK>
Allocated by task 31:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
__kasan_kmalloc (mm/kasan/common.c:374 mm/kasan/common.c:383)
hci_conn_params_add (./include/linux/slab.h:580 ./include/linux/slab.h:720 net/bluetooth/hci_core.c:2277)
hci_connect_le_scan (net/bluetooth/hci_conn.c:1419 net/bluetooth/hci_conn.c:1589)
hci_connect_cis (net/bluetooth/hci_conn.c:2266)
iso_connect_cis (net/bluetooth/iso.c:390)
iso_sock_connect (net/bluetooth/iso.c:899)
__sys_connect (net/socket.c:2003 net/socket.c:2020)
__x64_sys_connect (net/socket.c:2027)
do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
Freed by task 15:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
kasan_save_free_info (mm/kasan/generic.c:523)
__kasan_slab_free (mm/kasan/common.c:238 mm/kasan/common.c:200 mm/kasan/common.c:244)
__kmem_cache_free (mm/slub.c:1807 mm/slub.c:3787 mm/slub.c:3800)
hci_conn_params_del (net/bluetooth/hci_core.c:2323)
le_scan_cleanup (net/bluetooth/hci_conn.c:202)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
kthread (kernel/kthread.c:376)
ret_from_fork (arch/x86/entry/entry_64.S:314)
==================================================================
Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
Signed-off-by: Pauli Virtanen <pav@iki.fi>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
2023-06-19 01:04:31 +03:00
/* This function requires the caller holds hdev->lock or rcu_read_lock */
2014-07-04 13:37:26 +04:00
struct hci_conn_params * hci_pend_le_action_lookup ( struct list_head * list ,
bdaddr_t * addr , u8 addr_type )
2014-02-27 03:21:52 +04:00
{
2014-07-03 20:33:49 +04:00
struct hci_conn_params * param ;
2014-02-27 03:21:52 +04:00
Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
hci_update_accept_list_sync iterates over hdev->pend_le_conns and
hdev->pend_le_reports, and waits for controller events in the loop body,
without holding hdev lock.
Meanwhile, these lists and the items may be modified e.g. by
le_scan_cleanup. This can invalidate the list cursor or any other item
in the list, resulting to invalid behavior (eg use-after-free).
Use RCU for the hci_conn_params action lists. Since the loop bodies in
hci_sync block and we cannot use RCU or hdev->lock for the whole loop,
copy list items first and then iterate on the copy. Only the flags field
is written from elsewhere, so READ_ONCE/WRITE_ONCE should guarantee we
read valid values.
Free params everywhere with hci_conn_params_free so the cleanup is
guaranteed to be done properly.
This fixes the following, which can be triggered e.g. by BlueZ new
mgmt-tester case "Add + Remove Device Nowait - Success", or by changing
hci_le_set_cig_params to always return false, and running iso-tester:
==================================================================
BUG: KASAN: slab-use-after-free in hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
Read of size 8 at addr ffff888001265018 by task kworker/u3:0/32
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc38 04/01/2014
Workqueue: hci0 hci_cmd_sync_work
Call Trace:
<TASK>
dump_stack_lvl (./arch/x86/include/asm/irqflags.h:134 lib/dump_stack.c:107)
print_report (mm/kasan/report.c:320 mm/kasan/report.c:430)
? __virt_addr_valid (./include/linux/mmzone.h:1915 ./include/linux/mmzone.h:2011 arch/x86/mm/physaddr.c:65)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
kasan_report (mm/kasan/report.c:538)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
? __pfx_hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2780)
? mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_unlock (kernel/locking/mutex.c:538)
? __pfx_update_passive_scan_sync (net/bluetooth/hci_sync.c:2861)
hci_cmd_sync_work (net/bluetooth/hci_sync.c:306)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
? __pfx_worker_thread (kernel/workqueue.c:2480)
kthread (kernel/kthread.c:376)
? __pfx_kthread (kernel/kthread.c:331)
ret_from_fork (arch/x86/entry/entry_64.S:314)
</TASK>
Allocated by task 31:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
__kasan_kmalloc (mm/kasan/common.c:374 mm/kasan/common.c:383)
hci_conn_params_add (./include/linux/slab.h:580 ./include/linux/slab.h:720 net/bluetooth/hci_core.c:2277)
hci_connect_le_scan (net/bluetooth/hci_conn.c:1419 net/bluetooth/hci_conn.c:1589)
hci_connect_cis (net/bluetooth/hci_conn.c:2266)
iso_connect_cis (net/bluetooth/iso.c:390)
iso_sock_connect (net/bluetooth/iso.c:899)
__sys_connect (net/socket.c:2003 net/socket.c:2020)
__x64_sys_connect (net/socket.c:2027)
do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
Freed by task 15:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
kasan_save_free_info (mm/kasan/generic.c:523)
__kasan_slab_free (mm/kasan/common.c:238 mm/kasan/common.c:200 mm/kasan/common.c:244)
__kmem_cache_free (mm/slub.c:1807 mm/slub.c:3787 mm/slub.c:3800)
hci_conn_params_del (net/bluetooth/hci_core.c:2323)
le_scan_cleanup (net/bluetooth/hci_conn.c:202)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
kthread (kernel/kthread.c:376)
ret_from_fork (arch/x86/entry/entry_64.S:314)
==================================================================
Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
Signed-off-by: Pauli Virtanen <pav@iki.fi>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
2023-06-19 01:04:31 +03:00
rcu_read_lock ( ) ;
list_for_each_entry_rcu ( param , list , action ) {
2014-07-03 20:33:49 +04:00
if ( bacmp ( & param - > addr , addr ) = = 0 & &
Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
hci_update_accept_list_sync iterates over hdev->pend_le_conns and
hdev->pend_le_reports, and waits for controller events in the loop body,
without holding hdev lock.
Meanwhile, these lists and the items may be modified e.g. by
le_scan_cleanup. This can invalidate the list cursor or any other item
in the list, resulting to invalid behavior (eg use-after-free).
Use RCU for the hci_conn_params action lists. Since the loop bodies in
hci_sync block and we cannot use RCU or hdev->lock for the whole loop,
copy list items first and then iterate on the copy. Only the flags field
is written from elsewhere, so READ_ONCE/WRITE_ONCE should guarantee we
read valid values.
Free params everywhere with hci_conn_params_free so the cleanup is
guaranteed to be done properly.
This fixes the following, which can be triggered e.g. by BlueZ new
mgmt-tester case "Add + Remove Device Nowait - Success", or by changing
hci_le_set_cig_params to always return false, and running iso-tester:
==================================================================
BUG: KASAN: slab-use-after-free in hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
Read of size 8 at addr ffff888001265018 by task kworker/u3:0/32
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc38 04/01/2014
Workqueue: hci0 hci_cmd_sync_work
Call Trace:
<TASK>
dump_stack_lvl (./arch/x86/include/asm/irqflags.h:134 lib/dump_stack.c:107)
print_report (mm/kasan/report.c:320 mm/kasan/report.c:430)
? __virt_addr_valid (./include/linux/mmzone.h:1915 ./include/linux/mmzone.h:2011 arch/x86/mm/physaddr.c:65)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
kasan_report (mm/kasan/report.c:538)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
? __pfx_hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2780)
? mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_unlock (kernel/locking/mutex.c:538)
? __pfx_update_passive_scan_sync (net/bluetooth/hci_sync.c:2861)
hci_cmd_sync_work (net/bluetooth/hci_sync.c:306)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
? __pfx_worker_thread (kernel/workqueue.c:2480)
kthread (kernel/kthread.c:376)
? __pfx_kthread (kernel/kthread.c:331)
ret_from_fork (arch/x86/entry/entry_64.S:314)
</TASK>
Allocated by task 31:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
__kasan_kmalloc (mm/kasan/common.c:374 mm/kasan/common.c:383)
hci_conn_params_add (./include/linux/slab.h:580 ./include/linux/slab.h:720 net/bluetooth/hci_core.c:2277)
hci_connect_le_scan (net/bluetooth/hci_conn.c:1419 net/bluetooth/hci_conn.c:1589)
hci_connect_cis (net/bluetooth/hci_conn.c:2266)
iso_connect_cis (net/bluetooth/iso.c:390)
iso_sock_connect (net/bluetooth/iso.c:899)
__sys_connect (net/socket.c:2003 net/socket.c:2020)
__x64_sys_connect (net/socket.c:2027)
do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
Freed by task 15:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
kasan_save_free_info (mm/kasan/generic.c:523)
__kasan_slab_free (mm/kasan/common.c:238 mm/kasan/common.c:200 mm/kasan/common.c:244)
__kmem_cache_free (mm/slub.c:1807 mm/slub.c:3787 mm/slub.c:3800)
hci_conn_params_del (net/bluetooth/hci_core.c:2323)
le_scan_cleanup (net/bluetooth/hci_conn.c:202)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
kthread (kernel/kthread.c:376)
ret_from_fork (arch/x86/entry/entry_64.S:314)
==================================================================
Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
Signed-off-by: Pauli Virtanen <pav@iki.fi>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
2023-06-19 01:04:31 +03:00
param - > addr_type = = addr_type ) {
rcu_read_unlock ( ) ;
2014-07-03 20:33:49 +04:00
return param ;
Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
hci_update_accept_list_sync iterates over hdev->pend_le_conns and
hdev->pend_le_reports, and waits for controller events in the loop body,
without holding hdev lock.
Meanwhile, these lists and the items may be modified e.g. by
le_scan_cleanup. This can invalidate the list cursor or any other item
in the list, resulting to invalid behavior (eg use-after-free).
Use RCU for the hci_conn_params action lists. Since the loop bodies in
hci_sync block and we cannot use RCU or hdev->lock for the whole loop,
copy list items first and then iterate on the copy. Only the flags field
is written from elsewhere, so READ_ONCE/WRITE_ONCE should guarantee we
read valid values.
Free params everywhere with hci_conn_params_free so the cleanup is
guaranteed to be done properly.
This fixes the following, which can be triggered e.g. by BlueZ new
mgmt-tester case "Add + Remove Device Nowait - Success", or by changing
hci_le_set_cig_params to always return false, and running iso-tester:
==================================================================
BUG: KASAN: slab-use-after-free in hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
Read of size 8 at addr ffff888001265018 by task kworker/u3:0/32
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc38 04/01/2014
Workqueue: hci0 hci_cmd_sync_work
Call Trace:
<TASK>
dump_stack_lvl (./arch/x86/include/asm/irqflags.h:134 lib/dump_stack.c:107)
print_report (mm/kasan/report.c:320 mm/kasan/report.c:430)
? __virt_addr_valid (./include/linux/mmzone.h:1915 ./include/linux/mmzone.h:2011 arch/x86/mm/physaddr.c:65)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
kasan_report (mm/kasan/report.c:538)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
? __pfx_hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2780)
? mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_unlock (kernel/locking/mutex.c:538)
? __pfx_update_passive_scan_sync (net/bluetooth/hci_sync.c:2861)
hci_cmd_sync_work (net/bluetooth/hci_sync.c:306)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
? __pfx_worker_thread (kernel/workqueue.c:2480)
kthread (kernel/kthread.c:376)
? __pfx_kthread (kernel/kthread.c:331)
ret_from_fork (arch/x86/entry/entry_64.S:314)
</TASK>
Allocated by task 31:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
__kasan_kmalloc (mm/kasan/common.c:374 mm/kasan/common.c:383)
hci_conn_params_add (./include/linux/slab.h:580 ./include/linux/slab.h:720 net/bluetooth/hci_core.c:2277)
hci_connect_le_scan (net/bluetooth/hci_conn.c:1419 net/bluetooth/hci_conn.c:1589)
hci_connect_cis (net/bluetooth/hci_conn.c:2266)
iso_connect_cis (net/bluetooth/iso.c:390)
iso_sock_connect (net/bluetooth/iso.c:899)
__sys_connect (net/socket.c:2003 net/socket.c:2020)
__x64_sys_connect (net/socket.c:2027)
do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
Freed by task 15:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
kasan_save_free_info (mm/kasan/generic.c:523)
__kasan_slab_free (mm/kasan/common.c:238 mm/kasan/common.c:200 mm/kasan/common.c:244)
__kmem_cache_free (mm/slub.c:1807 mm/slub.c:3787 mm/slub.c:3800)
hci_conn_params_del (net/bluetooth/hci_core.c:2323)
le_scan_cleanup (net/bluetooth/hci_conn.c:202)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
kthread (kernel/kthread.c:376)
ret_from_fork (arch/x86/entry/entry_64.S:314)
==================================================================
Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
Signed-off-by: Pauli Virtanen <pav@iki.fi>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
2023-06-19 01:04:31 +03:00
}
2014-06-29 15:41:49 +04:00
}
Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
hci_update_accept_list_sync iterates over hdev->pend_le_conns and
hdev->pend_le_reports, and waits for controller events in the loop body,
without holding hdev lock.
Meanwhile, these lists and the items may be modified e.g. by
le_scan_cleanup. This can invalidate the list cursor or any other item
in the list, resulting to invalid behavior (eg use-after-free).
Use RCU for the hci_conn_params action lists. Since the loop bodies in
hci_sync block and we cannot use RCU or hdev->lock for the whole loop,
copy list items first and then iterate on the copy. Only the flags field
is written from elsewhere, so READ_ONCE/WRITE_ONCE should guarantee we
read valid values.
Free params everywhere with hci_conn_params_free so the cleanup is
guaranteed to be done properly.
This fixes the following, which can be triggered e.g. by BlueZ new
mgmt-tester case "Add + Remove Device Nowait - Success", or by changing
hci_le_set_cig_params to always return false, and running iso-tester:
==================================================================
BUG: KASAN: slab-use-after-free in hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
Read of size 8 at addr ffff888001265018 by task kworker/u3:0/32
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc38 04/01/2014
Workqueue: hci0 hci_cmd_sync_work
Call Trace:
<TASK>
dump_stack_lvl (./arch/x86/include/asm/irqflags.h:134 lib/dump_stack.c:107)
print_report (mm/kasan/report.c:320 mm/kasan/report.c:430)
? __virt_addr_valid (./include/linux/mmzone.h:1915 ./include/linux/mmzone.h:2011 arch/x86/mm/physaddr.c:65)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
kasan_report (mm/kasan/report.c:538)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
? __pfx_hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2780)
? mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_unlock (kernel/locking/mutex.c:538)
? __pfx_update_passive_scan_sync (net/bluetooth/hci_sync.c:2861)
hci_cmd_sync_work (net/bluetooth/hci_sync.c:306)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
? __pfx_worker_thread (kernel/workqueue.c:2480)
kthread (kernel/kthread.c:376)
? __pfx_kthread (kernel/kthread.c:331)
ret_from_fork (arch/x86/entry/entry_64.S:314)
</TASK>
Allocated by task 31:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
__kasan_kmalloc (mm/kasan/common.c:374 mm/kasan/common.c:383)
hci_conn_params_add (./include/linux/slab.h:580 ./include/linux/slab.h:720 net/bluetooth/hci_core.c:2277)
hci_connect_le_scan (net/bluetooth/hci_conn.c:1419 net/bluetooth/hci_conn.c:1589)
hci_connect_cis (net/bluetooth/hci_conn.c:2266)
iso_connect_cis (net/bluetooth/iso.c:390)
iso_sock_connect (net/bluetooth/iso.c:899)
__sys_connect (net/socket.c:2003 net/socket.c:2020)
__x64_sys_connect (net/socket.c:2027)
do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
Freed by task 15:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
kasan_save_free_info (mm/kasan/generic.c:523)
__kasan_slab_free (mm/kasan/common.c:238 mm/kasan/common.c:200 mm/kasan/common.c:244)
__kmem_cache_free (mm/slub.c:1807 mm/slub.c:3787 mm/slub.c:3800)
hci_conn_params_del (net/bluetooth/hci_core.c:2323)
le_scan_cleanup (net/bluetooth/hci_conn.c:202)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
kthread (kernel/kthread.c:376)
ret_from_fork (arch/x86/entry/entry_64.S:314)
==================================================================
Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
Signed-off-by: Pauli Virtanen <pav@iki.fi>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
2023-06-19 01:04:31 +03:00
rcu_read_unlock ( ) ;
2014-06-29 15:41:49 +04:00
return NULL ;
2014-02-27 03:21:52 +04:00
}
Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
hci_update_accept_list_sync iterates over hdev->pend_le_conns and
hdev->pend_le_reports, and waits for controller events in the loop body,
without holding hdev lock.
Meanwhile, these lists and the items may be modified e.g. by
le_scan_cleanup. This can invalidate the list cursor or any other item
in the list, resulting to invalid behavior (eg use-after-free).
Use RCU for the hci_conn_params action lists. Since the loop bodies in
hci_sync block and we cannot use RCU or hdev->lock for the whole loop,
copy list items first and then iterate on the copy. Only the flags field
is written from elsewhere, so READ_ONCE/WRITE_ONCE should guarantee we
read valid values.
Free params everywhere with hci_conn_params_free so the cleanup is
guaranteed to be done properly.
This fixes the following, which can be triggered e.g. by BlueZ new
mgmt-tester case "Add + Remove Device Nowait - Success", or by changing
hci_le_set_cig_params to always return false, and running iso-tester:
==================================================================
BUG: KASAN: slab-use-after-free in hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
Read of size 8 at addr ffff888001265018 by task kworker/u3:0/32
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc38 04/01/2014
Workqueue: hci0 hci_cmd_sync_work
Call Trace:
<TASK>
dump_stack_lvl (./arch/x86/include/asm/irqflags.h:134 lib/dump_stack.c:107)
print_report (mm/kasan/report.c:320 mm/kasan/report.c:430)
? __virt_addr_valid (./include/linux/mmzone.h:1915 ./include/linux/mmzone.h:2011 arch/x86/mm/physaddr.c:65)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
kasan_report (mm/kasan/report.c:538)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
? __pfx_hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2780)
? mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_unlock (kernel/locking/mutex.c:538)
? __pfx_update_passive_scan_sync (net/bluetooth/hci_sync.c:2861)
hci_cmd_sync_work (net/bluetooth/hci_sync.c:306)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
? __pfx_worker_thread (kernel/workqueue.c:2480)
kthread (kernel/kthread.c:376)
? __pfx_kthread (kernel/kthread.c:331)
ret_from_fork (arch/x86/entry/entry_64.S:314)
</TASK>
Allocated by task 31:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
__kasan_kmalloc (mm/kasan/common.c:374 mm/kasan/common.c:383)
hci_conn_params_add (./include/linux/slab.h:580 ./include/linux/slab.h:720 net/bluetooth/hci_core.c:2277)
hci_connect_le_scan (net/bluetooth/hci_conn.c:1419 net/bluetooth/hci_conn.c:1589)
hci_connect_cis (net/bluetooth/hci_conn.c:2266)
iso_connect_cis (net/bluetooth/iso.c:390)
iso_sock_connect (net/bluetooth/iso.c:899)
__sys_connect (net/socket.c:2003 net/socket.c:2020)
__x64_sys_connect (net/socket.c:2027)
do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
Freed by task 15:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
kasan_save_free_info (mm/kasan/generic.c:523)
__kasan_slab_free (mm/kasan/common.c:238 mm/kasan/common.c:200 mm/kasan/common.c:244)
__kmem_cache_free (mm/slub.c:1807 mm/slub.c:3787 mm/slub.c:3800)
hci_conn_params_del (net/bluetooth/hci_core.c:2323)
le_scan_cleanup (net/bluetooth/hci_conn.c:202)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
kthread (kernel/kthread.c:376)
ret_from_fork (arch/x86/entry/entry_64.S:314)
==================================================================
Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
Signed-off-by: Pauli Virtanen <pav@iki.fi>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
2023-06-19 01:04:31 +03:00
/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
{
	/* Nothing to do if the entry is not on any pend_le_* action list */
	if (list_empty(&param->action))
		return;

	/* Unlink under RCU, then wait for in-flight RCU readers (e.g.
	 * hci_pend_le_action_lookup) to finish before reinitialising the
	 * node, so no reader can observe a reset list entry.
	 */
	list_del_rcu(&param->action);
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);
}
/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)
{
	/* RCU publication so lockless readers walking the action list
	 * (hci_pend_le_action_lookup) only ever see a fully linked entry.
	 */
	list_add_rcu(&param->action, list);
}
2014-02-03 20:56:18 +04:00
/* This function requires the caller holds hdev->lock */
2014-07-01 14:11:04 +04:00
struct hci_conn_params * hci_conn_params_add ( struct hci_dev * hdev ,
bdaddr_t * addr , u8 addr_type )
2014-02-03 20:56:18 +04:00
{
struct hci_conn_params * params ;
params = hci_conn_params_lookup ( hdev , addr , addr_type ) ;
2014-02-27 03:21:49 +04:00
if ( params )
2014-07-01 14:11:04 +04:00
return params ;
2014-02-03 20:56:18 +04:00
params = kzalloc ( sizeof ( * params ) , GFP_KERNEL ) ;
if ( ! params ) {
2017-10-30 12:42:59 +03:00
bt_dev_err ( hdev , " out of memory " ) ;
2014-07-01 14:11:04 +04:00
return NULL ;
2014-02-03 20:56:18 +04:00
}
bacpy ( & params - > addr , addr ) ;
params - > addr_type = addr_type ;
2014-02-27 03:21:49 +04:00
list_add ( & params - > list , & hdev - > le_conn_params ) ;
2014-07-04 13:37:17 +04:00
INIT_LIST_HEAD ( & params - > action ) ;
2014-02-27 03:21:49 +04:00
2014-06-30 14:34:39 +04:00
params - > conn_min_interval = hdev - > le_conn_min_interval ;
params - > conn_max_interval = hdev - > le_conn_max_interval ;
params - > conn_latency = hdev - > le_conn_latency ;
params - > supervision_timeout = hdev - > le_supv_timeout ;
params - > auto_connect = HCI_AUTO_CONN_DISABLED ;
BT_DBG ( " addr %pMR (type %u) " , addr , addr_type ) ;
2014-07-01 14:11:04 +04:00
return params ;
2014-06-30 14:34:39 +04:00
}
Bluetooth: use RCU for hci_conn_params and iterate safely in hci_sync
hci_update_accept_list_sync iterates over hdev->pend_le_conns and
hdev->pend_le_reports, and waits for controller events in the loop body,
without holding hdev lock.
Meanwhile, these lists and the items may be modified e.g. by
le_scan_cleanup. This can invalidate the list cursor or any other item
in the list, resulting to invalid behavior (eg use-after-free).
Use RCU for the hci_conn_params action lists. Since the loop bodies in
hci_sync block and we cannot use RCU or hdev->lock for the whole loop,
copy list items first and then iterate on the copy. Only the flags field
is written from elsewhere, so READ_ONCE/WRITE_ONCE should guarantee we
read valid values.
Free params everywhere with hci_conn_params_free so the cleanup is
guaranteed to be done properly.
This fixes the following, which can be triggered e.g. by BlueZ new
mgmt-tester case "Add + Remove Device Nowait - Success", or by changing
hci_le_set_cig_params to always return false, and running iso-tester:
==================================================================
BUG: KASAN: slab-use-after-free in hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
Read of size 8 at addr ffff888001265018 by task kworker/u3:0/32
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.2-1.fc38 04/01/2014
Workqueue: hci0 hci_cmd_sync_work
Call Trace:
<TASK>
dump_stack_lvl (./arch/x86/include/asm/irqflags.h:134 lib/dump_stack.c:107)
print_report (mm/kasan/report.c:320 mm/kasan/report.c:430)
? __virt_addr_valid (./include/linux/mmzone.h:1915 ./include/linux/mmzone.h:2011 arch/x86/mm/physaddr.c:65)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
kasan_report (mm/kasan/report.c:538)
? hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2536 net/bluetooth/hci_sync.c:2723 net/bluetooth/hci_sync.c:2841)
? __pfx_hci_update_passive_scan_sync (net/bluetooth/hci_sync.c:2780)
? mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_lock (kernel/locking/mutex.c:282)
? __pfx_mutex_unlock (kernel/locking/mutex.c:538)
? __pfx_update_passive_scan_sync (net/bluetooth/hci_sync.c:2861)
hci_cmd_sync_work (net/bluetooth/hci_sync.c:306)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
? __pfx_worker_thread (kernel/workqueue.c:2480)
kthread (kernel/kthread.c:376)
? __pfx_kthread (kernel/kthread.c:331)
ret_from_fork (arch/x86/entry/entry_64.S:314)
</TASK>
Allocated by task 31:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
__kasan_kmalloc (mm/kasan/common.c:374 mm/kasan/common.c:383)
hci_conn_params_add (./include/linux/slab.h:580 ./include/linux/slab.h:720 net/bluetooth/hci_core.c:2277)
hci_connect_le_scan (net/bluetooth/hci_conn.c:1419 net/bluetooth/hci_conn.c:1589)
hci_connect_cis (net/bluetooth/hci_conn.c:2266)
iso_connect_cis (net/bluetooth/iso.c:390)
iso_sock_connect (net/bluetooth/iso.c:899)
__sys_connect (net/socket.c:2003 net/socket.c:2020)
__x64_sys_connect (net/socket.c:2027)
do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
Freed by task 15:
kasan_save_stack (mm/kasan/common.c:46)
kasan_set_track (mm/kasan/common.c:52)
kasan_save_free_info (mm/kasan/generic.c:523)
__kasan_slab_free (mm/kasan/common.c:238 mm/kasan/common.c:200 mm/kasan/common.c:244)
__kmem_cache_free (mm/slub.c:1807 mm/slub.c:3787 mm/slub.c:3800)
hci_conn_params_del (net/bluetooth/hci_core.c:2323)
le_scan_cleanup (net/bluetooth/hci_conn.c:202)
process_one_work (./arch/x86/include/asm/preempt.h:27 kernel/workqueue.c:2399)
worker_thread (./include/linux/list.h:292 kernel/workqueue.c:2538)
kthread (kernel/kthread.c:376)
ret_from_fork (arch/x86/entry/entry_64.S:314)
==================================================================
Fixes: e8907f76544f ("Bluetooth: hci_sync: Make use of hci_cmd_sync_queue set 3")
Signed-off-by: Pauli Virtanen <pav@iki.fi>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
2023-06-19 01:04:31 +03:00
/* Free a connection parameters entry: unlink it from all lists and drop
 * any connection reference it holds.
 */
void hci_conn_params_free(struct hci_conn_params *params)
{
	/* Remove from any pend_le_conns/pend_le_reports action list first;
	 * this waits out concurrent RCU readers before the memory goes away.
	 */
	hci_pend_le_list_del_init(params);

	/* Release the reference held on an associated connection */
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->list);
	kfree(params);
}
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params = hci_conn_params_lookup(hdev, addr,
								addr_type);

	if (!params)
		return;

	hci_conn_params_free(params);

	/* Re-evaluate passive scanning now that the entry is gone */
	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	/* _safe iteration: entries may be freed while walking the list */
	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	/* _safe iteration: hci_conn_params_free() unlinks and frees entries */
	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
2014-02-27 16:05:41 +04:00
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* Use the static random address as the identity */
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		/* Use the public BD_ADDR as the identity */
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
2020-09-12 00:07:13 +03:00
/* Reset the recorded wake-up reason and address under hdev->lock */
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}
2020-03-11 18:54:00 +03:00
static int hci_suspend_notifier ( struct notifier_block * nb , unsigned long action ,
void * data )
{
struct hci_dev * hdev =
container_of ( nb , struct hci_dev , suspend_notifier ) ;
int ret = 0 ;
2022-09-27 19:58:15 +03:00
/* Userspace has full control of this device. Do nothing. */
if ( hci_dev_test_flag ( hdev , HCI_USER_CHANNEL ) )
return NOTIFY_DONE ;
2021-09-29 00:36:51 +03:00
if ( action = = PM_SUSPEND_PREPARE )
ret = hci_suspend_dev ( hdev ) ;
else if ( action = = PM_POST_SUSPEND )
ret = hci_resume_dev ( hdev ) ;
2020-09-12 00:07:13 +03:00
2020-06-05 23:50:15 +03:00
if ( ret )
bt_dev_err ( hdev , " Suspend notifier action (%lu) failed: %d " ,
action , ret ) ;
2020-07-23 13:47:42 +03:00
return NOTIFY_DONE ;
2020-03-11 18:54:00 +03:00
}
2020-03-20 03:07:12 +03:00
2012-04-22 16:39:57 +04:00
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	/* Reserve room for driver-private data after the core structure */
	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default packet types and link policy */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising instances: none allocated yet */
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection parameter defaults */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Initialize all bookkeeping lists */
	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);

	/* Deferred-work handlers for RX/TX/command processing */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_devcd_setup(hdev);
	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
2012-04-22 16:39:57 +04:00
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2005-04-17 02:20:36 +04:00
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* Transport callbacks are mandatory for a functional driver */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		/* rfkill registration failure is non-fatal */
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Suspend notifier failure only warns; registration continues */
	error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Flag unregistration under unregister_lock so concurrent users
	 * observe a consistent state.
	 */
	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	/* Make sure no power-on work is still in flight */
	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
2008-03-06 05:45:59 +03:00
2021-07-27 00:12:04 +03:00
/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Tear down all remaining per-device state under hdev->lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	/* Return the hciN index for reuse */
	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
2005-04-17 02:20:36 +04:00
2022-06-02 19:46:50 +03:00
int hci_register_suspend_notifier ( struct hci_dev * hdev )
{
int ret = 0 ;
2022-11-29 23:54:13 +03:00
if ( ! hdev - > suspend_notifier . notifier_call & &
! test_bit ( HCI_QUIRK_NO_SUSPEND_NOTIFIER , & hdev - > quirks ) ) {
2022-06-02 19:46:50 +03:00
hdev - > suspend_notifier . notifier_call = hci_suspend_notifier ;
ret = register_pm_notifier ( & hdev - > suspend_notifier ) ;
}
return ret ;
}
int hci_unregister_suspend_notifier ( struct hci_dev * hdev )
{
int ret = 0 ;
2022-11-29 23:54:13 +03:00
if ( hdev - > suspend_notifier . notifier_call ) {
2022-06-02 19:46:50 +03:00
ret = unregister_pm_notifier ( & hdev - > suspend_notifier ) ;
2022-11-29 23:54:13 +03:00
if ( ! ret )
hdev - > suspend_notifier . notifier_call = NULL ;
}
2022-06-02 19:46:50 +03:00
return ret ;
}
2005-04-17 02:20:36 +04:00
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	/* Report the wake reason/address recorded during suspend */
	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
2014-11-02 10:15:38 +03:00
/* Reset HCI device */
int hci_reset_dev ( struct hci_dev * hdev )
{
2019-01-24 20:22:54 +03:00
static const u8 hw_err [ ] = { HCI_EV_HARDWARE_ERROR , 0x01 , 0x00 } ;
2014-11-02 10:15:38 +03:00
struct sk_buff * skb ;
skb = bt_skb_alloc ( 3 , GFP_ATOMIC ) ;
if ( ! skb )
return - ENOMEM ;
2015-11-05 09:10:00 +03:00
hci_skb_pkt_type ( skb ) = HCI_EVENT_PKT ;
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:20 +03:00
skb_put_data ( skb , hw_err , 3 ) ;
2014-11-02 10:15:38 +03:00
2021-04-29 20:24:22 +03:00
bt_dev_err ( hdev , " Injecting HCI hardware error event " ) ;
2014-11-02 10:15:38 +03:00
/* Send Hardware Error to upper stack */
return hci_recv_frame ( hdev , skb ) ;
}
EXPORT_SYMBOL ( hci_reset_dev ) ;
2009-11-18 02:40:39 +03:00
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Frames are only accepted while the device is up or initializing */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		/* Unknown packet type: drop it */
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue for deferred processing by the RX worker */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
2015-10-07 17:38:35 +03:00
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	/* Diagnostic frames go through the same RX queue as regular ones */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
2016-07-17 20:55:16 +03:00
/* Record a printf-formatted hardware description string on the device,
 * replacing (and freeing) any previously stored one.
 */
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);
/* Record a printf-formatted firmware description string on the device,
 * replacing (and freeing) any previously stored one.
 */
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
2005-04-17 02:20:36 +04:00
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure; always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback structure. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2021-12-03 17:59:01 +03:00
/* Hand one frame to the driver's send callback. Consumes the skb on
 * every path (driver takes ownership on success, freed on failure).
 * Returns 0 on success or a negative error code.
 */
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Do not pass frames to a driver that is no longer running */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
2013-03-05 22:37:45 +04:00
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	/* Queue the command; cmd_work serializes actual transmission */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2018-04-26 14:13:26 +03:00
/* Send a vendor-specific HCI command without waiting for a completion
 * event. Only OGF 0x3f (vendor) opcodes are accepted; standard commands
 * must use hci_send_cmd() or the hci_cmd_sync helpers.
 *
 * Returns 0 on success or a negative error code.
 */
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	/* Propagate driver transmit failures instead of discarding them;
	 * hci_send_frame() consumes the skb on every path.
	 */
	return hci_send_frame(hdev, skb);
}
EXPORT_SYMBOL(__hci_cmd_send);
2005-04-17 02:20:36 +04:00
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only hand back the payload when the opcode matches the last
	 * command actually sent to the controller.
	 */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2022-02-05 00:04:38 +03:00
/* Get data from last received event */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			/* Payload pointer is advanced past the meta header
			 * so the caller receives the subevent parameters.
			 */
			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}
2005-04-17 02:20:36 +04:00
/* Send ACL data */

/* Prepend an ACL header (packed handle+flags, payload length) to skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header is pushed */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2012-09-21 13:30:04 +04:00
/* Add ACL headers to skb (and each fragment in its frag_list) and queue
 * everything on @queue for the TX work item.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	/* AMP controllers address data by channel handle, primary
	 * controllers by connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
/* Queue ACL data on the channel's data queue and kick the TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* SCO length field is a single byte, so no endian conversion for dlen */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2019-07-29 18:15:43 +03:00
/* Send ISO data */

/* Prepend an ISO header (packed handle+flags, payload length) to skb. */
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;	/* payload length before the header is pushed */

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
/* Add ISO headers to skb (and each fragment in its frag_list) with
 * START/CONT/END flags and queue everything on @queue.
 */
static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	/* Unfragmented packets are SINGLE; otherwise the head is START */
	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			/* Middle fragments are CONT, the last one is END */
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}
/* Queue ISO data on the connection's data queue and kick the TX work item. */
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2005-04-17 02:20:36 +04:00
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Compute in *quote how many packets @conn may send, dividing the free
 * controller buffer count for its link type by @num contending
 * connections (minimum 1 so progress is always possible).
 */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Controllers without a dedicated LE buffer share the ACL one */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		/* ISO falls back to LE buffers, then to ACL buffers */
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
2012-05-23 11:04:18 +04:00
/* Pick the connection of @type with queued data and the fewest packets
 * in flight (fair scheduling) and compute its send quota in *quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2012-05-23 11:04:18 +04:00
/* TX timeout handler: the controller stopped returning buffer credits,
 * so disconnect every connection of @type that still has unacked data.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2012-05-23 11:04:18 +04:00
/* Pick the channel to service next for link @type: among the channels
 * whose head packet has the highest priority, choose the one on the
 * connection with the fewest packets in flight. Computes *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness accounting:
			 * only channels at the top priority compete.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2011-11-02 17:52:03 +04:00
/* Anti-starvation pass: promote the head packet of every channel that
 * sent nothing in the last round to just below the maximum priority.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channels that did send are only reset, not promoted */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2012-02-03 18:27:54 +04:00
static inline int __get_blocks ( struct hci_dev * hdev , struct sk_buff * skb )
{
/* Calculate count of blocks used by this packet */
return DIV_ROUND_UP ( skb - > len - HCI_ACL_HDR_SIZE , hdev - > block_len ) ;
}
2022-09-27 01:44:42 +03:00
/* If no buffer credits are left (@cnt == 0) and nothing was transmitted
 * for longer than HCI_ACL_TX_TIMEOUT, treat the link as stalled.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	/* Unconfigured controllers are exempt from the timeout check */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	/* tx timeout must be longer than maximum link supervision timeout
	 * (40.9 seconds)
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}
2005-04-17 02:20:36 +04:00
2020-03-23 22:45:07 +03:00
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
/* Schedule eSCO; identical strategy to hci_sched_sco() but for ESCO_LINK */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2012-05-23 11:04:18 +04:00
/* Schedule ACL transmission in packet-based flow control mode. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* If anything went out, rebalance priorities for starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2012-05-23 11:04:18 +04:00
static void hci_sched_acl_blk ( struct hci_dev * hdev )
2012-02-03 18:27:54 +04:00
{
2012-02-03 18:27:55 +04:00
unsigned int cnt = hdev - > block_cnt ;
2012-02-03 18:27:54 +04:00
struct hci_chan * chan ;
struct sk_buff * skb ;
int quote ;
2012-10-10 18:38:30 +04:00
u8 type ;
2012-02-03 18:27:54 +04:00
2012-10-10 18:38:30 +04:00
BT_DBG ( " %s " , hdev - > name ) ;
if ( hdev - > dev_type = = HCI_AMP )
type = AMP_LINK ;
else
type = ACL_LINK ;
2022-09-27 01:44:42 +03:00
__check_timeout ( hdev , cnt , type ) ;
2012-02-03 18:27:54 +04:00
while ( hdev - > block_cnt > 0 & &
2012-10-10 18:38:30 +04:00
( chan = hci_chan_sent ( hdev , type , & quote ) ) ) {
2012-02-03 18:27:54 +04:00
u32 priority = ( skb_peek ( & chan - > data_q ) ) - > priority ;
while ( quote > 0 & & ( skb = skb_peek ( & chan - > data_q ) ) ) {
int blocks ;
BT_DBG ( " chan %p skb %p len %d priority %u " , chan , skb ,
2012-05-17 07:36:26 +04:00
skb - > len , skb - > priority ) ;
2012-02-03 18:27:54 +04:00
/* Stop if priority has changed */
if ( skb - > priority < priority )
break ;
skb = skb_dequeue ( & chan - > data_q ) ;
blocks = __get_blocks ( hdev , skb ) ;
if ( blocks > hdev - > block_cnt )
return ;
hci_conn_enter_active_mode ( chan - > conn ,
2012-05-17 07:36:26 +04:00
bt_cb ( skb ) - > force_active ) ;
2012-02-03 18:27:54 +04:00
2013-10-11 01:54:17 +04:00
hci_send_frame ( hdev , skb ) ;
2012-02-03 18:27:54 +04:00
hdev - > acl_last_tx = jiffies ;
hdev - > block_cnt - = blocks ;
quote - = blocks ;
chan - > sent + = blocks ;
chan - > conn - > sent + = blocks ;
}
}
if ( cnt ! = hdev - > block_cnt )
2012-10-10 18:38:30 +04:00
hci_prio_recalculate ( hdev , type ) ;
2012-02-03 18:27:54 +04:00
}
2012-05-23 11:04:18 +04:00
/* Dispatch ACL scheduling to the flow-control mode the controller uses. */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
2012-05-23 11:04:18 +04:00
/* Schedule LE transmission; shares the ACL buffer pool when the
 * controller has no dedicated LE buffers (le_pkts == 0).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	/* Write back the remaining credits to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything went out, rebalance priorities for starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2019-07-29 18:15:43 +03:00
/* Schedule CIS */
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	/* ISO falls back to the LE buffer pool, then to the ACL pool */
	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}
2011-12-15 06:50:02 +04:00
/* TX work item: run all per-link-type schedulers, then flush raw packets. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	/* In user channel mode the owner drives the controller directly,
	 * so the kernel schedulers are bypassed.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
2011-03-31 05:57:33 +04:00
/* ----- HCI RX task (incoming data processing) ----- */
2005-04-17 02:20:36 +04:00
/* ACL data packet */
2012-05-23 11:04:18 +04:00
static void hci_acldata_packet ( struct hci_dev * hdev , struct sk_buff * skb )
2005-04-17 02:20:36 +04:00
{
struct hci_acl_hdr * hdr = ( void * ) skb - > data ;
struct hci_conn * conn ;
__u16 handle , flags ;
skb_pull ( skb , HCI_ACL_HDR_SIZE ) ;
handle = __le16_to_cpu ( hdr - > handle ) ;
flags = hci_flags ( handle ) ;
handle = hci_handle ( handle ) ;
2012-06-11 12:13:09 +04:00
BT_DBG ( " %s len %d handle 0x%4.4x flags 0x%4.4x " , hdev - > name , skb - > len ,
2012-05-17 07:36:26 +04:00
handle , flags ) ;
2005-04-17 02:20:36 +04:00
hdev - > stat . acl_rx + + ;
hci_dev_lock ( hdev ) ;
conn = hci_conn_hash_lookup_handle ( hdev , handle ) ;
hci_dev_unlock ( hdev ) ;
2007-02-09 17:24:33 +03:00
2005-04-17 02:20:36 +04:00
if ( conn ) {
2011-12-14 03:06:02 +04:00
hci_conn_enter_active_mode ( conn , BT_POWER_FORCE_ACTIVE_OFF ) ;
2006-07-03 12:02:33 +04:00
2005-04-17 02:20:36 +04:00
/* Send to upper protocol */
2011-12-21 16:11:33 +04:00
l2cap_recv_acldata ( conn , skb , flags ) ;
return ;
2005-04-17 02:20:36 +04:00
} else {
2017-10-30 12:42:59 +03:00
bt_dev_err ( hdev , " ACL packet for unknown connection handle %d " ,
handle ) ;
2005-04-17 02:20:36 +04:00
}
kfree_skb ( skb ) ;
}
/* SCO data packet */
2012-05-23 11:04:18 +04:00
static void hci_scodata_packet ( struct hci_dev * hdev , struct sk_buff * skb )
2005-04-17 02:20:36 +04:00
{
struct hci_sco_hdr * hdr = ( void * ) skb - > data ;
struct hci_conn * conn ;
2020-03-05 18:28:39 +03:00
__u16 handle , flags ;
2005-04-17 02:20:36 +04:00
skb_pull ( skb , HCI_SCO_HDR_SIZE ) ;
handle = __le16_to_cpu ( hdr - > handle ) ;
2020-03-05 18:28:39 +03:00
flags = hci_flags ( handle ) ;
handle = hci_handle ( handle ) ;
2005-04-17 02:20:36 +04:00
2020-03-05 18:28:39 +03:00
BT_DBG ( " %s len %d handle 0x%4.4x flags 0x%4.4x " , hdev - > name , skb - > len ,
handle , flags ) ;
2005-04-17 02:20:36 +04:00
hdev - > stat . sco_rx + + ;
hci_dev_lock ( hdev ) ;
conn = hci_conn_hash_lookup_handle ( hdev , handle ) ;
hci_dev_unlock ( hdev ) ;
if ( conn ) {
/* Send to upper protocol */
2020-06-11 22:50:41 +03:00
bt_cb ( skb ) - > sco . pkt_status = flags & 0x03 ;
2011-12-21 16:11:33 +04:00
sco_recv_scodata ( conn , skb ) ;
return ;
2005-04-17 02:20:36 +04:00
} else {
2022-01-25 22:15:37 +03:00
bt_dev_err_ratelimited ( hdev , " SCO packet for unknown connection handle %d " ,
handle ) ;
2005-04-17 02:20:36 +04:00
}
kfree_skb ( skb ) ;
}
2019-07-29 18:15:43 +03:00
/* ISO data packet */
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	/* skb_pull_data() returns NULL when the skb is shorter than the header */
	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	/* Unpack flags and connection handle from the header field */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}
2013-03-05 22:37:48 +04:00
static bool hci_req_is_complete ( struct hci_dev * hdev )
{
struct sk_buff * skb ;
skb = skb_peek ( & hdev - > cmd_q ) ;
if ( ! skb )
return true ;
2015-11-05 10:31:40 +03:00
return ( bt_cb ( skb ) - > hci . req_flags & HCI_REQ_START ) ;
2013-03-05 22:37:48 +04:00
}
2013-03-05 22:37:49 +04:00
static void hci_resend_last ( struct hci_dev * hdev )
{
struct hci_command_hdr * sent ;
struct sk_buff * skb ;
u16 opcode ;
if ( ! hdev - > sent_cmd )
return ;
sent = ( void * ) hdev - > sent_cmd - > data ;
opcode = __le16_to_cpu ( sent - > opcode ) ;
if ( opcode = = HCI_OP_RESET )
return ;
skb = skb_clone ( hdev - > sent_cmd , GFP_KERNEL ) ;
if ( ! skb )
return ;
skb_queue_head ( & hdev - > cmd_q , skb ) ;
queue_work ( hdev - > workqueue , & hdev - > cmd_work ) ;
}
2015-04-02 13:41:08 +03:00
/* Resolve which completion callback(s) should run for a just-completed
 * HCI command.
 *
 * @hdev:             the controller the event arrived on
 * @opcode:           opcode of the command the controller completed
 * @status:           HCI status of the completed command (0 = success)
 * @req_complete:     out: set to the request's plain completion callback
 * @req_complete_skb: out: set to the skb-carrying completion callback
 *
 * At most one of the two out-parameters is written; the caller is
 * expected to have initialized both to NULL and to invoke whichever one
 * was filled in. Called from event-processing context.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 *
	 * NOTE: hci_sent_cmd_data() returning NULL also covers the case
	 * where hdev->sent_cmd is NULL, so sent_cmd is known non-NULL
	 * past this point.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command that opens a new request and
		 * put it back so it is sent next.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the callback of the command being discarded;
		 * only the last one seen before the loop ends survives.
		 */
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		/* IRQ-safe free: we hold cmd_q.lock with interrupts off */
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
2010-08-09 07:06:53 +04:00
/* Receive-path work handler: drain hdev->rx_q and dispatch each frame
 * to the monitor, raw sockets and the matching packet-type handler.
 * Runs on the hdev workqueue.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions used for collecting packet parsing
	 * coverage information from this background thread and associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 *
	 * kcov_remote_stop() sits in the loop's advance expression so it
	 * runs after every iteration, pairing with the start below.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame; each handler consumes the skb */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2011-12-15 05:53:47 +04:00
/* Command-path work handler: send the next queued HCI command if the
 * controller's command credit (cmd_cnt) allows, track it in
 * hdev->sent_cmd and arm the command timeout. Runs on the hdev
 * workqueue.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously tracked command before replacing it */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the completion path can inspect the
		 * command that is in flight.
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);

		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);

			/* Consume one command credit for this send */
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				/* Driver rejected the frame: abort the
				 * synchronous request waiting on it.
				 */
				__hci_cmd_sync_cancel(hdev, -res);

			rcu_read_lock();
			/* Don't (re)arm the timeout while resetting or
			 * draining the command workqueue.
			 */
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
			rcu_read_unlock();
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}