2007-02-09 17:24:33 +03:00
/*
2005-04-17 02:20:36 +04:00
BlueZ - Bluetooth protocol stack for Linux
Copyright ( C ) 2000 - 2001 Qualcomm Incorporated
2011-12-18 19:39:33 +04:00
Copyright ( C ) 2011 ProFUSION Embedded Systems
2005-04-17 02:20:36 +04:00
Written 2000 , 2001 by Maxim Krasnyansky < maxk @ qualcomm . com >
This program is free software ; you can redistribute it and / or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation ;
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS
OR IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS .
IN NO EVENT SHALL THE COPYRIGHT HOLDER ( S ) AND AUTHOR ( S ) BE LIABLE FOR ANY
2007-02-09 17:24:33 +03:00
CLAIM , OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES , OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE , DATA OR PROFITS , WHETHER IN AN
ACTION OF CONTRACT , NEGLIGENCE OR OTHER TORTIOUS ACTION , ARISING OUT OF
2005-04-17 02:20:36 +04:00
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE .
2007-02-09 17:24:33 +03:00
ALL LIABILITY , INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS ,
COPYRIGHTS , TRADEMARKS OR OTHER RIGHTS , RELATING TO USE OF THIS
2005-04-17 02:20:36 +04:00
SOFTWARE IS DISCLAIMED .
*/
/* Bluetooth HCI core. */
2012-05-23 11:04:22 +04:00
# include <linux/export.h>
2012-05-28 00:36:56 +04:00
# include <linux/idr.h>
2012-05-23 11:04:22 +04:00
# include <linux/rfkill.h>
2013-10-16 14:28:55 +04:00
# include <linux/debugfs.h>
2013-10-18 04:24:15 +04:00
# include <asm/unaligned.h>
2005-04-17 02:20:36 +04:00
# include <net/bluetooth/bluetooth.h>
# include <net/bluetooth/hci_core.h>
/* Work handlers for RX, command and TX processing; static, so they are
 * defined later in this translation unit.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
/* ---- HCI debugfs entries ---- */

/* Report whether Device Under Test mode is active as "Y\n" or "N\n". */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* Parse a boolean written by user space and switch DUT mode accordingly.
 * Enabling issues HCI_OP_ENABLE_DUT_MODE; disabling issues HCI_OP_RESET.
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	/* Commands can only be sent while the device is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already set */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);

	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the returned parameters is the command status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Command succeeded, so flip the cached flag */
	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

/* debugfs file operations for the "dut_mode" entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
/* Dump the LMP features pages (and LE features, if LE capable), one line
 * per page, eight bytes in hex per line.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);

	/* Only pages up to the controller's reported max_page are valid */
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}

	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);

	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

/* debugfs file operations for the read-only "features" entry */
static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Dump the device blacklist: one "address (type)" entry per line. */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

/* debugfs file operations for the read-only "blacklist" entry */
static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Dump the list of registered service UUIDs, one UUID per line. */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

/* debugfs file operations for the read-only "uuids" entry */
static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Dump every entry of the inquiry (discovery) cache, one device per line:
 * address, page-scan parameters, class of device, clock offset, RSSI,
 * SSP mode and the entry timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

/* debugfs file operations for the read-only "inquiry_cache" entry */
static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Dump the stored BR/EDR link keys: address, key type, key value in hex
 * and PIN length, one key per line.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

/* debugfs file operations for the read-only "link_keys" entry */
static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Report whether debug keys are in use as "Y\n" or "N\n". */
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs file operations for the read-only "use_debug_keys" entry */
static const struct file_operations use_debug_keys_fops = {
	.open		= simple_open,
	.read		= use_debug_keys_read,
	.llseek		= default_llseek,
};
/* Show the current class of device as a 24-bit hex value; dev_class is
 * stored little endian, so bytes are printed in reverse order.
 */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

/* debugfs file operations for the read-only "dev_class" entry */
static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2013-10-17 23:02:31 +04:00
static int voice_setting_get ( void * data , u64 * val )
{
struct hci_dev * hdev = data ;
hci_dev_lock ( hdev ) ;
* val = hdev - > voice_setting ;
hci_dev_unlock ( hdev ) ;
return 0 ;
}
DEFINE_SIMPLE_ATTRIBUTE ( voice_setting_fops , voice_setting_get ,
NULL , " 0x%4.4llx \n " ) ;
/* Store a new auto-accept delay value under the device lock. */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the current auto-accept delay under the device lock. */
static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;
	u64 delay;

	hci_dev_lock(hdev);
	delay = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	*val = delay;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
/* Switch SSP debug mode on (1) or off (0) by sending
 * HCI_OP_WRITE_SSP_DEBUG_MODE synchronously; the cached value is only
 * updated after the controller acknowledged the command.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* Commands can only be sent while the device is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the returned parameters is the command status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the cached SSP debug mode under the device lock. */
static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
/* Set the idle timeout; 0 disables it, otherwise the value must lie in
 * the 500..3600000 ms range.
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the current idle timeout under the device lock. */
static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
/* Set the sniff-mode minimum interval; must be a non-zero even value
 * not larger than the configured maximum interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the sniff-mode minimum interval under the device lock. */
static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
/* Set the sniff-mode maximum interval; must be a non-zero even value
 * not smaller than the configured minimum interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the sniff-mode maximum interval under the device lock. */
static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
/* Show the controller's LE static address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

/* debugfs file operations for the read-only "static_address" entry */
static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Set the LE own address type; only values 0 and 1 are accepted. */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the LE own address type under the device lock. */
static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
2013-10-19 02:56:57 +04:00
static int long_term_keys_show ( struct seq_file * f , void * ptr )
{
struct hci_dev * hdev = f - > private ;
struct list_head * p , * n ;
hci_dev_lock ( hdev ) ;
list_for_each_safe ( p , n , & hdev - > link_keys ) {
struct smp_ltk * ltk = list_entry ( p , struct smp_ltk , list ) ;
seq_printf ( f , " %pMR (type %u) %u %u %u %.4x %*phN %*phN \\ n " ,
& ltk - > bdaddr , ltk - > bdaddr_type , ltk - > authenticated ,
ltk - > type , ltk - > enc_size , __le16_to_cpu ( ltk - > ediv ) ,
8 , ltk - > rand , 16 , ltk - > val ) ;
}
hci_dev_unlock ( hdev ) ;
return 0 ;
}
static int long_term_keys_open ( struct inode * inode , struct file * file )
{
return single_open ( file , long_term_keys_show , inode - > i_private ) ;
}
static const struct file_operations long_term_keys_fops = {
. open = long_term_keys_open ,
. read = seq_read ,
. llseek = seq_lseek ,
. release = single_release ,
} ;
/* Set the LE connection minimum interval; must be within the spec range
 * 0x0006..0x0c80 and not exceed the configured maximum interval.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the LE connection minimum interval under the device lock. */
static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

/* Set the LE connection maximum interval; must be within the spec range
 * 0x0006..0x0c80 and not fall below the configured minimum interval.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

/* Read the LE connection maximum interval under the device lock. */
static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
/* ---- HCI requests ---- */

/* Completion callback for synchronous requests: record the result and
 * wake up the waiter sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort a pending synchronous request with the given errno and wake up
 * the waiter.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Take ownership of the last received event (hdev->recv_evt) and return
 * it if it matches: when @event is non-zero, any event of that type is
 * accepted; otherwise it must be a Command Complete event for @opcode,
 * with the headers already pulled off the skb.  On mismatch or malformed
 * data the skb is freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach the stored event under the lock; we now own the skb */
	hci_dev_lock(hdev);
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;
	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event type rather than the
	 * Command Complete of the issued opcode.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
/* Send a single HCI command and sleep (interruptibly, up to @timeout)
 * until it completes, is cancelled, or times out.  Returns the skb with
 * the command response (see hci_get_cmd_complete()) or an ERR_PTR.
 * When @event is non-zero the request completes on that event instead
 * of the Command Complete for @opcode.  Caller must hold the request
 * lock (hci_req_lock).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Sleep until hci_req_sync_complete() wakes us or we time out */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* Convenience wrapper around __hci_cmd_sync_ev() with no special
 * completion event (waits for the Command Complete of @opcode).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Execute request and wait for completion. */

/* Build a request via @func, run it and sleep (interruptibly, up to
 * @timeout) until hci_req_sync_complete() reports the result.  Returns
 * 0 on success or a negative errno.  Caller must hold the request lock;
 * hci_req_sync() is the locking wrapper.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue the commands making up this request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Sleep until the completion callback wakes us or we time out */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
/* Locked wrapper around __hci_req_sync(): refuses when the device is
 * down and serializes all synchronous requests via hci_req_lock().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
/* Request builder: queue an HCI_OP_RESET and mark the device as being
 * in reset.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
/* Stage-1 init request: optionally reset the controller, then branch on
 * the device type to the BR/EDR or AMP specific init sequence.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
/* Queue the common BR/EDR setup commands: buffer/identity reads, IAC
 * reads, event-filter clearing, connection accept timeout and (where
 * supported) page-scan parameter reads.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
/* Queue the LE setup commands (buffer size, features, TX power, white
 * list size, supported states) and flag LE as enabled on controllers
 * without BR/EDR support.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode ( struct hci_dev * hdev )
{
if ( lmp_ext_inq_capable ( hdev ) )
return 0x02 ;
if ( lmp_inq_rssi_capable ( hdev ) )
return 0x01 ;
if ( hdev - > manufacturer = = 11 & & hdev - > hci_rev = = 0x00 & &
hdev - > lmp_subver = = 0x0757 )
return 0x01 ;
if ( hdev - > manufacturer = = 15 ) {
if ( hdev - > hci_rev = = 0x03 & & hdev - > lmp_subver = = 0x6963 )
return 0x01 ;
if ( hdev - > hci_rev = = 0x09 & & hdev - > lmp_subver = = 0x6963 )
return 0x01 ;
if ( hdev - > hci_rev = = 0x00 & & hdev - > lmp_subver = = 0x6965 )
return 0x01 ;
}
if ( hdev - > manufacturer = = 31 & & hdev - > hci_rev = = 0x2005 & &
hdev - > lmp_subver = = 0x1805 )
return 0x01 ;
return 0x00 ;
}
2013-03-05 22:37:49 +04:00
static void hci_setup_inquiry_mode ( struct hci_request * req )
2013-03-05 22:37:43 +04:00
{
u8 mode ;
2013-03-05 22:37:49 +04:00
mode = hci_get_inquiry_mode ( req - > hdev ) ;
2013-03-05 22:37:43 +04:00
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_INQUIRY_MODE , 1 , & mode ) ;
2013-03-05 22:37:43 +04:00
}
2013-03-05 22:37:49 +04:00
static void hci_setup_event_mask ( struct hci_request * req )
2013-03-05 22:37:43 +04:00
{
2013-03-05 22:37:49 +04:00
struct hci_dev * hdev = req - > hdev ;
2013-03-05 22:37:43 +04:00
/* The second byte is 0xff instead of 0x9f (two reserved bits
* disabled ) since a Broadcom 1.2 dongle doesn ' t respond to the
* command otherwise .
*/
u8 events [ 8 ] = { 0xff , 0xff , 0xfb , 0xff , 0x00 , 0x00 , 0x00 , 0x00 } ;
/* CSR 1.1 dongles does not accept any bitfield so don't try to set
* any event mask for pre 1.2 devices .
*/
if ( hdev - > hci_ver < BLUETOOTH_VER_1_2 )
return ;
if ( lmp_bredr_capable ( hdev ) ) {
events [ 4 ] | = 0x01 ; /* Flow Specification Complete */
events [ 4 ] | = 0x02 ; /* Inquiry Result with RSSI */
events [ 4 ] | = 0x04 ; /* Read Remote Extended Features Complete */
events [ 5 ] | = 0x08 ; /* Synchronous Connection Complete */
events [ 5 ] | = 0x10 ; /* Synchronous Connection Changed */
2013-08-13 21:00:54 +04:00
} else {
/* Use a different default for LE-only devices */
memset ( events , 0 , sizeof ( events ) ) ;
events [ 0 ] | = 0x10 ; /* Disconnection Complete */
events [ 0 ] | = 0x80 ; /* Encryption Change */
events [ 1 ] | = 0x08 ; /* Read Remote Version Information Complete */
events [ 1 ] | = 0x20 ; /* Command Complete */
events [ 1 ] | = 0x40 ; /* Command Status */
events [ 1 ] | = 0x80 ; /* Hardware Error */
events [ 2 ] | = 0x04 ; /* Number of Completed Packets */
events [ 3 ] | = 0x02 ; /* Data Buffer Overflow */
events [ 5 ] | = 0x80 ; /* Encryption Key Refresh Complete */
2013-03-05 22:37:43 +04:00
}
if ( lmp_inq_rssi_capable ( hdev ) )
events [ 4 ] | = 0x02 ; /* Inquiry Result with RSSI */
if ( lmp_sniffsubr_capable ( hdev ) )
events [ 5 ] | = 0x20 ; /* Sniff Subrating */
if ( lmp_pause_enc_capable ( hdev ) )
events [ 5 ] | = 0x80 ; /* Encryption Key Refresh Complete */
if ( lmp_ext_inq_capable ( hdev ) )
events [ 5 ] | = 0x40 ; /* Extended Inquiry Result */
if ( lmp_no_flush_capable ( hdev ) )
events [ 7 ] | = 0x01 ; /* Enhanced Flush Complete */
if ( lmp_lsto_capable ( hdev ) )
events [ 6 ] | = 0x80 ; /* Link Supervision Timeout Changed */
if ( lmp_ssp_capable ( hdev ) ) {
events [ 6 ] | = 0x01 ; /* IO Capability Request */
events [ 6 ] | = 0x02 ; /* IO Capability Response */
events [ 6 ] | = 0x04 ; /* User Confirmation Request */
events [ 6 ] | = 0x08 ; /* User Passkey Request */
events [ 6 ] | = 0x10 ; /* Remote OOB Data Request */
events [ 6 ] | = 0x20 ; /* Simple Pairing Complete */
events [ 7 ] | = 0x04 ; /* User Passkey Notification */
events [ 7 ] | = 0x08 ; /* Keypress Notification */
events [ 7 ] | = 0x10 ; /* Remote Host Supported
* Features Notification
*/
}
if ( lmp_le_capable ( hdev ) )
events [ 7 ] | = 0x20 ; /* LE Meta-Event */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_SET_EVENT_MASK , sizeof ( events ) , events ) ;
2013-03-05 22:37:43 +04:00
if ( lmp_le_capable ( hdev ) ) {
memset ( events , 0 , sizeof ( events ) ) ;
events [ 0 ] = 0x1f ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_LE_SET_EVENT_MASK ,
sizeof ( events ) , events ) ;
2013-03-05 22:37:43 +04:00
}
}
2013-03-05 22:37:49 +04:00
static void hci_init2_req ( struct hci_request * req , unsigned long opt )
2013-03-05 22:37:43 +04:00
{
2013-03-05 22:37:49 +04:00
struct hci_dev * hdev = req - > hdev ;
2013-03-05 22:37:43 +04:00
if ( lmp_bredr_capable ( hdev ) )
2013-03-05 22:37:49 +04:00
bredr_setup ( req ) ;
2013-10-02 14:43:13 +04:00
else
clear_bit ( HCI_BREDR_ENABLED , & hdev - > dev_flags ) ;
2013-03-05 22:37:43 +04:00
if ( lmp_le_capable ( hdev ) )
2013-03-05 22:37:49 +04:00
le_setup ( req ) ;
2013-03-05 22:37:43 +04:00
2013-03-05 22:37:49 +04:00
hci_setup_event_mask ( req ) ;
2013-03-05 22:37:43 +04:00
2013-07-24 03:32:46 +04:00
/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
* local supported commands HCI command .
*/
if ( hdev - > manufacturer ! = 31 & & hdev - > hci_ver > BLUETOOTH_VER_1_1 )
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_READ_LOCAL_COMMANDS , 0 , NULL ) ;
2013-03-05 22:37:43 +04:00
if ( lmp_ssp_capable ( hdev ) ) {
2013-10-18 23:04:47 +04:00
/* When SSP is available, then the host features page
* should also be available as well . However some
* controllers list the max_page as 0 as long as SSP
* has not been enabled . To achieve proper debugging
* output , force the minimum max_page to 1 at least .
*/
hdev - > max_page = 0x01 ;
2013-03-05 22:37:43 +04:00
if ( test_bit ( HCI_SSP_ENABLED , & hdev - > dev_flags ) ) {
u8 mode = 0x01 ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_SSP_MODE ,
sizeof ( mode ) , & mode ) ;
2013-03-05 22:37:43 +04:00
} else {
struct hci_cp_write_eir cp ;
memset ( hdev - > eir , 0 , sizeof ( hdev - > eir ) ) ;
memset ( & cp , 0 , sizeof ( cp ) ) ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_EIR , sizeof ( cp ) , & cp ) ;
2013-03-05 22:37:43 +04:00
}
}
if ( lmp_inq_rssi_capable ( hdev ) )
2013-03-05 22:37:49 +04:00
hci_setup_inquiry_mode ( req ) ;
2013-03-05 22:37:43 +04:00
if ( lmp_inq_tx_pwr_capable ( hdev ) )
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_READ_INQ_RSP_TX_POWER , 0 , NULL ) ;
2013-03-05 22:37:43 +04:00
if ( lmp_ext_feat_capable ( hdev ) ) {
struct hci_cp_read_local_ext_features cp ;
cp . page = 0x01 ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_READ_LOCAL_EXT_FEATURES ,
sizeof ( cp ) , & cp ) ;
2013-03-05 22:37:43 +04:00
}
if ( test_bit ( HCI_LINK_SECURITY , & hdev - > dev_flags ) ) {
u8 enable = 1 ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_AUTH_ENABLE , sizeof ( enable ) ,
& enable ) ;
2013-03-05 22:37:43 +04:00
}
}
2013-03-05 22:37:49 +04:00
static void hci_setup_link_policy ( struct hci_request * req )
2013-03-05 22:37:43 +04:00
{
2013-03-05 22:37:49 +04:00
struct hci_dev * hdev = req - > hdev ;
2013-03-05 22:37:43 +04:00
struct hci_cp_write_def_link_policy cp ;
u16 link_policy = 0 ;
if ( lmp_rswitch_capable ( hdev ) )
link_policy | = HCI_LP_RSWITCH ;
if ( lmp_hold_capable ( hdev ) )
link_policy | = HCI_LP_HOLD ;
if ( lmp_sniff_capable ( hdev ) )
link_policy | = HCI_LP_SNIFF ;
if ( lmp_park_capable ( hdev ) )
link_policy | = HCI_LP_PARK ;
cp . policy = cpu_to_le16 ( link_policy ) ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_DEF_LINK_POLICY , sizeof ( cp ) , & cp ) ;
2013-03-05 22:37:43 +04:00
}
2013-03-05 22:37:49 +04:00
static void hci_set_le_support ( struct hci_request * req )
2013-03-05 22:37:43 +04:00
{
2013-03-05 22:37:49 +04:00
struct hci_dev * hdev = req - > hdev ;
2013-03-05 22:37:43 +04:00
struct hci_cp_write_le_host_supported cp ;
2013-04-19 19:35:21 +04:00
/* LE-only devices do not support explicit enablement */
if ( ! lmp_bredr_capable ( hdev ) )
return ;
2013-03-05 22:37:43 +04:00
memset ( & cp , 0 , sizeof ( cp ) ) ;
if ( test_bit ( HCI_LE_ENABLED , & hdev - > dev_flags ) ) {
cp . le = 0x01 ;
cp . simul = lmp_le_br_capable ( hdev ) ;
}
if ( cp . le ! = lmp_host_le_capable ( hdev ) )
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_LE_HOST_SUPPORTED , sizeof ( cp ) ,
& cp ) ;
2013-03-05 22:37:43 +04:00
}
2013-09-13 12:40:02 +04:00
static void hci_set_event_mask_page_2 ( struct hci_request * req )
{
struct hci_dev * hdev = req - > hdev ;
u8 events [ 8 ] = { 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 , 0x00 } ;
/* If Connectionless Slave Broadcast master role is supported
* enable all necessary events for it .
*/
if ( hdev - > features [ 2 ] [ 0 ] & 0x01 ) {
events [ 1 ] | = 0x40 ; /* Triggered Clock Capture */
events [ 1 ] | = 0x80 ; /* Synchronization Train Complete */
events [ 2 ] | = 0x10 ; /* Slave Page Response Timeout */
events [ 2 ] | = 0x20 ; /* CSB Channel Map Change */
}
/* If Connectionless Slave Broadcast slave role is supported
* enable all necessary events for it .
*/
if ( hdev - > features [ 2 ] [ 0 ] & 0x02 ) {
events [ 2 ] | = 0x01 ; /* Synchronization Train Received */
events [ 2 ] | = 0x02 ; /* CSB Receive */
events [ 2 ] | = 0x04 ; /* CSB Timeout */
events [ 2 ] | = 0x08 ; /* Truncated Page Complete */
}
hci_req_add ( req , HCI_OP_SET_EVENT_MASK_PAGE_2 , sizeof ( events ) , events ) ;
}
2013-03-05 22:37:49 +04:00
static void hci_init3_req ( struct hci_request * req , unsigned long opt )
2013-03-05 22:37:43 +04:00
{
2013-03-05 22:37:49 +04:00
struct hci_dev * hdev = req - > hdev ;
2013-04-17 16:00:52 +04:00
u8 p ;
2013-03-05 22:37:49 +04:00
2013-06-13 15:34:31 +04:00
/* Some Broadcom based Bluetooth controllers do not support the
* Delete Stored Link Key command . They are clearly indicating its
* absence in the bit mask of supported commands .
*
* Check the supported commands and only if the the command is marked
* as supported send it . If not supported assume that the controller
* does not have actual support for stored link keys which makes this
* command redundant anyway .
2013-07-02 01:14:46 +04:00
*/
2013-06-13 12:01:13 +04:00
if ( hdev - > commands [ 6 ] & 0x80 ) {
struct hci_cp_delete_stored_link_key cp ;
bacpy ( & cp . bdaddr , BDADDR_ANY ) ;
cp . delete_all = 0x01 ;
hci_req_add ( req , HCI_OP_DELETE_STORED_LINK_KEY ,
sizeof ( cp ) , & cp ) ;
}
2013-03-05 22:37:43 +04:00
if ( hdev - > commands [ 5 ] & 0x10 )
2013-03-05 22:37:49 +04:00
hci_setup_link_policy ( req ) ;
2013-03-05 22:37:43 +04:00
2013-10-19 03:38:09 +04:00
if ( lmp_le_capable ( hdev ) ) {
/* If the controller has a public BD_ADDR, then by
* default use that one . If this is a LE only
* controller without one , default to the random
* address .
*/
if ( bacmp ( & hdev - > bdaddr , BDADDR_ANY ) )
hdev - > own_addr_type = ADDR_LE_DEV_PUBLIC ;
else
hdev - > own_addr_type = ADDR_LE_DEV_RANDOM ;
2013-03-05 22:37:49 +04:00
hci_set_le_support ( req ) ;
2013-10-19 03:38:09 +04:00
}
2013-04-17 16:00:52 +04:00
/* Read features beyond page 1 if available */
for ( p = 2 ; p < HCI_MAX_PAGES & & p < = hdev - > max_page ; p + + ) {
struct hci_cp_read_local_ext_features cp ;
cp . page = p ;
hci_req_add ( req , HCI_OP_READ_LOCAL_EXT_FEATURES ,
sizeof ( cp ) , & cp ) ;
}
2013-03-05 22:37:43 +04:00
}
2013-09-13 12:40:01 +04:00
static void hci_init4_req ( struct hci_request * req , unsigned long opt )
{
struct hci_dev * hdev = req - > hdev ;
2013-09-13 12:40:02 +04:00
/* Set event mask page 2 if the HCI command for it is supported */
if ( hdev - > commands [ 22 ] & 0x04 )
hci_set_event_mask_page_2 ( req ) ;
2013-09-13 12:40:01 +04:00
/* Check for Synchronization Train support */
if ( hdev - > features [ 2 ] [ 0 ] & 0x04 )
hci_req_add ( req , HCI_OP_READ_SYNC_TRAIN_PARAMS , 0 , NULL ) ;
}
2013-03-05 22:37:43 +04:00
static int __hci_init ( struct hci_dev * hdev )
{
int err ;
err = __hci_req_sync ( hdev , hci_init1_req , 0 , HCI_INIT_TIMEOUT ) ;
if ( err < 0 )
return err ;
2013-10-19 18:09:12 +04:00
/* The Device Under Test (DUT) mode is special and available for
* all controller types . So just create it early on .
*/
if ( test_bit ( HCI_SETUP , & hdev - > dev_flags ) ) {
debugfs_create_file ( " dut_mode " , 0644 , hdev - > debugfs , hdev ,
& dut_mode_fops ) ;
}
2013-03-05 22:37:43 +04:00
/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
* BR / EDR / LE type controllers . AMP controllers only need the
* first stage init .
*/
if ( hdev - > dev_type ! = HCI_BREDR )
return 0 ;
err = __hci_req_sync ( hdev , hci_init2_req , 0 , HCI_INIT_TIMEOUT ) ;
if ( err < 0 )
return err ;
2013-09-13 12:40:01 +04:00
err = __hci_req_sync ( hdev , hci_init3_req , 0 , HCI_INIT_TIMEOUT ) ;
if ( err < 0 )
return err ;
2013-10-16 14:28:55 +04:00
err = __hci_req_sync ( hdev , hci_init4_req , 0 , HCI_INIT_TIMEOUT ) ;
if ( err < 0 )
return err ;
/* Only create debugfs entries during the initial setup
* phase and not every time the controller gets powered on .
*/
if ( ! test_bit ( HCI_SETUP , & hdev - > dev_flags ) )
return 0 ;
2013-10-18 23:04:46 +04:00
debugfs_create_file ( " features " , 0444 , hdev - > debugfs , hdev ,
& features_fops ) ;
2013-10-18 23:04:49 +04:00
debugfs_create_u16 ( " manufacturer " , 0444 , hdev - > debugfs ,
& hdev - > manufacturer ) ;
debugfs_create_u8 ( " hci_version " , 0444 , hdev - > debugfs , & hdev - > hci_ver ) ;
debugfs_create_u16 ( " hci_revision " , 0444 , hdev - > debugfs , & hdev - > hci_rev ) ;
2013-10-18 04:24:14 +04:00
debugfs_create_file ( " blacklist " , 0444 , hdev - > debugfs , hdev ,
& blacklist_fops ) ;
2013-10-18 04:24:15 +04:00
debugfs_create_file ( " uuids " , 0444 , hdev - > debugfs , hdev , & uuids_fops ) ;
2013-10-16 14:28:55 +04:00
if ( lmp_bredr_capable ( hdev ) ) {
debugfs_create_file ( " inquiry_cache " , 0444 , hdev - > debugfs ,
hdev , & inquiry_cache_fops ) ;
2013-10-18 23:04:52 +04:00
debugfs_create_file ( " link_keys " , 0400 , hdev - > debugfs ,
hdev , & link_keys_fops ) ;
2013-10-19 04:14:22 +04:00
debugfs_create_file ( " use_debug_keys " , 0444 , hdev - > debugfs ,
hdev , & use_debug_keys_fops ) ;
2013-10-18 23:04:51 +04:00
debugfs_create_file ( " dev_class " , 0444 , hdev - > debugfs ,
hdev , & dev_class_fops ) ;
2013-10-17 23:02:31 +04:00
debugfs_create_file ( " voice_setting " , 0444 , hdev - > debugfs ,
hdev , & voice_setting_fops ) ;
2013-10-16 14:28:55 +04:00
}
2013-10-19 18:09:11 +04:00
if ( lmp_ssp_capable ( hdev ) ) {
2013-10-17 21:54:46 +04:00
debugfs_create_file ( " auto_accept_delay " , 0644 , hdev - > debugfs ,
hdev , & auto_accept_delay_fops ) ;
2013-10-19 18:09:11 +04:00
debugfs_create_file ( " ssp_debug_mode " , 0644 , hdev - > debugfs ,
hdev , & ssp_debug_mode_fops ) ;
}
2013-10-17 21:54:46 +04:00
2013-10-18 06:16:02 +04:00
if ( lmp_sniff_capable ( hdev ) ) {
debugfs_create_file ( " idle_timeout " , 0644 , hdev - > debugfs ,
hdev , & idle_timeout_fops ) ;
debugfs_create_file ( " sniff_min_interval " , 0644 , hdev - > debugfs ,
hdev , & sniff_min_interval_fops ) ;
debugfs_create_file ( " sniff_max_interval " , 0644 , hdev - > debugfs ,
hdev , & sniff_max_interval_fops ) ;
}
2013-10-19 02:23:46 +04:00
if ( lmp_le_capable ( hdev ) ) {
debugfs_create_u8 ( " white_list_size " , 0444 , hdev - > debugfs ,
& hdev - > le_white_list_size ) ;
2013-10-17 22:45:09 +04:00
debugfs_create_file ( " static_address " , 0444 , hdev - > debugfs ,
hdev , & static_address_fops ) ;
2013-10-19 03:38:10 +04:00
debugfs_create_file ( " own_address_type " , 0644 , hdev - > debugfs ,
hdev , & own_address_type_fops ) ;
2013-10-19 02:56:57 +04:00
debugfs_create_file ( " long_term_keys " , 0400 , hdev - > debugfs ,
hdev , & long_term_keys_fops ) ;
2013-10-19 18:09:13 +04:00
debugfs_create_file ( " conn_min_interval " , 0644 , hdev - > debugfs ,
hdev , & conn_min_interval_fops ) ;
debugfs_create_file ( " conn_max_interval " , 0644 , hdev - > debugfs ,
hdev , & conn_max_interval_fops ) ;
2013-10-19 02:23:46 +04:00
}
2013-10-17 22:45:09 +04:00
2013-10-16 14:28:55 +04:00
return 0 ;
2013-03-05 22:37:43 +04:00
}
2013-03-05 22:37:49 +04:00
static void hci_scan_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
__u8 scan = opt ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , scan ) ;
2005-04-17 02:20:36 +04:00
/* Inquiry and Page scans */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_SCAN_ENABLE , 1 , & scan ) ;
2005-04-17 02:20:36 +04:00
}
2013-03-05 22:37:49 +04:00
static void hci_auth_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
__u8 auth = opt ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , auth ) ;
2005-04-17 02:20:36 +04:00
/* Authentication */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_AUTH_ENABLE , 1 , & auth ) ;
2005-04-17 02:20:36 +04:00
}
2013-03-05 22:37:49 +04:00
static void hci_encrypt_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
__u8 encrypt = opt ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , encrypt ) ;
2005-04-17 02:20:36 +04:00
2008-07-14 22:13:47 +04:00
/* Encryption */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_ENCRYPT_MODE , 1 , & encrypt ) ;
2005-04-17 02:20:36 +04:00
}
2013-03-05 22:37:49 +04:00
static void hci_linkpol_req ( struct hci_request * req , unsigned long opt )
2008-07-14 22:13:47 +04:00
{
__le16 policy = cpu_to_le16 ( opt ) ;
2013-03-05 22:37:49 +04:00
BT_DBG ( " %s %x " , req - > hdev - > name , policy ) ;
2008-07-14 22:13:47 +04:00
/* Default link policy */
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_WRITE_DEF_LINK_POLICY , 2 , & policy ) ;
2008-07-14 22:13:47 +04:00
}
2007-02-09 17:24:33 +03:00
/* Get HCI device by index.
2005-04-17 02:20:36 +04:00
* Device is held on return . */
struct hci_dev * hci_dev_get ( int index )
{
2011-11-01 12:58:56 +04:00
struct hci_dev * hdev = NULL , * d ;
2005-04-17 02:20:36 +04:00
BT_DBG ( " %d " , index ) ;
if ( index < 0 )
return NULL ;
read_lock ( & hci_dev_list_lock ) ;
2011-11-01 12:58:56 +04:00
list_for_each_entry ( d , & hci_dev_list , list ) {
2005-04-17 02:20:36 +04:00
if ( d - > id = = index ) {
hdev = hci_dev_hold ( d ) ;
break ;
}
}
read_unlock ( & hci_dev_list_lock ) ;
return hdev ;
}
/* ---- Inquiry support ---- */
2012-01-04 16:23:45 +04:00
2012-01-04 17:44:20 +04:00
bool hci_discovery_active ( struct hci_dev * hdev )
{
struct discovery_state * discov = & hdev - > discovery ;
2012-02-04 00:47:58 +04:00
switch ( discov - > state ) {
2012-02-18 03:39:37 +04:00
case DISCOVERY_FINDING :
2012-02-04 00:47:58 +04:00
case DISCOVERY_RESOLVING :
2012-01-04 17:44:20 +04:00
return true ;
2012-02-04 00:47:58 +04:00
default :
return false ;
}
2012-01-04 17:44:20 +04:00
}
2012-01-04 16:23:45 +04:00
void hci_discovery_set_state ( struct hci_dev * hdev , int state )
{
BT_DBG ( " %s state %u -> %u " , hdev - > name , hdev - > discovery . state , state ) ;
if ( hdev - > discovery . state = = state )
return ;
switch ( state ) {
case DISCOVERY_STOPPED :
2012-02-13 22:41:02 +04:00
if ( hdev - > discovery . state ! = DISCOVERY_STARTING )
mgmt_discovering ( hdev , 0 ) ;
2012-01-04 16:23:45 +04:00
break ;
case DISCOVERY_STARTING :
break ;
2012-02-18 03:39:37 +04:00
case DISCOVERY_FINDING :
2012-01-04 16:23:45 +04:00
mgmt_discovering ( hdev , 1 ) ;
break ;
2012-01-04 17:44:20 +04:00
case DISCOVERY_RESOLVING :
break ;
2012-01-04 16:23:45 +04:00
case DISCOVERY_STOPPING :
break ;
}
hdev - > discovery . state = state ;
}
2013-04-30 22:29:27 +04:00
void hci_inquiry_cache_flush ( struct hci_dev * hdev )
2005-04-17 02:20:36 +04:00
{
2012-01-04 16:16:21 +04:00
struct discovery_state * cache = & hdev - > discovery ;
2012-01-03 18:03:00 +04:00
struct inquiry_entry * p , * n ;
2005-04-17 02:20:36 +04:00
2012-01-04 15:31:59 +04:00
list_for_each_entry_safe ( p , n , & cache - > all , all ) {
list_del ( & p - > all ) ;
2012-01-03 18:03:00 +04:00
kfree ( p ) ;
2005-04-17 02:20:36 +04:00
}
2012-01-04 15:31:59 +04:00
INIT_LIST_HEAD ( & cache - > unknown ) ;
INIT_LIST_HEAD ( & cache - > resolve ) ;
2005-04-17 02:20:36 +04:00
}
2012-05-17 07:36:26 +04:00
/* Look up an inquiry cache entry by Bluetooth address over the full
 * cache; returns NULL when no entry matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ent;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(ent, &cache->all, all) {
		if (!bacmp(&ent->data.bdaddr, bdaddr))
			return ent;
	}

	return NULL;
}
struct inquiry_entry * hci_inquiry_cache_lookup_unknown ( struct hci_dev * hdev ,
2012-03-08 08:25:00 +04:00
bdaddr_t * bdaddr )
2012-01-04 15:31:59 +04:00
{
2012-01-04 16:16:21 +04:00
struct discovery_state * cache = & hdev - > discovery ;
2012-01-04 15:31:59 +04:00
struct inquiry_entry * e ;
2012-09-25 13:49:43 +04:00
BT_DBG ( " cache %p, %pMR " , cache , bdaddr ) ;
2012-01-04 15:31:59 +04:00
list_for_each_entry ( e , & cache - > unknown , list ) {
2005-04-17 02:20:36 +04:00
if ( ! bacmp ( & e - > data . bdaddr , bdaddr ) )
2012-01-03 18:03:00 +04:00
return e ;
}
return NULL ;
2005-04-17 02:20:36 +04:00
}
2012-01-04 17:44:20 +04:00
struct inquiry_entry * hci_inquiry_cache_lookup_resolve ( struct hci_dev * hdev ,
2012-03-08 08:25:00 +04:00
bdaddr_t * bdaddr ,
int state )
2012-01-04 17:44:20 +04:00
{
struct discovery_state * cache = & hdev - > discovery ;
struct inquiry_entry * e ;
2012-09-25 13:49:43 +04:00
BT_DBG ( " cache %p bdaddr %pMR state %d " , cache , bdaddr , state ) ;
2012-01-04 17:44:20 +04:00
list_for_each_entry ( e , & cache - > resolve , list ) {
if ( ! bacmp ( bdaddr , BDADDR_ANY ) & & e - > name_state = = state )
return e ;
if ( ! bacmp ( & e - > data . bdaddr , bdaddr ) )
return e ;
}
return NULL ;
}
2012-01-09 02:53:02 +04:00
void hci_inquiry_cache_update_resolve ( struct hci_dev * hdev ,
2012-03-08 08:25:00 +04:00
struct inquiry_entry * ie )
2012-01-09 02:53:02 +04:00
{
struct discovery_state * cache = & hdev - > discovery ;
struct list_head * pos = & cache - > resolve ;
struct inquiry_entry * p ;
list_del ( & ie - > list ) ;
list_for_each_entry ( p , & cache - > resolve , list ) {
if ( p - > name_state ! = NAME_PENDING & &
2012-05-17 07:36:26 +04:00
abs ( p - > data . rssi ) > = abs ( ie - > data . rssi ) )
2012-01-09 02:53:02 +04:00
break ;
pos = & p - > list ;
}
list_add ( & ie - > list , pos ) ;
}
2012-01-04 15:39:52 +04:00
/* Insert or refresh an inquiry cache entry for the device described
 * by @data.
 *
 * @name_known: whether the caller already knows the remote name
 * @ssp:        optional out-parameter set to the device's SSP mode
 *
 * Returns true when the remote name is (now) known or pending, false
 * when the name is still unknown or allocation of a new entry failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A freshly discovered device invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP once any result indicated it */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change re-sorts the entry within the
		 * resolve list (ordering is RSSI based).
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever
	 * sub-list (unknown/resolve) it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info and return the number of entries copied.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *entry;
	int copied = 0;

	list_for_each_entry(entry, &cache->all, all) {
		struct inquiry_data *data = &entry->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
2013-03-05 22:37:49 +04:00
static void hci_inq_req ( struct hci_request * req , unsigned long opt )
2005-04-17 02:20:36 +04:00
{
struct hci_inquiry_req * ir = ( struct hci_inquiry_req * ) opt ;
2013-03-05 22:37:49 +04:00
struct hci_dev * hdev = req - > hdev ;
2005-04-17 02:20:36 +04:00
struct hci_cp_inquiry cp ;
BT_DBG ( " %s " , hdev - > name ) ;
if ( test_bit ( HCI_INQUIRY , & hdev - > flags ) )
return ;
/* Start Inquiry */
memcpy ( & cp . lap , & ir - > lap , 3 ) ;
cp . length = ir - > length ;
cp . num_rsp = ir - > num_rsp ;
2013-03-05 22:37:49 +04:00
hci_req_add ( req , HCI_OP_INQUIRY , sizeof ( cp ) , & cp ) ;
2005-04-17 02:20:36 +04:00
}
2013-03-28 03:04:56 +04:00
/* Action callback for wait_on_bit(): yield the CPU and report
 * whether the sleeping task has a signal pending (non-zero aborts
 * the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2005-04-17 02:20:36 +04:00
/* Handle the HCIINQUIRY ioctl: optionally flush the cache and run a
 * fresh inquiry, then copy the cached results back to user space.
 *
 * Returns 0 on success or a negative error code. Every exit path
 * after hci_dev_get() must go through the "done" label so the device
 * reference is dropped.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 *
		 * Go through "done" here (a bare return would leak the
		 * reference taken by hci_dev_get() above).
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
2013-10-01 23:44:49 +04:00
/* Bring a controller up: run preconditions, call the driver's open
 * callback, perform vendor setup and the staged HCI init, and either
 * mark the device HCI_UP or unwind everything on failure.
 *
 * Returns 0 on success or a negative error code. Serialized against
 * other open/close paths by the request lock.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to bring it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Driver transport open */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup hook, only during initial setup */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip the
		 * staged HCI init sequence.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Only announce "powered" via mgmt for regular BR/EDR
		 * controllers that finished setup and are not claimed
		 * by a user channel.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2013-10-01 23:44:49 +04:00
/* ---- HCI ioctl helpers ---- */
/* ioctl entry point for powering on device number @dev. Resolves the
 * device, waits out any pending power work, then performs the actual
 * open. Returns 0 on success or a negative error code.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2005-04-17 02:20:36 +04:00
static int hci_dev_do_close ( struct hci_dev * hdev )
{
BT_DBG ( " %s %p " , hdev - > name , hdev ) ;
2012-09-14 23:34:46 +04:00
cancel_delayed_work ( & hdev - > power_off ) ;
2005-04-17 02:20:36 +04:00
hci_req_cancel ( hdev , ENODEV ) ;
hci_req_lock ( hdev ) ;
if ( ! test_and_clear_bit ( HCI_UP , & hdev - > flags ) ) {
2011-04-12 01:46:55 +04:00
del_timer_sync ( & hdev - > cmd_timer ) ;
2005-04-17 02:20:36 +04:00
hci_req_unlock ( hdev ) ;
return 0 ;
}
2011-12-15 06:50:02 +04:00
/* Flush RX and TX works */
flush_work ( & hdev - > tx_work ) ;
2010-08-09 07:06:53 +04:00
flush_work ( & hdev - > rx_work ) ;
2005-04-17 02:20:36 +04:00
2011-11-08 00:16:02 +04:00
if ( hdev - > discov_timeout > 0 ) {
2011-11-09 03:44:22 +04:00
cancel_delayed_work ( & hdev - > discov_off ) ;
2011-11-08 00:16:02 +04:00
hdev - > discov_timeout = 0 ;
2012-02-21 18:01:30 +04:00
clear_bit ( HCI_DISCOVERABLE , & hdev - > dev_flags ) ;
2013-10-15 20:13:39 +04:00
clear_bit ( HCI_LIMITED_DISCOVERABLE , & hdev - > dev_flags ) ;
2011-11-08 00:16:02 +04:00
}
2012-01-09 01:11:15 +04:00
if ( test_and_clear_bit ( HCI_SERVICE_CACHE , & hdev - > dev_flags ) )
2011-12-15 02:47:39 +04:00
cancel_delayed_work ( & hdev - > service_cache ) ;
2012-02-04 00:47:59 +04:00
cancel_delayed_work_sync ( & hdev - > le_scan_disable ) ;
2011-06-17 20:03:21 +04:00
hci_dev_lock ( hdev ) ;
2013-04-30 22:29:27 +04:00
hci_inquiry_cache_flush ( hdev ) ;
2005-04-17 02:20:36 +04:00
hci_conn_hash_flush ( hdev ) ;
2011-06-17 20:03:21 +04:00
hci_dev_unlock ( hdev ) ;
2005-04-17 02:20:36 +04:00
hci_notify ( hdev , HCI_DEV_DOWN ) ;
if ( hdev - > flush )
hdev - > flush ( hdev ) ;
/* Reset device */
skb_queue_purge ( & hdev - > cmd_q ) ;
atomic_set ( & hdev - > cmd_cnt , 1 ) ;
2012-02-03 23:29:40 +04:00
if ( ! test_bit ( HCI_RAW , & hdev - > flags ) & &
2013-10-11 20:44:12 +04:00
! test_bit ( HCI_AUTO_OFF , & hdev - > dev_flags ) & &
2012-05-23 14:35:46 +04:00
test_bit ( HCI_QUIRK_RESET_ON_CLOSE , & hdev - > quirks ) ) {
2005-04-17 02:20:36 +04:00
set_bit ( HCI_INIT , & hdev - > flags ) ;
2013-03-05 22:37:41 +04:00
__hci_req_sync ( hdev , hci_reset_req , 0 , HCI_CMD_TIMEOUT ) ;
2005-04-17 02:20:36 +04:00
clear_bit ( HCI_INIT , & hdev - > flags ) ;
}
2011-12-15 05:53:47 +04:00
/* flush cmd work */
flush_work ( & hdev - > cmd_work ) ;
2005-04-17 02:20:36 +04:00
/* Drop queues */
skb_queue_purge ( & hdev - > rx_q ) ;
skb_queue_purge ( & hdev - > cmd_q ) ;
skb_queue_purge ( & hdev - > raw_q ) ;
/* Drop last sent command */
if ( hdev - > sent_cmd ) {
2011-04-12 01:46:55 +04:00
del_timer_sync ( & hdev - > cmd_timer ) ;
2005-04-17 02:20:36 +04:00
kfree_skb ( hdev - > sent_cmd ) ;
hdev - > sent_cmd = NULL ;
}
2013-04-02 14:34:31 +04:00
kfree_skb ( hdev - > recv_evt ) ;
hdev - > recv_evt = NULL ;
2005-04-17 02:20:36 +04:00
/* After this point our queues are empty
* and no tasks are scheduled . */
hdev - > close ( hdev ) ;
2013-03-16 02:06:59 +04:00
/* Clear flags */
hdev - > flags = 0 ;
hdev - > dev_flags & = ~ HCI_PERSISTENT_MASK ;
2013-10-07 11:58:33 +04:00
if ( ! test_and_clear_bit ( HCI_AUTO_OFF , & hdev - > dev_flags ) ) {
if ( hdev - > dev_type = = HCI_BREDR ) {
hci_dev_lock ( hdev ) ;
mgmt_powered ( hdev , 0 ) ;
hci_dev_unlock ( hdev ) ;
}
2012-02-21 15:33:48 +04:00
}
2010-12-16 11:00:37 +03:00
2012-11-28 19:59:42 +04:00
/* Controller radio is available but is currently powered down */
2013-10-05 22:47:45 +04:00
hdev - > amp_status = AMP_STATUS_POWERED_DOWN ;
2012-11-28 19:59:42 +04:00
2012-02-22 20:11:53 +04:00
memset ( hdev - > eir , 0 , sizeof ( hdev - > eir ) ) ;
2012-02-23 00:01:41 +04:00
memset ( hdev - > dev_class , 0 , sizeof ( hdev - > dev_class ) ) ;
2012-02-22 20:11:53 +04:00
2005-04-17 02:20:36 +04:00
hci_req_unlock ( hdev ) ;
hci_dev_put ( hdev ) ;
return 0 ;
}
/* ioctl entry point for powering a controller down.
 * Refused with -EBUSY while the device is bound to a user channel.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* Stop a pending auto power-off before closing by hand */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
/* ioctl entry point: soft-reset a running controller (drop queues,
 * flush caches and counters, optionally send HCI_Reset).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command budget and per-link packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0,
				     HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
/* ioctl entry point: zero the device statistics counters. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
/* Dispatch the legacy HCISET* ioctls. Only valid on powered BR/EDR
 * controllers that are not claimed by a user channel.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count low */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
/* Copy a bounded list of registered controllers (id + flags) to the
 * user-supplied buffer. The caller passes the capacity in the first
 * __u16 of @arg.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kzalloc below stays reasonable */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		dr[n].dev_id  = hdev->id;
		dr[n].dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
/* Fill a struct hci_dev_info snapshot for one controller and copy it
 * back to userspace.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; next two bits: device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;

	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: report LE buffer info in the ACL fields */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}

	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */
2009-06-08 16:41:38 +04:00
/* rfkill set_block callback: track the switch state in HCI_RFKILLED and
 * power the device down when it gets blocked (unless setup is still
 * running, which checks the flag itself when it finishes).
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (!blocked) {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
		return 0;
	}

	set_bit(HCI_RFKILLED, &hdev->dev_flags);

	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		hci_dev_do_close(hdev);

	return 0;
}
/* rfkill operations: only the set_block callback is needed; the rfkill
 * core invokes it whenever the soft/hard block state changes. */
static const struct rfkill_ops hci_rfkill_ops = {
. set_block = hci_rfkill_set_block ,
} ;
2010-12-15 14:53:18 +03:00
static void hci_power_on ( struct work_struct * work )
{
struct hci_dev * hdev = container_of ( work , struct hci_dev , power_on ) ;
2013-05-29 10:51:29 +04:00
int err ;
2010-12-15 14:53:18 +03:00
BT_DBG ( " %s " , hdev - > name ) ;
2013-10-01 23:44:49 +04:00
err = hci_dev_do_open ( hdev ) ;
2013-05-29 10:51:29 +04:00
if ( err < 0 ) {
mgmt_set_powered_failed ( hdev , err ) ;
2010-12-15 14:53:18 +03:00
return ;
2013-05-29 10:51:29 +04:00
}
2010-12-15 14:53:18 +03:00
2013-10-06 12:08:57 +04:00
/* During the HCI setup phase, a few error conditions are
* ignored and they need to be checked now . If they are still
* valid , it is important to turn the device back off .
*/
if ( test_bit ( HCI_RFKILLED , & hdev - > dev_flags ) | |
( hdev - > dev_type = = HCI_BREDR & &
! bacmp ( & hdev - > bdaddr , BDADDR_ANY ) & &
! bacmp ( & hdev - > static_addr , BDADDR_ANY ) ) ) {
2013-09-13 09:58:18 +04:00
clear_bit ( HCI_AUTO_OFF , & hdev - > dev_flags ) ;
hci_dev_do_close ( hdev ) ;
} else if ( test_bit ( HCI_AUTO_OFF , & hdev - > dev_flags ) ) {
2013-01-15 00:33:51 +04:00
queue_delayed_work ( hdev - > req_workqueue , & hdev - > power_off ,
HCI_AUTO_OFF_TIMEOUT ) ;
2013-09-13 09:58:18 +04:00
}
2010-12-15 14:53:18 +03:00
2012-01-09 01:11:15 +04:00
if ( test_and_clear_bit ( HCI_SETUP , & hdev - > dev_flags ) )
2011-11-08 22:40:14 +04:00
mgmt_index_added ( hdev ) ;
2010-12-15 14:53:18 +03:00
}
static void hci_power_off ( struct work_struct * work )
{
2011-11-08 00:16:04 +04:00
struct hci_dev * hdev = container_of ( work , struct hci_dev ,
2012-05-17 07:36:26 +04:00
power_off . work ) ;
2010-12-15 14:53:18 +03:00
BT_DBG ( " %s " , hdev - > name ) ;
2012-02-21 15:33:48 +04:00
hci_dev_do_close ( hdev ) ;
2010-12-15 14:53:18 +03:00
}
2011-11-08 00:16:02 +04:00
static void hci_discov_off ( struct work_struct * work )
{
struct hci_dev * hdev ;
hdev = container_of ( work , struct hci_dev , discov_off . work ) ;
BT_DBG ( " %s " , hdev - > name ) ;
2013-10-15 21:57:40 +04:00
mgmt_discoverable_timeout ( hdev ) ;
2011-11-08 00:16:02 +04:00
}
2011-01-04 13:08:51 +03:00
int hci_uuids_clear ( struct hci_dev * hdev )
{
2013-01-27 02:31:28 +04:00
struct bt_uuid * uuid , * tmp ;
2011-01-04 13:08:51 +03:00
2013-01-27 02:31:28 +04:00
list_for_each_entry_safe ( uuid , tmp , & hdev - > uuids , list ) {
list_del ( & uuid - > list ) ;
2011-01-04 13:08:51 +03:00
kfree ( uuid ) ;
}
return 0 ;
}
2011-01-17 15:41:05 +03:00
int hci_link_keys_clear ( struct hci_dev * hdev )
{
struct list_head * p , * n ;
list_for_each_safe ( p , n , & hdev - > link_keys ) {
struct link_key * key ;
key = list_entry ( p , struct link_key , list ) ;
list_del ( p ) ;
kfree ( key ) ;
}
return 0 ;
}
2012-02-03 04:08:00 +04:00
int hci_smp_ltks_clear ( struct hci_dev * hdev )
{
struct smp_ltk * k , * tmp ;
list_for_each_entry_safe ( k , tmp , & hdev - > long_term_keys , list ) {
list_del ( & k - > list ) ;
kfree ( k ) ;
}
return 0 ;
}
2011-01-17 15:41:05 +03:00
/* Look up the stored BR/EDR link key for @bdaddr, or NULL if none. */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		if (!bacmp(bdaddr, &key->bdaddr))
			return key;
	}

	return NULL;
}
2012-04-13 16:13:22 +04:00
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and the bonding requirements of both sides.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so never store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key with no previous key to change */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* None of the criteria matched: do not store the key */
	return false;
}
2012-02-03 04:08:01 +04:00
/* Look up an LTK by its EDIV/Rand pair, or NULL if no entry matches. */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *ltk;

	list_for_each_entry(ltk, &hdev->long_term_keys, list) {
		if (ltk->ediv == ediv &&
		    !memcmp(rand, ltk->rand, sizeof(ltk->rand)))
			return ltk;
	}

	return NULL;
}
2012-02-03 04:08:01 +04:00
struct smp_ltk * hci_find_ltk_by_addr ( struct hci_dev * hdev , bdaddr_t * bdaddr ,
2012-03-08 08:25:00 +04:00
u8 addr_type )
2011-07-08 01:59:36 +04:00
{
2012-02-03 04:08:01 +04:00
struct smp_ltk * k ;
2011-07-08 01:59:36 +04:00
2012-02-03 04:08:01 +04:00
list_for_each_entry ( k , & hdev - > long_term_keys , list )
if ( addr_type = = k - > bdaddr_type & &
2012-05-17 07:36:26 +04:00
bacmp ( bdaddr , & k - > bdaddr ) = = 0 )
2011-07-08 01:59:36 +04:00
return k ;
return NULL ;
}
2011-04-28 22:28:59 +04:00
int hci_add_link_key ( struct hci_dev * hdev , struct hci_conn * conn , int new_key ,
2012-03-08 08:25:00 +04:00
bdaddr_t * bdaddr , u8 * val , u8 type , u8 pin_len )
2011-01-17 15:41:05 +03:00
{
struct link_key * key , * old_key ;
2012-04-13 16:13:22 +04:00
u8 old_key_type ;
bool persistent ;
2011-01-17 15:41:05 +03:00
old_key = hci_find_link_key ( hdev , bdaddr ) ;
if ( old_key ) {
old_key_type = old_key - > type ;
key = old_key ;
} else {
2011-04-28 22:29:00 +04:00
old_key_type = conn ? conn - > key_type : 0xff ;
2011-01-17 15:41:05 +03:00
key = kzalloc ( sizeof ( * key ) , GFP_ATOMIC ) ;
if ( ! key )
return - ENOMEM ;
list_add ( & key - > list , & hdev - > link_keys ) ;
}
2012-09-25 13:49:43 +04:00
BT_DBG ( " %s key for %pMR type %u " , hdev - > name , bdaddr , type ) ;
2011-01-17 15:41:05 +03:00
2011-04-28 22:28:59 +04:00
/* Some buggy controller combinations generate a changed
* combination key for legacy pairing even when there ' s no
* previous key */
if ( type = = HCI_LK_CHANGED_COMBINATION & &
2012-05-17 07:36:26 +04:00
( ! conn | | conn - > remote_auth = = 0xff ) & & old_key_type = = 0xff ) {
2011-04-28 22:28:59 +04:00
type = HCI_LK_COMBINATION ;
2011-04-28 22:29:01 +04:00
if ( conn )
conn - > key_type = type ;
}
2011-04-28 22:28:59 +04:00
2011-01-17 15:41:05 +03:00
bacpy ( & key - > bdaddr , bdaddr ) ;
2012-05-23 12:31:20 +04:00
memcpy ( key - > val , val , HCI_LINK_KEY_SIZE ) ;
2011-01-17 15:41:05 +03:00
key - > pin_len = pin_len ;
2011-04-28 14:07:53 +04:00
if ( type = = HCI_LK_CHANGED_COMBINATION )
2011-01-17 15:41:05 +03:00
key - > type = old_key_type ;
2011-04-28 22:29:02 +04:00
else
key - > type = type ;
2011-04-28 22:29:03 +04:00
if ( ! new_key )
return 0 ;
persistent = hci_persistent_key ( hdev , conn , type , old_key_type ) ;
2011-11-08 22:40:14 +04:00
mgmt_new_link_key ( hdev , key , persistent ) ;
2011-04-28 22:29:03 +04:00
2012-04-16 13:14:44 +04:00
if ( conn )
conn - > flush_key = ! persistent ;
2011-01-17 15:41:05 +03:00
return 0 ;
}
2012-02-03 04:08:01 +04:00
/* Store (or update) an SMP key (STK or LTK) for the given peer address.
 * For genuinely new LTKs, notify mgmt so userspace can persist them.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only STK and LTK types are stored here */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
2011-01-17 15:41:05 +03:00
/* Delete the stored BR/EDR link key for @bdaddr.
 * Returns -ENOENT when no key is stored for that address.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}
2012-02-03 04:08:00 +04:00
/* Delete all stored SMP keys matching @bdaddr (any address type).
 * Always returns 0, even when nothing matched.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *ltk, *next;

	list_for_each_entry_safe(ltk, next, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &ltk->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&ltk->list);
		kfree(ltk);
	}

	return 0;
}
2011-02-16 17:32:41 +03:00
/* HCI command timer function */
2012-06-11 12:13:08 +04:00
static void hci_cmd_timeout ( unsigned long arg )
2011-02-16 17:32:41 +03:00
{
struct hci_dev * hdev = ( void * ) arg ;
2012-06-11 12:13:08 +04:00
if ( hdev - > sent_cmd ) {
struct hci_command_hdr * sent = ( void * ) hdev - > sent_cmd - > data ;
u16 opcode = __le16_to_cpu ( sent - > opcode ) ;
BT_ERR ( " %s command 0x%4.4x tx timeout " , hdev - > name , opcode ) ;
} else {
BT_ERR ( " %s command tx timeout " , hdev - > name ) ;
}
2011-02-16 17:32:41 +03:00
atomic_set ( & hdev - > cmd_cnt , 1 ) ;
2011-12-15 05:53:47 +04:00
queue_work ( hdev - > workqueue , & hdev - > cmd_work ) ;
2011-02-16 17:32:41 +03:00
}
2011-03-22 15:12:22 +03:00
struct oob_data * hci_find_remote_oob_data ( struct hci_dev * hdev ,
2012-03-08 08:25:00 +04:00
bdaddr_t * bdaddr )
2011-03-22 15:12:22 +03:00
{
struct oob_data * data ;
list_for_each_entry ( data , & hdev - > remote_oob_data , list )
if ( bacmp ( bdaddr , & data - > bdaddr ) = = 0 )
return data ;
return NULL ;
}
/* Delete the stored remote OOB data for @bdaddr.
 * Returns -ENOENT when nothing is stored for that address.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}
int hci_remote_oob_data_clear ( struct hci_dev * hdev )
{
struct oob_data * data , * n ;
list_for_each_entry_safe ( data , n , & hdev - > remote_oob_data , list ) {
list_del ( & data - > list ) ;
kfree ( data ) ;
}
return 0 ;
}
/* Store (or refresh) the remote OOB hash/randomizer pair for @bdaddr. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2013-10-18 04:24:13 +04:00
/* Find the blacklist entry matching @bdaddr and @type, or NULL. */
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->blacklist, list) {
		if (!bacmp(&entry->bdaddr, bdaddr) &&
		    entry->bdaddr_type == type)
			return entry;
	}

	return NULL;
}
int hci_blacklist_clear ( struct hci_dev * hdev )
{
struct list_head * p , * n ;
list_for_each_safe ( p , n , & hdev - > blacklist ) {
2013-10-18 04:24:13 +04:00
struct bdaddr_list * b = list_entry ( p , struct bdaddr_list , list ) ;
2011-06-15 13:01:14 +04:00
list_del ( p ) ;
kfree ( b ) ;
}
return 0 ;
}
2012-02-09 17:56:11 +04:00
/* Add @bdaddr/@type to the controller blacklist and notify mgmt.
 * BDADDR_ANY is rejected; duplicates return -EEXIST.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
2012-02-09 17:56:11 +04:00
/* Remove @bdaddr/@type from the blacklist and notify mgmt.
 * BDADDR_ANY empties the whole list; a missing entry yields -ENOENT.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2013-04-30 22:29:32 +04:00
/* Completion callback for the inquiry request issued after LE scanning
 * stops. On failure, abandon the discovery session.
 */
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (!status)
		return;

	BT_ERR("Failed to start inquiry: status %d", status);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}
2013-04-30 22:29:32 +04:00
/* Completion callback for the LE scan disable request. For an LE-only
 * discovery the session ends here; for interleaved discovery a BR/EDR
 * inquiry is started as the next phase.
 *
 * Fix: the switch over the discovery type had no default case, so any
 * other type (e.g. DISCOV_TYPE_BREDR) was ignored without any marked
 * intent; add an explicit default to document that nothing is done.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;

	default:
		/* No follow-up action for other discovery types */
		break;
	}
}
2012-02-04 00:47:59 +04:00
static void le_scan_disable_work ( struct work_struct * work )
{
struct hci_dev * hdev = container_of ( work , struct hci_dev ,
2012-03-08 08:25:00 +04:00
le_scan_disable . work ) ;
2012-02-04 00:47:59 +04:00
struct hci_cp_le_set_scan_enable cp ;
2013-04-30 22:29:32 +04:00
struct hci_request req ;
int err ;
2012-02-04 00:47:59 +04:00
BT_DBG ( " %s " , hdev - > name ) ;
2013-04-30 22:29:32 +04:00
hci_req_init ( & req , hdev ) ;
2012-02-04 00:48:00 +04:00
2012-02-04 00:47:59 +04:00
memset ( & cp , 0 , sizeof ( cp ) ) ;
2013-04-30 22:29:32 +04:00
cp . enable = LE_SCAN_DISABLE ;
hci_req_add ( & req , HCI_OP_LE_SET_SCAN_ENABLE , sizeof ( cp ) , & cp ) ;
2012-02-04 00:48:00 +04:00
2013-04-30 22:29:32 +04:00
err = hci_req_run ( & req , le_scan_disable_work_complete ) ;
if ( err )
BT_ERR ( " Disable LE scanning request failed: err %d " , err ) ;
2012-02-04 00:48:00 +04:00
}
2012-04-22 16:39:57 +04:00
/* Alloc HCI device */
struct hci_dev * hci_alloc_dev ( void )
{
struct hci_dev * hdev ;
hdev = kzalloc ( sizeof ( struct hci_dev ) , GFP_KERNEL ) ;
if ( ! hdev )
return NULL ;
2012-04-22 16:39:58 +04:00
hdev - > pkt_type = ( HCI_DM1 | HCI_DH1 | HCI_HV1 ) ;
hdev - > esco_type = ( ESCO_HV1 ) ;
hdev - > link_mode = ( HCI_LM_ACCEPT ) ;
2013-10-15 00:56:16 +04:00
hdev - > num_iac = 0x01 ; /* One IAC support is mandatory */
hdev - > io_capability = 0x03 ; /* No Input No Output */
2012-11-08 04:22:59 +04:00
hdev - > inq_tx_power = HCI_TX_POWER_INVALID ;
hdev - > adv_tx_power = HCI_TX_POWER_INVALID ;
2012-04-22 16:39:58 +04:00
hdev - > sniff_max_interval = 800 ;
hdev - > sniff_min_interval = 80 ;
2013-10-11 19:23:19 +04:00
hdev - > le_scan_interval = 0x0060 ;
hdev - > le_scan_window = 0x0030 ;
2013-10-19 18:09:13 +04:00
hdev - > le_conn_min_interval = 0x0028 ;
hdev - > le_conn_max_interval = 0x0038 ;
2013-10-11 19:23:19 +04:00
2012-04-22 16:39:58 +04:00
mutex_init ( & hdev - > lock ) ;
mutex_init ( & hdev - > req_lock ) ;
INIT_LIST_HEAD ( & hdev - > mgmt_pending ) ;
INIT_LIST_HEAD ( & hdev - > blacklist ) ;
INIT_LIST_HEAD ( & hdev - > uuids ) ;
INIT_LIST_HEAD ( & hdev - > link_keys ) ;
INIT_LIST_HEAD ( & hdev - > long_term_keys ) ;
INIT_LIST_HEAD ( & hdev - > remote_oob_data ) ;
2012-08-31 17:39:28 +04:00
INIT_LIST_HEAD ( & hdev - > conn_hash . list ) ;
2012-04-22 16:39:58 +04:00
INIT_WORK ( & hdev - > rx_work , hci_rx_work ) ;
INIT_WORK ( & hdev - > cmd_work , hci_cmd_work ) ;
INIT_WORK ( & hdev - > tx_work , hci_tx_work ) ;
INIT_WORK ( & hdev - > power_on , hci_power_on ) ;
INIT_DELAYED_WORK ( & hdev - > power_off , hci_power_off ) ;
INIT_DELAYED_WORK ( & hdev - > discov_off , hci_discov_off ) ;
INIT_DELAYED_WORK ( & hdev - > le_scan_disable , le_scan_disable_work ) ;
skb_queue_head_init ( & hdev - > rx_q ) ;
skb_queue_head_init ( & hdev - > cmd_q ) ;
skb_queue_head_init ( & hdev - > raw_q ) ;
init_waitqueue_head ( & hdev - > req_wait_q ) ;
2012-06-11 12:13:08 +04:00
setup_timer ( & hdev - > cmd_timer , hci_cmd_timeout , ( unsigned long ) hdev ) ;
2012-04-22 16:39:58 +04:00
hci_init_sysfs ( hdev ) ;
discovery_init ( hdev ) ;
2012-04-22 16:39:57 +04:00
return hdev ;
}
EXPORT_SYMBOL ( hci_alloc_dev ) ;
/* Free HCI device */
void hci_free_dev ( struct hci_dev * hdev )
{
/* will free via device release: drop the embedded struct device
 * reference; the release callback performs the actual kfree. */
put_device ( & hdev - > dev ) ;
}
EXPORT_SYMBOL ( hci_free_dev ) ;
2005-04-17 02:20:36 +04:00
/* Register HCI device */
int hci_register_dev ( struct hci_dev * hdev )
{
2012-04-22 16:39:58 +04:00
int id , error ;
2005-04-17 02:20:36 +04:00
2012-01-07 18:47:07 +04:00
if ( ! hdev - > open | | ! hdev - > close )
2005-04-17 02:20:36 +04:00
return - EINVAL ;
2011-11-03 03:18:36 +04:00
/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID .
*/
2012-05-28 00:36:56 +04:00
switch ( hdev - > dev_type ) {
case HCI_BREDR :
id = ida_simple_get ( & hci_index_ida , 0 , 0 , GFP_KERNEL ) ;
break ;
case HCI_AMP :
id = ida_simple_get ( & hci_index_ida , 1 , 0 , GFP_KERNEL ) ;
break ;
default :
return - EINVAL ;
2005-04-17 02:20:36 +04:00
}
2007-02-09 17:24:33 +03:00
2012-05-28 00:36:56 +04:00
if ( id < 0 )
return id ;
2005-04-17 02:20:36 +04:00
sprintf ( hdev - > name , " hci%d " , id ) ;
hdev - > id = id ;
2012-04-16 17:32:04 +04:00
BT_DBG ( " %p name %s bus %d " , hdev , hdev - > name , hdev - > bus ) ;
2013-07-04 02:04:57 +04:00
hdev - > workqueue = alloc_workqueue ( " %s " , WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM , 1 , hdev - > name ) ;
2011-10-08 16:58:49 +04:00
if ( ! hdev - > workqueue ) {
error = - ENOMEM ;
goto err ;
}
2010-03-20 17:20:04 +03:00
2013-07-04 02:04:57 +04:00
hdev - > req_workqueue = alloc_workqueue ( " %s " , WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM , 1 , hdev - > name ) ;
2013-01-15 00:33:50 +04:00
if ( ! hdev - > req_workqueue ) {
destroy_workqueue ( hdev - > workqueue ) ;
error = - ENOMEM ;
goto err ;
}
2013-10-18 04:24:17 +04:00
if ( ! IS_ERR_OR_NULL ( bt_debugfs ) )
hdev - > debugfs = debugfs_create_dir ( hdev - > name , bt_debugfs ) ;
2013-10-18 04:24:19 +04:00
dev_set_name ( & hdev - > dev , " %s " , hdev - > name ) ;
error = device_add ( & hdev - > dev ) ;
2011-10-08 16:58:49 +04:00
if ( error < 0 )
goto err_wqueue ;
2005-04-17 02:20:36 +04:00
2009-06-08 16:41:38 +04:00
hdev - > rfkill = rfkill_alloc ( hdev - > name , & hdev - > dev ,
2012-05-17 07:36:26 +04:00
RFKILL_TYPE_BLUETOOTH , & hci_rfkill_ops ,
hdev ) ;
2009-06-08 16:41:38 +04:00
if ( hdev - > rfkill ) {
if ( rfkill_register ( hdev - > rfkill ) < 0 ) {
rfkill_destroy ( hdev - > rfkill ) ;
hdev - > rfkill = NULL ;
}
}
2013-09-13 09:58:17 +04:00
if ( hdev - > rfkill & & rfkill_blocked ( hdev - > rfkill ) )
set_bit ( HCI_RFKILLED , & hdev - > dev_flags ) ;
2012-01-09 01:11:15 +04:00
set_bit ( HCI_SETUP , & hdev - > dev_flags ) ;
2013-10-07 11:58:32 +04:00
set_bit ( HCI_AUTO_OFF , & hdev - > dev_flags ) ;
2012-06-29 16:07:00 +04:00
2013-10-06 12:16:22 +04:00
if ( hdev - > dev_type = = HCI_BREDR ) {
2013-10-02 14:43:13 +04:00
/* Assume BR/EDR support until proven otherwise (such as
* through reading supported features during init .
*/
set_bit ( HCI_BREDR_ENABLED , & hdev - > dev_flags ) ;
}
2012-06-29 16:07:00 +04:00
2013-07-11 14:34:28 +04:00
write_lock ( & hci_dev_list_lock ) ;
list_add ( & hdev - > list , & hci_dev_list ) ;
write_unlock ( & hci_dev_list_lock ) ;
2005-04-17 02:20:36 +04:00
hci_notify ( hdev , HCI_DEV_REG ) ;
2012-01-07 18:47:24 +04:00
hci_dev_hold ( hdev ) ;
2005-04-17 02:20:36 +04:00
2013-01-15 00:33:51 +04:00
queue_work ( hdev - > req_workqueue , & hdev - > power_on ) ;
2012-10-30 12:35:40 +04:00
2005-04-17 02:20:36 +04:00
return id ;
2010-03-20 17:20:04 +03:00
2011-10-08 16:58:49 +04:00
err_wqueue :
destroy_workqueue ( hdev - > workqueue ) ;
2013-01-15 00:33:50 +04:00
destroy_workqueue ( hdev - > req_workqueue ) ;
2011-10-08 16:58:49 +04:00
err :
2012-05-28 00:36:56 +04:00
ida_simple_remove ( & hci_index_ida , hdev - > id ) ;
2010-03-20 17:20:04 +03:00
2011-10-08 16:58:49 +04:00
return error ;
2005-04-17 02:20:36 +04:00
}
EXPORT_SYMBOL ( hci_register_dev ) ;
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Cache the index now: hdev may be freed when the reference is
	 * dropped below, but the IDA slot is released after that. */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Clear all persistent per-controller data under the dev lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds. */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds. */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2009-11-18 02:40:39 +03:00
/* Receive frame from HCI drivers */
2013-10-11 03:52:43 +04:00
int hci_recv_frame ( struct hci_dev * hdev , struct sk_buff * skb )
2009-11-18 02:40:39 +03:00
{
if ( ! hdev | | ( ! test_bit ( HCI_UP , & hdev - > flags )
2012-05-17 07:36:26 +04:00
& & ! test_bit ( HCI_INIT , & hdev - > flags ) ) ) {
2009-11-18 02:40:39 +03:00
kfree_skb ( skb ) ;
return - ENXIO ;
}
2012-12-27 20:33:02 +04:00
/* Incoming skb */
2009-11-18 02:40:39 +03:00
bt_cb ( skb ) - > incoming = 1 ;
/* Time stamp */
__net_timestamp ( skb ) ;
skb_queue_tail ( & hdev - > rx_q , skb ) ;
2010-08-09 07:06:53 +04:00
queue_work ( hdev - > workqueue , & hdev - > rx_work ) ;
2009-11-18 03:02:54 +03:00
2009-11-18 02:40:39 +03:00
return 0 ;
}
EXPORT_SYMBOL ( hci_recv_frame ) ;
2010-07-14 11:32:17 +04:00
/* Incrementally rebuild one HCI packet of the given type from a byte
 * stream. State is kept in hdev->reassembly[index]. Returns the number
 * of input bytes left unconsumed (>= 0 once a frame completes or input
 * runs out), or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the maximum-size buffer
		 * for this packet type and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header has arrived, read the payload length
		 * from it so we know where the frame ends. If the claimed
		 * payload cannot fit, drop the reassembly state. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: pass it up and reset state */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2007-07-11 08:42:04 +04:00
int hci_recv_fragment ( struct hci_dev * hdev , int type , void * data , int count )
{
2010-07-14 11:32:18 +04:00
int rem = 0 ;
2007-07-11 08:42:04 +04:00
if ( type < HCI_ACLDATA_PKT | | type > HCI_EVENT_PKT )
return - EILSEQ ;
2010-07-24 08:34:54 +04:00
while ( count ) {
2011-04-05 01:25:14 +04:00
rem = hci_reassembly ( hdev , type , data , count , type - 1 ) ;
2010-07-14 11:32:18 +04:00
if ( rem < 0 )
return rem ;
2007-07-11 08:42:04 +04:00
2010-07-14 11:32:18 +04:00
data + = ( count - rem ) ;
count = rem ;
2011-06-03 15:51:19 +04:00
}
2007-07-11 08:42:04 +04:00
2010-07-14 11:32:18 +04:00
return rem ;
2007-07-11 08:42:04 +04:00
}
EXPORT_SYMBOL ( hci_recv_fragment ) ;
2010-07-14 11:32:19 +04:00
#define STREAM_REASSEMBLY 0

/* Reassemble packets from a raw byte stream where each frame starts
 * with a one-byte packet type indicator (e.g. H4-style transports).
 * A single shared reassembly slot is used for the whole stream. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the type */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Frame in progress: type was recorded at start */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2005-04-17 02:20:36 +04:00
/* ---- Interface to upper protocols ---- */

/* Register an upper-protocol callback structure (e.g. L2CAP, SCO). */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* hci_cb_list is protected by its own rwlock */
	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback structure. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* hci_cb_list is protected by its own rwlock */
	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2013-10-11 01:54:19 +04:00
/* Hand one outgoing frame to the driver, after time stamping it and
 * feeding copies to the monitor and (in promiscuous mode) raw sockets. */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* No way to report the error to the caller: just log it */
	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
2013-03-05 22:37:44 +04:00
void hci_req_init ( struct hci_request * req , struct hci_dev * hdev )
{
skb_queue_head_init ( & req - > cmd_q ) ;
req - > hdev = hdev ;
2013-03-08 18:20:16 +04:00
req - > err = 0 ;
2013-03-05 22:37:44 +04:00
}
/* Submit a built request: splice its commands onto the controller's
 * command queue and kick the command work. Returns 0 on success, a
 * negative errno when the request is broken or empty. */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command of the request */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2013-03-05 22:37:45 +04:00
/* Allocate and fill an skb holding one HCI command (header + optional
 * parameters). Returns NULL on allocation failure. */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2013-03-05 22:37:46 +04:00
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		/* Record the failure so hci_req_run() can abort the whole
		 * request instead of sending a partial sequence. */
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the start of the sequence */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2013-04-19 11:14:51 +04:00
/* Queue a command to a request without waiting for a special event */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	/* Delegate with event == 0, i.e. complete on command status/complete */
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2005-04-17 02:20:36 +04:00
/* Get data from the previously sent command */
2007-10-20 15:33:56 +04:00
void * hci_sent_cmd_data ( struct hci_dev * hdev , __u16 opcode )
2005-04-17 02:20:36 +04:00
{
struct hci_command_hdr * hdr ;
if ( ! hdev - > sent_cmd )
return NULL ;
hdr = ( void * ) hdev - > sent_cmd - > data ;
2007-10-20 15:33:56 +04:00
if ( hdr - > opcode ! = cpu_to_le16 ( opcode ) )
2005-04-17 02:20:36 +04:00
return NULL ;
2012-06-11 12:13:09 +04:00
BT_DBG ( " %s opcode 0x%4.4x " , hdev - > name , opcode ) ;
2005-04-17 02:20:36 +04:00
return hdev - > sent_cmd - > data + HCI_COMMAND_HDR_SIZE ;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int dlen = skb->len;	/* payload length, captured before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);

	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(dlen);
}
2012-09-21 13:30:04 +04:00
/* Add the ACL header(s) to an outgoing skb (and its fragment list, if
 * any) and append everything to the given queue. Fragments are queued
 * atomically under the queue lock. */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR controllers address by connection handle, AMP
	 * controllers by channel handle. */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;

		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
/* Queue ACL data on the channel and kick the TX work to push it out. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header on the stack, then prepend it to the skb */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of the given link type with pending data and the
 * fewest packets in flight, and compute its fair share (*quote) of the
 * controller's free buffers. Returns NULL (and *quote = 0) when nothing
 * is ready to send. */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest packets in flight */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer pool depends on the link type; LE falls back to
		 * the ACL pool when the controller has no LE buffers. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2012-05-23 11:04:18 +04:00
/* TX timeout handler: disconnect every connection of the given type
 * that still has unacknowledged packets outstanding. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2012-05-23 11:04:18 +04:00
/* Channel-level scheduler: among all channels of the given link type
 * with queued data, pick one whose head skb has the highest priority,
 * breaking ties by fewest packets in flight on the owning connection.
 * *quote receives the fair share of available buffers; returns NULL
 * when nothing is ready. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);

			/* Only channels at the highest priority seen so
			 * far compete */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart selection */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on the connection's in-flight count */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer pool for the selected channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among same-priority competitors, at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2011-11-02 17:52:03 +04:00
/* After a TX round, reset the sent counter of channels that got to
 * transmit and promote the head skb of starved channels to just below
 * the maximum priority so they win the next round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channels that transmitted are reset, not boosted */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: boost to just below maximum */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2012-02-03 18:27:54 +04:00
static inline int __get_blocks ( struct hci_dev * hdev , struct sk_buff * skb )
{
/* Calculate count of blocks used by this packet */
return DIV_ROUND_UP ( skb - > len - HCI_ACL_HDR_SIZE , hdev - > block_len ) ;
}
2012-05-23 11:04:18 +04:00
/* When no buffers are free and nothing has been sent for too long,
 * assume the link stalled and kill stalled ACL connections. Skipped
 * entirely for raw-mode devices. */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
2005-04-17 02:20:36 +04:00
2012-05-23 11:04:18 +04:00
/* Packet-based ACL scheduler: drain channels by priority while the
 * controller still has free ACL buffers. */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2012-05-23 11:04:18 +04:00
/* Block-based ACL scheduler: like hci_sched_acl_pkt() but accounts
 * in data blocks instead of packets. On AMP controllers the data
 * links are of type AMP_LINK. */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Give up when the packet does not fit in the
			 * remaining block budget */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
2012-05-23 11:04:18 +04:00
static void hci_sched_acl ( struct hci_dev * hdev )
2012-02-03 18:27:54 +04:00
{
BT_DBG ( " %s " , hdev - > name ) ;
2012-10-10 18:38:30 +04:00
/* No ACL link over BR/EDR controller */
if ( ! hci_conn_num ( hdev , ACL_LINK ) & & hdev - > dev_type = = HCI_BREDR )
return ;
/* No AMP link over AMP controller */
if ( ! hci_conn_num ( hdev , AMP_LINK ) & & hdev - > dev_type = = HCI_AMP )
2012-02-03 18:27:54 +04:00
return ;
switch ( hdev - > flow_ctl_mode ) {
case HCI_FLOW_CTL_MODE_PACKET_BASED :
hci_sched_acl_pkt ( hdev ) ;
break ;
case HCI_FLOW_CTL_MODE_BLOCK_BASED :
hci_sched_acl_blk ( hdev ) ;
break ;
}
}
2005-04-17 02:20:36 +04:00
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Wrap the counter instead of letting it saturate */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2012-05-23 11:04:18 +04:00
static void hci_sched_esco ( struct hci_dev * hdev )
2007-10-20 16:55:10 +04:00
{
struct hci_conn * conn ;
struct sk_buff * skb ;
int quote ;
BT_DBG ( " %s " , hdev - > name ) ;
2011-08-17 17:23:00 +04:00
if ( ! hci_conn_num ( hdev , ESCO_LINK ) )
return ;
2012-05-23 11:04:21 +04:00
while ( hdev - > sco_cnt & & ( conn = hci_low_sent ( hdev , ESCO_LINK ,
& quote ) ) ) {
2007-10-20 16:55:10 +04:00
while ( quote - - & & ( skb = skb_dequeue ( & conn - > data_q ) ) ) {
BT_DBG ( " skb %p len %d " , skb , skb - > len ) ;
2013-10-11 01:54:17 +04:00
hci_send_frame ( hdev , skb ) ;
2007-10-20 16:55:10 +04:00
conn - > sent + + ;
if ( conn - > sent = = ~ 0 )
conn - > sent = 0 ;
}
}
}
/* Schedule LE data transmission.
 *
 * Controllers that report a dedicated LE buffer pool (le_pkts != 0)
 * consume le_cnt credits; otherwise LE traffic shares the ACL credit
 * pool (acl_cnt).  Channels are serviced in priority order via
 * hci_chan_sent(), and a channel's quota is cut short as soon as a
 * lower-priority skb is seen at the head of its queue.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers if the controller
	 * has them, otherwise the shared ACL buffers. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect any sends */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		/* NOTE: quote is decremented before the priority check,
		 * so an early break still consumes one unit of quota. */
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek succeeded and priority matches: actually
			 * remove the skb from the queue and send it. */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2011-12-15 06:50:02 +04:00
static void hci_tx_work ( struct work_struct * work )
2005-04-17 02:20:36 +04:00
{
2011-12-15 06:50:02 +04:00
struct hci_dev * hdev = container_of ( work , struct hci_dev , tx_work ) ;
2005-04-17 02:20:36 +04:00
struct sk_buff * skb ;
2011-02-11 04:38:48 +03:00
BT_DBG ( " %s acl %d sco %d le %d " , hdev - > name , hdev - > acl_cnt ,
2012-05-17 07:36:26 +04:00
hdev - > sco_cnt , hdev - > le_cnt ) ;
2005-04-17 02:20:36 +04:00
2013-09-04 05:08:38 +04:00
if ( ! test_bit ( HCI_USER_CHANNEL , & hdev - > dev_flags ) ) {
/* Schedule queues and send stuff to HCI driver */
hci_sched_acl ( hdev ) ;
hci_sched_sco ( hdev ) ;
hci_sched_esco ( hdev ) ;
hci_sched_le ( hdev ) ;
}
2011-02-11 04:38:48 +03:00
2005-04-17 02:20:36 +04:00
/* Send next queued raw (unknown type) packet */
while ( ( skb = skb_dequeue ( & hdev - > raw_q ) ) )
2013-10-11 01:54:17 +04:00
hci_send_frame ( hdev , skb ) ;
2005-04-17 02:20:36 +04:00
}
2011-03-31 05:57:33 +04:00
/* ----- HCI RX task (incoming data processing) ----- */
2005-04-17 02:20:36 +04:00
/* ACL data packet */
2012-05-23 11:04:18 +04:00
static void hci_acldata_packet ( struct hci_dev * hdev , struct sk_buff * skb )
2005-04-17 02:20:36 +04:00
{
struct hci_acl_hdr * hdr = ( void * ) skb - > data ;
struct hci_conn * conn ;
__u16 handle , flags ;
skb_pull ( skb , HCI_ACL_HDR_SIZE ) ;
handle = __le16_to_cpu ( hdr - > handle ) ;
flags = hci_flags ( handle ) ;
handle = hci_handle ( handle ) ;
2012-06-11 12:13:09 +04:00
BT_DBG ( " %s len %d handle 0x%4.4x flags 0x%4.4x " , hdev - > name , skb - > len ,
2012-05-17 07:36:26 +04:00
handle , flags ) ;
2005-04-17 02:20:36 +04:00
hdev - > stat . acl_rx + + ;
hci_dev_lock ( hdev ) ;
conn = hci_conn_hash_lookup_handle ( hdev , handle ) ;
hci_dev_unlock ( hdev ) ;
2007-02-09 17:24:33 +03:00
2005-04-17 02:20:36 +04:00
if ( conn ) {
2011-12-14 03:06:02 +04:00
hci_conn_enter_active_mode ( conn , BT_POWER_FORCE_ACTIVE_OFF ) ;
2006-07-03 12:02:33 +04:00
2005-04-17 02:20:36 +04:00
/* Send to upper protocol */
2011-12-21 16:11:33 +04:00
l2cap_recv_acldata ( conn , skb , flags ) ;
return ;
2005-04-17 02:20:36 +04:00
} else {
2007-02-09 17:24:33 +03:00
BT_ERR ( " %s ACL packet for unknown connection handle %d " ,
2012-05-17 07:36:26 +04:00
hdev - > name , handle ) ;
2005-04-17 02:20:36 +04:00
}
kfree_skb ( skb ) ;
}
/* SCO data packet */
2012-05-23 11:04:18 +04:00
static void hci_scodata_packet ( struct hci_dev * hdev , struct sk_buff * skb )
2005-04-17 02:20:36 +04:00
{
struct hci_sco_hdr * hdr = ( void * ) skb - > data ;
struct hci_conn * conn ;
__u16 handle ;
skb_pull ( skb , HCI_SCO_HDR_SIZE ) ;
handle = __le16_to_cpu ( hdr - > handle ) ;
2012-06-11 12:13:09 +04:00
BT_DBG ( " %s len %d handle 0x%4.4x " , hdev - > name , skb - > len , handle ) ;
2005-04-17 02:20:36 +04:00
hdev - > stat . sco_rx + + ;
hci_dev_lock ( hdev ) ;
conn = hci_conn_hash_lookup_handle ( hdev , handle ) ;
hci_dev_unlock ( hdev ) ;
if ( conn ) {
/* Send to upper protocol */
2011-12-21 16:11:33 +04:00
sco_recv_scodata ( conn , skb ) ;
return ;
2005-04-17 02:20:36 +04:00
} else {
2007-02-09 17:24:33 +03:00
BT_ERR ( " %s SCO packet for unknown connection handle %d " ,
2012-05-17 07:36:26 +04:00
hdev - > name , handle ) ;
2005-04-17 02:20:36 +04:00
}
kfree_skb ( skb ) ;
}
2013-03-05 22:37:48 +04:00
static bool hci_req_is_complete ( struct hci_dev * hdev )
{
struct sk_buff * skb ;
skb = skb_peek ( & hdev - > cmd_q ) ;
if ( ! skb )
return true ;
return bt_cb ( skb ) - > req . start ;
}
2013-03-05 22:37:49 +04:00
static void hci_resend_last ( struct hci_dev * hdev )
{
struct hci_command_hdr * sent ;
struct sk_buff * skb ;
u16 opcode ;
if ( ! hdev - > sent_cmd )
return ;
sent = ( void * ) hdev - > sent_cmd - > data ;
opcode = __le16_to_cpu ( sent - > opcode ) ;
if ( opcode = = HCI_OP_RESET )
return ;
skb = skb_clone ( hdev - > sent_cmd , GFP_KERNEL ) ;
if ( ! skb )
return ;
skb_queue_head ( & hdev - > cmd_q , skb ) ;
queue_work ( hdev - > workqueue , & hdev - > cmd_work ) ;
}
/* Handle completion of a single HCI command within a request.
 *
 * Decides whether the request this command belongs to is finished and,
 * if so, locates and invokes its completion callback exactly once.
 * On failure (status != 0) the remaining queued commands of the same
 * request are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Head of the NEXT request: put it back untouched */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Keep the last seen callback; it belongs to this request */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
/* RX work handler: drains hdev->rx_q, mirrors each packet to the
 * monitor and (in promiscuous mode) to raw sockets, filters packets
 * the stack must not process, then dispatches by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode, or when a user channel owns the device,
		 * the kernel stack does not process packets at all. */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
/* Command work handler: sends the next queued HCI command when the
 * controller has a free command credit (cmd_cnt), keeping a clone in
 * hdev->sent_cmd so the completion event can be matched later.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command's reference copy */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone for completion matching / resend */
		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset the command timer is suppressed;
			 * otherwise (re)arm the command timeout. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed (OOM): put the command back and
			 * reschedule ourselves to retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}