/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sock.h>

/* HCI priority */
#define HCI_PRIO_MAX	7

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct list_head	all;		/* inq_cache.all */
	struct list_head	list;		/* unknown or resolve */
	enum {
		NAME_NOT_KNOWN,
		NAME_NEEDED,
		NAME_PENDING,
		NAME_KNOWN,
	} name_state;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct discovery_state {
	int			type;
	enum {
		DISCOVERY_STOPPED,
		DISCOVERY_STARTING,
		DISCOVERY_FINDING,
		DISCOVERY_RESOLVING,
		DISCOVERY_STOPPING,
	} state;
	struct list_head	all;		/* All devices found during inquiry */
	struct list_head	unknown;	/* Name state not known */
	struct list_head	resolve;	/* Name needs to be resolved */
	__u32			timestamp;
	bdaddr_t		last_adv_addr;
	u8			last_adv_addr_type;
	s8			last_adv_rssi;
	u32			last_adv_flags;
	u8			last_adv_data[HCI_MAX_AD_LENGTH];
	u8			last_adv_data_len;
	bool			report_invalid_rssi;
	bool			result_filtering;
	bool			limited;
	s8			rssi;
	u16			uuid_count;
	u8			(*uuids)[16];
	unsigned long		scan_start;
	unsigned long		scan_duration;
};

struct hci_conn_hash {
	struct list_head list;
	unsigned int     acl_num;
	unsigned int     amp_num;
	unsigned int     sco_num;
	unsigned int     le_num;
	unsigned int     le_num_slave;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
};

struct bt_uuid {
	struct list_head list;
	u8 uuid[16];
	u8 size;
	u8 svc_hint;
};

struct smp_csrk {
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 type;
	u8 val[16];
};

struct smp_ltk {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 authenticated;
	u8 type;
	u8 enc_size;
	__le16 ediv;
	__le64 rand;
	u8 val[16];
};

struct smp_irk {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t rpa;
	bdaddr_t bdaddr;
	u8 addr_type;
	u8 val[16];
};

struct link_key {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t bdaddr;
	u8 type;
	u8 val[HCI_LINK_KEY_SIZE];
	u8 pin_len;
};

struct oob_data {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 present;
	u8 hash192[16];
	u8 rand192[16];
	u8 hash256[16];
	u8 rand256[16];
};

struct adv_info {
	struct list_head list;
	bool pending;
	__u8	instance;
	__u32	flags;
	__u16	timeout;
	__u16	remaining_time;
	__u16	duration;
	__u16	adv_data_len;
	__u8	adv_data[HCI_MAX_AD_LENGTH];
	__u16	scan_rsp_len;
	__u8	scan_rsp_data[HCI_MAX_AD_LENGTH];
};

#define HCI_MAX_ADV_INSTANCES		5
#define HCI_DEFAULT_ADV_DURATION	2

#define HCI_MAX_SHORT_NAME_LENGTH	10

/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT		(15 * 60)

/* Default min/max age of connection information (1s/3s) */
#define DEFAULT_CONN_INFO_MIN_AGE	1000
#define DEFAULT_CONN_INFO_MAX_AGE	3000

struct amp_assoc {
	__u16	len;
	__u16	offset;
	__u16	rem_len;
	__u16	len_so_far;
	__u8	data[HCI_MAX_AMP_ASSOC_SIZE];
};

#define HCI_MAX_PAGES	3

struct hci_dev {
	struct list_head list;
	struct mutex	lock;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	bdaddr_t	setup_addr;
	bdaddr_t	public_addr;
	bdaddr_t	random_addr;
	bdaddr_t	static_addr;
	__u8		adv_addr_type;
	__u8		dev_name[HCI_MAX_NAME_LENGTH];
	__u8		short_name[HCI_MAX_SHORT_NAME_LENGTH];
	__u8		eir[HCI_MAX_EIR_LENGTH];
	__u8		dev_class[3];
	__u8		major_class;
	__u8		minor_class;
	__u8		max_page;
	__u8		features[HCI_MAX_PAGES][8];
	__u8		le_features[8];
	__u8		le_white_list_size;
	__u8		le_states[8];
	__u8		commands[64];
	__u8		hci_ver;
	__u16		hci_rev;
	__u8		lmp_ver;
	__u16		manufacturer;
	__u16		lmp_subver;
	__u16		voice_setting;
	__u8		num_iac;
	__u8		stored_max_keys;
	__u8		stored_num_keys;
	__u8		io_capability;
	__s8		inq_tx_power;
	__u16		page_scan_interval;
	__u16		page_scan_window;
	__u8		page_scan_type;
	__u8		le_adv_channel_map;
	__u16		le_adv_min_interval;
	__u16		le_adv_max_interval;
	__u8		le_scan_type;
	__u16		le_scan_interval;
	__u16		le_scan_window;
	__u16		le_conn_min_interval;
	__u16		le_conn_max_interval;
	__u16		le_conn_latency;
	__u16		le_supv_timeout;
	__u16		le_def_tx_len;
	__u16		le_def_tx_time;
	__u16		le_max_tx_len;
	__u16		le_max_tx_time;
	__u16		le_max_rx_len;
	__u16		le_max_rx_time;
	__u16		discov_interleaved_timeout;
	__u16		conn_info_min_age;
	__u16		conn_info_max_age;
	__u8		ssp_debug_mode;
	__u8		hw_error_code;
	__u32		clock;

	__u16		devid_source;
	__u16		devid_vendor;
	__u16		devid_product;
	__u16		devid_version;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	__u8		amp_status;
	__u32		amp_total_bw;
	__u32		amp_max_bw;
	__u32		amp_min_latency;
	__u32		amp_max_pdu;
	__u8		amp_type;
	__u16		amp_pal_cap;
	__u16		amp_assoc_size;
	__u32		amp_max_flush_to;
	__u32		amp_be_flush_to;

	struct amp_assoc	loc_assoc;

	__u8		flow_ctl_mode;

	unsigned int	auto_accept_delay;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;
	unsigned int	le_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	le_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;
	unsigned int	le_pkts;

	__u16		block_len;
	__u16		block_mtu;
	__u16		num_blocks;
	__u16		block_cnt;

	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;
	unsigned long	le_last_tx;

	struct workqueue_struct	*workqueue;
	struct workqueue_struct	*req_workqueue;

	struct work_struct	power_on;
	struct delayed_work	power_off;
	struct work_struct	error_reset;

	__u16			discov_timeout;
	struct delayed_work	discov_off;

	struct delayed_work	service_cache;

	struct delayed_work	cmd_timer;

	struct work_struct	rx_work;
	struct work_struct	cmd_work;
	struct work_struct	tx_work;

	struct work_struct	discov_update;
	struct work_struct	bg_scan_update;
	struct work_struct	scan_update;
	struct work_struct	connectable_update;
	struct work_struct	discoverable_update;
	struct delayed_work	le_scan_disable;
	struct delayed_work	le_scan_restart;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;
	struct sk_buff		*req_skb;

	void			*smp_data;
	void			*smp_bredr_data;

	struct discovery_state	discovery;
	struct hci_conn_hash	conn_hash;

	struct list_head	mgmt_pending;
	struct list_head	blacklist;
	struct list_head	whitelist;
	struct list_head	uuids;
	struct list_head	link_keys;
	struct list_head	long_term_keys;
	struct list_head	identity_resolving_keys;
	struct list_head	remote_oob_data;
	struct list_head	le_white_list;
	struct list_head	le_conn_params;
	struct list_head	pend_le_conns;
	struct list_head	pend_le_reports;

	struct hci_dev_stats	stat;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		dev;

	struct rfkill		*rfkill;

	DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);

	__s8			adv_tx_power;
	__u8			adv_data[HCI_MAX_AD_LENGTH];
	__u8			adv_data_len;
	__u8			scan_rsp_data[HCI_MAX_AD_LENGTH];
	__u8			scan_rsp_data_len;

	struct list_head	adv_instances;
	unsigned int		adv_instance_cnt;
	__u8			cur_adv_instance;
	__u16			adv_instance_timeout;
	struct delayed_work	adv_instance_expire;

	__u8			irk[16];
	__u32			rpa_timeout;
	struct delayed_work	rpa_expired;
	bdaddr_t		rpa;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*setup)(struct hci_dev *hdev);
	int (*shutdown)(struct hci_dev *hdev);
	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	void (*hw_error)(struct hci_dev *hdev, u8 code);
	int (*post_init)(struct hci_dev *hdev);
	int (*set_diag)(struct hci_dev *hdev, bool enable);
	int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
};

#define HCI_PHY_HANDLE(handle)	(handle & 0xff)

struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;

	bdaddr_t	dst;
	__u8		dst_type;
	bdaddr_t	src;
	__u8		src_type;
	bdaddr_t	init_addr;
	__u8		init_addr_type;
	bdaddr_t	resp_addr;
	__u8		resp_addr_type;
	__u16		handle;
	__u16		state;
	__u8		mode;
	__u8		type;
	__u8		role;
	bool		out;
	__u8		attempt;
	__u8		dev_class[3];
	__u8		features[HCI_MAX_PAGES][8];
	__u16		pkt_type;
	__u16		link_policy;
	__u8		key_type;
	__u8		auth_type;
	__u8		sec_level;
	__u8		pending_sec_level;
	__u8		pin_length;
	__u8		enc_key_size;
	__u8		io_capability;
	__u32		passkey_notify;
	__u8		passkey_entered;
	__u16		disc_timeout;
	__u16		conn_timeout;
	__u16		setting;
	__u16		le_conn_min_interval;
	__u16		le_conn_max_interval;
	__u16		le_conn_interval;
	__u16		le_conn_latency;
	__u16		le_supv_timeout;
	__u8		le_adv_data[HCI_MAX_AD_LENGTH];
	__u8		le_adv_data_len;
	__s8		rssi;
	__s8		tx_power;
	__s8		max_tx_power;
	unsigned long	flags;

	__u32		clock;
	__u16		clock_accuracy;

	unsigned long	conn_info_timestamp;

	__u8		remote_cap;
	__u8		remote_auth;
	__u8		remote_id;

	unsigned int	sent;

	struct sk_buff_head data_q;
	struct list_head chan_list;

	struct delayed_work disc_work;
	struct delayed_work auto_accept_work;
	struct delayed_work idle_work;
	struct delayed_work le_conn_timeout;
	struct work_struct  le_scan_cleanup;

	struct device	dev;
	struct dentry	*debugfs;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	struct amp_mgr	*amp_mgr;

	struct hci_conn	*link;

	void (*connect_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*security_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*disconn_cfm_cb)	(struct hci_conn *conn, u8 reason);
};

struct hci_chan {
	struct list_head list;
	__u16 handle;
	struct hci_conn *conn;
	struct sk_buff_head data_q;
	unsigned int	sent;
	__u8		state;
};

struct hci_conn_params {
	struct list_head list;
	struct list_head action;

	bdaddr_t addr;
	u8 addr_type;

	u16 conn_min_interval;
	u16 conn_max_interval;
	u16 conn_latency;
	u16 supervision_timeout;

	enum {
		HCI_AUTO_CONN_DISABLED,
		HCI_AUTO_CONN_REPORT,
		HCI_AUTO_CONN_DIRECT,
		HCI_AUTO_CONN_ALWAYS,
		HCI_AUTO_CONN_LINK_LOSS,
		HCI_AUTO_CONN_EXPLICIT,
	} auto_connect;

	struct hci_conn *conn;
	bool explicit_connect;
};

extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern struct mutex hci_cb_list_lock;

#define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)           clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr)          change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr)            test_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_set_flag(hdev, nr)    test_and_set_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_clear_flag(hdev, nr)  test_and_clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)

#define hci_dev_clear_volatile_flags(hdev)			\
	do {							\
		hci_dev_clear_flag(hdev, HCI_LE_SCAN);		\
		hci_dev_clear_flag(hdev, HCI_LE_ADV);		\
		hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);	\
	} while (0)
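
/* Minimal usage sketch (illustrative, hypothetical caller): dev_flags is an
 * atomic bitmap, so a scan-start path can guard against a double start with a
 * single test-and-set and undo it again on stop:
 *
 *	if (hci_dev_test_and_set_flag(hdev, HCI_LE_SCAN))
 *		return;
 *	. . .
 *	hci_dev_clear_flag(hdev, HCI_LE_SCAN);
 */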

/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);

#if IS_ENABLED(CONFIG_BT_BREDR)
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
#else
static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  __u8 *flags)
{
	return 0;
}

static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
}
#endif

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */

static inline void discovery_init(struct hci_dev *hdev)
{
	hdev->discovery.state = DISCOVERY_STOPPED;
	INIT_LIST_HEAD(&hdev->discovery.all);
	INIT_LIST_HEAD(&hdev->discovery.unknown);
	INIT_LIST_HEAD(&hdev->discovery.resolve);
	hdev->discovery.report_invalid_rssi = true;
	hdev->discovery.rssi = HCI_RSSI_INVALID;
}

static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
{
	hdev->discovery.result_filtering = false;
	hdev->discovery.report_invalid_rssi = true;
	hdev->discovery.rssi = HCI_RSSI_INVALID;
	hdev->discovery.uuid_count = 0;
	kfree(hdev->discovery.uuids);
	hdev->discovery.uuids = NULL;
	hdev->discovery.scan_start = 0;
	hdev->discovery.scan_duration = 0;
}

bool hci_discovery_active(struct hci_dev *hdev);

void hci_discovery_set_state(struct hci_dev *hdev, int state);

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	return list_empty(&hdev->discovery.all);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct discovery_state *c = &hdev->discovery;

	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state);
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie);
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known);
void hci_inquiry_cache_flush(struct hci_dev *hdev);

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_REAUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
	HCI_CONN_MGMT_CONNECTED,
	HCI_CONN_SSP_ENABLED,
	HCI_CONN_SC_ENABLED,
	HCI_CONN_AES_CCM,
	HCI_CONN_POWER_SAVE,
	HCI_CONN_FLUSH_KEY,
	HCI_CONN_ENCRYPT,
	HCI_CONN_AUTH,
	HCI_CONN_SECURE,
	HCI_CONN_FIPS,
	HCI_CONN_STK_ENCRYPT,
	HCI_CONN_AUTH_INITIATOR,
	HCI_CONN_DROP,
	HCI_CONN_PARAM_REMOVAL_PEND,
	HCI_CONN_NEW_LINK_KEY,
	HCI_CONN_SCANNING,
};

static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	       test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}

static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	       test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;

	list_add_rcu(&c->list, &h->list);
	switch (c->type) {
	case ACL_LINK:
		h->acl_num++;
		break;
	case AMP_LINK:
		h->amp_num++;
		break;
	case LE_LINK:
		h->le_num++;
		if (c->role == HCI_ROLE_SLAVE)
			h->le_num_slave++;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num++;
		break;
	}
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;

	list_del_rcu(&c->list);
	synchronize_rcu();

	switch (c->type) {
	case ACL_LINK:
		h->acl_num--;
		break;
	case AMP_LINK:
		h->amp_num--;
		break;
	case LE_LINK:
		h->le_num--;
		if (c->role == HCI_ROLE_SLAVE)
			h->le_num_slave--;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num--;
		break;
	}
}

static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;

	switch (type) {
	case ACL_LINK:
		return h->acl_num;
	case AMP_LINK:
		return h->amp_num;
	case LE_LINK:
		return h->le_num;
	case SCO_LINK:
	case ESCO_LINK:
		return h->sco_num;
	default:
		return 0;
	}
}

static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
	struct hci_conn_hash *c = &hdev->conn_hash;

	return c->acl_num + c->amp_num + c->sco_num + c->le_num;
}

static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;
	__u8 type = INVALID_LINK;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			type = c->type;
			break;
		}
	}

	rcu_read_unlock();

	return type;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
								__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			rcu_read_unlock();
			return c;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && !bacmp(&c->dst, ba)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
							bdaddr_t *ba,
							__u8 ba_type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != LE_LINK)
			continue;

		if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->state == state) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    !test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);

struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);

void hci_le_conn_failed(struct hci_conn *conn, u8 status);

/*
 * hci_conn_get() and hci_conn_put() are used to control the life-time of an
 * "hci_conn" object. They do not guarantee that the hci_conn object is running,
 * working or anything else. They just guarantee that the object is available
 * and can be dereferenced. So you can use its locks, local variables and any
 * other constant data.
 * Before accessing runtime data, you _must_ lock the object and then check that
 * it is still running. As soon as you release the locks, the connection might
 * get dropped, though.
 *
 * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control
 * how long the underlying connection is held. So every channel that runs on
 * the hci_conn object calls this to prevent the connection from disappearing.
 * As long as you hold a device, you must also guarantee that you have a valid
 * reference to the device via hci_conn_get() (or the initial reference from
 * hci_conn_add()).
 * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't
 * break because nobody cares for that. But this means, we cannot use
 * _get()/_drop() in it, but require the caller to have a valid ref (FIXME).
 */
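
/* Illustrative sketch (hypothetical user, not part of this header): a channel
 * that needs both the object and the live link pairs the two reference types,
 * taking them before use and releasing them in reverse order:
 *
 *	struct hci_conn *c = hci_conn_get(conn);
 *	hci_conn_hold(c);
 *	. . . access runtime data under hci_dev_lock(c->hdev) . . .
 *	hci_conn_drop(c);
 *	hci_conn_put(c);
 */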

static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
{
	get_device(&conn->dev);

	return conn;
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	put_device(&conn->dev);
}

static inline void hci_conn_hold(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	atomic_inc(&conn->refcnt);
	cancel_delayed_work(&conn->disc_work);
}

static inline void hci_conn_drop(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;

		switch (conn->type) {
		case ACL_LINK:
		case LE_LINK:
			cancel_delayed_work(&conn->idle_work);
			if (conn->state == BT_CONNECTED) {
				timeo = conn->disc_timeout;
				if (!conn->out)
					timeo *= 2;
			} else {
				timeo = 0;
			}
			break;

		case AMP_LINK:
			timeo = conn->disc_timeout;
			break;

		default:
			timeo = 0;
			break;
		}

		cancel_delayed_work(&conn->disc_work);
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->disc_work, timeo);
	}
}

/* ----- HCI Devices ----- */
static inline void hci_dev_put(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       atomic_read(&d->dev.kobj.kref.refcount));

	put_device(&d->dev);
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       atomic_read(&d->dev.kobj.kref.refcount));

	get_device(&d->dev);
	return d;
}

#define hci_dev_lock(d)		mutex_lock(&d->lock)
#define hci_dev_unlock(d)	mutex_unlock(&d->lock)

#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
#define to_hci_conn(c) container_of(c, struct hci_conn, dev)

static inline void *hci_get_drvdata(struct hci_dev *hdev)
{
	return dev_get_drvdata(&hdev->dev);
}

static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
{
	dev_set_drvdata(&hdev->dev, data);
}

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
					   bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
void hci_bdaddr_list_clear(struct list_head *list);

struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type);
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type);
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
void hci_conn_params_clear_disabled(struct hci_dev *hdev);

struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr,
						  u8 addr_type);

void hci_uuids_clear(struct hci_dev *hdev);

void hci_link_keys_clear(struct hci_dev *hdev);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent);
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
void hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type);
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa);
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
void hci_smp_irks_clear(struct hci_dev *hdev);

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);

void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type);
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type);

void hci_adv_instances_clear(struct hci_dev *hdev);
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance);
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance);
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);

void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev)   ((dev)->features[0][0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev)   ((dev)->features[0][0] & LMP_RSWITCH)
#define lmp_hold_capable(dev)      ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev)     ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev)      ((dev)->features[0][1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev)  ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev)      ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev)     (!((dev)->features[0][4] & LMP_NO_BREDR))
#define lmp_le_capable(dev)        ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
#define lmp_ext_inq_capable(dev)   ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev)     (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev)       ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)  ((dev)->features[0][6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev)      ((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev)  ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev)    ((dev)->features[0][2] & LMP_TRANSPARENT)

/* ----- Extended LMP capabilities ----- */
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
#define lmp_csb_slave_capable(dev)  ((dev)->features[2][0] & LMP_CSB_SLAVE)
#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
#define lmp_sync_scan_capable(dev)  ((dev)->features[2][0] & LMP_SYNC_SCAN)
#define lmp_sc_capable(dev)         ((dev)->features[2][1] & LMP_SC)
#define lmp_ping_capable(dev)       ((dev)->features[2][1] & LMP_PING)

/* ----- Host capabilities ----- */
#define lmp_host_ssp_capable(dev)  ((dev)->features[1][0] & LMP_HOST_SSP)
#define lmp_host_sc_capable(dev)   ((dev)->features[1][0] & LMP_HOST_SC)
#define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))

#define hdev_is_powered(dev)	(test_bit(HCI_UP, &(dev)->flags) && \
				!hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev)	(lmp_sc_capable(dev) && \
				hci_dev_test_flag(dev, HCI_SC_ENABLED))
/* ----- HCI protocols ----- */
2012-11-21 13:51:12 +04:00
# define HCI_PROTO_DEFER 0x01
2011-06-07 13:18:06 +04:00
static inline int hci_proto_connect_ind ( struct hci_dev * hdev , bdaddr_t * bdaddr ,
2012-11-21 13:51:12 +04:00
__u8 type , __u8 * flags )
2005-04-17 02:20:36 +04:00
{
2011-12-21 16:11:33 +04:00
switch ( type ) {
case ACL_LINK :
return l2cap_connect_ind ( hdev , bdaddr ) ;
2005-04-17 02:20:36 +04:00
2011-12-21 16:11:33 +04:00
case SCO_LINK :
case ESCO_LINK :
2012-11-21 13:51:12 +04:00
return sco_connect_ind ( hdev , bdaddr , flags ) ;
2005-04-17 02:20:36 +04:00
2011-12-21 16:11:33 +04:00
default :
BT_ERR ( " unknown link type %d " , type ) ;
return - EINVAL ;
}
2005-04-17 02:20:36 +04:00
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
        if (conn->type != ACL_LINK && conn->type != LE_LINK)
                return HCI_ERROR_REMOTE_USER_TERM;

        return l2cap_disconn_ind(conn);
}

/* ----- HCI callbacks ----- */
struct hci_cb {
        struct list_head list;

        char *name;

        void (*connect_cfm)(struct hci_conn *conn, __u8 status);
        void (*disconn_cfm)(struct hci_conn *conn, __u8 status);
        void (*security_cfm)(struct hci_conn *conn, __u8 status,
                             __u8 encrypt);
        void (*key_change_cfm)(struct hci_conn *conn, __u8 status);
        void (*role_switch_cfm)(struct hci_conn *conn, __u8 status, __u8 role);
};
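
/*
 * Illustrative sketch (hypothetical names, guarded out): a protocol layer
 * fills in a struct hci_cb with the notifications it cares about, leaves the
 * rest NULL, and registers it with hci_register_cb() (declared further
 * below); hci_unregister_cb() removes it again.
 */
#if 0   /* example only */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        if (!status)
                BT_DBG("conn %p established", conn);
}

static struct hci_cb example_cb = {
        .name        = "example",
        .connect_cfm = example_connect_cfm,
};

/* hci_register_cb(&example_cb) at module init,
 * hci_unregister_cb(&example_cb) at module exit.
 */
#endif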

static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
        struct hci_cb *cb;

        mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->connect_cfm)
                        cb->connect_cfm(conn, status);
        }
        mutex_unlock(&hci_cb_list_lock);

        if (conn->connect_cfm_cb)
                conn->connect_cfm_cb(conn, status);
}

static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
        struct hci_cb *cb;

        mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->disconn_cfm)
                        cb->disconn_cfm(conn, reason);
        }
        mutex_unlock(&hci_cb_list_lock);

        if (conn->disconn_cfm_cb)
                conn->disconn_cfm_cb(conn, reason);
}

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
        struct hci_cb *cb;
        __u8 encrypt;

        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
                return;

        encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;

        mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
        mutex_unlock(&hci_cb_list_lock);

        if (conn->security_cfm_cb)
                conn->security_cfm_cb(conn, status);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
                                   __u8 encrypt)
{
        struct hci_cb *cb;

        if (conn->sec_level == BT_SECURITY_SDP)
                conn->sec_level = BT_SECURITY_LOW;

        if (conn->pending_sec_level > conn->sec_level)
                conn->sec_level = conn->pending_sec_level;

        mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->security_cfm)
                        cb->security_cfm(conn, status, encrypt);
        }
        mutex_unlock(&hci_cb_list_lock);

        if (conn->security_cfm_cb)
                conn->security_cfm_cb(conn, status);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
        struct hci_cb *cb;

        mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->key_change_cfm)
                        cb->key_change_cfm(conn, status);
        }
        mutex_unlock(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
                                       __u8 role)
{
        struct hci_cb *cb;

        mutex_lock(&hci_cb_list_lock);
        list_for_each_entry(cb, &hci_cb_list, list) {
                if (cb->role_switch_cfm)
                        cb->role_switch_cfm(conn, status, role);
        }
        mutex_unlock(&hci_cb_list_lock);
}

static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
                                 size_t *data_len)
{
        size_t parsed = 0;

        if (eir_len < 2)
                return NULL;

        while (parsed < eir_len - 1) {
                u8 field_len = eir[0];

                if (field_len == 0)
                        break;

                parsed += field_len + 1;

                if (parsed > eir_len)
                        break;

                if (eir[1] != type) {
                        eir += field_len + 1;
                        continue;
                }

                /* Zero length data */
                if (field_len == 1)
                        return NULL;

                if (data_len)
                        *data_len = field_len - 1;

                return &eir[2];
        }

        return NULL;
}
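
/*
 * Illustrative sketch (hypothetical helper name): eir_get_data() walks the
 * length/type/value fields of an EIR or advertising data blob; for example,
 * the complete local name (type EIR_NAME_COMPLETE from hci.h) can be pulled
 * out like this.
 */
static inline void *example_eir_get_name(u8 *eir, size_t eir_len,
                                         size_t *name_len)
{
        return eir_get_data(eir, eir_len, EIR_NAME_COMPLETE, name_len);
}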

static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
        if (addr_type != ADDR_LE_DEV_RANDOM)
                return false;

        /* Resolvable private addresses have the two most significant bits
         * set to 01 (bdaddr_t is stored little endian, so the most
         * significant byte is b[5]).
         */
        if ((bdaddr->b[5] & 0xc0) == 0x40)
                return true;

        return false;
}

static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
{
        if (addr_type == ADDR_LE_DEV_PUBLIC)
                return true;

        /* Check for Random Static address type (two most significant bits
         * set to 11).
         */
        if ((addr->b[5] & 0xc0) == 0xc0)
                return true;

        return false;
}

static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 addr_type)
{
        if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
                return NULL;

        return hci_find_irk_by_rpa(hdev, bdaddr);
}
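
/*
 * Illustrative sketch (hypothetical helper name): public and static random
 * addresses identify a device directly, while a resolvable private address
 * only maps back to a device if a matching IRK is stored, which is what
 * hci_get_irk() checks.
 */
static inline bool example_le_peer_is_known(struct hci_dev *hdev,
                                            bdaddr_t *bdaddr, u8 addr_type)
{
        if (hci_is_identity_address(bdaddr, addr_type))
                return true;

        /* An RPA is only meaningful if we hold the peer's IRK */
        return hci_get_irk(hdev, bdaddr, addr_type) != NULL;
}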

static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
                                        u16 to_multiplier)
{
        u16 max_latency;

        if (min > max || min < 6 || max > 3200)
                return -EINVAL;

        if (to_multiplier < 10 || to_multiplier > 3200)
                return -EINVAL;

        if (max >= to_multiplier * 8)
                return -EINVAL;

        max_latency = (to_multiplier * 4 / max) - 1;
        if (latency > 499 || latency > max_latency)
                return -EINVAL;

        return 0;
}
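
/*
 * Illustrative worked example (hypothetical helper name): the interval
 * arguments are in units of 1.25 ms and the supervision timeout in units of
 * 10 ms, so the call below asks for a 30-50 ms connection interval, no slave
 * latency and a 4 second supervision timeout; hci_check_conn_params()
 * accepts these values and returns 0.
 */
static inline int example_check_typical_conn_params(void)
{
        return hci_check_conn_params(0x0018, 0x0028, 0x0000, 0x0190);
}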

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                             const void *param, u32 timeout);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
                         int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);

void hci_sock_dev_event(struct hci_dev *hdev, int event);

#define HCI_MGMT_VAR_LEN        BIT(0)
#define HCI_MGMT_NO_HDEV        BIT(1)
#define HCI_MGMT_UNTRUSTED      BIT(2)
#define HCI_MGMT_UNCONFIGURED   BIT(3)

struct hci_mgmt_handler {
        int (*func)(struct sock *sk, struct hci_dev *hdev, void *data,
                    u16 data_len);
        size_t data_len;
        unsigned long flags;
};

struct hci_mgmt_chan {
        struct list_head list;
        unsigned short channel;
        size_t handler_count;
        const struct hci_mgmt_handler *handlers;
        void (*hdev_init)(struct sock *sk, struct hci_dev *hdev);
};

int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
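
/*
 * Illustrative sketch (hypothetical names and channel number, guarded out):
 * a management channel bundles a table of command handlers and is made
 * available to user space with hci_mgmt_chan_register(). A handler that
 * accepts a variable-length payload would set HCI_MGMT_VAR_LEN, and one that
 * does not operate on a specific controller would set HCI_MGMT_NO_HDEV.
 */
#if 0   /* example only */
static int example_cmd(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 data_len)
{
        return 0;
}

static const struct hci_mgmt_handler example_handlers[] = {
        { example_cmd, 0, HCI_MGMT_VAR_LEN },
};

static struct hci_mgmt_chan example_chan = {
        .channel       = 42,    /* hypothetical channel number */
        .handler_count = ARRAY_SIZE(example_handlers),
        .handlers      = example_handlers,
};
#endif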

/* Management interface */
#define DISCOV_TYPE_BREDR               (BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE                  (BIT(BDADDR_LE_PUBLIC) | \
                                         BIT(BDADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED         (BIT(BDADDR_BREDR) | \
                                         BIT(BDADDR_LE_PUBLIC) | \
                                         BIT(BDADDR_LE_RANDOM))

/* These LE scan and inquiry parameters were chosen according to the LE
 * General Discovery Procedure specification. The scan interval and window
 * are expressed in units of 0.625 ms, so 0x12 corresponds to 11.25 ms.
 */
#define DISCOV_LE_SCAN_WIN              0x12
#define DISCOV_LE_SCAN_INT              0x12
#define DISCOV_LE_TIMEOUT               10240   /* msec */
#define DISCOV_INTERLEAVED_TIMEOUT      5120    /* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN  0x04
#define DISCOV_BREDR_INQUIRY_LEN        0x08
#define DISCOV_LE_RESTART_DELAY         msecs_to_jiffies(200)   /* msec */

int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
void mgmt_power_on(struct hci_dev *hdev, int err);
void __mgmt_power_off(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
                       bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
                           u32 flags, u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 link_type, u8 addr_type, u8 reason,
                              bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 link_type, u8 addr_type, u8 status);
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                         u8 addr_type, u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                  u8 status);
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                      u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 link_type, u8 addr_type, u32 value,
                              u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                         u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 link_type, u8 addr_type);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                         u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 link_type, u8 addr_type, u32 passkey,
                             u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
                                    u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
                       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
                   bool persistent);
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
                         u8 bdaddr_type, u8 store_hint, u16 min_interval,
                         u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev);
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
                            u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
                              u8 instance);

u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
                      u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
                      __u8 ltk[16], __u8 key_size);

void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type);

#define SCO_AIRMODE_MASK        0x0003
#define SCO_AIRMODE_CVSD        0x0000
#define SCO_AIRMODE_TRANSP      0x0003

#endif /* __HCI_CORE_H */