// SPDX-License-Identifier: GPL-2.0-only
/*
 * Bluetooth support for Qualcomm Atheros chips
 *
 * Copyright (c) 2015 The Linux Foundation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/firmware.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btqca.h"

#define VERSION "0.1"
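
/* Query the controller for its SoC/firmware version via the EDL
 * patch-version vendor command and report it through soc_version as
 * (soc_id << 16) | rom_ver. Returns 0 on success or a negative errno.
 */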
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version,
			 enum qca_btsoc_type soc_type)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct qca_btsoc_version *ver;
	char cmd;
	int err = 0;
	u8 event_type = HCI_EV_VENDOR;
	u8 rlen = sizeof(*edl) + sizeof(*ver);
	u8 rtype = EDL_APP_VER_RES_EVT;

	bt_dev_dbg(hdev, "QCA Version Request");

	/* Unlike other SoCs, which return the version command response as
	 * the payload of a VSE event, the WCN3991 returns it as the payload
	 * of a command complete event.
	 */
	if (soc_type == QCA_WCN3991) {
		event_type = 0;
		rlen += 1;
		rtype = EDL_PATCH_VER_REQ_CMD;
	}

	cmd = EDL_PATCH_VER_REQ_CMD;
	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
				&cmd, event_type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Reading QCA version information failed (%d)",
			   err);
		return err;
	}

	if (skb->len != rlen) {
		bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len);
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl) {
		bt_dev_err(hdev, "QCA TLV with no header");
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
	    edl->rtype != rtype) {
		bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
			   edl->rtype);
		err = -EIO;
		goto out;
	}

	if (soc_type == QCA_WCN3991)
		memmove(&edl->data, &edl->data[1], sizeof(*ver));

	ver = (struct qca_btsoc_version *)(edl->data);

	BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
	BT_DBG("%s: Patch  :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
	BT_DBG("%s: ROM    :0x%08x", hdev->name, le16_to_cpu(ver->rom_ver));
	BT_DBG("%s: SOC    :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));

	/* The reported chipset version combines both fields: the upper
	 * 2 bytes come from the SoC ID and the lower 2 bytes from the
	 * ROM version.
	 */
	*soc_version = (le32_to_cpu(ver->soc_id) << 16) |
		       (le16_to_cpu(ver->rom_ver) & 0x0000ffff);
	if (*soc_version == 0)
		err = -EILSEQ;

out:
	kfree_skb(skb);
	if (err)
		bt_dev_err(hdev, "QCA Failed to get version (%d)", err);

	return err;
}
EXPORT_SYMBOL_GPL(qca_read_soc_version);
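
/* Issue a standard HCI_Reset to the controller and wait for its
 * completion.
 */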
static int qca_send_reset(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	bt_dev_dbg(hdev, "QCA HCI_RESET");

	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Reset failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
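
/* Send the vendor pre-shutdown command (QCA_PRE_SHUTDOWN_CMD) and wait
 * for its command complete event, giving the controller a chance to
 * quiesce before it is shut down.
 */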
int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	int err;

	bt_dev_dbg(hdev, "QCA pre shutdown cmd");

	skb = __hci_cmd_sync_ev(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
				NULL, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
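
/* Parse the TLV header of a firmware image. For patch (rampatch) files
 * this records the download mode advertised by the image; for NVM files
 * it updates selected tags in place (HCI transport parameters and the
 * deep-sleep enable bit) before the image is sent to the controller.
 */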
static void qca_tlv_check_data(struct qca_fw_config *config,
			       const struct firmware *fw)
{
	const u8 *data;
	u32 type_len;
	u16 tag_id, tag_len;
	int idx, length;
	struct tlv_type_hdr *tlv;
	struct tlv_type_patch *tlv_patch;
	struct tlv_type_nvm *tlv_nvm;

	tlv = (struct tlv_type_hdr *)fw->data;

	type_len = le32_to_cpu(tlv->type_len);
	length = (type_len >> 8) & 0x00ffffff;

	BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
	BT_DBG("Length\t\t : %d bytes", length);

	config->dnld_mode = QCA_SKIP_EVT_NONE;
	config->dnld_type = QCA_SKIP_EVT_NONE;

	switch (config->type) {
	case TLV_TYPE_PATCH:
		tlv_patch = (struct tlv_type_patch *)tlv->data;

		/* For Rome version 1.1 to 3.1, all segment commands
		 * are acked by a vendor specific event (VSE).
		 * For Rome >= 3.2, the download mode field indicates
		 * if VSE is skipped by the controller.
		 * In case VSE is skipped, only the last segment is acked.
		 */
		config->dnld_mode = tlv_patch->download_mode;
		config->dnld_type = config->dnld_mode;

		BT_DBG("Total Length           : %d bytes",
		       le32_to_cpu(tlv_patch->total_size));
		BT_DBG("Patch Data Length      : %d bytes",
		       le32_to_cpu(tlv_patch->data_length));
		BT_DBG("Signing Format Version : 0x%x",
		       tlv_patch->format_version);
		BT_DBG("Signature Algorithm    : 0x%x",
		       tlv_patch->signature);
		BT_DBG("Download mode          : 0x%x",
		       tlv_patch->download_mode);
		BT_DBG("Reserved               : 0x%x",
		       tlv_patch->reserved1);
		BT_DBG("Product ID             : 0x%04x",
		       le16_to_cpu(tlv_patch->product_id));
		BT_DBG("Rom Build Version      : 0x%04x",
		       le16_to_cpu(tlv_patch->rom_build));
		BT_DBG("Patch Version          : 0x%04x",
		       le16_to_cpu(tlv_patch->patch_version));
		BT_DBG("Reserved               : 0x%x",
		       le16_to_cpu(tlv_patch->reserved2));
		BT_DBG("Patch Entry Address    : 0x%x",
		       le32_to_cpu(tlv_patch->entry));
		break;
	case TLV_TYPE_NVM:
		idx = 0;
		data = tlv->data;
		while (idx < length) {
			tlv_nvm = (struct tlv_type_nvm *)(data + idx);

			tag_id = le16_to_cpu(tlv_nvm->tag_id);
			tag_len = le16_to_cpu(tlv_nvm->tag_len);

			/* Update NVM tags as needed */
			switch (tag_id) {
			case EDL_TAG_ID_HCI:
				/* HCI transport layer parameters:
				 * enable software in-band sleep on the
				 * controller side.
				 */
				tlv_nvm->data[0] |= 0x80;

				/* UART Baud Rate */
				tlv_nvm->data[2] = config->user_baud_rate;
				break;

			case EDL_TAG_ID_DEEP_SLEEP:
				/* Sleep enable mask:
				 * enable the deep sleep feature on the
				 * controller.
				 */
				tlv_nvm->data[0] |= 0x01;
				break;
			}

			idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
		}
		break;

	default:
		BT_ERR("Unknown TLV type %d", config->type);
		break;
	}
}
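
/* Send a single TLV segment (at most MAX_SIZE_PER_TLV_SEGMENT bytes) to
 * the controller. Unless the download mode says the response event is
 * skipped, wait for the acknowledgement and validate it: a vendor
 * specific event on most SoCs, a command complete event on WCN3991.
 */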
static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size,
				const u8 *data, enum qca_tlv_dnld_mode mode,
				enum qca_btsoc_type soc_type)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	struct tlv_seg_resp *tlv_resp;
	u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
	int err = 0;
	u8 event_type = HCI_EV_VENDOR;
	u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp));
	u8 rtype = EDL_TVL_DNLD_RES_EVT;

	cmd[0] = EDL_PATCH_TLV_REQ_CMD;
	cmd[1] = seg_size;
	memcpy(cmd + 2, data, seg_size);

	if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE)
		return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2,
				      cmd);

	/* Unlike other SoCs, which acknowledge each TLV segment with a VSE
	 * event, the WCN3991 acknowledges it with a command complete event.
	 */
	if (soc_type == QCA_WCN3991) {
		event_type = 0;
		rlen = sizeof(*edl);
		rtype = EDL_PATCH_TLV_REQ_CMD;
	}

	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
				event_type, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err);
		return err;
	}

	if (skb->len != rlen) {
		bt_dev_err(hdev, "QCA TLV response size mismatch");
		err = -EILSEQ;
		goto out;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl) {
		bt_dev_err(hdev, "TLV with no header");
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) {
		bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x",
			   edl->cresp, edl->rtype);
		err = -EIO;
	}

	if (soc_type == QCA_WCN3991)
		goto out;

	tlv_resp = (struct tlv_seg_resp *)(edl->data);
	if (tlv_resp->result) {
		bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)",
			   edl->cresp, edl->rtype, tlv_resp->result);
	}

out:
	kfree_skb(skb);

	return err;
}
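
/* Fabricate an HCI command complete event and feed it back into the
 * stack, so that a firmware download whose per-segment events are
 * skipped does not end with a command timeout.
 */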
static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;
	struct sk_buff *skb;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = skb_put(skb, sizeof(*evt));
	evt->ncmd = 1;
	evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);

	skb_put_u8(skb, QCA_HCI_CC_SUCCESS);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}
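
/* Request the firmware image named in config->fwname and push it to
 * the controller segment by segment, honouring the download mode
 * parsed from its TLV header.
 */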
static int qca_download_firmware(struct hci_dev *hdev,
				 struct qca_fw_config *config,
				 enum qca_btsoc_type soc_type)
{
	const struct firmware *fw;
	const u8 *segment;
	int ret, remain, i = 0;

	bt_dev_info(hdev, "QCA Downloading %s", config->fwname);

	ret = request_firmware(&fw, config->fwname, &hdev->dev);
	if (ret) {
		bt_dev_err(hdev, "QCA Failed to request file: %s (%d)",
			   config->fwname, ret);
		return ret;
	}

	qca_tlv_check_data(config, fw);

	segment = fw->data;
	remain = fw->size;
	while (remain > 0) {
		int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain);

		bt_dev_dbg(hdev, "Send segment %d, size %d", i++, segsize);

		remain -= segsize;
		/* The last segment is always acked regardless of the
		 * download mode.
		 */
		if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT)
			config->dnld_mode = QCA_SKIP_EVT_NONE;

		ret = qca_tlv_send_segment(hdev, segsize, segment,
					   config->dnld_mode, soc_type);
		if (ret)
			goto out;

		segment += segsize;
	}

	/* Latest Qualcomm chipsets do not send a command complete event for
	 * every firmware packet sent; they only respond with a vendor
	 * specific event for the last packet. This optimization in the chip
	 * reduces the BT initialization time. Inject a command complete
	 * event here to avoid a command timeout error message.
	 */
	if (config->dnld_type == QCA_SKIP_EVT_VSE_CC ||
	    config->dnld_type == QCA_SKIP_EVT_VSE)
		ret = qca_inject_cmd_complete_event(hdev);

out:
	release_firmware(fw);

	return ret;
}
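
/* Set the public Bluetooth device address on ROME controllers by
 * writing NVM tag 0x02 with the EDL NVM access vendor command.
 */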
int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	u8 cmd[9];
	int err;

	cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
	cmd[1] = 0x02;			/* TAG ID */
	cmd[2] = sizeof(bdaddr_t);	/* size */
	memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
	skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Change address command failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
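
/* Full UART setup sequence: download the rampatch image that matches
 * the SoC type and version, then the NVM image (or the explicitly
 * given firmware name), and finish with an HCI reset.
 */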
2018-08-03 15:16:28 +03:00
int qca_uart_setup ( struct hci_dev * hdev , uint8_t baudrate ,
2019-06-06 12:40:30 +03:00
enum qca_btsoc_type soc_type , u32 soc_ver ,
const char * firmware_name )
2015-08-11 00:24:12 +03:00
{
2019-11-06 12:48:31 +03:00
struct qca_fw_config config ;
2015-08-11 00:24:12 +03:00
int err ;
2019-04-26 16:56:01 +03:00
u8 rom_ver = 0 ;
2015-08-11 00:24:12 +03:00
2018-08-03 15:16:27 +03:00
bt_dev_dbg ( hdev , " QCA setup on UART " ) ;
2015-08-11 00:24:12 +03:00
config . user_baud_rate = baudrate ;
/* Download rampatch file */
config . type = TLV_TYPE_PATCH ;
2019-04-26 16:56:01 +03:00
if ( qca_is_wcn399x ( soc_type ) ) {
2018-08-03 15:16:31 +03:00
/* Firmware files to download are based on ROM version.
* ROM version is derived from last two bytes of soc_ver .
*/
rom_ver = ( ( soc_ver & 0x00000f00 ) > > 0x04 ) |
( soc_ver & 0x0000000f ) ;
snprintf ( config . fwname , sizeof ( config . fwname ) ,
" qca/crbtfw%02x.tlv " , rom_ver ) ;
} else {
snprintf ( config . fwname , sizeof ( config . fwname ) ,
" qca/rampatch_%08x.bin " , soc_ver ) ;
}

	err = qca_download_firmware(hdev, &config, soc_type);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to download patch (%d)", err);
		return err;
	}

	/* Give the controller some time to get ready to receive the NVM */
	msleep(10);

	/* Download NVM configuration */
	config.type = TLV_TYPE_NVM;
	if (firmware_name)
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/%s", firmware_name);
	else if (qca_is_wcn399x(soc_type))
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/crnv%02x.bin", rom_ver);
	else
		snprintf(config.fwname, sizeof(config.fwname),
			 "qca/nvm_%08x.bin", soc_ver);

	err = qca_download_firmware(hdev, &config, soc_type);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err);
		return err;
	}

	/* Perform HCI reset */
	err = qca_send_reset(hdev);
	if (err < 0) {
		bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err);
		return err;
	}

	bt_dev_info(hdev, "QCA setup on UART is completed");

	return 0;
}
EXPORT_SYMBOL_GPL(qca_uart_setup);
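
/* Set the public Bluetooth device address using the vendor
 * EDL_WRITE_BD_ADDR_OPCODE command.
 */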
2015-08-11 00:24:12 +03:00
2019-01-16 15:31:15 +03:00
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	struct sk_buff *skb;
	int err;

	skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
				HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);
		return err;
	}

	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr);

MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");