Merge tag 'for-net-next-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Luiz Augusto von Dentz says:

====================
bluetooth-next pull request for net-next:

 - qca: use the power sequencer for QCA6390
 - btusb: mediatek: add ISO data transmission functions
 - hci_bcm4377: Add BCM4388 support
 - btintel: Add support for BlazarU core
 - btintel: Add support for Whale Peak2
 - btnxpuart: Add support for AW693 A1 chipset
 - btnxpuart: Add support for IW615 chipset
 - btusb: Add Realtek RTL8852BE support ID 0x13d3:0x3591

* tag 'for-net-next-2024-07-15' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next: (71 commits)
  Bluetooth: btmtk: Mark all stub functions as inline
  Bluetooth: hci_qca: Fix build error
  Bluetooth: hci_qca: use the power sequencer for wcn7850 and wcn6855
  Bluetooth: hci_qca: make pwrseq calls the default if available
  Bluetooth: hci_qca: unduplicate calls to hci_uart_register_device()
  Bluetooth: hci_qca: schedule a devm action for disabling the clock
  dt-bindings: bluetooth: qualcomm: describe the inputs from PMU for wcn7850
  Bluetooth: btnxpuart: Fix warnings for suspend and resume functions
  Bluetooth: btnxpuart: Add system suspend and resume handlers
  Bluetooth: btnxpuart: Add support for IW615 chipset
  Bluetooth: btnxpuart: Add support for AW693 A1 chipset
  Bluetooth: btintel: Add support for Whale Peak2
  Bluetooth: btintel: Add support for BlazarU core
  Bluetooth: btusb: mediatek: add ISO data transmission functions
  Bluetooth: btmtk: move btusb_recv_acl_mtk to btmtk.c
  Bluetooth: btmtk: move btusb_mtk_[setup, shutdown] to btmtk.c
  Bluetooth: btmtk: move btusb_mtk_hci_wmt_sync to btmtk.c
  Bluetooth: btusb: add callback function in btusb suspend/resume
  Bluetooth: btmtk: rename btmediatek_data
  Bluetooth: btusb: mediatek: return error for failed reg access
  ...
====================

Link: https://patch.msgid.link/20240715142543.303944-1-luiz.dentz@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-07-15 08:27:40 -07:00
commit cd9b6f4795
46 changed files with 3697 additions and 2018 deletions
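
As background for the hci_qca power-sequencer items in the shortlog above: the QCA6390/WCN power-up changes build on the new pwrseq consumer API (see the POWER SEQUENCING entry added to MAINTAINERS below). The helper below is a minimal, hypothetical sketch assuming the devm_pwrseq_get()/pwrseq_power_on() calls from include/linux/pwrseq/consumer.h; it is illustrative only and not code from this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwrseq/consumer.h>

/* Illustrative only: power up a Bluetooth block through the PMU's
 * "bluetooth" power sequencer, as a serial/platform driver might do
 * at probe time.
 */
static int example_bt_pwrseq_on(struct device *dev)
{
	struct pwrseq_desc *pwrseq;
	int ret;

	/* Look up the power sequencer the PMU exposes for Bluetooth */
	pwrseq = devm_pwrseq_get(dev, "bluetooth");
	if (IS_ERR(pwrseq))
		return PTR_ERR(pwrseq);

	/* Run the power-up sequence (regulators, clocks, enable GPIOs) */
	ret = pwrseq_power_on(pwrseq);
	if (ret)
		dev_err(dev, "power sequencing failed: %d\n", ret);

	return ret;
}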


@ -0,0 +1,51 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/bluetooth/mediatek,mt7622-bluetooth.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: MediaTek SoC built-in Bluetooth

description:
  This device is a serial attached device to BTIF device and thus it must be a
  child node of the serial node with BTIF. The dt-bindings details for BTIF
  device can be known via Documentation/devicetree/bindings/serial/8250.yaml.

maintainers:
  - Sean Wang <sean.wang@mediatek.com>

allOf:
  - $ref: bluetooth-controller.yaml#

properties:
  compatible:
    const: mediatek,mt7622-bluetooth

  clocks:
    maxItems: 1

  clock-names:
    const: ref

  power-domains:
    maxItems: 1

required:
  - clocks
  - clock-names
  - power-domains

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/power/mt7622-power.h>

    serial {
        bluetooth {
            compatible = "mediatek,mt7622-bluetooth";
            power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
            clocks = <&clk25m>;
            clock-names = "ref";
        };
    };


@ -31,6 +31,9 @@ properties:
This property depends on the module vendor's
configuration.
firmware-name:
maxItems: 1
required:
- compatible
@ -42,5 +45,6 @@ examples:
bluetooth {
compatible = "nxp,88w8987-bt";
fw-init-baudrate = <3000000>;
firmware-name = "uartuart8987_bt_v0.bin";
};
};


@ -62,6 +62,9 @@ properties:
vdddig-supply:
description: VDD_DIG supply regulator handle
vddbtcmx-supply:
description: VDD_BT_CMX supply regulator handle
vddbtcxmx-supply:
description: VDD_BT_CXMX supply regulator handle
@ -74,6 +77,9 @@ properties:
vddrfa1p7-supply:
description: VDD_RFA_1P7 supply regulator handle
vddrfa1p8-supply:
description: VDD_RFA_1P8 supply regulator handle
vddrfa1p2-supply:
description: VDD_RFA_1P2 supply regulator handle
@ -86,6 +92,12 @@ properties:
vddasd-supply:
description: VDD_ASD supply regulator handle
vddwlcx-supply:
description: VDD_WLCX supply regulator handle
vddwlmx-supply:
description: VDD_WLMX supply regulator handle
max-speed:
description: see Documentation/devicetree/bindings/serial/serial.yaml
@ -176,14 +188,27 @@ allOf:
- qcom,wcn7850-bt
then:
required:
- enable-gpios
- swctrl-gpios
- vddio-supply
- vddrfacmn-supply
- vddaon-supply
- vdddig-supply
- vddwlcx-supply
- vddwlmx-supply
- vddrfa0p8-supply
- vddrfa1p2-supply
- vddrfa1p9-supply
- vddrfa1p8-supply
- if:
properties:
compatible:
contains:
enum:
- qcom,qca6390-bt
then:
required:
- vddrfacmn-supply
- vddaon-supply
- vddbtcmx-supply
- vddrfa0p8-supply
- vddrfa1p2-supply
- vddrfa1p7-supply
examples:
- |


@ -1,39 +1,3 @@
MediaTek SoC built-in Bluetooth Devices
==================================
This device is a serial attached device to BTIF device and thus it must be a
child node of the serial node with BTIF. The dt-bindings details for BTIF
device can be known via Documentation/devicetree/bindings/serial/8250.yaml.
Required properties:
- compatible: Must be
"mediatek,mt7622-bluetooth": for MT7622 SoC
- clocks: Should be the clock specifiers corresponding to the entry in
clock-names property.
- clock-names: Should contain "ref" entries.
- power-domains: Phandle to the power domain that the device is part of
Example:
btif: serial@1100c000 {
compatible = "mediatek,mt7622-btif",
"mediatek,mtk-btif";
reg = <0 0x1100c000 0 0x1000>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_LOW>;
clocks = <&pericfg CLK_PERI_BTIF_PD>;
clock-names = "main";
reg-shift = <2>;
reg-io-width = <4>;
bluetooth {
compatible = "mediatek,mt7622-bluetooth";
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
clocks = <&clk25m>;
clock-names = "ref";
};
};
MediaTek UART based Bluetooth Devices
==================================


@ -17908,6 +17908,14 @@ F: include/linux/pm_*
F: include/linux/powercap.h
F: kernel/configs/nopm.config
POWER SEQUENCING
M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
F: drivers/power/sequencing/
F: include/linux/pwrseq/
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lpieralisi@kernel.org>


@ -105,6 +105,7 @@ config BT_HCIUART
tristate "HCI UART driver"
depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
depends on NVMEM || !NVMEM
depends on POWER_SEQUENCING || !POWER_SEQUENCING
depends on TTY
help
Bluetooth HCI UART driver.
@ -287,12 +288,12 @@ config BT_HCIBCM203X
config BT_HCIBCM4377
tristate "HCI BCM4377/4378/4387 PCIe driver"
tristate "HCI BCM4377/4378/4387/4388 PCIe driver"
depends on PCI
select FW_LOADER
help
Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via
PCIe. These are usually found in Apple machines.
Support for Broadcom BCM4377/4378/4387/4388 Bluetooth chipsets
attached via PCIe. These are usually found in Apple machines.
Say Y here to compile support for HCI BCM4377 family devices into the
kernel or say M to compile it as module (hci_bcm4377).


@ -26,21 +26,11 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
#define BTINTEL_PPAG_NAME "PPAG"
enum {
DSM_SET_WDISABLE2_DELAY = 1,
DSM_SET_RESET_METHOD = 3,
};
/* structure to store the PPAG data read from ACPI table */
struct btintel_ppag {
u32 domain;
u32 mode;
acpi_status status;
struct hci_dev *hdev;
};
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@ -482,6 +472,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x19: /* Slr-F */
case 0x1b: /* Mgr */
case 0x1c: /* Gale Peak (GaP) */
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
break;
default:
@ -641,6 +632,10 @@ int btintel_parse_version_tlv(struct hci_dev *hdev,
case INTEL_TLV_GIT_SHA1:
version->git_sha1 = get_unaligned_le32(tlv->val);
break;
case INTEL_TLV_FW_ID:
snprintf(version->fw_id, sizeof(version->fw_id),
"%s", tlv->val);
break;
default:
/* Ignore rest of information */
break;
@ -1324,65 +1319,6 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
return 0;
}
static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data,
void **ret)
{
acpi_status status;
size_t len;
struct btintel_ppag *ppag = data;
union acpi_object *p, *elements;
struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL};
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
struct hci_dev *hdev = ppag->hdev;
status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
if (ACPI_FAILURE(status)) {
bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return status;
}
len = strlen(string.pointer);
if (len < strlen(BTINTEL_PPAG_NAME)) {
kfree(string.pointer);
return AE_OK;
}
if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
kfree(string.pointer);
return AE_OK;
}
kfree(string.pointer);
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
if (ACPI_FAILURE(status)) {
ppag->status = status;
bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return status;
}
p = buffer.pointer;
ppag = (struct btintel_ppag *)data;
if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
kfree(buffer.pointer);
bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
p->type, p->package.count);
ppag->status = AE_ERROR;
return AE_ERROR;
}
elements = p->package.elements;
/* PPAG table is located at element[1] */
p = &elements[1];
ppag->domain = (u32)p->package.elements[0].integer.value;
ppag->mode = (u32)p->package.elements[1].integer.value;
ppag->status = AE_OK;
kfree(buffer.pointer);
return AE_CTRL_TERMINATE;
}
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
@ -2202,30 +2138,61 @@ static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver,
const char *suffix)
{
const char *format;
/* The firmware file name for new generation controllers will be
* ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>
*/
switch (ver->cnvi_top & 0xfff) {
u32 cnvi, cnvr;
cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
INTEL_CNVX_TOP_STEP(ver->cnvi_top));
cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
INTEL_CNVX_TOP_STEP(ver->cnvr_top));
/* Only Blazar product supports downloading of intermediate loader
* image
*/
case BTINTEL_CNVI_BLAZARI:
if (ver->img_type == BTINTEL_IMG_BOOTLOADER)
format = "intel/ibt-%04x-%04x-iml.%s";
else
format = "intel/ibt-%04x-%04x.%s";
break;
default:
format = "intel/ibt-%04x-%04x.%s";
break;
}
if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e) {
u8 zero[BTINTEL_FWID_MAXLEN];
snprintf(fw_name, len, format,
INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
INTEL_CNVX_TOP_STEP(ver->cnvi_top)),
INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
INTEL_CNVX_TOP_STEP(ver->cnvr_top)),
suffix);
if (ver->img_type == BTINTEL_IMG_BOOTLOADER) {
format = "intel/ibt-%04x-%04x-iml.%s";
snprintf(fw_name, len, format, cnvi, cnvr, suffix);
return;
}
memset(zero, 0, sizeof(zero));
/* ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step-fw_id> */
if (memcmp(ver->fw_id, zero, sizeof(zero))) {
format = "intel/ibt-%04x-%04x-%s.%s";
snprintf(fw_name, len, format, cnvi, cnvr,
ver->fw_id, suffix);
return;
}
/* If firmware id is not present, fallback to legacy naming
* convention
*/
}
/* Fallback to legacy naming convention for other controllers
* ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step>
*/
format = "intel/ibt-%04x-%04x.%s";
snprintf(fw_name, len, format, cnvi, cnvr, suffix);
}
static void btintel_get_iml_tlv(const struct intel_version_tlv *ver,
char *fw_name, size_t len,
const char *suffix)
{
const char *format;
u32 cnvi, cnvr;
cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top),
INTEL_CNVX_TOP_STEP(ver->cnvi_top));
cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top),
INTEL_CNVX_TOP_STEP(ver->cnvr_top));
format = "intel/ibt-%04x-%04x-iml.%s";
snprintf(fw_name, len, format, cnvi, cnvr, suffix);
}
static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
@ -2233,7 +2200,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
u32 *boot_param)
{
const struct firmware *fw;
char fwname[64];
char fwname[128];
int err;
ktime_t calltime;
@ -2268,7 +2235,20 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
}
}
btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
if (ver->img_type == BTINTEL_IMG_OP) {
/* Controller running OP image. In case of FW downgrade,
* FWID TLV may not be present and driver may attempt to load
* firmware image which doesn't exist. Lets compare the version
* of IML image
*/
if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e)
btintel_get_iml_tlv(ver, fwname, sizeof(fwname), "sfi");
else
btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
} else {
btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
}
err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
@ -2427,10 +2407,13 @@ error:
static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
{
struct btintel_ppag ppag;
struct sk_buff *skb;
struct hci_ppag_enable_cmd ppag_cmd;
acpi_handle handle;
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *p, *elements;
u32 domain, mode;
acpi_status status;
/* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
switch (ver->cnvr_top & 0xFFF) {
@ -2448,22 +2431,34 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
return;
}
memset(&ppag, 0, sizeof(ppag));
ppag.hdev = hdev;
ppag.status = AE_NOT_FOUND;
acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL,
btintel_ppag_callback, &ppag, NULL);
if (ACPI_FAILURE(ppag.status)) {
if (ppag.status == AE_NOT_FOUND) {
status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found");
return;
}
bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status));
return;
}
if (ppag.domain != 0x12) {
p = buffer.pointer;
if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d",
p->type, p->package.count);
kfree(buffer.pointer);
return;
}
elements = p->package.elements;
/* PPAG table is located at element[1] */
p = &elements[1];
domain = (u32)p->package.elements[0].integer.value;
mode = (u32)p->package.elements[1].integer.value;
kfree(buffer.pointer);
if (domain != 0x12) {
bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware");
return;
}
@ -2474,19 +2469,22 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver
* BIT 1 : 0 Disabled in China
* 1 Enabled in China
*/
if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) {
bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS");
mode &= 0x03;
if (!mode) {
bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in BIOS");
return;
}
ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode);
ppag_cmd.ppag_enable_flags = cpu_to_le32(mode);
skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT);
skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd),
&ppag_cmd, HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb));
return;
}
bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode);
bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode);
kfree_skb(skb);
}
@ -2600,6 +2598,24 @@ static void btintel_set_dsm_reset_method(struct hci_dev *hdev,
data->acpi_reset_method = btintel_acpi_reset_method;
}
#define BTINTEL_ISODATA_HANDLE_BASE 0x900
static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
{
/*
* Distinguish ISO data packets from ACL data packets
* based on their connection handle value range.
*/
if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE)
return HCI_ISODATA_PKT;
}
return hci_skb_pkt_type(skb);
}
int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@ -2635,7 +2651,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
return err;
/* If image type returned is BTINTEL_IMG_IML, then controller supports
* intermediae loader image
* intermediate loader image
*/
if (ver->img_type == BTINTEL_IMG_IML) {
err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param);
@ -2703,6 +2719,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x19:
case 0x1b:
case 0x1c:
case 0x1d:
case 0x1e:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
@ -2713,7 +2730,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
}
EXPORT_SYMBOL_GPL(btintel_set_msft_opcode);
static void btintel_print_fseq_info(struct hci_dev *hdev)
void btintel_print_fseq_info(struct hci_dev *hdev)
{
struct sk_buff *skb;
u8 *p;
@ -2825,6 +2842,7 @@ static void btintel_print_fseq_info(struct hci_dev *hdev)
kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(btintel_print_fseq_info);
static int btintel_setup_combined(struct hci_dev *hdev)
{
@ -3039,11 +3057,15 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_bootloader_setup(hdev, &ver);
btintel_register_devcoredump_support(hdev);
break;
case 0x18: /* GfP2 */
case 0x1c: /* GaP */
/* Re-classify packet type for controllers with LE audio */
hdev->classify_pkt_type = btintel_classify_pkt_type;
fallthrough;
case 0x17:
case 0x18:
case 0x19:
case 0x1b:
case 0x1c:
case 0x1d:
case 0x1e:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);


@ -42,7 +42,8 @@ enum {
INTEL_TLV_SBE_TYPE,
INTEL_TLV_OTP_BDADDR,
INTEL_TLV_UNLOCKED_STATE,
INTEL_TLV_GIT_SHA1
INTEL_TLV_GIT_SHA1,
INTEL_TLV_FW_ID = 0x50
};
struct intel_tlv {
@ -57,6 +58,8 @@ struct intel_tlv {
#define BTINTEL_IMG_IML 0x02 /* Intermediate image */
#define BTINTEL_IMG_OP 0x03 /* Operational image */
#define BTINTEL_FWID_MAXLEN 64
struct intel_version_tlv {
u32 cnvi_top;
u32 cnvr_top;
@ -77,6 +80,7 @@ struct intel_version_tlv {
u8 limited_cce;
u8 sbe_type;
u32 git_sha1;
u8 fw_id[BTINTEL_FWID_MAXLEN];
bdaddr_t otp_bd_addr;
};
@ -244,6 +248,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver);
int btintel_shutdown_combined(struct hci_dev *hdev);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_print_fseq_info(struct hci_dev *hdev);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@ -373,4 +378,8 @@ static inline int btintel_shutdown_combined(struct hci_dev *hdev)
static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
}
static inline void btintel_print_fseq_info(struct hci_dev *hdev)
{
}
#endif


@ -797,7 +797,6 @@ static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data,
kfree(txq->bufs);
return -ENOMEM;
}
memset(txq->buf_v_addr, 0, txq->count * BTINTEL_PCIE_BUFFER_SIZE);
/* Setup the allocated DMA buffer to bufs. Each data_buf should
* have virtual address and physical address
@ -842,7 +841,6 @@ static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data,
kfree(rxq->bufs);
return -ENOMEM;
}
memset(rxq->buf_v_addr, 0, rxq->count * BTINTEL_PCIE_BUFFER_SIZE);
/* Setup the allocated DMA buffer to bufs. Each data_buf should
* have virtual address and physical address
@ -1197,9 +1195,11 @@ static int btintel_pcie_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
INTEL_HW_VARIANT(ver_tlv.cnvi_bt));
err = -EINVAL;
goto exit_error;
break;
}
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@ -1327,6 +1327,12 @@ static void btintel_pcie_remove(struct pci_dev *pdev)
data = pci_get_drvdata(pdev);
btintel_pcie_reset_bt(data);
for (int i = 0; i < data->alloc_vecs; i++) {
struct msix_entry *msix_entry;
msix_entry = &data->msix_entries[i];
free_irq(msix_entry->vector, msix_entry);
}
pci_free_irq_vectors(pdev);

File diff suppressed because it is too large.


@ -28,6 +28,21 @@
#define MTK_COREDUMP_END_LEN (sizeof(MTK_COREDUMP_END))
#define MTK_COREDUMP_NUM 255
/* UHW CR mapping */
#define MTK_BT_MISC 0x70002510
#define MTK_BT_SUBSYS_RST 0x70002610
#define MTK_UDMA_INT_STA_BT 0x74000024
#define MTK_UDMA_INT_STA_BT1 0x74000308
#define MTK_BT_WDT_STATUS 0x740003A0
#define MTK_EP_RST_OPT 0x74011890
#define MTK_EP_RST_IN_OUT_OPT 0x00010001
#define MTK_BT_RST_DONE 0x00000100
#define MTK_BT_RESET_REG_CONNV3 0x70028610
#define MTK_BT_READ_DEV_ID 0x70010200
/* MediaTek ISO Interface */
#define MTK_ISO_IFNUM 2
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
BTMTK_WMT_TEST = 0x2,
@ -126,6 +141,14 @@ struct btmtk_hci_wmt_params {
u32 *status;
};
enum {
BTMTK_TX_WAIT_VND_EVT,
BTMTK_FIRMWARE_LOADED,
BTMTK_HW_RESET_ACTIVE,
BTMTK_ISOPKT_OVER_INTR,
BTMTK_ISOPKT_RUNNING,
};
typedef int (*btmtk_reset_sync_func_t)(struct hci_dev *, void *);
struct btmtk_coredump_info {
@ -135,10 +158,25 @@ struct btmtk_coredump_info {
int state;
};
struct btmediatek_data {
struct btmtk_data {
const char *drv_name;
unsigned long flags;
u32 dev_id;
btmtk_reset_sync_func_t reset_sync;
struct btmtk_coredump_info cd_info;
struct usb_device *udev;
struct usb_interface *intf;
struct usb_anchor *ctrl_anchor;
struct sk_buff *evt_skb;
struct usb_endpoint_descriptor *isopkt_tx_ep;
struct usb_endpoint_descriptor *isopkt_rx_ep;
struct usb_interface *isopkt_intf;
struct usb_anchor isopkt_anchor;
struct sk_buff *isopkt_skb;
/* spinlock for ISO data transmission */
spinlock_t isorxlock;
};
typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *,
@ -160,6 +198,24 @@ int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
u32 fw_version);
int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb);
void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver,
u32 fw_flavor);
int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id);
int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb);
struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb,
usb_complete_t tx_complete);
int btmtk_usb_resume(struct hci_dev *hdev);
int btmtk_usb_suspend(struct hci_dev *hdev);
int btmtk_usb_setup(struct hci_dev *hdev);
int btmtk_usb_shutdown(struct hci_dev *hdev);
#else
static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
@ -168,29 +224,73 @@ static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
return -EOPNOTSUPP;
}
static int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev,
const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
static int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
static void btmtk_reset_sync(struct hci_dev *hdev)
static inline void btmtk_reset_sync(struct hci_dev *hdev)
{
}
static int btmtk_register_coredump(struct hci_dev *hdev, const char *name,
u32 fw_version)
static inline int btmtk_register_coredump(struct hci_dev *hdev,
const char *name, u32 fw_version)
{
return -EOPNOTSUPP;
}
static int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
static inline int btmtk_process_coredump(struct hci_dev *hdev,
struct sk_buff *skb)
{
return -EOPNOTSUPP;
}
static inline void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id,
u32 fw_ver, u32 fw_flavor)
{
}
static inline int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id)
{
return -EOPNOTSUPP;
}
static inline int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
{
return -EOPNOTSUPP;
}
static inline struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev,
struct sk_buff *skb,
usb_complete_t tx_complete)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline int btmtk_usb_resume(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
static inline int btmtk_usb_suspend(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
static inline int btmtk_usb_setup(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
static inline int btmtk_usb_shutdown(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}


@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
@ -1117,6 +1118,9 @@ static int btmtksdio_setup(struct hci_dev *hdev)
return err;
}
btmtk_fw_get_filename(fwname, sizeof(fwname), dev_id,
fw_version, 0);
snprintf(fwname, sizeof(fwname),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);


@ -22,6 +22,7 @@
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>


@ -29,27 +29,38 @@
#define BTNXPUART_CHECK_BOOT_SIGNATURE 3
#define BTNXPUART_SERDEV_OPEN 4
#define BTNXPUART_IR_IN_PROGRESS 5
#define BTNXPUART_FW_DOWNLOAD_ABORT 6
/* NXP HW err codes */
#define BTNXPUART_IR_HW_ERR 0xb0
#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin"
#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin"
#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin"
#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin"
#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se"
#define FIRMWARE_IW624 "nxp/uartiw624_bt.bin"
#define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se"
#define FIRMWARE_AW693 "nxp/uartaw693_bt.bin"
#define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se"
#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin"
#define FIRMWARE_W8987 "uart8987_bt_v0.bin"
#define FIRMWARE_W8987_OLD "uartuart8987_bt.bin"
#define FIRMWARE_W8997 "uart8997_bt_v4.bin"
#define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin"
#define FIRMWARE_W9098 "uart9098_bt_v1.bin"
#define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin"
#define FIRMWARE_IW416 "uartiw416_bt_v0.bin"
#define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se"
#define FIRMWARE_IW615 "uartspi_iw610_v0.bin"
#define FIRMWARE_SECURE_IW615 "uartspi_iw610_v0.bin.se"
#define FIRMWARE_IW624 "uartiw624_bt.bin"
#define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se"
#define FIRMWARE_AW693 "uartaw693_bt.bin"
#define FIRMWARE_SECURE_AW693 "uartaw693_bt.bin.se"
#define FIRMWARE_AW693_A1 "uartaw693_bt_v1.bin"
#define FIRMWARE_SECURE_AW693_A1 "uartaw693_bt_v1.bin.se"
#define FIRMWARE_HELPER "helper_uart_3000000.bin"
#define CHIP_ID_W9098 0x5c03
#define CHIP_ID_IW416 0x7201
#define CHIP_ID_IW612 0x7601
#define CHIP_ID_IW624a 0x8000
#define CHIP_ID_IW624c 0x8001
#define CHIP_ID_AW693 0x8200
#define CHIP_ID_AW693a0 0x8200
#define CHIP_ID_AW693a1 0x8201
#define CHIP_ID_IW615a0 0x8800
#define CHIP_ID_IW615a1 0x8801
#define FW_SECURE_MASK 0xc0
#define FW_OPEN 0x00
@ -144,6 +155,7 @@ struct psmode_cmd_payload {
struct btnxpuart_data {
const char *helper_fw_name;
const char *fw_name;
const char *fw_name_old;
};
struct btnxpuart_dev {
@ -159,6 +171,7 @@ struct btnxpuart_dev {
u8 fw_name[MAX_FW_FILE_NAME_LEN];
u32 fw_dnld_v1_offset;
u32 fw_v1_sent_bytes;
u32 fw_dnld_v3_offset;
u32 fw_v3_offset_correction;
u32 fw_v1_expected_len;
u32 boot_reg_offset;
@ -187,6 +200,11 @@ struct btnxpuart_dev {
#define NXP_NAK_V3 0x7b
#define NXP_CRC_ERROR_V3 0x7c
/* Bootloader signature error codes */
#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */
#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */
#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */
#define HDR_LEN 16
#define NXP_RECV_CHIP_VER_V1 \
@ -277,6 +295,17 @@ struct nxp_bootloader_cmd {
__be32 crc;
} __packed;
struct nxp_v3_rx_timeout_nak {
u8 nak;
__le32 offset;
u8 crc;
} __packed;
union nxp_v3_rx_timeout_nak_u {
struct nxp_v3_rx_timeout_nak pkt;
u8 buf[6];
};
static u8 crc8_table[CRC8_TABLE_SIZE];
/* Default configurations */
@ -328,7 +357,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
struct ps_data *psdata = &nxpdev->psdata;
flush_work(&psdata->work);
del_timer_sync(&psdata->ps_timer);
timer_shutdown_sync(&psdata->ps_timer);
}
static void ps_control(struct hci_dev *hdev, u8 ps_state)
@ -550,6 +579,7 @@ static int nxp_download_firmware(struct hci_dev *hdev)
nxpdev->fw_v1_sent_bytes = 0;
nxpdev->fw_v1_expected_len = HDR_LEN;
nxpdev->boot_reg_offset = 0;
nxpdev->fw_dnld_v3_offset = 0;
nxpdev->fw_v3_offset_correction = 0;
nxpdev->baudrate_changed = false;
nxpdev->timeout_changed = false;
@ -564,14 +594,23 @@ static int nxp_download_firmware(struct hci_dev *hdev)
!test_bit(BTNXPUART_FW_DOWNLOADING,
&nxpdev->tx_state),
msecs_to_jiffies(60000));
release_firmware(nxpdev->fw);
memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
if (err == 0) {
bt_dev_err(hdev, "FW Download Timeout.");
bt_dev_err(hdev, "FW Download Timeout. offset: %d",
nxpdev->fw_dnld_v1_offset ?
nxpdev->fw_dnld_v1_offset :
nxpdev->fw_dnld_v3_offset);
return -ETIMEDOUT;
}
if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) {
bt_dev_err(hdev, "FW Download Aborted");
return -EINTR;
}
serdev_device_set_flow_control(nxpdev->serdev, true);
release_firmware(nxpdev->fw);
memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name));
/* Allow the downloaded FW to initialize */
msleep(1200);
@ -682,19 +721,30 @@ static bool process_boot_signature(struct btnxpuart_dev *nxpdev)
return is_fw_downloading(nxpdev);
}
static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name)
static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name,
const char *fw_name_old)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
const char *fw_name_dt;
int err = 0;
if (!fw_name)
return -ENOENT;
if (!strlen(nxpdev->fw_name)) {
snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name);
if (strcmp(fw_name, FIRMWARE_HELPER) &&
!device_property_read_string(&nxpdev->serdev->dev,
"firmware-name",
&fw_name_dt))
fw_name = fw_name_dt;
snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name);
err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
if (err < 0 && fw_name_old) {
snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name_old);
err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
}
bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name);
err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev);
bt_dev_info(hdev, "Request Firmware: %s", nxpdev->fw_name);
if (err < 0) {
bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
@ -773,15 +823,15 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb)
}
if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) {
if (nxp_request_firmware(hdev, nxp_data->fw_name))
if (nxp_request_firmware(hdev, nxp_data->fw_name, nxp_data->fw_name_old))
goto free_skb;
} else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
if (nxp_request_firmware(hdev, nxp_data->helper_fw_name))
if (nxp_request_firmware(hdev, nxp_data->helper_fw_name, NULL))
goto free_skb;
}
if (!len) {
bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
bt_dev_info(hdev, "FW Download Complete: %zu bytes",
nxpdev->fw->size);
if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) {
nxpdev->helper_downloaded = true;
@ -863,7 +913,7 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
case CHIP_ID_AW693:
case CHIP_ID_AW693a0:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
fw_name = FIRMWARE_AW693;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
@ -871,6 +921,23 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
case CHIP_ID_AW693a1:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
fw_name = FIRMWARE_AW693_A1;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
fw_name = FIRMWARE_SECURE_AW693_A1;
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
case CHIP_ID_IW615a0:
case CHIP_ID_IW615a1:
if ((loader_ver & FW_SECURE_MASK) == FW_OPEN)
fw_name = FIRMWARE_IW615;
else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL)
fw_name = FIRMWARE_SECURE_IW615;
else
bt_dev_err(hdev, "Illegal loader version %02x", loader_ver);
break;
default:
bt_dev_err(hdev, "Unknown chip signature %04x", chipid);
break;
@ -878,10 +945,25 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
return fw_name;
}
static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid,
u8 loader_ver)
{
char *fw_name_old = NULL;
switch (chipid) {
case CHIP_ID_W9098:
fw_name_old = FIRMWARE_W9098_OLD;
break;
}
return fw_name_old;
}
static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req));
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
const char *fw_name;
const char *fw_name_old;
u16 chip_id;
u8 loader_ver;
@ -890,8 +972,10 @@ static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb)
chip_id = le16_to_cpu(req->chip_id);
loader_ver = req->loader_ver;
if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev,
chip_id, loader_ver)))
bt_dev_info(hdev, "ChipID: %04x, Version: %d", chip_id, loader_ver);
fw_name = nxp_get_fw_name_from_chipid(hdev, chip_id, loader_ver);
fw_name_old = nxp_get_old_fw_name_from_chipid(hdev, chip_id, loader_ver);
if (!nxp_request_firmware(hdev, fw_name, fw_name_old))
nxp_send_ack(NXP_ACK_V3, hdev);
free_skb:
@ -899,6 +983,32 @@ free_skb:
return 0;
}
static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
__u32 offset = __le32_to_cpu(req->offset);
__u16 err = __le16_to_cpu(req->error);
union nxp_v3_rx_timeout_nak_u nak_tx_buf;
switch (err) {
case NXP_ACK_RX_TIMEOUT:
case NXP_HDR_RX_TIMEOUT:
case NXP_DATA_RX_TIMEOUT:
nak_tx_buf.pkt.nak = NXP_NAK_V3;
nak_tx_buf.pkt.offset = __cpu_to_le32(offset);
nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf,
sizeof(nak_tx_buf) - 1, 0xff);
serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf,
sizeof(nak_tx_buf));
break;
default:
bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err);
break;
}
}
static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@ -913,7 +1023,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
if (!req || !nxpdev->fw)
goto free_skb;
nxp_send_ack(NXP_ACK_V3, hdev);
if (!req->error) {
nxp_send_ack(NXP_ACK_V3, hdev);
} else {
nxp_handle_fw_download_error(hdev, req);
goto free_skb;
}
len = __le16_to_cpu(req->len);
@ -934,15 +1049,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
}
if (req->len == 0) {
bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes",
bt_dev_info(hdev, "FW Download Complete: %zu bytes",
nxpdev->fw->size);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
goto free_skb;
}
if (req->error)
bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip",
req->error);
offset = __le32_to_cpu(req->offset);
if (offset < nxpdev->fw_v3_offset_correction) {
@ -954,8 +1066,9 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb)
goto free_skb;
}
serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset -
nxpdev->fw_v3_offset_correction, len);
nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction;
serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data +
nxpdev->fw_dnld_v3_offset, len);
free_skb:
kfree_skb(skb);
@ -1037,7 +1150,7 @@ static int nxp_setup(struct hci_dev *hdev)
if (err < 0)
return err;
} else {
bt_dev_dbg(hdev, "FW already running.");
bt_dev_info(hdev, "FW already running.");
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
}
@ -1253,8 +1366,10 @@ static int btnxpuart_close(struct hci_dev *hdev)
ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
skb_queue_purge(&nxpdev->txq);
kfree_skb(nxpdev->rx_skb);
nxpdev->rx_skb = NULL;
if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
kfree_skb(nxpdev->rx_skb);
nxpdev->rx_skb = NULL;
}
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
@ -1269,8 +1384,10 @@ static int btnxpuart_flush(struct hci_dev *hdev)
cancel_work_sync(&nxpdev->tx_work);
kfree_skb(nxpdev->rx_skb);
nxpdev->rx_skb = NULL;
if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
kfree_skb(nxpdev->rx_skb);
nxpdev->rx_skb = NULL;
}
return 0;
}
@ -1385,28 +1502,56 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
struct hci_dev *hdev = nxpdev->hdev;
/* Restore FW baudrate to fw_init_baudrate if changed.
* This will ensure FW baudrate is in sync with
* driver baudrate in case this driver is re-inserted.
*/
if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
if (is_fw_downloading(nxpdev)) {
set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state);
clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state);
wake_up_interruptible(&nxpdev->check_boot_sign_wait_q);
wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q);
} else {
/* Restore FW baudrate to fw_init_baudrate if changed.
* This will ensure FW baudrate is in sync with
* driver baudrate in case this driver is re-inserted.
*/
if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) {
nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
nxp_set_baudrate_cmd(hdev, NULL);
}
ps_cancel_timer(nxpdev);
}
ps_cancel_timer(nxpdev);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
#ifdef CONFIG_PM_SLEEP
static int nxp_serdev_suspend(struct device *dev)
{
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
ps_control(psdata->hdev, PS_STATE_SLEEP);
return 0;
}
static int nxp_serdev_resume(struct device *dev)
{
struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev);
struct ps_data *psdata = &nxpdev->psdata;
ps_control(psdata->hdev, PS_STATE_AWAKE);
return 0;
}
#endif
static struct btnxpuart_data w8987_data __maybe_unused = {
.helper_fw_name = NULL,
.fw_name = FIRMWARE_W8987,
.fw_name_old = FIRMWARE_W8987_OLD,
};
static struct btnxpuart_data w8997_data __maybe_unused = {
.helper_fw_name = FIRMWARE_HELPER,
.fw_name = FIRMWARE_W8997,
.fw_name_old = FIRMWARE_W8997_OLD,
};
static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
@ -1416,12 +1561,17 @@ static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
};
MODULE_DEVICE_TABLE(of, nxpuart_of_match_table);
static const struct dev_pm_ops nxp_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(nxp_serdev_suspend, nxp_serdev_resume)
};
static struct serdev_device_driver nxp_serdev_driver = {
.probe = nxp_serdev_probe,
.remove = nxp_serdev_remove,
.driver = {
.name = "btnxpuart",
.of_match_table = of_match_ptr(nxpuart_of_match_table),
.pm = &nxp_pm_ops,
},
};


@ -811,7 +811,7 @@ static int rtl_download_firmware(struct hci_dev *hdev,
struct sk_buff *skb;
struct hci_rp_read_local_version *rp;
dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
dl_cmd = kmalloc(sizeof(*dl_cmd), GFP_KERNEL);
if (!dl_cmd)
return -ENOMEM;


@ -479,6 +479,7 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0037), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x0039), .driver_info = BTUSB_INTEL_COMBINED },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED |
BTUSB_INTEL_NO_WBS_SUPPORT |
@ -555,6 +556,10 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852BT/8852BE-VT Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
@ -891,6 +896,9 @@ struct btusb_data {
int (*setup_on_usb)(struct hci_dev *hdev);
int (*suspend)(struct hci_dev *hdev);
int (*resume)(struct hci_dev *hdev);
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
unsigned cmd_timeout_cnt;
@ -2638,410 +2646,48 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
return hci_recv_frame(hdev, skb);
}
/* UHW CR mapping */
#define MTK_BT_MISC 0x70002510
#define MTK_BT_SUBSYS_RST 0x70002610
#define MTK_UDMA_INT_STA_BT 0x74000024
#define MTK_UDMA_INT_STA_BT1 0x74000308
#define MTK_BT_WDT_STATUS 0x740003A0
#define MTK_EP_RST_OPT 0x74011890
#define MTK_EP_RST_IN_OUT_OPT 0x00010001
#define MTK_BT_RST_DONE 0x00000100
#define MTK_BT_RESET_REG_CONNV3 0x70028610
#define MTK_BT_READ_DEV_ID 0x70010200
static void btusb_mtk_wmt_recv(struct urb *urb)
static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
{
struct hci_dev *hdev = urb->context;
struct btusb_data *data = hci_get_drvdata(hdev);
struct sk_buff *skb;
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
int err;
if (urb->status == 0 && urb->actual_length > 0) {
hdev->stat.byte_rx += urb->actual_length;
/* WMT event shouldn't be fragmented and the size should be
* less than HCI_WMT_MAX_EVENT_SIZE.
*/
skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
if (!skb) {
hdev->stat.err_rx++;
kfree(urb->setup_packet);
return;
}
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
skb_put_data(skb, urb->transfer_buffer, urb->actual_length);
/* When someone waits for the WMT event, the skb is being cloned
* and being processed the events from there then.
*/
if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
data->evt_skb = skb_clone(skb, GFP_ATOMIC);
if (!data->evt_skb) {
kfree_skb(skb);
kfree(urb->setup_packet);
return;
}
}
err = hci_recv_frame(hdev, skb);
if (err < 0) {
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
kfree(urb->setup_packet);
return;
}
if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
&data->flags)) {
/* Barrier to sync with other CPUs */
smp_mb__after_atomic();
wake_up_bit(&data->flags,
BTUSB_TX_WAIT_VND_EVT);
}
kfree(urb->setup_packet);
return;
} else if (urb->status == -ENOENT) {
/* Avoid suspend failed when usb_kill_urb */
err = usb_driver_claim_interface(&btusb_driver,
btmtk_data->isopkt_intf, data);
if (err < 0) {
btmtk_data->isopkt_intf = NULL;
bt_dev_err(data->hdev, "Failed to claim iso interface");
return;
}
usb_mark_last_busy(data->udev);
/* The URB complete handler is still called with urb->actual_length = 0
* when the event is not available, so we should keep re-submitting
* URB until WMT event returns, Also, It's necessary to wait some time
* between the two consecutive control URBs to relax the target device
* to generate the event. Otherwise, the WMT event cannot return from
* the device successfully.
*/
udelay(500);
usb_anchor_urb(urb, &data->ctrl_anchor);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
kfree(urb->setup_packet);
/* -EPERM: urb is being killed;
* -ENODEV: device got disconnected
*/
if (err != -EPERM && err != -ENODEV)
bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
urb, -err);
usb_unanchor_urb(urb);
}
set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
static void btusb_mtk_release_iso_intf(struct btusb_data *data)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct usb_ctrlrequest *dr;
unsigned char *buf;
int err, size = 64;
unsigned int pipe;
struct urb *urb;
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
if (btmtk_data->isopkt_intf) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
dr = kmalloc(sizeof(*dr), GFP_KERNEL);
if (!dr) {
usb_free_urb(urb);
return -ENOMEM;
dev_kfree_skb_irq(btmtk_data->isopkt_skb);
btmtk_data->isopkt_skb = NULL;
usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
usb_driver_release_interface(&btusb_driver,
btmtk_data->isopkt_intf);
}
dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
dr->bRequest = 1;
dr->wIndex = cpu_to_le16(0);
dr->wValue = cpu_to_le16(48);
dr->wLength = cpu_to_le16(size);
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
kfree(dr);
usb_free_urb(urb);
return -ENOMEM;
}
pipe = usb_rcvctrlpipe(data->udev, 0);
usb_fill_control_urb(urb, data->udev, pipe, (void *)dr,
buf, size, btusb_mtk_wmt_recv, hdev);
urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &data->ctrl_anchor);
err = usb_submit_urb(urb, GFP_KERNEL);
if (err < 0) {
if (err != -EPERM && err != -ENODEV)
bt_dev_err(hdev, "urb %p submission failed (%d)",
urb, -err);
usb_unanchor_urb(urb);
}
usb_free_urb(urb);
return err;
}
static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_hci_wmt_params *wmt_params)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
u32 hlen, status = BTMTK_WMT_INVALID;
struct btmtk_hci_wmt_evt *wmt_evt;
struct btmtk_hci_wmt_cmd *wc;
struct btmtk_wmt_hdr *hdr;
int err;
/* Send the WMT command and wait until the WMT event returns */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
return -EINVAL;
wc = kzalloc(hlen, GFP_KERNEL);
if (!wc)
return -ENOMEM;
hdr = &wc->hdr;
hdr->dir = 1;
hdr->op = wmt_params->op;
hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
hdr->flag = wmt_params->flag;
memcpy(wc->data, wmt_params->data, wmt_params->dlen);
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
/* WMT cmd/event doesn't follow up the generic HCI cmd/event handling,
* it needs constantly polling control pipe until the host received the
* WMT event, thus, we should require to specifically acquire PM counter
* on the USB to prevent the interface from entering auto suspended
* while WMT cmd/event in progress.
*/
err = usb_autopm_get_interface(data->intf);
if (err < 0)
goto err_free_wc;
err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
usb_autopm_put_interface(data->intf);
goto err_free_wc;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
usb_autopm_put_interface(data->intf);
if (err < 0)
goto err_free_wc;
/* The vendor specific WMT commands are all answered by a vendor
* specific event and will have the Command Status or Command
* Complete as with usual HCI command flow control.
*
* After sending the command, wait for BTUSB_TX_WAIT_VND_EVT
* state to be cleared. The driver specific event receive routine
* will clear that state and with that indicate completion of the
* WMT command.
*/
err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT,
TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
goto err_free_wc;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
err = -ETIMEDOUT;
goto err_free_wc;
}
if (data->evt_skb == NULL)
goto err_free_wc;
/* Parse and handle the return WMT event */
wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
if (wmt_evt->whdr.op != hdr->op) {
bt_dev_err(hdev, "Wrong op received %d expected %d",
wmt_evt->whdr.op, hdr->op);
err = -EIO;
goto err_free_skb;
}
switch (wmt_evt->whdr.op) {
case BTMTK_WMT_SEMAPHORE:
if (wmt_evt->whdr.flag == 2)
status = BTMTK_WMT_PATCH_UNDONE;
else
status = BTMTK_WMT_PATCH_DONE;
break;
case BTMTK_WMT_FUNC_CTRL:
wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
status = BTMTK_WMT_ON_DONE;
else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
status = BTMTK_WMT_ON_PROGRESS;
else
status = BTMTK_WMT_ON_UNDONE;
break;
case BTMTK_WMT_PATCH_DWNLD:
if (wmt_evt->whdr.flag == 2)
status = BTMTK_WMT_PATCH_DONE;
else if (wmt_evt->whdr.flag == 1)
status = BTMTK_WMT_PATCH_PROGRESS;
else
status = BTMTK_WMT_PATCH_UNDONE;
break;
}
if (wmt_params->status)
*wmt_params->status = status;
err_free_skb:
kfree_skb(data->evt_skb);
data->evt_skb = NULL;
err_free_wc:
kfree(wc);
return err;
}
static int btusb_mtk_func_query(struct hci_dev *hdev)
{
struct btmtk_hci_wmt_params wmt_params;
int status, err;
u8 param = 0;
/* Query whether the function is enabled */
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 4;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = &status;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to query function status (%d)", err);
return err;
}
return status;
}
static int btusb_mtk_uhw_reg_write(struct btusb_data *data, u32 reg, u32 val)
{
struct hci_dev *hdev = data->hdev;
int pipe, err;
void *buf;
buf = kzalloc(4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
put_unaligned_le32(val, buf);
pipe = usb_sndctrlpipe(data->udev, 0);
err = usb_control_msg(data->udev, pipe, 0x02,
0x5E,
reg >> 16, reg & 0xffff,
buf, 4, USB_CTRL_SET_TIMEOUT);
if (err < 0) {
bt_dev_err(hdev, "Failed to write uhw reg(%d)", err);
goto err_free_buf;
}
err_free_buf:
kfree(buf);
return err;
}
static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val)
{
struct hci_dev *hdev = data->hdev;
int pipe, err;
void *buf;
buf = kzalloc(4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pipe = usb_rcvctrlpipe(data->udev, 0);
err = usb_control_msg(data->udev, pipe, 0x01,
0xDE,
reg >> 16, reg & 0xffff,
buf, 4, USB_CTRL_GET_TIMEOUT);
if (err < 0) {
bt_dev_err(hdev, "Failed to read uhw reg(%d)", err);
goto err_free_buf;
}
*val = get_unaligned_le32(buf);
bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val);
err_free_buf:
kfree(buf);
return err;
}
static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val)
{
int pipe, err, size = sizeof(u32);
void *buf;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pipe = usb_rcvctrlpipe(data->udev, 0);
err = usb_control_msg(data->udev, pipe, 0x63,
USB_TYPE_VENDOR | USB_DIR_IN,
reg >> 16, reg & 0xffff,
buf, size, USB_CTRL_GET_TIMEOUT);
if (err < 0)
goto err_free_buf;
*val = get_unaligned_le32(buf);
err_free_buf:
kfree(buf);
return err;
}
static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id)
{
return btusb_mtk_reg_read(data, reg, id);
}
static u32 btusb_mtk_reset_done(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
u32 val = 0;
btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val);
return val & MTK_BT_RST_DONE;
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
}
static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct btmediatek_data *mediatek;
u32 val;
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
int err;
/* It's MediaTek specific bluetooth reset mechanism via USB */
if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
if (test_and_set_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags)) {
bt_dev_err(hdev, "last reset failed? Not resetting again");
return -EBUSY;
}
@ -3050,302 +2696,68 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
if (err < 0)
return err;
if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
btusb_mtk_release_iso_intf(data);
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
mediatek = hci_get_priv(hdev);
if (mediatek->dev_id == 0x7925) {
btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
val |= (1 << 5);
btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
val &= 0xFFFF00FF;
val |= (1 << 13);
btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001);
btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val);
val |= (1 << 0);
btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val);
btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
msleep(100);
} else {
/* It's Device EndPoint Reset Option Register */
bt_dev_dbg(hdev, "Initiating reset mechanism via uhw");
btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val);
/* Reset the bluetooth chip via USB interface. */
btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1);
btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
/* MT7921 need to delay 20ms between toggle reset bit */
msleep(20);
btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0);
btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val);
}
err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val,
val & MTK_BT_RST_DONE, 20000, 1000000);
if (err < 0)
bt_dev_err(hdev, "Reset timeout");
btusb_mtk_id_get(data, 0x70010200, &val);
if (!val)
bt_dev_err(hdev, "Can't get device id, subsys reset fail.");
err = btmtk_usb_subsys_reset(hdev, btmtk_data->dev_id);
usb_queue_reset_device(data->intf);
clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags);
clear_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags);
return err;
}
static int btusb_send_frame_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
struct urb *urb;
BT_DBG("%s", hdev->name);
if (hci_skb_pkt_type(skb) == HCI_ISODATA_PKT) {
urb = alloc_mtk_intr_urb(hdev, skb, btusb_tx_complete);
if (IS_ERR(urb))
return PTR_ERR(urb);
return submit_or_queue_tx_urb(hdev, urb);
} else {
return btusb_send_frame(hdev, skb);
}
}
static int btusb_mtk_setup(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
ktime_t calltime, delta, rettime;
struct btmtk_tci_sleep tci_sleep;
unsigned long long duration;
struct sk_buff *skb;
const char *fwname;
int err, status;
u32 dev_id = 0;
char fw_bin_name[64];
u32 fw_version = 0, fw_flavor = 0;
u8 param;
struct btmediatek_data *mediatek;
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
calltime = ktime_get();
/* MediaTek WMT vendor cmd requiring below USB resources to
* complete the handshake.
*/
btmtk_data->drv_name = btusb_driver.name;
btmtk_data->intf = data->intf;
btmtk_data->udev = data->udev;
btmtk_data->ctrl_anchor = &data->ctrl_anchor;
btmtk_data->reset_sync = btusb_mtk_reset;
err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
return err;
}
/* Claim ISO data interface and endpoint */
btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM);
if (btmtk_data->isopkt_intf)
btusb_mtk_claim_iso_intf(data);
if (!dev_id || dev_id != 0x7663) {
err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
return err;
}
err = btusb_mtk_id_get(data, 0x80021004, &fw_version);
if (err < 0) {
bt_dev_err(hdev, "Failed to get fw version (%d)", err);
return err;
}
err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor);
if (err < 0) {
bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
return err;
}
fw_flavor = (fw_flavor & 0x00000080) >> 7;
}
mediatek = hci_get_priv(hdev);
mediatek->dev_id = dev_id;
mediatek->reset_sync = btusb_mtk_reset;
err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version);
if (err < 0)
bt_dev_err(hdev, "Failed to register coredump (%d)", err);
switch (dev_id) {
case 0x7663:
fwname = FIRMWARE_MT7663;
break;
case 0x7668:
fwname = FIRMWARE_MT7668;
break;
case 0x7922:
case 0x7961:
case 0x7925:
if (dev_id == 0x7925)
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1);
else if (dev_id == 0x7961 && fw_flavor)
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);
else
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);
err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
btusb_mtk_hci_wmt_sync);
if (err < 0) {
bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
return err;
}
/* It's Device EndPoint Reset Option Register */
btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
/* Enable Bluetooth protocol */
param = 1;
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
return err;
}
hci_set_msft_opcode(hdev, 0xFD30);
hci_set_aosp_capable(hdev);
goto done;
default:
bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
dev_id);
return -ENODEV;
}
/* Query whether the firmware is already downloaded */
wmt_params.op = BTMTK_WMT_SEMAPHORE;
wmt_params.flag = 1;
wmt_params.dlen = 0;
wmt_params.data = NULL;
wmt_params.status = &status;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
return err;
}
if (status == BTMTK_WMT_PATCH_DONE) {
bt_dev_info(hdev, "firmware already downloaded");
goto ignore_setup_fw;
}
/* Set up the firmware that the device definitely requires */
err = btmtk_setup_firmware(hdev, fwname,
btusb_mtk_hci_wmt_sync);
if (err < 0)
return err;
ignore_setup_fw:
err = readx_poll_timeout(btusb_mtk_func_query, hdev, status,
status < 0 || status != BTMTK_WMT_ON_PROGRESS,
2000, 5000000);
/* -ETIMEDOUT happens */
if (err < 0)
return err;
/* The other errors happen in btusb_mtk_func_query */
if (status < 0)
return status;
if (status == BTMTK_WMT_ON_DONE) {
bt_dev_info(hdev, "function already on");
goto ignore_func_on;
}
/* Enable Bluetooth protocol */
param = 1;
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
return err;
}
ignore_func_on:
/* Apply the low power environment setup */
tci_sleep.mode = 0x5;
tci_sleep.duration = cpu_to_le16(0x640);
tci_sleep.host_duration = cpu_to_le16(0x640);
tci_sleep.host_wakeup_pin = 0;
tci_sleep.time_compensation = 0;
skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
return err;
}
kfree_skb(skb);
done:
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long)ktime_to_ns(delta) >> 10;
bt_dev_info(hdev, "Device setup in %llu usecs", duration);
return 0;
return btmtk_usb_setup(hdev);
}
static int btusb_mtk_shutdown(struct hci_dev *hdev)
{
struct btmtk_hci_wmt_params wmt_params;
u8 param = 0;
int err;
/* Disable the device */
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
return err;
}
return 0;
}
static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
switch (handle) {
case 0xfc6f: /* Firmware dump from device */
/* When the firmware hangs, the device can no longer
 * suspend, so disable auto-suspend.
 */
usb_disable_autosuspend(data->udev);
if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags))
btusb_mtk_release_iso_intf(data);
/* We need to forward the diagnostic packet to the userspace daemon
 * for backward compatibility, so we have to clone an extra copy of
 * the packet for the in-kernel coredump support.
 */
if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
if (skb_cd)
btmtk_process_coredump(hdev, skb_cd);
}
fallthrough;
case 0x05ff: /* Firmware debug logging 1 */
case 0x05fe: /* Firmware debug logging 2 */
return hci_recv_diag(hdev, skb);
}
return hci_recv_frame(hdev, skb);
return btmtk_usb_shutdown(hdev);
}
#ifdef CONFIG_PM
@ -4347,7 +3759,7 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_event = btusb_recv_event_realtek;
} else if (id->driver_info & BTUSB_MEDIATEK) {
/* Allocate extra space for Mediatek device */
priv_size += sizeof(struct btmediatek_data);
priv_size += sizeof(struct btmtk_data);
}
data->recv_acl = hci_recv_frame;
@ -4451,9 +3863,12 @@ static int btusb_probe(struct usb_interface *intf,
hdev->manufacturer = 70;
hdev->cmd_timeout = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
hdev->send = btusb_send_frame_mtk;
set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
data->recv_acl = btmtk_usb_recv_acl;
data->suspend = btmtk_usb_suspend;
data->resume = btmtk_usb_resume;
}
if (id->driver_info & BTUSB_SWAVE) {
@ -4694,6 +4109,9 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
cancel_work_sync(&data->work);
if (data->suspend)
data->suspend(data->hdev);
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
@ -4797,6 +4215,9 @@ static int btusb_resume(struct usb_interface *intf)
btusb_submit_isoc_urb(hdev, GFP_NOIO);
}
if (data->resume)
data->resume(hdev);
spin_lock_irq(&data->txlock);
play_deferred(data);
clear_bit(BTUSB_SUSPENDING, &data->flags);

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe
* Bluetooth HCI driver for Broadcom 4377/4378/4387/4388 devices attached via PCIe
*
* Copyright (C) The Asahi Linux Contributors
*/
@ -26,13 +26,16 @@ enum bcm4377_chip {
BCM4377 = 0,
BCM4378,
BCM4387,
BCM4388,
};
#define BCM4377_DEVICE_ID 0x5fa0
#define BCM4378_DEVICE_ID 0x5f69
#define BCM4387_DEVICE_ID 0x5f71
#define BCM4388_DEVICE_ID 0x5f72
#define BCM4377_TIMEOUT 1000
#define BCM4377_TIMEOUT msecs_to_jiffies(1000)
#define BCM4377_BOOT_TIMEOUT msecs_to_jiffies(5000)
/*
* These devices only support DMA transactions inside a 32bit window
@ -487,6 +490,7 @@ struct bcm4377_data;
* second window in BAR0
* has_bar0_core2_window2: Set to true if this chip requires the second core's
* second window to be configured
* bar2_offset: Offset to the start of the variables in BAR2
* clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the
* vendor-specific subsystem control
* register has to be cleared
@ -510,6 +514,7 @@ struct bcm4377_hw {
u32 bar0_window1;
u32 bar0_window2;
u32 bar0_core2_window2;
u32 bar2_offset;
unsigned long has_bar0_core2_window2 : 1;
unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1;
@ -835,8 +840,8 @@ static irqreturn_t bcm4377_irq(int irq, void *data)
struct bcm4377_data *bcm4377 = data;
u32 bootstage, rti_status;
bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);
if (bootstage != bcm4377->bootstage ||
rti_status != bcm4377->rti_status) {
@ -1196,6 +1201,14 @@ static int bcm4387_send_calibration(struct bcm4377_data *bcm4377)
bcm4377->taurus_cal_size);
}
static int bcm4388_send_calibration(struct bcm4377_data *bcm4377)
{
/* BCM4388 always uses beamforming */
return __bcm4378_send_calibration(
bcm4377, bcm4377->taurus_beamforming_cal_blob,
bcm4377->taurus_beamforming_cal_size);
}
static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377,
const char *suffix)
{
@ -1819,8 +1832,8 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377)
int ret = 0;
u32 bootstage, rti_status;
bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);
bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE);
rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS);
if (bootstage != 0) {
dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n",
@ -1854,15 +1867,18 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377)
iowrite32(BCM4377_DMA_MASK,
bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE);
iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO);
iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI);
iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE);
iowrite32(lower_32_bits(fw_dma),
bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_LO);
iowrite32(upper_32_bits(fw_dma),
bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_HI);
iowrite32(fw->size,
bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_SIZE);
iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL);
dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n");
ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
BCM4377_TIMEOUT);
BCM4377_BOOT_TIMEOUT);
if (ret == 0) {
ret = -ETIMEDOUT;
goto out_dma_free;
@ -1913,16 +1929,16 @@ static int bcm4377_setup_rti(struct bcm4377_data *bcm4377)
dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n");
/* allow access to the entire IOVA space again */
iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO);
iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI);
iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_LO);
iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_HI);
iowrite32(BCM4377_DMA_MASK,
bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE);
bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_SIZE);
/* setup "Converged IPC" context */
iowrite32(lower_32_bits(bcm4377->ctx_dma),
bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO);
bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_LO);
iowrite32(upper_32_bits(bcm4377->ctx_dma),
bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI);
bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_HI);
iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);
ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
@ -2488,6 +2504,21 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = {
.send_calibration = bcm4387_send_calibration,
.send_ptb = bcm4378_send_ptb,
},
[BCM4388] = {
.id = 0x4388,
.otp_offset = 0x415c,
.bar2_offset = 0x200000,
.bar0_window1 = 0x18002000,
.bar0_window2 = 0x18109000,
.bar0_core2_window2 = 0x18106000,
.has_bar0_core2_window2 = true,
.broken_mws_transport_config = true,
.broken_le_coded = true,
.broken_le_ext_adv_report_phy = true,
.send_calibration = bcm4388_send_calibration,
.send_ptb = bcm4378_send_ptb,
},
};
#define BCM4377_DEVID_ENTRY(id) \
@ -2501,6 +2532,7 @@ static const struct pci_device_id bcm4377_devid_table[] = {
BCM4377_DEVID_ENTRY(4377),
BCM4377_DEVID_ENTRY(4378),
BCM4377_DEVID_ENTRY(4387),
BCM4377_DEVID_ENTRY(4388),
{},
};
MODULE_DEVICE_TABLE(pci, bcm4377_devid_table);
@ -2515,7 +2547,7 @@ static struct pci_driver bcm4377_pci_driver = {
module_pci_driver(bcm4377_pci_driver);
MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices");
MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387/4388 devices");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_FIRMWARE("brcm/brcmbt4377*.bin");
MODULE_FIRMWARE("brcm/brcmbt4377*.ptb");
@ -2523,3 +2555,5 @@ MODULE_FIRMWARE("brcm/brcmbt4378*.bin");
MODULE_FIRMWARE("brcm/brcmbt4378*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4387*.bin");
MODULE_FIRMWARE("brcm/brcmbt4387*.ptb");
MODULE_FIRMWARE("brcm/brcmbt4388*.bin");
MODULE_FIRMWARE("brcm/brcmbt4388*.ptb");

View File

@ -488,7 +488,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
hu = kzalloc(sizeof(*hu), GFP_KERNEL);
if (!hu) {
BT_ERR("Can't allocate control structure");
return -ENFILE;

View File

@ -116,11 +116,6 @@ struct hci_nokia_neg_evt {
#define SETUP_BAUD_RATE 921600
#define INIT_BAUD_RATE 120000
struct hci_nokia_radio_hdr {
u8 evt;
u8 dlen;
} __packed;
struct nokia_bt_dev {
struct hci_uart hu;
struct serdev_device *serdev;

View File

@ -28,6 +28,7 @@
#include <linux/of.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/pwrseq/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/mutex.h>
@ -214,6 +215,7 @@ struct qca_power {
struct regulator_bulk_data *vreg_bulk;
int num_vregs;
bool vregs_on;
struct pwrseq_desc *pwrseq;
};
struct qca_serdev {
@ -569,7 +571,7 @@ static int qca_open(struct hci_uart *hu)
if (!hci_uart_has_flow_control(hu))
return -EOPNOTSUPP;
qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
qca = kzalloc(sizeof(*qca), GFP_KERNEL);
if (!qca)
return -ENOMEM;
@ -1040,8 +1042,7 @@ static void qca_controller_memdump(struct work_struct *work)
}
if (!qca_memdump) {
qca_memdump = kzalloc(sizeof(struct qca_memdump_info),
GFP_ATOMIC);
qca_memdump = kzalloc(sizeof(*qca_memdump), GFP_ATOMIC);
if (!qca_memdump) {
mutex_unlock(&qca->hci_memdump_lock);
return;
@ -1685,6 +1686,27 @@ static bool qca_wakeup(struct hci_dev *hdev)
return wakeup;
}
static int qca_port_reopen(struct hci_uart *hu)
{
int ret;
/* Now the device is ready to communicate with the host.
 * To sync the host with the device we need to reopen the port.
* Without this, we will have RTS and CTS synchronization
* issues.
*/
serdev_device_close(hu->serdev);
ret = serdev_device_open(hu->serdev);
if (ret) {
bt_dev_err(hu->hdev, "failed to open port");
return ret;
}
hci_uart_set_flow_control(hu, false);
return 0;
}
static int qca_regulator_init(struct hci_uart *hu)
{
enum qca_btsoc_type soc_type = qca_soc_type(hu);
@ -1696,6 +1718,7 @@ static int qca_regulator_init(struct hci_uart *hu)
* off the voltage regulator.
*/
qcadev = serdev_device_get_drvdata(hu->serdev);
if (!qcadev->bt_power->vregs_on) {
serdev_device_close(hu->serdev);
ret = qca_regulator_enable(qcadev);
@ -1753,21 +1776,7 @@ static int qca_regulator_init(struct hci_uart *hu)
break;
}
/* Now the device is in ready state to communicate with host.
* To sync host with device we need to reopen port.
* Without this, we will have RTS and CTS synchronization
* issues.
*/
serdev_device_close(hu->serdev);
ret = serdev_device_open(hu->serdev);
if (ret) {
bt_dev_err(hu->hdev, "failed to open port");
return ret;
}
hci_uart_set_flow_control(hu, false);
return 0;
return qca_port_reopen(hu);
}
static int qca_power_on(struct hci_dev *hdev)
@ -1792,6 +1801,7 @@ static int qca_power_on(struct hci_dev *hdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
case QCA_QCA6390:
ret = qca_regulator_init(hu);
break;
@ -2130,6 +2140,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
unsigned long flags;
enum qca_btsoc_type soc_type = qca_soc_type(hu);
bool sw_ctrl_state;
struct qca_power *power;
/* From this point we go into power off state. But serial port is
* still open, stop queueing the IBS data and flush all the buffered
@ -2147,6 +2158,13 @@ static void qca_power_shutdown(struct hci_uart *hu)
return;
qcadev = serdev_device_get_drvdata(hu->serdev);
power = qcadev->bt_power;
if (power->pwrseq) {
pwrseq_power_off(power->pwrseq);
set_bit(QCA_BT_OFF, &qca->flags);
return;
}
switch (soc_type) {
case QCA_WCN3988:
@ -2169,6 +2187,10 @@ static void qca_power_shutdown(struct hci_uart *hu)
}
break;
case QCA_QCA6390:
pwrseq_power_off(qcadev->bt_power->pwrseq);
break;
default:
gpiod_set_value_cansleep(qcadev->bt_en, 0);
}
@ -2204,6 +2226,9 @@ static int qca_regulator_enable(struct qca_serdev *qcadev)
struct qca_power *power = qcadev->bt_power;
int ret;
if (power->pwrseq)
return pwrseq_power_on(power->pwrseq);
/* Already enabled */
if (power->vregs_on)
return 0;
@ -2272,6 +2297,13 @@ static int qca_init_regulators(struct qca_power *qca,
return 0;
}
static void qca_clk_disable_unprepare(void *data)
{
struct clk *clk = data;
clk_disable_unprepare(clk);
}
static int qca_serdev_probe(struct serdev_device *serdev)
{
struct qca_serdev *qcadev;
@ -2310,12 +2342,40 @@ static int qca_serdev_probe(struct serdev_device *serdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
case QCA_QCA6390:
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
if (!qcadev->bt_power)
return -ENOMEM;
break;
default:
break;
}
switch (qcadev->btsoc_type) {
case QCA_WCN6855:
case QCA_WCN7850:
if (!device_property_present(&serdev->dev, "enable-gpios")) {
/*
* Backward compatibility with old DT sources. If the
* node doesn't have the 'enable-gpios' property then
* let's use the power sequencer. Otherwise, let's
* drive everything ourselves.
*/
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
if (IS_ERR(qcadev->bt_power->pwrseq))
return PTR_ERR(qcadev->bt_power->pwrseq);
break;
}
fallthrough;
case QCA_WCN3988:
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
case QCA_WCN6750:
qcadev->bt_power->dev = &serdev->dev;
err = qca_init_regulators(qcadev->bt_power, data->vregs,
data->num_vregs);
@ -2353,12 +2413,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
dev_err(&serdev->dev, "failed to acquire clk\n");
return PTR_ERR(qcadev->susclk);
}
break;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("wcn3990 serdev registration failed");
return err;
}
case QCA_QCA6390:
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
if (IS_ERR(qcadev->bt_power->pwrseq))
return PTR_ERR(qcadev->bt_power->pwrseq);
break;
default:
@ -2385,12 +2446,18 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (err)
return err;
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("Rome serdev registration failed");
clk_disable_unprepare(qcadev->susclk);
err = devm_add_action_or_reset(&serdev->dev,
qca_clk_disable_unprepare,
qcadev->susclk);
if (err)
return err;
}
}
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("serdev registration failed");
return err;
}
hdev = qcadev->serdev_hu.hdev;
@ -2428,15 +2495,11 @@ static void qca_serdev_remove(struct serdev_device *serdev)
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
if (power->vregs_on) {
if (power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
break;
}
fallthrough;
break;
default:
if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
break;
}
hci_uart_unregister_device(&qcadev->serdev_hu);

View File

@ -633,7 +633,7 @@ static int vhci_open(struct inode *inode, struct file *file)
{
struct vhci_data *data;
data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;

View File

@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
source "drivers/power/reset/Kconfig"
source "drivers/power/sequencing/Kconfig"
source "drivers/power/supply/Kconfig"

View File

@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_POWER_RESET) += reset/
obj-$(CONFIG_POWER_SEQUENCING) += sequencing/
obj-$(CONFIG_POWER_SUPPLY) += supply/

View File

@ -0,0 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig POWER_SEQUENCING
tristate "Power Sequencing support"
help
Say Y here to enable the Power Sequencing subsystem.
This subsystem is designed to control power to devices that share
complex resources and/or require specific power sequences to be run
during power-up.
If unsure, say no.
if POWER_SEQUENCING
config POWER_SEQUENCING_QCOM_WCN
tristate "Qualcomm WCN family PMU driver"
default m if ARCH_QCOM
help
Say Y here to enable the power sequencing driver for Qualcomm
WCN Bluetooth/WLAN chipsets.
Typically, a package from the Qualcomm WCN family contains the BT
and WLAN modules whose power is controlled by the PMU module. As the
former two share the power-up sequence which is executed by the PMU,
this driver is needed for correct power control or else we'd risk not
respecting the required delays between enabling Bluetooth and WLAN.
endif

View File

@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_POWER_SEQUENCING) += pwrseq-core.o
pwrseq-core-y := core.o
obj-$(CONFIG_POWER_SEQUENCING_QCOM_WCN) += pwrseq-qcom-wcn.o

File diff suppressed because it is too large

View File

@ -0,0 +1,336 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2024 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/jiffies.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pwrseq/provider.h>
#include <linux/string.h>
#include <linux/types.h>
struct pwrseq_qcom_wcn_pdata {
const char *const *vregs;
size_t num_vregs;
unsigned int pwup_delay_ms;
unsigned int gpio_enable_delay_ms;
};
struct pwrseq_qcom_wcn_ctx {
struct pwrseq_device *pwrseq;
struct device_node *of_node;
const struct pwrseq_qcom_wcn_pdata *pdata;
struct regulator_bulk_data *regs;
struct gpio_desc *bt_gpio;
struct gpio_desc *wlan_gpio;
struct clk *clk;
unsigned long last_gpio_enable_jf;
};
static void pwrseq_qcom_wcn_ensure_gpio_delay(struct pwrseq_qcom_wcn_ctx *ctx)
{
unsigned long diff_jiffies;
unsigned int diff_msecs;
if (!ctx->pdata->gpio_enable_delay_ms)
return;
diff_jiffies = jiffies - ctx->last_gpio_enable_jf;
diff_msecs = jiffies_to_msecs(diff_jiffies);
if (diff_msecs < ctx->pdata->gpio_enable_delay_ms)
msleep(ctx->pdata->gpio_enable_delay_ms - diff_msecs);
}
static int pwrseq_qcom_wcn_vregs_enable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
return regulator_bulk_enable(ctx->pdata->num_vregs, ctx->regs);
}
static int pwrseq_qcom_wcn_vregs_disable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
return regulator_bulk_disable(ctx->pdata->num_vregs, ctx->regs);
}
static const struct pwrseq_unit_data pwrseq_qcom_wcn_vregs_unit_data = {
.name = "regulators-enable",
.enable = pwrseq_qcom_wcn_vregs_enable,
.disable = pwrseq_qcom_wcn_vregs_disable,
};
static int pwrseq_qcom_wcn_clk_enable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
return clk_prepare_enable(ctx->clk);
}
static int pwrseq_qcom_wcn_clk_disable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
clk_disable_unprepare(ctx->clk);
return 0;
}
static const struct pwrseq_unit_data pwrseq_qcom_wcn_clk_unit_data = {
.name = "clock-enable",
.enable = pwrseq_qcom_wcn_clk_enable,
.disable = pwrseq_qcom_wcn_clk_disable,
};
static const struct pwrseq_unit_data *pwrseq_qcom_wcn_unit_deps[] = {
&pwrseq_qcom_wcn_vregs_unit_data,
&pwrseq_qcom_wcn_clk_unit_data,
NULL
};
static int pwrseq_qcom_wcn_bt_enable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
pwrseq_qcom_wcn_ensure_gpio_delay(ctx);
gpiod_set_value_cansleep(ctx->bt_gpio, 1);
ctx->last_gpio_enable_jf = jiffies;
return 0;
}
static int pwrseq_qcom_wcn_bt_disable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
gpiod_set_value_cansleep(ctx->bt_gpio, 0);
return 0;
}
static const struct pwrseq_unit_data pwrseq_qcom_wcn_bt_unit_data = {
.name = "bluetooth-enable",
.deps = pwrseq_qcom_wcn_unit_deps,
.enable = pwrseq_qcom_wcn_bt_enable,
.disable = pwrseq_qcom_wcn_bt_disable,
};
static int pwrseq_qcom_wcn_wlan_enable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
pwrseq_qcom_wcn_ensure_gpio_delay(ctx);
gpiod_set_value_cansleep(ctx->wlan_gpio, 1);
ctx->last_gpio_enable_jf = jiffies;
return 0;
}
static int pwrseq_qcom_wcn_wlan_disable(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
gpiod_set_value_cansleep(ctx->wlan_gpio, 0);
return 0;
}
static const struct pwrseq_unit_data pwrseq_qcom_wcn_wlan_unit_data = {
.name = "wlan-enable",
.deps = pwrseq_qcom_wcn_unit_deps,
.enable = pwrseq_qcom_wcn_wlan_enable,
.disable = pwrseq_qcom_wcn_wlan_disable,
};
static int pwrseq_qcom_wcn_pwup_delay(struct pwrseq_device *pwrseq)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
if (ctx->pdata->pwup_delay_ms)
msleep(ctx->pdata->pwup_delay_ms);
return 0;
}
static const struct pwrseq_target_data pwrseq_qcom_wcn_bt_target_data = {
.name = "bluetooth",
.unit = &pwrseq_qcom_wcn_bt_unit_data,
.post_enable = pwrseq_qcom_wcn_pwup_delay,
};
static const struct pwrseq_target_data pwrseq_qcom_wcn_wlan_target_data = {
.name = "wlan",
.unit = &pwrseq_qcom_wcn_wlan_unit_data,
.post_enable = pwrseq_qcom_wcn_pwup_delay,
};
static const struct pwrseq_target_data *pwrseq_qcom_wcn_targets[] = {
&pwrseq_qcom_wcn_bt_target_data,
&pwrseq_qcom_wcn_wlan_target_data,
NULL
};
static const char *const pwrseq_qca6390_vregs[] = {
"vddio",
"vddaon",
"vddpmu",
"vddrfa0p95",
"vddrfa1p3",
"vddrfa1p9",
"vddpcie1p3",
"vddpcie1p9",
};
static const struct pwrseq_qcom_wcn_pdata pwrseq_qca6390_of_data = {
.vregs = pwrseq_qca6390_vregs,
.num_vregs = ARRAY_SIZE(pwrseq_qca6390_vregs),
.pwup_delay_ms = 60,
.gpio_enable_delay_ms = 100,
};
static const char *const pwrseq_wcn7850_vregs[] = {
"vdd",
"vddio",
"vddio1p2",
"vddaon",
"vdddig",
"vddrfa1p2",
"vddrfa1p8",
};
static const struct pwrseq_qcom_wcn_pdata pwrseq_wcn7850_of_data = {
.vregs = pwrseq_wcn7850_vregs,
.num_vregs = ARRAY_SIZE(pwrseq_wcn7850_vregs),
.pwup_delay_ms = 50,
};
static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq,
struct device *dev)
{
struct pwrseq_qcom_wcn_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
struct device_node *dev_node = dev->of_node;
/*
* The PMU supplies power to the Bluetooth and WLAN modules. Both
* consume the PMU AON output, so check the presence of the
* 'vddaon-supply' property and whether it leads us to the right
* device.
*/
if (!of_property_present(dev_node, "vddaon-supply"))
return 0;
struct device_node *reg_node __free(device_node) =
of_parse_phandle(dev_node, "vddaon-supply", 0);
if (!reg_node)
return 0;
/*
* `reg_node` is the PMU AON regulator, its parent is the `regulators`
* node and finally its grandparent is the PMU device node that we're
* looking for.
*/
if (!reg_node->parent || !reg_node->parent->parent ||
reg_node->parent->parent != ctx->of_node)
return 0;
return 1;
}
static int pwrseq_qcom_wcn_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pwrseq_qcom_wcn_ctx *ctx;
struct pwrseq_config config;
int i, ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->of_node = dev->of_node;
ctx->pdata = of_device_get_match_data(dev);
if (!ctx->pdata)
return dev_err_probe(dev, -ENODEV,
"Failed to obtain platform data\n");
ctx->regs = devm_kcalloc(dev, ctx->pdata->num_vregs,
sizeof(*ctx->regs), GFP_KERNEL);
if (!ctx->regs)
return -ENOMEM;
for (i = 0; i < ctx->pdata->num_vregs; i++)
ctx->regs[i].supply = ctx->pdata->vregs[i];
ret = devm_regulator_bulk_get(dev, ctx->pdata->num_vregs, ctx->regs);
if (ret < 0)
return dev_err_probe(dev, ret,
"Failed to get all regulators\n");
ctx->bt_gpio = devm_gpiod_get_optional(dev, "bt-enable", GPIOD_OUT_LOW);
if (IS_ERR(ctx->bt_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->bt_gpio),
"Failed to get the Bluetooth enable GPIO\n");
ctx->wlan_gpio = devm_gpiod_get_optional(dev, "wlan-enable",
GPIOD_OUT_LOW);
if (IS_ERR(ctx->wlan_gpio))
return dev_err_probe(dev, PTR_ERR(ctx->wlan_gpio),
"Failed to get the WLAN enable GPIO\n");
ctx->clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(ctx->clk))
return dev_err_probe(dev, PTR_ERR(ctx->clk),
"Failed to get the reference clock\n");
memset(&config, 0, sizeof(config));
config.parent = dev;
config.owner = THIS_MODULE;
config.drvdata = ctx;
config.match = pwrseq_qcom_wcn_match;
config.targets = pwrseq_qcom_wcn_targets;
ctx->pwrseq = devm_pwrseq_device_register(dev, &config);
if (IS_ERR(ctx->pwrseq))
return dev_err_probe(dev, PTR_ERR(ctx->pwrseq),
"Failed to register the power sequencer\n");
return 0;
}
static const struct of_device_id pwrseq_qcom_wcn_of_match[] = {
{
.compatible = "qcom,qca6390-pmu",
.data = &pwrseq_qca6390_of_data,
},
{
.compatible = "qcom,wcn7850-pmu",
.data = &pwrseq_wcn7850_of_data,
},
{ }
};
MODULE_DEVICE_TABLE(of, pwrseq_qcom_wcn_of_match);
static struct platform_driver pwrseq_qcom_wcn_driver = {
.driver = {
.name = "pwrseq-qcom_wcn",
.of_match_table = pwrseq_qcom_wcn_of_match,
},
.probe = pwrseq_qcom_wcn_probe,
};
module_platform_driver(pwrseq_qcom_wcn_driver);
MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_DESCRIPTION("Qualcomm WCN PMU power sequencing driver");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2024 Linaro Ltd.
*/
#ifndef __POWER_SEQUENCING_CONSUMER_H__
#define __POWER_SEQUENCING_CONSUMER_H__
#include <linux/err.h>
struct device;
struct pwrseq_desc;
#if IS_ENABLED(CONFIG_POWER_SEQUENCING)
struct pwrseq_desc * __must_check
pwrseq_get(struct device *dev, const char *target);
void pwrseq_put(struct pwrseq_desc *desc);
struct pwrseq_desc * __must_check
devm_pwrseq_get(struct device *dev, const char *target);
int pwrseq_power_on(struct pwrseq_desc *desc);
int pwrseq_power_off(struct pwrseq_desc *desc);
#else /* CONFIG_POWER_SEQUENCING */
static inline struct pwrseq_desc * __must_check
pwrseq_get(struct device *dev, const char *target)
{
return ERR_PTR(-ENOSYS);
}
static inline void pwrseq_put(struct pwrseq_desc *desc)
{
}
static inline struct pwrseq_desc * __must_check
devm_pwrseq_get(struct device *dev, const char *target)
{
return ERR_PTR(-ENOSYS);
}
static inline int pwrseq_power_on(struct pwrseq_desc *desc)
{
return -ENOSYS;
}
static inline int pwrseq_power_off(struct pwrseq_desc *desc)
{
return -ENOSYS;
}
#endif /* CONFIG_POWER_SEQUENCING */
#endif /* __POWER_SEQUENCING_CONSUMER_H__ */
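
A rough consumer-side sketch (hypothetical driver names; the calls mirror the hci_qca.c changes above, which request the "bluetooth" target with devm_pwrseq_get() and power the chip on with pwrseq_power_on()):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwrseq/consumer.h>

struct demo_bt_priv {
	struct pwrseq_desc *pwrseq;
};

/* Hypothetical probe helper: acquire the "bluetooth" power sequencer and
 * run the shared power-up sequence before talking to the controller.
 */
static int demo_bt_power_up(struct device *dev, struct demo_bt_priv *priv)
{
	/* Managed variant: the descriptor is released automatically. */
	priv->pwrseq = devm_pwrseq_get(dev, "bluetooth");
	if (IS_ERR(priv->pwrseq))
		return PTR_ERR(priv->pwrseq);

	return pwrseq_power_on(priv->pwrseq);
}

/* Power the target back down, e.g. from a shutdown or remove path. */
static void demo_bt_power_down(struct demo_bt_priv *priv)
{
	pwrseq_power_off(priv->pwrseq);
}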

View File

@ -0,0 +1,75 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2024 Linaro Ltd.
*/
#ifndef __POWER_SEQUENCING_PROVIDER_H__
#define __POWER_SEQUENCING_PROVIDER_H__
struct device;
struct module;
struct pwrseq_device;
typedef int (*pwrseq_power_state_func)(struct pwrseq_device *);
typedef int (*pwrseq_match_func)(struct pwrseq_device *, struct device *);
/**
* struct pwrseq_unit_data - Configuration of a single power sequencing
* unit.
* @name: Name of the unit.
* @deps: Units that must be enabled before this one and disabled after it
* in the order they come in this array. Must be NULL-terminated.
* @enable: Callback running the part of the power-on sequence provided by
* this unit.
* @disable: Callback running the part of the power-off sequence provided
* by this unit.
*/
struct pwrseq_unit_data {
const char *name;
const struct pwrseq_unit_data **deps;
pwrseq_power_state_func enable;
pwrseq_power_state_func disable;
};
/**
* struct pwrseq_target_data - Configuration of a power sequencing target.
* @name: Name of the target.
* @unit: Final unit that this target must reach in order to be considered
* enabled.
* @post_enable: Callback run after the target unit has been enabled, *after*
* the state lock has been released. It's useful for implementing
* boot-up delays without blocking other users from powering up
* using the same power sequencer.
*/
struct pwrseq_target_data {
const char *name;
const struct pwrseq_unit_data *unit;
pwrseq_power_state_func post_enable;
};
/**
* struct pwrseq_config - Configuration used for registering a new provider.
* @parent: Parent device for the sequencer. Must be set.
* @owner: Module providing this device.
* @drvdata: Private driver data.
* @match: Provider callback used to match the consumer device to the sequencer.
* @targets: Array of targets for this power sequencer. Must be NULL-terminated.
*/
struct pwrseq_config {
struct device *parent;
struct module *owner;
void *drvdata;
pwrseq_match_func match;
const struct pwrseq_target_data **targets;
};
struct pwrseq_device *
pwrseq_device_register(const struct pwrseq_config *config);
void pwrseq_device_unregister(struct pwrseq_device *pwrseq);
struct pwrseq_device *
devm_pwrseq_device_register(struct device *dev,
const struct pwrseq_config *config);
void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq);
#endif /* __POWER_SEQUENCING_PROVIDER_H__ */
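
On the provider side, the structures documented above compose into units and targets. The following minimal sketch uses invented names and omits the .match callback, clock and GPIO units for brevity (see pwrseq-qcom-wcn.c above for a complete driver); it only illustrates the registration pattern:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwrseq/provider.h>
#include <linux/regulator/consumer.h>

struct demo_pwrseq_ctx {
	struct pwrseq_device *pwrseq;
	struct regulator *vreg;
};

static int demo_vreg_enable(struct pwrseq_device *pwrseq)
{
	struct demo_pwrseq_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	return regulator_enable(ctx->vreg);
}

static int demo_vreg_disable(struct pwrseq_device *pwrseq)
{
	struct demo_pwrseq_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);

	return regulator_disable(ctx->vreg);
}

/* One unit: enable/disable the supply. */
static const struct pwrseq_unit_data demo_vreg_unit = {
	.name = "regulator-enable",
	.enable = demo_vreg_enable,
	.disable = demo_vreg_disable,
};

/* One target: "bluetooth" is enabled once the unit above has run. */
static const struct pwrseq_target_data demo_bt_target = {
	.name = "bluetooth",
	.unit = &demo_vreg_unit,
};

static const struct pwrseq_target_data *demo_targets[] = {
	&demo_bt_target,
	NULL
};

static int demo_pwrseq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct demo_pwrseq_ctx *ctx;
	struct pwrseq_config config = { };

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vreg = devm_regulator_get(dev, "vdd");
	if (IS_ERR(ctx->vreg))
		return PTR_ERR(ctx->vreg);

	config.parent = dev;
	config.owner = THIS_MODULE;
	config.drvdata = ctx;
	config.targets = demo_targets;

	ctx->pwrseq = devm_pwrseq_device_register(dev, &config);

	return PTR_ERR_OR_ZERO(ctx->pwrseq);
}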

View File

@ -441,6 +441,10 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb);
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb);
#define HCI_REQ_START BIT(0)
#define HCI_REQ_SKB BIT(1)

View File

@ -91,8 +91,6 @@ struct discovery_state {
s8 rssi;
u16 uuid_count;
u8 (*uuids)[16];
unsigned long scan_start;
unsigned long scan_duration;
unsigned long name_resolve_timeout;
};
@ -478,7 +476,6 @@ struct hci_dev {
unsigned int iso_pkts;
unsigned long acl_last_tx;
unsigned long sco_last_tx;
unsigned long le_last_tx;
__u8 le_tx_def_phys;
@ -530,7 +527,6 @@ struct hci_dev {
struct discovery_state discovery;
int discovery_old_state;
bool discovery_paused;
int advertising_old_state;
bool advertising_paused;
@ -649,6 +645,7 @@ struct hci_dev {
int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type,
struct bt_codec *codec, __u8 *vnd_len,
__u8 **vnd_data);
u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
@ -890,8 +887,6 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
hdev->discovery.uuid_count = 0;
kfree(hdev->discovery.uuids);
hdev->discovery.uuids = NULL;
hdev->discovery.scan_start = 0;
hdev->discovery.scan_duration = 0;
}
bool hci_discovery_active(struct hci_dev *hdev);

View File

@ -144,7 +144,7 @@ struct hci_dev_req {
struct hci_dev_list_req {
__u16 dev_num;
struct hci_dev_req dev_req[]; /* hci_dev_req structures */
struct hci_dev_req dev_req[] __counted_by(dev_num);
};
struct hci_conn_list_req {

View File

@ -8,6 +8,23 @@
#define UINT_PTR(_handle) ((void *)((uintptr_t)_handle))
#define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr))
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
struct hci_request {
struct hci_dev *hdev;
struct sk_buff_head cmd_q;
/* If something goes wrong when building the HCI request, the error
* value is stored in this field.
*/
int err;
};
typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
int err);
@ -20,6 +37,10 @@ struct hci_cmd_sync_work_entry {
};
struct adv_info;
struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, struct sock *sk);
/* Function with sync suffix shall not be called with hdev->lock held as they
* wait the command to complete and in the meantime an event could be received
* which could attempt to acquire hdev->lock causing a deadlock.
@ -131,6 +152,8 @@ int hci_update_discoverable(struct hci_dev *hdev);
int hci_update_connectable_sync(struct hci_dev *hdev);
int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp);
int hci_start_discovery_sync(struct hci_dev *hdev);
int hci_stop_discovery_sync(struct hci_dev *hdev);
@ -138,6 +161,7 @@ int hci_suspend_sync(struct hci_dev *hdev);
int hci_resume_sync(struct hci_dev *hdev);
struct hci_conn;
struct hci_conn_params;
int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
@ -156,3 +180,5 @@ int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn);
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
struct hci_conn_params *params);

View File

@ -355,7 +355,7 @@ struct rfcomm_dev_info {
struct rfcomm_dev_list_req {
u16 dev_num;
struct rfcomm_dev_info dev_info[];
struct rfcomm_dev_info dev_info[] __counted_by(dev_num);
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);

View File

@ -14,8 +14,7 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \
eir.o hci_sync.o
ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o
bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o

View File

@ -34,7 +34,6 @@
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "smp.h"
#include "eir.h"

View File

@ -40,7 +40,6 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
@ -312,33 +311,12 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
return copied;
}
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
struct hci_dev *hdev = req->hdev;
struct hci_cp_inquiry cp;
BT_DBG("%s", hdev->name);
if (test_bit(HCI_INQUIRY, &hdev->flags))
return 0;
/* Start Inquiry */
memcpy(&cp.lap, &ir->lap, 3);
cp.length = ir->length;
cp.num_rsp = ir->num_rsp;
hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
return 0;
}
int hci_inquiry(void __user *arg)
{
__u8 __user *ptr = arg;
struct hci_inquiry_req ir;
struct hci_dev *hdev;
int err = 0, do_inquiry = 0, max_rsp;
long timeo;
__u8 *buf;
if (copy_from_user(&ir, ptr, sizeof(ir)))
@ -377,11 +355,11 @@ int hci_inquiry(void __user *arg)
}
hci_dev_unlock(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
if (do_inquiry) {
err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
timeo, NULL);
hci_req_sync_lock(hdev);
err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
hci_req_sync_unlock(hdev);
if (err < 0)
goto done;
@ -718,8 +696,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
switch (cmd) {
case HCISETAUTH:
err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
1, &dr.dev_opt, HCI_CMD_TIMEOUT);
err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
1, &dr.dev_opt, HCI_CMD_TIMEOUT);
break;
case HCISETENCRYPT:
@ -730,23 +708,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
err = __hci_cmd_sync_status(hdev,
HCI_OP_WRITE_AUTH_ENABLE,
1, &dr.dev_opt,
HCI_CMD_TIMEOUT);
err = hci_cmd_sync_status(hdev,
HCI_OP_WRITE_AUTH_ENABLE,
1, &dr.dev_opt,
HCI_CMD_TIMEOUT);
if (err)
break;
}
err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
1, &dr.dev_opt,
HCI_CMD_TIMEOUT);
err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
1, &dr.dev_opt, HCI_CMD_TIMEOUT);
break;
case HCISETSCAN:
err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
1, &dr.dev_opt,
HCI_CMD_TIMEOUT);
err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
1, &dr.dev_opt, HCI_CMD_TIMEOUT);
/* Ensure that the connectable and discoverable states
* get correctly modified as this was a non-mgmt change.
@ -758,9 +734,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
case HCISETLINKPOL:
policy = cpu_to_le16(dr.dev_opt);
err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
2, &policy,
HCI_CMD_TIMEOUT);
err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
2, &policy, HCI_CMD_TIMEOUT);
break;
case HCISETLINKMODE:
@ -801,7 +776,7 @@ int hci_get_dev_list(void __user *arg)
struct hci_dev *hdev;
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
int n = 0, size, err;
int n = 0, err;
__u16 dev_num;
if (get_user(dev_num, (__u16 __user *) arg))
@ -810,12 +785,11 @@ int hci_get_dev_list(void __user *arg)
if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
return -EINVAL;
size = sizeof(*dl) + dev_num * sizeof(*dr);
dl = kzalloc(size, GFP_KERNEL);
dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
if (!dl)
return -ENOMEM;
dl->dev_num = dev_num;
dr = dl->dev_req;
read_lock(&hci_dev_list_lock);
@ -829,8 +803,8 @@ int hci_get_dev_list(void __user *arg)
if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags &= ~BIT(HCI_UP);
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = flags;
dr[n].dev_id = hdev->id;
dr[n].dev_opt = flags;
if (++n >= dev_num)
break;
@ -838,9 +812,7 @@ int hci_get_dev_list(void __user *arg)
read_unlock(&hci_dev_list_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*dr);
err = copy_to_user(arg, dl, size);
err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
kfree(dl);
return err ? -EFAULT : 0;
@ -2579,7 +2551,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
hci_devcd_setup(hdev);
hci_request_setup(hdev);
hci_init_sysfs(hdev);
discovery_init(hdev);
@ -2912,15 +2883,31 @@ int hci_reset_dev(struct hci_dev *hdev)
}
EXPORT_SYMBOL(hci_reset_dev);
static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
{
if (hdev->classify_pkt_type)
return hdev->classify_pkt_type(hdev, skb);
return hci_skb_pkt_type(skb);
}
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 dev_pkt_type;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
&& !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
/* Check if the driver agrees with the packet type classification */
dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
if (hci_skb_pkt_type(skb) != dev_pkt_type) {
hci_skb_pkt_type(skb) = dev_pkt_type;
}
switch (hci_skb_pkt_type(skb)) {
case HCI_EVENT_PKT:
break;
@ -3065,7 +3052,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
skb = hci_prepare_cmd(hdev, opcode, plen, param);
skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
if (!skb) {
bt_dev_err(hdev, "no memory for command");
return -ENOMEM;
@ -3100,7 +3087,7 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
return -EINVAL;
}
skb = hci_prepare_cmd(hdev, opcode, plen, param);
skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
if (!skb) {
bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
opcode);
@ -4085,7 +4072,7 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
if (hci_req_status_pend(hdev) &&
if (hdev->req_status == HCI_REQ_PEND &&
!hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);

View File

@ -28,7 +28,6 @@
#include <net/bluetooth/hci_core.h>
#include "smp.h"
#include "hci_request.h"
#include "hci_debugfs.h"
#define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \

View File

@ -33,7 +33,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
#include "smp.h"
@ -6988,6 +6987,8 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
if (!pa_sync)
goto unlock;
pa_sync->iso_qos.bcast.encryption = ev->encryption;
/* Notify iso layer */
hci_connect_cfm(pa_sync, 0);

View File

@ -1,903 +0,0 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2014 Intel Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#include <linux/sched/signal.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
skb_queue_head_init(&req->cmd_q);
req->hdev = hdev;
req->err = 0;
}
void hci_req_purge(struct hci_request *req)
{
skb_queue_purge(&req->cmd_q);
}
bool hci_req_status_pend(struct hci_dev *hdev)
{
return hdev->req_status == HCI_REQ_PEND;
}
static int req_run(struct hci_request *req, hci_req_complete_t complete,
hci_req_complete_skb_t complete_skb)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
unsigned long flags;
bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
/* If an error occurred during request building, remove all HCI
* commands queued on the HCI request queue.
*/
if (req->err) {
skb_queue_purge(&req->cmd_q);
return req->err;
}
/* Do not allow empty requests */
if (skb_queue_empty(&req->cmd_q))
return -ENODATA;
skb = skb_peek_tail(&req->cmd_q);
if (complete) {
bt_cb(skb)->hci.req_complete = complete;
} else if (complete_skb) {
bt_cb(skb)->hci.req_complete_skb = complete_skb;
bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
}
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
queue_work(hdev->workqueue, &hdev->cmd_work);
return 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
return req_run(req, complete, NULL);
}
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
return req_run(req, NULL, complete);
}
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
struct sk_buff *skb)
{
bt_dev_dbg(hdev, "result 0x%2.2x", result);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
if (skb) {
kfree_skb(hdev->req_skb);
hdev->req_skb = skb_get(skb);
}
wake_up_interruptible(&hdev->req_wait_q);
}
}
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status)
{
struct hci_request req;
int err = 0;
bt_dev_dbg(hdev, "start");
hci_req_init(&req, hdev);
hdev->req_status = HCI_REQ_PEND;
err = func(&req, opt);
if (err) {
if (hci_status)
*hci_status = HCI_ERROR_UNSPECIFIED;
return err;
}
err = hci_req_run_skb(&req, hci_req_sync_complete);
if (err < 0) {
hdev->req_status = 0;
/* ENODATA means the HCI request command queue is empty.
* This can happen when a request with conditionals doesn't
* trigger any commands to be sent. This is normal behavior
* and should not trigger an error return.
*/
if (err == -ENODATA) {
if (hci_status)
*hci_status = 0;
return 0;
}
if (hci_status)
*hci_status = HCI_ERROR_UNSPECIFIED;
return err;
}
err = wait_event_interruptible_timeout(hdev->req_wait_q,
hdev->req_status != HCI_REQ_PEND, timeout);
if (err == -ERESTARTSYS)
return -EINTR;
switch (hdev->req_status) {
case HCI_REQ_DONE:
err = -bt_to_errno(hdev->req_result);
if (hci_status)
*hci_status = hdev->req_result;
break;
case HCI_REQ_CANCELED:
err = -hdev->req_result;
if (hci_status)
*hci_status = HCI_ERROR_UNSPECIFIED;
break;
default:
err = -ETIMEDOUT;
if (hci_status)
*hci_status = HCI_ERROR_UNSPECIFIED;
break;
}
kfree_skb(hdev->req_skb);
hdev->req_skb = NULL;
hdev->req_status = hdev->req_result = 0;
bt_dev_dbg(hdev, "end: err %d", err);
return err;
}
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status)
{
int ret;
/* Serialize all requests */
hci_req_sync_lock(hdev);
/* Check the state after obtaining the lock to protect the HCI_UP
* against any races from hci_dev_do_close when the controller
* gets removed.
*/
if (test_bit(HCI_UP, &hdev->flags))
ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
else
ret = -ENETDOWN;
hci_req_sync_unlock(hdev);
return ret;
}
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
struct sk_buff *skb;
skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb)
return NULL;
hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);
hdr->plen = plen;
if (plen)
skb_put_data(skb, param, plen);
bt_dev_dbg(hdev, "skb len %d", skb->len);
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
hci_skb_opcode(skb) = opcode;
return skb;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
const void *param, u8 event)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
/* If an error occurred during request building, there is no point in
* queueing the HCI command. We can simply return.
*/
if (req->err)
return;
skb = hci_prepare_cmd(hdev, opcode, plen, param);
if (!skb) {
bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
opcode);
req->err = -ENOMEM;
return;
}
if (skb_queue_empty(&req->cmd_q))
bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
hci_skb_event(skb) = event;
skb_queue_tail(&req->cmd_q, skb);
}
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param)
{
bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
hci_req_add_ev(req, opcode, plen, param, 0);
}
static void start_interleave_scan(struct hci_dev *hdev)
{
hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
queue_delayed_work(hdev->req_workqueue,
&hdev->interleave_scan, 0);
}
static bool is_interleave_scanning(struct hci_dev *hdev)
{
return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}
static void cancel_interleave_scan(struct hci_dev *hdev)
{
bt_dev_dbg(hdev, "cancelling interleave scan");
cancel_delayed_work_sync(&hdev->interleave_scan);
hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}
/* Return true if an interleave scan was started by this function,
 * otherwise return false
*/
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
/* Do interleaved scan only if all of the following are true:
* - There is at least one ADV monitor
* - At least one pending LE connection or one device to be scanned for
* - Monitor offloading is not supported
* If so, we should alternate between allowlist scan and one without
* any filters to save power.
*/
bool use_interleaving = hci_is_adv_monitoring(hdev) &&
!(list_empty(&hdev->pend_le_conns) &&
list_empty(&hdev->pend_le_reports)) &&
hci_get_adv_monitor_offload_ext(hdev) ==
HCI_ADV_MONITOR_EXT_NONE;
bool is_interleaving = is_interleave_scanning(hdev);
if (use_interleaving && !is_interleaving) {
start_interleave_scan(hdev);
bt_dev_dbg(hdev, "starting interleave scan");
return true;
}
if (!use_interleaving && is_interleaving)
cancel_interleave_scan(hdev);
return false;
}
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
struct hci_dev *hdev = req->hdev;
if (hdev->scanning_paused) {
bt_dev_dbg(hdev, "Scanning is paused for suspend");
return;
}
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp;
memset(&cp, 0, sizeof(cp));
cp.enable = LE_SCAN_DISABLE;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
&cp);
} else {
struct hci_cp_le_set_scan_enable cp;
memset(&cp, 0, sizeof(cp));
cp.enable = LE_SCAN_DISABLE;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
/* Disable address resolution */
if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
__u8 enable = 0x00;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
}
}
static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
u8 bdaddr_type)
{
struct hci_cp_le_del_from_accept_list cp;
cp.bdaddr_type = bdaddr_type;
bacpy(&cp.bdaddr, bdaddr);
bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(req->hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
if (irk) {
struct hci_cp_le_del_from_resolv_list cp;
cp.bdaddr_type = bdaddr_type;
bacpy(&cp.bdaddr, bdaddr);
hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
sizeof(cp), &cp);
}
}
}
/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
struct hci_conn_params *params, u8 *num_entries,
bool allow_rpa)
{
struct hci_cp_le_add_to_accept_list cp;
struct hci_dev *hdev = req->hdev;
/* Already in accept list */
if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
params->addr_type))
return 0;
/* Select filter policy to accept all advertising */
if (*num_entries >= hdev->le_accept_list_size)
return -1;
/* Accept list can not be used with RPAs */
if (!allow_rpa &&
!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
return -1;
}
/* During suspend, only wakeable devices can be in accept list */
if (hdev->suspended &&
!(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
return 0;
*num_entries += 1;
cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr);
bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(hdev, &params->addr,
params->addr_type);
if (irk) {
struct hci_cp_le_add_to_resolv_list cp;
cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr);
memcpy(cp.peer_irk, irk->val, 16);
if (hci_dev_test_flag(hdev, HCI_PRIVACY))
memcpy(cp.local_irk, hdev->irk, 16);
else
memset(cp.local_irk, 0, 16);
hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
sizeof(cp), &cp);
}
}
return 0;
}
static u8 update_accept_list(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct hci_conn_params *params;
struct bdaddr_list *b;
u8 num_entries = 0;
bool pend_conn, pend_report;
/* We allow usage of accept list even with RPAs in suspend. In the worst
* case, we won't be able to wake from devices that use the privacy1.2
* features. Additionally, once we support privacy1.2 and IRK
* offloading, we can update this to also check for those conditions.
*/
bool allow_rpa = hdev->suspended;
if (use_ll_privacy(hdev))
allow_rpa = true;
/* Go through the current accept list programmed into the
* controller one by one and check if that address is still
* in the list of pending connections or list of devices to
* report. If not present in either list, then queue the
* command to remove it from the controller.
*/
list_for_each_entry(b, &hdev->le_accept_list, list) {
pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
&b->bdaddr,
b->bdaddr_type);
pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
&b->bdaddr,
b->bdaddr_type);
/* If the device is not likely to connect or report,
* remove it from the accept list.
*/
if (!pend_conn && !pend_report) {
del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
continue;
}
/* Accept list can not be used with RPAs */
if (!allow_rpa &&
!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
return 0x00;
}
num_entries++;
}
/* Since all no longer valid accept list entries have been
* removed, walk through the list of pending connections
* and ensure that any new device gets programmed into
* the controller.
*
* If the list of devices is larger than the number of
* available accept list entries in the controller, then
* just abort and return the filter policy value to not use the
* accept list.
*/
list_for_each_entry(params, &hdev->pend_le_conns, action) {
if (add_to_accept_list(req, params, &num_entries, allow_rpa))
return 0x00;
}
/* After adding all new pending connections, walk through
* the list of pending reports and also add these to the
* accept list if there is still space. Abort if space runs out.
*/
list_for_each_entry(params, &hdev->pend_le_reports, action) {
if (add_to_accept_list(req, params, &num_entries, allow_rpa))
return 0x00;
}
/* Use the allowlist unless the following conditions are all true:
* - We are not currently suspending
* - There is at least one ADV monitor registered and it is not offloaded
* - Interleaved scanning is not currently using the allowlist
*/
if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
return 0x00;
/* Select filter policy to use accept list */
return 0x01;
}
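/* Filter policy values produced by update_accept_list() (an illustrative
* summary of the logic above):
*
*   0x00 - accept list not used; report all advertising
*   0x01 - accept list used; only report advertising from listed devices
*
* The passive scan path below may additionally OR in 0x02 when LE privacy
* and Extended Scanner Filter Policies are enabled, yielding 0x02/0x03,
* which also pass directed advertising sent to our resolvable private
* address.
*/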
static bool scan_use_rpa(struct hci_dev *hdev)
{
return hci_dev_test_flag(hdev, HCI_PRIVACY);
}
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
u16 window, u8 own_addr_type, u8 filter_policy,
bool filter_dup, bool addr_resolv)
{
struct hci_dev *hdev = req->hdev;
if (hdev->scanning_paused) {
bt_dev_dbg(hdev, "Scanning is paused for suspend");
return;
}
if (use_ll_privacy(hdev) && addr_resolv) {
u8 enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
}
/* Use extended scanning if the Set Extended Scan Parameters and
* Set Extended Scan Enable commands are supported
*/
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_params *ext_param_cp;
struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
struct hci_cp_le_scan_phy_params *phy_params;
u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
u32 plen;
ext_param_cp = (void *)data;
phy_params = (void *)ext_param_cp->data;
memset(ext_param_cp, 0, sizeof(*ext_param_cp));
ext_param_cp->own_addr_type = own_addr_type;
ext_param_cp->filter_policy = filter_policy;
plen = sizeof(*ext_param_cp);
if (scan_1m(hdev) || scan_2m(hdev)) {
ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
memset(phy_params, 0, sizeof(*phy_params));
phy_params->type = type;
phy_params->interval = cpu_to_le16(interval);
phy_params->window = cpu_to_le16(window);
plen += sizeof(*phy_params);
phy_params++;
}
if (scan_coded(hdev)) {
ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
memset(phy_params, 0, sizeof(*phy_params));
phy_params->type = type;
phy_params->interval = cpu_to_le16(interval);
phy_params->window = cpu_to_le16(window);
plen += sizeof(*phy_params);
phy_params++;
}
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
plen, ext_param_cp);
memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
ext_enable_cp.enable = LE_SCAN_ENABLE;
ext_enable_cp.filter_dup = filter_dup;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
sizeof(ext_enable_cp), &ext_enable_cp);
} else {
struct hci_cp_le_set_scan_param param_cp;
struct hci_cp_le_set_scan_enable enable_cp;
memset(&param_cp, 0, sizeof(param_cp));
param_cp.type = type;
param_cp.interval = cpu_to_le16(interval);
param_cp.window = cpu_to_le16(window);
param_cp.own_address_type = own_addr_type;
param_cp.filter_policy = filter_policy;
hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
&param_cp);
memset(&enable_cp, 0, sizeof(enable_cp));
enable_cp.enable = LE_SCAN_ENABLE;
enable_cp.filter_dup = filter_dup;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
&enable_cp);
}
}
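/* Illustrative sketch of the variable-length buffer built above for
* HCI_OP_LE_SET_EXT_SCAN_PARAMS when both the 1M and Coded PHYs are
* scanned:
*
*   struct hci_cp_le_set_ext_scan_params  (own_addr_type, filter_policy,
*                                           scanning_phys = 1M | Coded)
*   struct hci_cp_le_scan_phy_params      (1M:    type, interval, window)
*   struct hci_cp_le_scan_phy_params      (Coded: type, interval, window)
*
* plen grows by sizeof(*phy_params) for each enabled PHY, so the command
* carries exactly one PHY parameter block per bit set in scanning_phys.
*/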
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
static int hci_update_random_address(struct hci_request *req,
bool require_privacy, bool use_rpa,
u8 *own_addr_type)
{
struct hci_dev *hdev = req->hdev;
int err;
/* If privacy is enabled use a resolvable private address. If
* the current RPA has expired or something other than
* the current RPA is in use, then generate a new one.
*/
if (use_rpa) {
/* If the controller supports LL Privacy, use own address type
* 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
*/
if (use_ll_privacy(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (rpa_valid(hdev))
return 0;
err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
if (err < 0) {
bt_dev_err(hdev, "failed to generate new RPA");
return err;
}
set_random_addr(req, &hdev->rpa);
return 0;
}
/* In case of required privacy without resolvable private address,
* use a non-resolvable private address. This is useful for active
* scanning and non-connectable advertising.
*/
if (require_privacy) {
bdaddr_t nrpa;
while (true) {
/* The non-resolvable private address is generated
* from six random bytes with the two most significant
* bits cleared.
*/
get_random_bytes(&nrpa, 6);
nrpa.b[5] &= 0x3f;
/* The non-resolvable private address shall not be
* equal to the public address.
*/
if (bacmp(&hdev->bdaddr, &nrpa))
break;
}
*own_addr_type = ADDR_LE_DEV_RANDOM;
set_random_addr(req, &nrpa);
return 0;
}
/* If forcing the static address is in use or there is no public
* address, use the static address as the random address (but skip
* the HCI command if the current random address is already the
* static one).
*
* In case BR/EDR has been disabled on a dual-mode controller
* and a static address has been configured, then use that
* address instead of the public BR/EDR address.
*/
if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
!bacmp(&hdev->bdaddr, BDADDR_ANY) ||
(!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
bacmp(&hdev->static_addr, BDADDR_ANY))) {
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (bacmp(&hdev->static_addr, &hdev->random_addr))
hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
&hdev->static_addr);
return 0;
}
/* Neither privacy nor static address is being used so use a
* public address.
*/
*own_addr_type = ADDR_LE_DEV_PUBLIC;
return 0;
}
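/* Own address selection performed above, in order of precedence (an
* illustrative summary):
*
*   use_rpa         -> ADDR_LE_DEV_RANDOM, or ADDR_LE_DEV_RANDOM_RESOLVED
*                      with LL Privacy; a new RPA is generated only when
*                      the current one is no longer valid.
*   require_privacy -> ADDR_LE_DEV_RANDOM with a non-resolvable private
*                      address: six random bytes with the two top bits of
*                      the most significant byte cleared (nrpa.b[5] &= 0x3f).
*   static address  -> ADDR_LE_DEV_RANDOM; the HCI command is skipped when
*                      the static address is already the random address.
*   otherwise       -> ADDR_LE_DEV_PUBLIC.
*/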
/* hci_req_add_le_scan_disable() must be called first to disable
* controller-based address resolution so that the resolving list
* can be reconfigured.
*/
void hci_req_add_le_passive_scan(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
u8 filter_policy;
u16 window, interval;
/* Default is to enable duplicates filter */
u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
/* Background scanning should run with address resolution */
bool addr_resolv = true;
if (hdev->scanning_paused) {
bt_dev_dbg(hdev, "Scanning is paused for suspend");
return;
}
/* Set require_privacy to false since no SCAN_REQ PDUs are sent
* during passive scanning. Not using a non-resolvable address
* here is important so that peer devices using directed
* advertising with our address will be correctly reported
* by the controller.
*/
if (hci_update_random_address(req, false, scan_use_rpa(hdev),
&own_addr_type))
return;
if (hdev->enable_advmon_interleave_scan &&
__hci_update_interleaved_scan(hdev))
return;
bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
/* Adding or removing entries from the accept list must
* happen before enabling scanning. The controller does
* not allow accept list modification while scanning.
*/
filter_policy = update_accept_list(req);
/* When the controller is using resolvable random addresses and
* LE privacy is enabled, controllers that support Extended
* Scanner Filter Policies can additionally handle directed
* advertising.
*
* So instead of using filter policies 0x00 (no accept list)
* and 0x01 (accept list enabled), use the new filter policies
* 0x02 (no accept list) and 0x03 (accept list enabled).
*/
if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
filter_policy |= 0x02;
if (hdev->suspended) {
window = hdev->le_scan_window_suspend;
interval = hdev->le_scan_int_suspend;
} else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect;
} else if (hci_is_adv_monitoring(hdev)) {
window = hdev->le_scan_window_adv_monitor;
interval = hdev->le_scan_int_adv_monitor;
/* Disable the duplicates filter when scanning for an advertisement
* monitor for the following reasons.
*
* For HW pattern filtering (e.g. MSFT), Realtek and Qualcomm
* controllers ignore RSSI_Sampling_Period when the duplicates
* filter is enabled.
*
* For SW pattern filtering, when we're not doing interleaved
* scanning, it is necessary to disable duplicates filter,
* otherwise hosts can only receive one advertisement and it's
* impossible to know if a peer is still in range.
*/
filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
} else {
window = hdev->le_scan_window;
interval = hdev->le_scan_interval;
}
bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
filter_policy);
hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
own_addr_type, filter_policy, filter_dup,
addr_resolv);
}
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
int ret = 0;
hci_dev_lock(hdev);
if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req, false);
hci_req_add_le_passive_scan(req);
switch (hdev->interleave_scan_state) {
case INTERLEAVE_SCAN_ALLOWLIST:
bt_dev_dbg(hdev, "next state: allowlist");
hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
break;
case INTERLEAVE_SCAN_NO_FILTER:
bt_dev_dbg(hdev, "next state: no filter");
hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
break;
case INTERLEAVE_SCAN_NONE:
BT_ERR("unexpected error");
ret = -1;
}
hci_dev_unlock(hdev);
return ret;
}
static void interleave_scan_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
interleave_scan.work);
u8 status;
unsigned long timeout;
if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
} else {
bt_dev_err(hdev, "unexpected error");
return;
}
hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
HCI_CMD_TIMEOUT, &status);
/* Don't continue interleaving if it was canceled */
if (is_interleave_scanning(hdev))
queue_delayed_work(hdev->req_workqueue,
&hdev->interleave_scan, timeout);
}
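/* Interleaved scanning alternates between two states (a sketch of the
* cycle driven by interleave_scan_work() above):
*
*   INTERLEAVE_SCAN_ALLOWLIST  <->  INTERLEAVE_SCAN_NO_FILTER
*
* Each run picks its delay from advmon_allowlist_duration or
* advmon_no_filter_duration, re-programs scanning through hci_req_sync(),
* and re-queues itself unless cancel_interleave_scan() has reset the state
* to INTERLEAVE_SCAN_NONE.
*/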
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
struct hci_dev *hdev = req->hdev;
/* If we're advertising or initiating an LE connection we can't
* go ahead and change the random address at this time. This is
* because the eventual initiator address used for the
* subsequently created connection will be undefined (some
* controllers use the new address and others the one we had
* when the operation started).
*
* In this kind of scenario skip the update and let the random
* address be updated at the next cycle.
*/
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
hci_lookup_le_connect(hdev)) {
bt_dev_dbg(hdev, "Deferring random address update");
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return;
}
hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
void hci_request_setup(struct hci_dev *hdev)
{
INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
void hci_request_cancel_all(struct hci_dev *hdev)
{
hci_cmd_sync_cancel_sync(hdev, ENODEV);
cancel_interleave_scan(hdev);
}

View File

@ -1,71 +0,0 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2014 Intel Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#include <asm/unaligned.h>
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
struct hci_request {
struct hci_dev *hdev;
struct sk_buff_head cmd_q;
/* If something goes wrong when building the HCI request, the error
* value is stored in this field.
*/
int err;
};
void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
void hci_req_purge(struct hci_request *req);
bool hci_req_status_pend(struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
struct sk_buff *skb);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
const void *param, u8 event);
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb);
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status);
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status);
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
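/* Minimal usage sketch for the request API declared above, mirroring how
* hci_request.c builds and runs requests (illustrative only):
*
*   struct hci_request req;
*
*   hci_req_init(&req, hdev);
*   hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
*   err = hci_req_run(&req, complete_cb);   /* hci_req_complete_t callback */
*
* or synchronously, collecting the HCI status:
*
*   err = hci_req_sync(hdev, build_func, 0, HCI_CMD_TIMEOUT, &status);
*/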

View File

@ -12,7 +12,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
@ -49,9 +48,8 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
wake_up_interruptible(&hdev->req_wait_q);
}
static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
u32 plen, const void *param,
struct sock *sk)
struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, struct sock *sk)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
@ -147,6 +145,13 @@ static int hci_cmd_sync_run(struct hci_request *req)
return 0;
}
static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
{
skb_queue_head_init(&req->cmd_q);
req->hdev = hdev;
req->err = 0;
}
/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
@ -158,7 +163,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
hci_req_init(&req, hdev);
hci_request_init(&req, hdev);
hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
@ -347,10 +352,9 @@ static int scan_disable_sync(struct hci_dev *hdev, void *data)
return hci_scan_disable_sync(hdev);
}
static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}
static void le_scan_disable(struct work_struct *work)
@ -371,8 +375,6 @@ static void le_scan_disable(struct work_struct *work)
goto _return;
}
hdev->discovery.scan_start = 0;
/* If we were running LE only scan, change discovery state. If
* we were running both LE and BR/EDR inquiry simultaneously,
* and BR/EDR inquiry is already finished, stop discovery,
@ -570,6 +572,53 @@ unlock:
hci_dev_unlock(hdev);
}
static bool is_interleave_scanning(struct hci_dev *hdev)
{
return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}
static int hci_passive_scan_sync(struct hci_dev *hdev);
static void interleave_scan_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
interleave_scan.work);
unsigned long timeout;
if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
} else {
bt_dev_err(hdev, "unexpected error");
return;
}
hci_passive_scan_sync(hdev);
hci_dev_lock(hdev);
switch (hdev->interleave_scan_state) {
case INTERLEAVE_SCAN_ALLOWLIST:
bt_dev_dbg(hdev, "next state: allowlist");
hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
break;
case INTERLEAVE_SCAN_NO_FILTER:
bt_dev_dbg(hdev, "next state: no filter");
hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
break;
case INTERLEAVE_SCAN_NONE:
bt_dev_err(hdev, "unexpected error");
}
hci_dev_unlock(hdev);
/* Don't continue interleaving if it was canceled */
if (is_interleave_scanning(hdev))
queue_delayed_work(hdev->req_workqueue,
&hdev->interleave_scan, timeout);
}
void hci_cmd_sync_init(struct hci_dev *hdev)
{
INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
@ -581,6 +630,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
@ -2114,11 +2164,6 @@ static void hci_start_interleave_scan(struct hci_dev *hdev)
&hdev->interleave_scan, 0);
}
static bool is_interleave_scanning(struct hci_dev *hdev)
{
return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}
static void cancel_interleave_scan(struct hci_dev *hdev)
{
bt_dev_dbg(hdev, "cancelling interleave scan");
@ -5017,7 +5062,9 @@ int hci_dev_close_sync(struct hci_dev *hdev)
cancel_delayed_work(&hdev->ncmd_timer);
cancel_delayed_work(&hdev->le_scan_disable);
hci_request_cancel_all(hdev);
hci_cmd_sync_cancel_sync(hdev, ENODEV);
cancel_interleave_scan(hdev);
if (hdev->adv_instance_timeout) {
cancel_delayed_work_sync(&hdev->adv_instance_expire);
@ -5664,7 +5711,7 @@ int hci_update_connectable_sync(struct hci_dev *hdev)
return hci_update_passive_scan_sync(hdev);
}
static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
{
const u8 giac[3] = { 0x33, 0x8b, 0x9e };
const u8 liac[3] = { 0x00, 0x8b, 0x9e };
@ -5687,6 +5734,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
memcpy(&cp.lap, giac, sizeof(cp.lap));
cp.length = length;
cp.num_rsp = num_rsp;
return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
@ -5773,7 +5821,7 @@ static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
if (err)
return err;
return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
}
int hci_start_discovery_sync(struct hci_dev *hdev)
@ -5785,7 +5833,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev)
switch (hdev->discovery.type) {
case DISCOV_TYPE_BREDR:
return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
case DISCOV_TYPE_INTERLEAVED:
/* When running simultaneous discovery, the LE scanning time
* should occupy the whole discovery time since BR/EDR inquiry
@ -5855,7 +5903,6 @@ static int hci_pause_discovery_sync(struct hci_dev *hdev)
return err;
hdev->discovery_paused = true;
hdev->discovery_old_state = old_state;
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
return 0;
@ -6724,3 +6771,21 @@ int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
return -ENOENT;
}
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
struct hci_conn_params *params)
{
struct hci_cp_le_conn_update cp;
memset(&cp, 0, sizeof(cp));
cp.handle = cpu_to_le16(conn->handle);
cp.conn_interval_min = cpu_to_le16(params->conn_min_interval);
cp.conn_interval_max = cpu_to_le16(params->conn_max_interval);
cp.conn_latency = cpu_to_le16(params->conn_latency);
cp.supervision_timeout = cpu_to_le16(params->supervision_timeout);
cp.min_ce_len = cpu_to_le16(0x0000);
cp.max_ce_len = cpu_to_le16(0x0000);
return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

View File

@ -1720,11 +1720,6 @@ static void iso_sock_ready(struct sock *sk)
release_sock(sk);
}
struct iso_list_data {
struct hci_conn *hcon;
int count;
};
static bool iso_match_big(struct sock *sk, void *data)
{
struct hci_evt_le_big_sync_estabilished *ev = data;

View File

@ -33,7 +33,6 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
@ -42,7 +41,7 @@
#include "aosp.h"
#define MGMT_VERSION 1
#define MGMT_REVISION 22
#define MGMT_REVISION 23
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@ -7813,6 +7812,18 @@ unlock:
return err;
}
static int conn_update_sync(struct hci_dev *hdev, void *data)
{
struct hci_conn_params *params = data;
struct hci_conn *conn;
conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
if (!conn)
return -ECANCELED;
return hci_le_conn_update_sync(hdev, conn, params);
}
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@ -7846,12 +7857,14 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
hci_conn_params_clear_disabled(hdev);
if (param_count > 1)
hci_conn_params_clear_disabled(hdev);
for (i = 0; i < param_count; i++) {
struct mgmt_conn_param *param = &cp->params[i];
struct hci_conn_params *hci_param;
u16 min, max, latency, timeout;
bool update = false;
u8 addr_type;
bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
@ -7879,6 +7892,19 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
continue;
}
/* Detect when the load is for an existing parameter, then
* attempt to trigger the connection update procedure.
*/
if (!i && param_count == 1) {
hci_param = hci_conn_params_lookup(hdev,
&param->addr.bdaddr,
addr_type);
if (hci_param)
update = true;
else
hci_conn_params_clear_disabled(hdev);
}
hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
addr_type);
if (!hci_param) {
@ -7890,6 +7916,25 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
hci_param->conn_max_interval = max;
hci_param->conn_latency = latency;
hci_param->supervision_timeout = timeout;
/* Check if we need to trigger a connection update */
if (update) {
struct hci_conn *conn;
/* Look up an existing connection as central and check
* whether the parameters match; if they don't, trigger
* a connection update.
*/
conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
addr_type);
if (conn && conn->role == HCI_ROLE_MASTER &&
(conn->le_conn_min_interval != min ||
conn->le_conn_max_interval != max ||
conn->le_conn_latency != latency ||
conn->le_supv_timeout != timeout))
hci_cmd_sync_queue(hdev, conn_update_sync,
hci_param, NULL);
}
}
hci_dev_unlock(hdev);
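/* Illustrative flow of the new single-entry case above: loading one
* parameter set that already exists now triggers the connection update
* procedure instead of only caching the values:
*
*   load_conn_param()                      (param_count == 1)
*     -> hci_conn_params_lookup() finds the entry, update = true
*     -> new min/max/latency/timeout stored in hci_param
*     -> values differ from the live connection as central
*     -> hci_cmd_sync_queue(hdev, conn_update_sync, hci_param, NULL)
*          -> conn_update_sync() -> hci_le_conn_update_sync()
*/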

View File

@ -7,7 +7,6 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "mgmt_util.h"
#include "msft.h"

View File

@ -504,7 +504,7 @@ static int rfcomm_get_dev_list(void __user *arg)
struct rfcomm_dev *dev;
struct rfcomm_dev_list_req *dl;
struct rfcomm_dev_info *di;
int n = 0, size, err;
int n = 0, err;
u16 dev_num;
BT_DBG("");
@ -515,12 +515,11 @@ static int rfcomm_get_dev_list(void __user *arg)
if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di))
return -EINVAL;
size = sizeof(*dl) + dev_num * sizeof(*di);
dl = kzalloc(size, GFP_KERNEL);
dl = kzalloc(struct_size(dl, dev_info, dev_num), GFP_KERNEL);
if (!dl)
return -ENOMEM;
dl->dev_num = dev_num;
di = dl->dev_info;
mutex_lock(&rfcomm_dev_lock);
@ -528,12 +527,12 @@ static int rfcomm_get_dev_list(void __user *arg)
list_for_each_entry(dev, &rfcomm_dev_list, list) {
if (!tty_port_get(&dev->port))
continue;
(di + n)->id = dev->id;
(di + n)->flags = dev->flags;
(di + n)->state = dev->dlc->state;
(di + n)->channel = dev->channel;
bacpy(&(di + n)->src, &dev->src);
bacpy(&(di + n)->dst, &dev->dst);
di[n].id = dev->id;
di[n].flags = dev->flags;
di[n].state = dev->dlc->state;
di[n].channel = dev->channel;
bacpy(&di[n].src, &dev->src);
bacpy(&di[n].dst, &dev->dst);
tty_port_put(&dev->port);
if (++n >= dev_num)
break;
@ -542,9 +541,7 @@ static int rfcomm_get_dev_list(void __user *arg)
mutex_unlock(&rfcomm_dev_lock);
dl->dev_num = n;
size = sizeof(*dl) + n * sizeof(*di);
err = copy_to_user(arg, dl, size);
err = copy_to_user(arg, dl, struct_size(dl, dev_info, n));
kfree(dl);
return err ? -EFAULT : 0;
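/* struct_size() (from <linux/overflow.h>) replaces the open-coded sizing in
* rfcomm_get_dev_list(); an illustrative before/after for the allocation:
*
*   size = sizeof(*dl) + dev_num * sizeof(*di);                    (old)
*   dl = kzalloc(size, GFP_KERNEL);
*
*   dl = kzalloc(struct_size(dl, dev_info, dev_num), GFP_KERNEL);  (new)
*
* struct_size() evaluates to sizeof(*dl) + dev_num * sizeof(dl->dev_info[0])
* but saturates to SIZE_MAX on overflow, so kzalloc() fails rather than
* returning a buffer that is too small. The same helper bounds the
* copy_to_user() above to the number of entries actually filled in (n).
*/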