Merge tag 'wireless-drivers-next-for-davem-2018-10-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.20

Second set of patches for 4.20. Heavy refactoring on mt76 continues,
and the usual drivers in active development (iwlwifi, qtnfmac, ath10k)
are getting new features. And as always, fixes and cleanup all over.

Major changes:

mt76

* more major refactoring to make it easier to add new hardware support

* more work on mt76x0e support

* support for getting firmware version via ethtool

* add mt7650 PCI ID

iwlwifi

* HE radiotap cleanup and improvements

* reorder channel optimization for scans

* bump the FW API version

qtnfmac

* fixes for 'iw' output: rates for enabled SGI, 'dump station'

* expose more scan features to host: scan flush and dwell time

* inform cfg80211 when OBSS is not supported by firmware

wlcore

* add support for optional wakeirq

ath10k

* retrieve MAC address from system firmware if provided

* support extended board data download for dual-band QCA9984

* extended per sta tx statistics support via debugfs

* average ack rssi support for data frames

* speed up QCA6174 and QCA9377 firmware download using diag Copy
  Engine

* HTT High Latency mode support, needed for SDIO and USB support

* get STA power save state via debugfs

ath9k

* add reset functionality for airtime station debugfs file
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5057ef7f56 (David S. Miller, 2018-10-07 10:31:24 -07:00)
177 changed files with 7334 additions and 6051 deletions

View File

@ -42,7 +42,8 @@ config ATH10K_USB
config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
depends on ATH10K && ARCH_QCOM
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
---help---
This module adds support for integrated WCN3990 chip connected
to system NOC(SNOC). Currently work in progress and will not

View File

@ -750,7 +750,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
enum ath10k_hw_rev hw_rev;
size_t size;
int ret;
u32 chip_id;
struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
if (!of_id) {
@ -806,14 +806,15 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ath10k_pci_ce_deinit(ar);
chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
ret = -ENODEV;
goto err_halt_device;
}
ret = ath10k_core_register(ar, chip_id);
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_halt_device;

View File

@ -459,3 +459,26 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
return ret;
}
int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
struct bmi_cmd cmd;
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
int ret;
if (ar->bmi.done_sent) {
ath10k_warn(ar, "bmi set start command disallowed\n");
return -EBUSY;
}
cmd.id = __cpu_to_le32(BMI_SET_APP_START);
cmd.set_app_start.addr = __cpu_to_le32(address);
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
return ret;
}
return 0;
}

View File

@ -86,6 +86,10 @@ enum bmi_cmd_id {
#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
/* Dual-band Extended Board ID */
#define BMI_PARAM_GET_EXT_BOARD_ID 0x40000
#define ATH10K_BMI_EXT_BOARD_ID_SUPPORT 0x40000
#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
@ -93,6 +97,7 @@ enum bmi_cmd_id {
#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15
#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
#define ATH10K_BMI_EBOARD_ID_STATUS_MASK 0xff
struct bmi_cmd {
__le32 id; /* enum bmi_cmd_id */
@ -190,6 +195,35 @@ struct bmi_target_info {
u32 type;
};
struct bmi_segmented_file_header {
__le32 magic_num;
__le32 file_flags;
u8 data[];
};
struct bmi_segmented_metadata {
__le32 addr;
__le32 length;
u8 data[];
};
#define BMI_SGMTFILE_MAGIC_NUM 0x544d4753 /* "SGMT" */
#define BMI_SGMTFILE_FLAG_COMPRESS 1
/* Special values for bmi_segmented_metadata.length (all have high bit set) */
/* end of segmented data */
#define BMI_SGMTFILE_DONE 0xffffffff
/* Board Data segment */
#define BMI_SGMTFILE_BDDATA 0xfffffffe
/* set beginning address */
#define BMI_SGMTFILE_BEGINADDR 0xfffffffd
/* immediate function execution */
#define BMI_SGMTFILE_EXEC 0xfffffffc
/* in jiffies */
#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
@ -239,4 +273,6 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
int ath10k_bmi_set_start(struct ath10k *ar, u32 address);
#endif /* _BMI_H_ */
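
To make the segmented-image format above concrete, here is a minimal host-side walker. This is a sketch only: it mirrors what ath10k_hw_diag_fast_download() in hw.c does with these records, assumes a little-endian host (so no explicit le32 conversion), and prints each action instead of performing it.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define SGMT_MAGIC     0x544d4753u /* "SGMT" */
#define SGMT_DONE      0xffffffffu
#define SGMT_BDDATA    0xfffffffeu
#define SGMT_BEGINADDR 0xfffffffdu
#define SGMT_EXEC      0xfffffffcu

/* Local mirrors of bmi_segmented_file_header/bmi_segmented_metadata */
struct sgmt_hdr  { uint32_t magic_num, file_flags; };
struct sgmt_meta { uint32_t addr, length; };

static int walk_sgmt_image(const uint8_t *buf, size_t len)
{
	const struct sgmt_hdr *hdr = (const void *)buf;
	size_t off = sizeof(*hdr);

	if (len < sizeof(*hdr) || hdr->magic_num != SGMT_MAGIC ||
	    hdr->file_flags != 0) /* compressed images not handled */
		return -1;

	while (off + sizeof(struct sgmt_meta) <= len) {
		const struct sgmt_meta *m = (const void *)(buf + off);

		off += sizeof(*m);
		switch (m->length) {
		case SGMT_DONE:      /* end of segmented data */
			return 0;
		case SGMT_BEGINADDR: /* addr is the CPU start address */
			printf("set start address 0x%x\n", (unsigned)m->addr);
			break;
		case SGMT_BDDATA:
		case SGMT_EXEC:      /* unsupported by the fast path */
			return -1;
		default:             /* plain data segment */
			printf("write %u bytes at 0x%x\n",
			       (unsigned)m->length, (unsigned)m->addr);
			off += m->length;
		}
	}
	return -1; /* truncated image */
}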

View File

@ -1280,10 +1280,17 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr;
int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ce_state = &ce->ce_states[ce_id];
if (ce_state->attr_flags & CE_ATTR_POLL)
continue;
ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_ce_error_intr_disable(ar, ctrl_addr);
@ -1300,11 +1307,14 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
int ce_id;
struct ath10k_ce_pipe *ce_state;
/* Skip the last copy engine, CE7 the diagnostic window, as that
* uses polling and isn't initialized for interrupts.
/* Enable interrupts for copy engines that
* are not using polling mode.
*/
for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
ce_state = &ce->ce_states[ce_id];
if (ce_state->attr_flags & CE_ATTR_POLL)
continue;
ath10k_ce_per_engine_handler_adjust(ce_state);
}
}

View File

@ -275,16 +275,19 @@ void ath10k_ce_free_rri(struct ath10k *ar);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
#define CE_ATTR_NO_SNOOP 1
#define CE_ATTR_NO_SNOOP BIT(0)
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
#define CE_ATTR_BYTE_SWAP_DATA BIT(1)
/* Swizzle descriptors? */
#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
#define CE_ATTR_SWIZZLE_DESCRIPTORS BIT(2)
/* no interrupt on copy completion */
#define CE_ATTR_DIS_INTR 8
#define CE_ATTR_DIS_INTR BIT(3)
/* no interrupt, only polling */
#define CE_ATTR_POLL BIT(4)
/* Attributes of an instance of a Copy Engine */
struct ce_attr {

File diff suppressed because it is too large

View File

@ -92,14 +92,6 @@
struct ath10k;
enum ath10k_bus {
ATH10K_BUS_PCI,
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
ATH10K_BUS_SNOC,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
{
switch (bus) {
@ -461,6 +453,36 @@ struct ath10k_sta_tid_stats {
unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
};
enum ath10k_counter_type {
ATH10K_COUNTER_TYPE_BYTES,
ATH10K_COUNTER_TYPE_PKTS,
ATH10K_COUNTER_TYPE_MAX,
};
enum ath10k_stats_type {
ATH10K_STATS_TYPE_SUCC,
ATH10K_STATS_TYPE_FAIL,
ATH10K_STATS_TYPE_RETRY,
ATH10K_STATS_TYPE_AMPDU,
ATH10K_STATS_TYPE_MAX,
};
struct ath10k_htt_data_stats {
u64 legacy[ATH10K_COUNTER_TYPE_MAX][ATH10K_LEGACY_NUM];
u64 ht[ATH10K_COUNTER_TYPE_MAX][ATH10K_HT_MCS_NUM];
u64 vht[ATH10K_COUNTER_TYPE_MAX][ATH10K_VHT_MCS_NUM];
u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM];
u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM];
u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM];
};
struct ath10k_htt_tx_stats {
struct ath10k_htt_data_stats stats[ATH10K_STATS_TYPE_MAX];
u64 tx_duration;
u64 ba_fails;
u64 ack_fails;
};
struct ath10k_sta {
struct ath10k_vif *arvif;
@ -474,6 +496,7 @@ struct ath10k_sta {
struct work_struct update_wk;
u64 rx_duration;
struct ath10k_htt_tx_stats *tx_stats;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
@ -482,6 +505,8 @@ struct ath10k_sta {
/* Protected with ar->data_lock */
struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1];
#endif
/* Protected with ar->data_lock */
u32 peer_ps_state;
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
@ -607,6 +632,7 @@ struct ath10k_debug {
u32 reg_addr;
u32 nf_cal_period;
void *cal_data;
u32 enable_extd_tx_stats;
};
enum ath10k_state {
@ -861,6 +887,9 @@ struct ath10k_fw_components {
const struct firmware *board;
const void *board_data;
size_t board_len;
const struct firmware *ext_board;
const void *ext_board_data;
size_t ext_board_len;
struct ath10k_fw_file fw_file;
};
@ -880,6 +909,16 @@ struct ath10k_per_peer_tx_stats {
u32 reserved2;
};
enum ath10k_dev_type {
ATH10K_DEV_TYPE_LL,
ATH10K_DEV_TYPE_HL,
};
struct ath10k_bus_params {
u32 chip_id;
enum ath10k_dev_type dev_type;
};
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@ -890,6 +929,7 @@ struct ath10k {
enum ath10k_hw_rev hw_rev;
u16 dev_id;
u32 chip_id;
enum ath10k_dev_type dev_type;
u32 target_version;
u8 fw_version_major;
u32 fw_version_minor;
@ -908,6 +948,8 @@ struct ath10k {
u32 low_5ghz_chan;
u32 high_5ghz_chan;
bool ani_enabled;
/* protected by conf_mutex */
u8 ps_state_enable;
bool p2p;
@ -947,7 +989,9 @@ struct ath10k {
bool bmi_ids_valid;
u8 bmi_board_id;
u8 bmi_eboard_id;
u8 bmi_chip_id;
bool ext_bid_supported;
char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
} id;
@ -1003,6 +1047,7 @@ struct ath10k {
struct completion install_key_done;
int last_wmi_vdev_start_status;
struct completion vdev_setup_done;
struct workqueue_struct *workqueue;
@ -1167,7 +1212,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw_components);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
#endif /* _CORE_H_ */

View File

@ -2042,6 +2042,61 @@ static const struct file_operations fops_btcoex = {
.open = simple_open
};
static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
u32 filter;
int ret;
if (kstrtouint_from_user(ubuf, count, 0, &filter))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ar->debug.enable_extd_tx_stats = filter;
ret = count;
goto out;
}
if (filter == ar->debug.enable_extd_tx_stats) {
ret = count;
goto out;
}
ar->debug.enable_extd_tx_stats = filter;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file,
char __user *ubuf,
size_t count, loff_t *ppos)
{
char buf[32];
struct ath10k *ar = file->private_data;
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
ar->debug.enable_extd_tx_stats);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_enable_extd_tx_stats = {
.read = ath10k_read_enable_extd_tx_stats,
.write = ath10k_write_enable_extd_tx_stats,
.open = simple_open
};
static ssize_t ath10k_write_peer_stats(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
@ -2343,6 +2398,85 @@ static const struct file_operations fops_warm_hw_reset = {
.llseek = default_llseek,
};
static void ath10k_peer_ps_state_disable(void *data,
struct ieee80211_sta *sta)
{
struct ath10k *ar = data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
spin_lock_bh(&ar->data_lock);
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
spin_unlock_bh(&ar->data_lock);
}
static ssize_t ath10k_write_ps_state_enable(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int ret;
u32 param;
u8 ps_state_enable;
if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
return -EINVAL;
if (ps_state_enable > 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->ps_state_enable == ps_state_enable) {
ret = count;
goto exit;
}
param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable;
ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable);
if (ret) {
ath10k_warn(ar, "failed to enable ps_state_enable: %d\n",
ret);
goto exit;
}
ar->ps_state_enable = ps_state_enable;
if (!ar->ps_state_enable)
ieee80211_iterate_stations_atomic(ar->hw,
ath10k_peer_ps_state_disable,
ar);
ret = count;
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_read_ps_state_enable(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int len = 0;
char buf[32];
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
ar->ps_state_enable);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_ps_state_enable = {
.read = ath10k_read_ps_state_enable,
.write = ath10k_write_ps_state_enable,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
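
For reference, a usage sketch for the new knob (the phy name and path are illustrative; the file itself is registered further down via debugfs_create_file("ps_state_enable", ...)):

/* From userspace (hypothetical phy0 path):
 *   echo 1 > /sys/kernel/debug/ieee80211/phy0/ath10k/ps_state_enable
 *   cat /sys/kernel/debug/ieee80211/phy0/ath10k/ps_state_enable
 * Writing 0 also forces all known peers back to
 * WMI_PEER_PS_STATE_DISABLED via ath10k_peer_ps_state_disable().
 */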
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@ -2454,10 +2588,15 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
&fops_btcoex);
if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
&fops_peer_stats);
debugfs_create_file("enable_extd_tx_stats", 0644,
ar->debug.debugfs_phy, ar,
&fops_enable_extd_tx_stats);
}
debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_checksums);
@ -2474,6 +2613,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
&fops_warm_hw_reset);
debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
&fops_ps_state_enable);
return 0;
}

View File

@ -128,6 +128,10 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return ar->debug.fw_dbglog_level;
}
static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
{
return ar->debug.enable_extd_tx_stats;
}
#else
static inline int ath10k_debug_start(struct ath10k *ar)
@ -190,6 +194,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return 0;
}
static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
{
return 0;
}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL

View File

@ -460,6 +460,33 @@ static const struct file_operations fops_peer_debug_trigger = {
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[20];
int len = 0;
spin_lock_bh(&ar->data_lock);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
arsta->peer_ps_state);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_peer_ps_state = {
.open = simple_open,
.read = ath10k_dbg_sta_read_peer_ps_state,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static char *get_err_str(enum ath10k_pkt_rx_err i)
{
switch (i) {
@ -626,9 +653,105 @@ static const struct file_operations fops_tid_stats_dump = {
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
struct ath10k_htt_data_stats *stats;
const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail",
"retry", "ampdu"};
const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
int len = 0, i, j, k, retval = 0;
const int size = 2 * 4096;
char *buf;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
stats = &arsta->tx_stats->stats[k];
len += scnprintf(buf + len, size - len, "%s_%s\n",
str_name[k],
str[j]);
len += scnprintf(buf + len, size - len,
" VHT MCS %s\n",
str[j]);
for (i = 0; i < ATH10K_VHT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ",
stats->vht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, " HT MCS %s\n",
str[j]);
for (i = 0; i < ATH10K_HT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ", stats->ht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" BW %s (20,40,80,160 MHz)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->bw[j][0], stats->bw[j][1],
stats->bw[j][2], stats->bw[j][3]);
len += scnprintf(buf + len, size - len,
" NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->nss[j][0], stats->nss[j][1],
stats->nss[j][2], stats->nss[j][3]);
len += scnprintf(buf + len, size - len,
" GI %s (LGI,SGI)\n",
str[j]);
len += scnprintf(buf + len, size - len, " %llu %llu\n",
stats->gi[j][0], stats->gi[j][1]);
len += scnprintf(buf + len, size - len,
" legacy rate %s (1,2 ... Mbps)\n ",
str[j]);
for (i = 0; i < ATH10K_LEGACY_NUM; i++)
len += scnprintf(buf + len, size - len, "%llu ",
stats->legacy[j][i]);
len += scnprintf(buf + len, size - len, "\n");
}
}
len += scnprintf(buf + len, size - len,
"\nTX duration\n %llu usecs\n",
arsta->tx_stats->tx_duration);
len += scnprintf(buf + len, size - len,
"BA fails\n %llu\n", arsta->tx_stats->ba_fails);
len += scnprintf(buf + len, size - len,
"ack fails\n %llu\n", arsta->tx_stats->ack_fails);
spin_unlock_bh(&ar->data_lock);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return retval;
}
static const struct file_operations fops_tx_stats = {
.read = ath10k_dbg_sta_dump_tx_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
struct ath10k *ar = hw->priv;
debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
@ -637,4 +760,11 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
&fops_peer_debug_trigger);
debugfs_create_file("dump_tid_stats", 0400, dir, sta,
&fops_tid_stats_dump);
if (ath10k_peer_stats_enabled(ar) &&
ath10k_debug_is_extd_tx_stats_enabled(ar))
debugfs_create_file("tx_stats", 0400, dir, sta,
&fops_tx_stats);
debugfs_create_file("peer_ps_state", 0400, dir, sta,
&fops_peer_ps_state);
}

View File

@ -53,7 +53,8 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
@ -137,11 +138,14 @@ int ath10k_htc_send(struct ath10k_htc *htc,
ath10k_htc_prepare_tx_skb(ep, skb);
skb_cb->eid = eid;
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret) {
ret = -EIO;
goto err_credits;
if (ar->dev_type != ATH10K_DEV_TYPE_HL) {
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret) {
ret = -EIO;
goto err_credits;
}
}
sg_item.transfer_id = ep->eid;
@ -157,7 +161,8 @@ int ath10k_htc_send(struct ath10k_htc *htc,
return 0;
err_unmap:
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
if (ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
@ -803,8 +808,11 @@ setup:
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status)
if (status) {
ath10k_warn(ar, "unsupported HTC service id: %d\n",
ep->service_id);
return status;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
@ -838,6 +846,56 @@ struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
return skb;
}
static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
static int ath10k_htc_pktlog_connect(struct ath10k *ar)
{
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_svc_conn_req conn_req;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = NULL;
conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
conn_req.ep_ops.ep_tx_credits = NULL;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
if (status) {
ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
status);
return status;
}
return 0;
}
static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
{
u8 ul_pipe_id;
u8 dl_pipe_id;
int status;
status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
&ul_pipe_id,
&dl_pipe_id);
if (status) {
ath10k_warn(ar, "unsupported HTC service id: %d\n",
ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
return false;
}
return true;
}
int ath10k_htc_start(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
@ -871,6 +929,14 @@ int ath10k_htc_start(struct ath10k_htc *htc)
return status;
}
if (ath10k_htc_pktlog_svc_supported(ar)) {
status = ath10k_htc_pktlog_connect(ar);
if (status) {
ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
return status;
}
}
return 0;
}

View File

@ -29,7 +29,6 @@
#include "htc.h"
#include "hw.h"
#include "rx_desc.h"
#include "hw.h"
enum htt_dbg_stats_type {
HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@ -577,6 +576,8 @@ struct htt_mgmt_tx_completion {
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
struct htt_rx_indication_hdr {
u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
__le16 peer_id;
@ -719,6 +720,15 @@ struct htt_rx_indication {
struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
/* High latency version of the RX indication */
struct htt_rx_indication_hl {
struct htt_rx_indication_hdr hdr;
struct htt_rx_indication_ppdu ppdu;
struct htt_rx_indication_prefix prefix;
struct fw_rx_desc_hl fw_desc;
struct htt_rx_indication_mpdu_range mpdu_ranges[0];
} __packed;
static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
{
@ -731,6 +741,18 @@ static inline struct htt_rx_indication_mpdu_range *
return ptr;
}
static inline struct htt_rx_indication_mpdu_range *
htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
{
void *ptr = rx_ind;
ptr += sizeof(rx_ind->hdr)
+ sizeof(rx_ind->ppdu)
+ sizeof(rx_ind->prefix)
+ sizeof(rx_ind->fw_desc);
return ptr;
}
enum htt_rx_flush_mpdu_status {
HTT_RX_FLUSH_MPDU_DISCARD = 0,
HTT_RX_FLUSH_MPDU_REORDER = 1,
@ -840,7 +862,7 @@ struct htt_data_tx_completion {
} __packed;
} __packed;
u8 num_msdus;
u8 rsvd0;
u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
__le16 msdus[0]; /* variable length based on %num_msdus */
} __packed;
@ -1641,6 +1663,7 @@ struct htt_resp {
struct htt_mgmt_tx_completion mgmt_tx_completion;
struct htt_data_tx_completion data_tx_completion;
struct htt_rx_indication rx_ind;
struct htt_rx_indication_hl rx_ind_hl;
struct htt_rx_fragment_indication rx_frag_ind;
struct htt_rx_peer_map peer_map;
struct htt_rx_peer_unmap peer_unmap;
@ -1994,6 +2017,31 @@ struct htt_rx_desc {
u8 msdu_payload[0];
};
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00008000
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 15
#define HTT_RX_DESC_HL_INFO_FRAGMENT_MASK 0x00010000
#define HTT_RX_DESC_HL_INFO_FRAGMENT_LSB 16
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17
struct htt_rx_desc_base_hl {
__le32 info; /* HTT_RX_DESC_HL_INFO_ */
};
struct htt_rx_chan_info {
__le16 primary_chan_center_freq_mhz;
__le16 contig_chan1_center_freq_mhz;
__le16 contig_chan2_center_freq_mhz;
u8 phy_mode;
u8 reserved;
} __packed;
#define HTT_RX_DESC_ALIGN 8
#define HTT_MAC_ADDR_LEN 6
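
A sketch of consuming the HTT_RX_DESC_HL_INFO_* fields defined above, using plain mask/shift arithmetic instead of the driver's MS() helper (kernel types assumed; the caller is expected to __le32_to_cpu() the descriptor's info word first):

static inline u16 htt_rx_hl_seq_num(u32 info)
{
	return (info & HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK) >>
	       HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB;
}

static inline bool htt_rx_hl_is_encrypted(u32 info)
{
	return info & HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK;
}

static inline u8 htt_rx_hl_key_id_oct(u32 info)
{
	return (info & HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK) >>
	       HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB;
}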

View File

@ -265,6 +265,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
struct ath10k_htt *htt = &ar->htt;
int ret;
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return 0;
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
@ -279,6 +282,9 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL)
return;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
skb_queue_purge(&htt->rx_msdus_q);
@ -570,6 +576,9 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return 0;
htt->rx_confused = false;
/* XXX: The fill level could be changed during runtime in response to
@ -1846,8 +1855,116 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return 0;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
struct htt_rx_indication_hl *rx,
struct sk_buff *skb)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct fw_rx_desc_hl *fw_desc;
struct ieee80211_hdr *hdr;
struct ieee80211_rx_status *rx_status;
u16 peer_id;
u8 rx_desc_len;
int num_mpdu_ranges;
size_t tot_hdr_len;
struct ieee80211_channel *ch;
peer_id = __le16_to_cpu(rx->hdr.peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
spin_unlock_bh(&ar->data_lock);
if (!peer)
ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem to handle that case either, so we introduce
* the same limitation here as well.
*/
if (num_mpdu_ranges > 1)
ath10k_warn(ar,
"Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
num_mpdu_ranges);
if (mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_OK) {
ath10k_warn(ar, "MPDU range status: %d\n",
mpdu_ranges->mpdu_range_status);
goto err;
}
/* Strip off all headers before the MAC header before delivery to
* mac80211
*/
tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
sizeof(rx->ppdu) + sizeof(rx->prefix) +
sizeof(rx->fw_desc) +
sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
skb_pull(skb, tot_hdr_len);
hdr = (struct ieee80211_hdr *)skb->data;
rx_status = IEEE80211_SKB_RXCB(skb);
rx_status->chains |= BIT(0);
rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rx->ppdu.combined_rssi;
rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
if (!ch)
ch = ar->tgt_oper_chan;
spin_unlock_bh(&ar->data_lock);
if (ch) {
rx_status->band = ch->band;
rx_status->freq = ch->center_freq;
}
if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
else
rx_status->flag |= RX_FLAG_AMSDU_MORE;
/* Not entirely sure about this, but all frames from the chipset have
* the protected flag set even though they have already been decrypted.
* Unmasking this flag is necessary in order for mac80211 not to drop
* the frame.
* TODO: Verify this is always the case or find out a way to check
* if there has been hw decryption.
*/
if (ieee80211_has_protected(hdr->frame_control)) {
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
rx_status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
}
ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
* must not free it.
*/
return false;
err:
/* Tell the caller that it must free the skb since we have not
* consumed it
*/
return true;
}
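
The tot_hdr_len stripping above implies the following buffer layout, reconstructed from the sizeof() terms in the function (a sketch, shown for num_mpdu_ranges == 1):

/*
 * skb->data on entry:
 *
 *   | htt_resp_hdr | rx->hdr | rx->ppdu | rx->prefix | rx->fw_desc |
 *   | mpdu_ranges[0] | fw rx descriptor (rx_desc_len bytes) |
 *   | 802.11 header + payload   <- skb->data after skb_pull()
 */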
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
struct ath10k *ar = htt->ar;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
@ -1884,7 +2001,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct htt_tx_done tx_done = {};
int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
__le16 msdu_id;
__le16 msdu_id, *msdus;
bool rssi_enabled = false;
u8 msdu_count = 0;
int i;
switch (status) {
@ -1908,10 +2027,30 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
msdu_count = resp->data_tx_completion.num_msdus;
if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI)
rssi_enabled = true;
for (i = 0; i < msdu_count; i++) {
msdus = resp->data_tx_completion.msdus;
msdu_id = msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
if (rssi_enabled) {
/* The total number of msdu ids is expected to be even;
 * if an odd number of MSDUs is sent, the firmware pads
 * the last msdu id with 0xffff.
 */
if (msdu_count & 0x01) {
msdu_id = msdus[msdu_count + i + 1];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
} else {
msdu_id = msdus[msdu_count + i];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
}
}
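
/* Layout sketch of resp->data_tx_completion.msdus[] when RSSI
 * reporting is enabled, derived from the indexing above:
 *
 *   num_msdus = 2 (even): id0, id1, rssi0, rssi1
 *   num_msdus = 3 (odd):  id0, id1, id2, 0xffff, rssi0, rssi1, rssi2
 *
 * i.e. the ack RSSI block starts at msdus[num_msdus] for an even
 * count, and at msdus[num_msdus + 1] after the 0xffff pad otherwise.
 */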
/* kfifo_put: In practice firmware shouldn't fire off per-CE
* interrupt and main interrupt (MSI/-X range case) for the same
* HTC service so it should be safe to use kfifo_put w/o lock.
@ -2488,7 +2627,7 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
static inline bool is_valid_legacy_rate(u8 rate)
static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
18, 24, 36, 48, 54};
@ -2496,10 +2635,116 @@ static inline bool is_valid_legacy_rate(u8 rate)
for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
if (rate == legacy_rates[i])
return true;
return i;
}
return false;
ath10k_warn(ar, "Invalid legacy rate %hhd peer stats", rate);
return -EINVAL;
}
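
For orientation, the index mapping the new helper returns (derived from the legacy_rates[] table above):

/* 0..3  -> CCK 1, 2, 5 (5.5 Mbps; the FW reports it as 6 and the
 *          caller rewrites it to 5 before the lookup), 11
 * 4..11 -> OFDM 6, 9, 12, 18, 24, 36, 48, 54 Mbps
 */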
static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_sta *arsta,
struct ath10k_per_peer_tx_stats *pstats,
u8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath10k_htt_tx_stats *tx_stats;
int ht_idx, gi, mcs, bw, nss;
if (!arsta->tx_stats)
return;
tx_stats = arsta->tx_stats;
gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI);
ht_idx = txrate->mcs + txrate->nss * 8;
mcs = txrate->mcs;
bw = txrate->bw;
nss = txrate->nss;
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
if (txrate->flags == RATE_INFO_FLAGS_VHT_MCS) {
STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
} else if (txrate->flags == RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
} else {
mcs = legacy_rate_idx;
if (mcs < 0)
return;
STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
}
if (ATH10K_HW_AMPDU(pstats->flags)) {
tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
if (txrate->flags == RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
pstats->succ_pkts + pstats->retry_pkts;
} else {
STATS_OP_FMT(AMPDU).vht[0][mcs] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).vht[1][mcs] +=
pstats->succ_pkts + pstats->retry_pkts;
}
STATS_OP_FMT(AMPDU).bw[0][bw] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).nss[0][nss] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
pstats->succ_pkts + pstats->retry_pkts;
} else {
tx_stats->ack_fails +=
ATH10K_HW_BA_FAIL(pstats->flags);
}
STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
}
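
A note on the macro used throughout the function above (following directly from the STATS_OP_FMT() definition and enum ath10k_counter_type):

/* STATS_OP_FMT(SUCC).vht[0][mcs]
 *   == tx_stats->stats[ATH10K_STATS_TYPE_SUCC]
 *        .vht[ATH10K_COUNTER_TYPE_BYTES][mcs]
 * The [0]/[1] second index selects the bytes vs. packets counter.
 */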
static void
@ -2508,7 +2753,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
u8 rate = 0, sgi;
u8 rate = 0, sgi; int rate_idx = 0;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
@ -2536,17 +2781,12 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
if (!is_valid_legacy_rate(rate)) {
ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
rate);
return;
}
/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
rate *= 10;
if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
rate = rate - 5;
if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
rate = 5;
rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
if (rate_idx < 0)
return;
arsta->txrate.legacy = rate;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
@ -2561,6 +2801,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->txrate.nss = txrate.nss;
arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
}
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
@ -2702,7 +2946,12 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return ath10k_htt_rx_proc_rx_ind_hl(htt,
&resp->rx_ind_hl,
skb);
else
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
@ -2986,11 +3235,16 @@ static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
if (ar->hw_params.target_64bit)
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
htt->rx_ops = &htt_rx_ops_hl;
else if (ar->hw_params.target_64bit)
htt->rx_ops = &htt_rx_ops_64;
else
htt->rx_ops = &htt_rx_ops_32;

View File

@ -495,6 +495,9 @@ int ath10k_htt_tx_start(struct ath10k_htt *htt)
if (htt->tx_mem_allocated)
return 0;
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
return 0;
ret = ath10k_htt_tx_alloc_buf(htt);
if (ret)
goto free_idr_pending_tx;
@ -934,6 +937,57 @@ static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
return 0;
}
static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring32 *ring;
const int num_rx_ring = 1;
u16 flags;
int len;
int ret;
/*
* the HW expects the buffer to be an integral number of 4-byte
* "words"
*/
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
ring = &cmd->rx_setup_32.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
cmd->rx_setup_32.hdr.num_rings = 1;
flags = 0;
flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
memset(ring, 0, sizeof(*ring));
ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring->flags = __cpu_to_le16(flags);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu)
@ -1123,7 +1177,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
return 0;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
@ -1134,6 +1189,94 @@ err:
return res;
}
#define HTT_TX_HL_NEEDED_HEADROOM \
(unsigned int)(sizeof(struct htt_cmd_hdr) + \
sizeof(struct htt_data_tx_desc) + \
sizeof(struct ath10k_htc_hdr))
static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
int res, data_len;
struct htt_cmd_hdr *cmd_hdr;
struct htt_data_tx_desc *tx_desc;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *tmp_skb;
bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
u8 flags0 = 0;
u16 flags1 = 0;
data_len = msdu->len;
switch (txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* fall through */
case ATH10K_HW_TXRX_ETHERNET:
flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
break;
}
if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL &&
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
}
/* Prepend the HTT header and TX desc struct to the data message
* and realloc the skb if it does not have enough headroom.
*/
if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
tmp_skb = msdu;
ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
"Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
kfree_skb(tmp_skb);
if (!msdu) {
ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
res = -ENOMEM;
goto out;
}
}
skb_push(msdu, sizeof(*cmd_hdr));
skb_push(msdu, sizeof(*tx_desc));
cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));
cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
tx_desc->flags0 = flags0;
tx_desc->flags1 = __cpu_to_le16(flags1);
tx_desc->len = __cpu_to_le16(data_len);
tx_desc->id = 0;
tx_desc->frags_paddr = 0; /* always zero */
/* Initialize peer_id to INVALID_PEER because this is NOT
* Reinjection path
*/
tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
res = ath10k_htc_send(&htt->ar->htc, htt->eid, msdu);
out:
return res;
}
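
After the two skb_push() calls above, plus the HTC header prepended later by ath10k_htc_send(), the buffer that goes out looks like this (a sketch derived from HTT_TX_HL_NEEDED_HEADROOM):

/*
 * | ath10k_htc_hdr | htt_cmd_hdr | htt_data_tx_desc | MSDU payload |
 *   added in HTC     filled in via cmd_hdr/tx_desc    data_len bytes
 */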
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
@ -1561,11 +1704,19 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
};
static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
.htt_tx = ath10k_htt_tx_hl,
};
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
if (ar->hw_params.target_64bit)
if (ar->dev_type == ATH10K_DEV_TYPE_HL)
htt->tx_ops = &htt_tx_ops_hl;
else if (ar->hw_params.target_64bit)
htt->tx_ops = &htt_tx_ops_64;
else
htt->tx_ops = &htt_tx_ops_32;

View File

@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
@ -918,6 +919,196 @@ static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
return 0;
}
/* Program CPU_ADDR_MSB to allow different memory
* region access.
*/
static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
{
u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;
ath10k_hif_write32(ar, address, msb);
}
/* 1. Write to memory region of target, such as IRAM and DRAM.
* 2. Target address (0x0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
* can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
* 3. In order to access the region other than the above,
* we need to set the value of register CPU_ADDR_MSB.
* 4. Target memory access space is limited to 1M size. If the size is larger
* than 1M, need to split it and program CPU_ADDR_MSB accordingly.
*/
static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
const void *buffer,
u32 address,
u32 length)
{
u32 addr = address & REGION_ACCESS_SIZE_MASK;
int ret, remain_size, size;
const u8 *buf;
ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));
if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
size = REGION_ACCESS_SIZE_LIMIT - addr;
remain_size = length - size;
ret = ath10k_hif_diag_write(ar, address, buffer, size);
if (ret) {
ath10k_warn(ar,
"failed to download the first %d bytes segment to address:0x%x: %d\n",
size, address, ret);
goto done;
}
/* Change msb to the next memory region */
ath10k_hw_map_target_mem(ar,
CPU_ADDR_MSB_REGION_VAL(address) + 1);
buf = buffer + size;
ret = ath10k_hif_diag_write(ar,
address & ~REGION_ACCESS_SIZE_MASK,
buf, remain_size);
if (ret) {
ath10k_warn(ar,
"failed to download the second %d bytes segment to address:0x%x: %d\n",
remain_size,
address & ~REGION_ACCESS_SIZE_MASK,
ret);
goto done;
}
} else {
ret = ath10k_hif_diag_write(ar, address, buffer, length);
if (ret) {
ath10k_warn(ar,
"failed to download the only %d bytes segment to address:0x%x: %d\n",
length, address, ret);
goto done;
}
}
done:
/* Change msb to DRAM */
ath10k_hw_map_target_mem(ar,
CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
return ret;
}
static int ath10k_hw_diag_segment_download(struct ath10k *ar,
const void *buffer,
u32 address,
u32 length)
{
if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
/* Needs to change MSB for memory write */
return ath10k_hw_diag_segment_msb_download(ar, buffer,
address, length);
else
return ath10k_hif_diag_write(ar, address, buffer, length);
}
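
A worked example of the split path above (illustrative numbers; macros from hw.h):

/* Diag write of 0x180000 bytes at target address 0x500000:
 * addr = 0x500000 & REGION_ACCESS_SIZE_MASK = 0, and addr + length
 * exceeds REGION_ACCESS_SIZE_LIMIT (1M), so the write splits:
 *   1) CPU_ADDR_MSB = 0x5, write first 0x100000 bytes at 0x500000
 *   2) CPU_ADDR_MSB = 0x6, write remaining 0x80000 bytes at
 *      0x500000 & ~REGION_ACCESS_SIZE_MASK (= 0x500000)
 * then CPU_ADDR_MSB is restored to the DRAM region.
 */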
int ath10k_hw_diag_fast_download(struct ath10k *ar,
u32 address,
const void *buffer,
u32 length)
{
const u8 *buf = buffer;
bool sgmt_end = false;
u32 base_addr = 0;
u32 base_len = 0;
u32 left = 0;
struct bmi_segmented_file_header *hdr;
struct bmi_segmented_metadata *metadata;
int ret = 0;
if (length < sizeof(*hdr))
return -EINVAL;
/* check firmware header. If it has no correct magic number
* or it's compressed, returns error.
*/
hdr = (struct bmi_segmented_file_header *)buf;
if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Not a supported firmware, magic_num:0x%x\n",
hdr->magic_num);
return -EINVAL;
}
if (hdr->file_flags != 0) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Not a supported firmware, file_flags:0x%x\n",
hdr->file_flags);
return -EINVAL;
}
metadata = (struct bmi_segmented_metadata *)hdr->data;
left = length - sizeof(*hdr);
while (left > 0) {
if (left < sizeof(*metadata)) {
ath10k_warn(ar, "firmware segment is truncated: %d\n",
left);
ret = -EINVAL;
break;
}
base_addr = __le32_to_cpu(metadata->addr);
base_len = __le32_to_cpu(metadata->length);
buf = metadata->data;
left -= sizeof(*metadata);
switch (base_len) {
case BMI_SGMTFILE_BEGINADDR:
/* base_addr is the start address to run */
ret = ath10k_bmi_set_start(ar, base_addr);
base_len = 0;
break;
case BMI_SGMTFILE_DONE:
/* no more segment */
base_len = 0;
sgmt_end = true;
ret = 0;
break;
case BMI_SGMTFILE_BDDATA:
case BMI_SGMTFILE_EXEC:
ath10k_warn(ar,
"firmware has unsupported segment:%d\n",
base_len);
ret = -EINVAL;
break;
default:
if (base_len > left) {
/* sanity check */
ath10k_warn(ar,
"firmware has invalid segment length, %d > %d\n",
base_len, left);
ret = -EINVAL;
break;
}
ret = ath10k_hw_diag_segment_download(ar,
buf,
base_addr,
base_len);
if (ret)
ath10k_warn(ar,
"failed to download firmware via diag interface:%d\n",
ret);
break;
}
if (ret || sgmt_end)
break;
metadata = (struct bmi_segmented_metadata *)(buf + base_len);
left -= base_len;
}
if (ret == 0)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot firmware fast diag download successfully.\n");
return ret;
}
const struct ath10k_hw_ops qca988x_ops = {
.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};

View File

@ -21,6 +21,14 @@
#include "targaddrs.h"
enum ath10k_bus {
ATH10K_BUS_PCI,
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
ATH10K_BUS_SNOC,
};
#define ATH10K_FW_DIR "ath10k"
#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac)
@ -109,6 +117,7 @@ enum qca9377_chip_id_rev {
#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin"
#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA9888 2.0 defines */
@ -221,6 +230,7 @@ enum ath10k_fw_htt_op_version {
enum ath10k_bd_ie_type {
/* contains sub IEs of enum ath10k_bd_ie_board_type */
ATH10K_BD_IE_BOARD = 0,
ATH10K_BD_IE_BOARD_EXT = 1,
};
enum ath10k_bd_ie_board_type {
@ -389,6 +399,11 @@ extern const struct ath10k_hw_ce_regs qcax_ce_regs;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
int ath10k_hw_diag_fast_download(struct ath10k *ar,
u32 address,
const void *buffer,
u32 length);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
@ -501,6 +516,7 @@ struct ath10k_hw_clk_params {
struct ath10k_hw_params {
u32 id;
u16 dev_id;
enum ath10k_bus bus;
const char *name;
u32 patch_load_addr;
int uart_pin;
@ -539,6 +555,8 @@ struct ath10k_hw_params {
const char *dir;
const char *board;
size_t board_size;
const char *eboard;
size_t ext_board_size;
size_t board_ext_size;
} fw;
@ -594,6 +612,9 @@ struct ath10k_hw_params {
* to avoid it sending spurious acks.
*/
bool hw_filter_reset_required;
/* target supporting fw download via diag ce */
bool fw_diag_ce_download;
};
struct htt_rx_desc;
@ -1129,4 +1150,15 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
/* qca6174 PLL offset/mask end */
/* CPU_ADDR_MSB is a register, bit[3:0] is to specify which memory
* region is accessed. The memory region size is 1M.
* If host wants to access 0xX12345 at target, then CPU_ADDR_MSB[3:0]
* is 0xX.
* The following MACROs are defined to get the 0xX and the size limit.
*/
#define CPU_ADDR_MSB_REGION_MASK GENMASK(23, 20)
#define CPU_ADDR_MSB_REGION_VAL(X) FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X)
#define REGION_ACCESS_SIZE_LIMIT 0x100000
#define REGION_ACCESS_SIZE_MASK (REGION_ACCESS_SIZE_LIMIT - 1)
#endif /* _HW_H_ */
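
A quick sanity check of the region macros (FIELD_GET is the linux/bitfield.h helper):

/* For target address 0x00412345:
 *   CPU_ADDR_MSB_REGION_VAL(0x00412345)
 *     = FIELD_GET(GENMASK(23, 20), 0x00412345) = 0x4
 *   0x00412345 & REGION_ACCESS_SIZE_MASK = 0x12345, the offset
 *   inside the 1M region, matching the 0xX12345 -> 0xX rule above.
 */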

View File

@ -30,7 +30,6 @@
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"
@ -157,6 +156,22 @@ u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
return 0;
}
static int ath10k_mac_get_rate_hw_value(int bitrate)
{
int i;
u8 hw_value_prefix = 0;
if (ath10k_mac_bitrate_is_cck(bitrate))
hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
if (ath10k_rates[i].bitrate == bitrate)
return hw_value_prefix | ath10k_rates[i].hw_value;
}
return -EINVAL;
}
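
A note on the encoding the new helper returns (assuming ath10k_rates[] stores a per-rate hw_value, and that bitrate is in mac80211's 100 kbps units):

/* For CCK 5.5 Mbps (bitrate == 55):
 *   hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
 *   return hw_value_prefix | ath10k_rates[i].hw_value;
 * OFDM rates keep hw_value_prefix == 0.
 */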
static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
switch ((mcs_map >> (2 * nss)) & 0x3) {
@ -968,7 +983,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
if (time_left == 0)
return -ETIMEDOUT;
return 0;
return ar->last_wmi_vdev_start_status;
}
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
@ -5452,9 +5467,10 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct cfg80211_chan_def def;
u32 vdev_param, pdev_param, slottime, preamble;
u16 bitrate, hw_value;
u8 rate;
int rateidx, ret = 0;
u8 rate, basic_rate_idx;
int rateidx, ret = 0, hw_rate_code;
enum nl80211_band band;
const struct ieee80211_supported_band *sband;
mutex_lock(&ar->conf_mutex);
@ -5660,6 +5676,30 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) {
mutex_unlock(&ar->conf_mutex);
return;
}
sband = ar->hw->wiphy->bands[def.chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
if (hw_rate_code < 0) {
ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
mutex_unlock(&ar->conf_mutex);
return;
}
vdev_param = ar->wmi.vdev_param->mgmt_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
}
mutex_unlock(&ar->conf_mutex);
}
@ -6216,6 +6256,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
@ -6244,6 +6285,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
GFP_KERNEL);
if (!arsta->tx_stats)
goto exit;
}
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
@ -6329,6 +6377,9 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
kfree(arsta->tx_stats);
if (sta->tdls) {
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
sta,
@ -6769,23 +6820,17 @@ static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
void ath10k_mac_wait_tx_complete(struct ath10k *ar)
{
struct ath10k *ar = hw->priv;
bool skip;
long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs
*/
if (drop)
return;
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_WEDGED)
goto skip;
return;
time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
@ -6804,8 +6849,18 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (time_left == 0 || skip)
ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
skip, ar->state, time_left);
}
skip:
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
if (drop)
return;
mutex_lock(&ar->conf_mutex);
ath10k_mac_wait_tx_complete(ar);
mutex_unlock(&ar->conf_mutex);
}
@ -8149,6 +8204,24 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
},
};
static const struct
ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
{
.limits = ath10k_10_4_if_limits,
.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
.max_interfaces = 16,
.num_different_channels = 1,
.beacon_int_infra_match = true,
.beacon_int_min_gcd = 100,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80),
#endif
},
};
static void ath10k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@ -8311,6 +8384,10 @@ int ath10k_mac_register(struct ath10k *ar)
void *channels;
int ret;
if (!is_valid_ether_addr(ar->mac_addr)) {
ath10k_warn(ar, "invalid MAC address; choosing random\n");
eth_random_addr(ar->mac_addr);
}
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
SET_IEEE80211_DEV(ar->hw, ar->dev);
@ -8465,6 +8542,10 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT);
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
@ -8508,6 +8589,13 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10_4_if_comb);
if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
ar->wmi.svc_map)) {
ar->hw->wiphy->iface_combinations =
ath10k_10_4_bcn_int_if_comb;
ar->hw->wiphy->n_iface_combinations =
ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
}
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:


@ -82,6 +82,7 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
u16 peer_id,
u8 tid);
int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
void ath10k_mac_wait_tx_complete(struct ath10k *ar);
static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
struct sk_buff *skb)


@ -192,7 +192,7 @@ static struct ce_attr host_ce_config_wlan[] = {
/* CE7: ce_diag, the Diagnostic Window */
{
.flags = CE_ATTR_FLAGS,
.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
.src_nentries = 2,
.src_sz_max = DIAG_TRANSFER_LIMIT,
.dest_nentries = 2,
@ -870,6 +870,21 @@ static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
return val;
}
/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
* Supports accessing target space below 1M for qca6174 and qca9377.
* If the target space is below 1M, bit[20] of the converted CE addr is 0.
* Otherwise bit[20] of the converted CE addr is 1.
*/
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
& 0x7ff) << 21;
val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
return val;
}
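
The comment above describes the QCA6174/QCA9377 conversion: bit[20] of the CE address distinguishes target space below and above 1M. A standalone sketch of the low-bits math (the upper bits, which the driver reads from the CORE_CTRL register, are omitted here):

#include <stdio.h>
#include <stdint.h>

/* Low 21 bits of the CE address only; the driver additionally ORs in
 * bits derived from a CORE_CTRL register read, left out of this sketch. */
static uint32_t ce_addr_low_bits(uint32_t addr)
{
	uint32_t region = addr & 0xfffff;

	return ((addr >= 0x100000) ? 0x100000 : 0) | region;
}

int main(void)
{
	printf("0x%08x -> 0x%08x (bit 20 clear)\n",
	       0x0003f000u, ce_addr_low_bits(0x0003f000u));
	printf("0x%08x -> 0x%08x (bit 20 set)\n",
	       0x0040f000u, ce_addr_low_bits(0x0040f000u));
	return 0;
}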
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
@ -931,6 +946,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
/* The address supplied by the caller is in the
* Target CPU virtual address space.
*
* In order to use this address with the diagnostic CE,
* convert it from Target CPU virtual address space
* to CE address space
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
remaining_bytes = nbytes;
ce_data = ce_data_base;
while (remaining_bytes) {
@ -942,16 +966,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
/* Request CE to send from Target(!) address to Host buffer */
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
*
* In order to use this address with the diagnostic CE,
* convert it from Target CPU virtual address space
* to CE address space
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
if (ret)
@ -960,8 +974,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@ -972,9 +988,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
(void **)&buf,
&completed_nbytes)
!= 0) {
mdelay(1);
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
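
The hunks above and below replace the 1 ms mdelay() polls with 50 us udelay() polls while keeping the same overall 10 ms budget, so a completed diag operation is noticed up to 20x sooner. The pattern, emulated in userspace (usleep() standing in for udelay(), and a stub for the CE completion check):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DIAG_ACCESS_CE_TIMEOUT_US	10000	/* same 10 ms total budget */
#define DIAG_ACCESS_CE_WAIT_US		50	/* poll every 50 us, not 1 ms */

static bool ce_completed(void)
{
	return false;	/* stand-in for ath10k_ce_completed_*_next_nolock() */
}

int main(void)
{
	unsigned int i = 0;

	while (!ce_completed()) {
		usleep(DIAG_ACCESS_CE_WAIT_US);	/* udelay() in the driver */
		i += DIAG_ACCESS_CE_WAIT_US;
		if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
			printf("timed out after ~%u us\n", i);
			return 1;	/* -EBUSY in the driver */
		}
	}
	return 0;
}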
@ -1119,9 +1136,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
mdelay(1);
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@ -1132,9 +1150,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
(void **)&buf,
&completed_nbytes)
!= 0) {
mdelay(1);
udelay(DIAG_ACCESS_CE_WAIT_US);
i += DIAG_ACCESS_CE_WAIT_US;
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@ -1839,7 +1858,7 @@ int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
}
}
if (WARN_ON(!ul_set || !dl_set))
if (!ul_set || !dl_set)
return -ENOENT;
return 0;
@ -3482,7 +3501,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
struct ath10k *ar;
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
u32 chip_id;
struct ath10k_bus_params bus_params;
bool pci_ps;
int (*pci_soft_reset)(struct ath10k *ar);
int (*pci_hard_reset)(struct ath10k *ar);
@ -3510,7 +3529,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = ath10k_pci_warm_reset;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
@ -3538,7 +3557,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_ps = true;
pci_soft_reset = NULL;
pci_hard_reset = ath10k_pci_qca6174_chip_reset;
targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@ -3618,19 +3637,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_free_irq;
}
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
goto err_free_irq;
}
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
pdev->device, bus_params.chip_id);
goto err_free_irq;
}
ret = ath10k_core_register(ar, chip_id);
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_free_irq;
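
Each bus (PCI here, SDIO, USB and SNOC below) now hands ath10k_core_register() a bus_params struct instead of a bare chip_id. From the call sites in this series, the struct carries at least a chip id and a latency class; a sketch of the assumed shape (the real definition lives in the driver's core header and may carry more fields):

#include <stdint.h>

enum ath10k_dev_type {
	ATH10K_DEV_TYPE_LL,	/* low latency: PCI, SNOC */
	ATH10K_DEV_TYPE_HL,	/* high latency: SDIO, USB */
};

struct ath10k_bus_params {
	uint32_t chip_id;	/* 0 when the bus cannot read it (SDIO/USB) */
	enum ath10k_dev_type dev_type;
};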


@ -207,7 +207,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define CDC_WAR_DATA_CE 4
/* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
#define DIAG_ACCESS_CE_TIMEOUT_MS 10
#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */
#define DIAG_ACCESS_CE_WAIT_US 50
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);


@ -1277,4 +1277,19 @@ struct fw_rx_desc_base {
u8 info0;
} __packed;
#define FW_RX_DESC_FLAGS_FIRST_MSDU (1 << 0)
#define FW_RX_DESC_FLAGS_LAST_MSDU (1 << 1)
#define FW_RX_DESC_C3_FAILED (1 << 2)
#define FW_RX_DESC_C4_FAILED (1 << 3)
#define FW_RX_DESC_IPV6 (1 << 4)
#define FW_RX_DESC_TCP (1 << 5)
#define FW_RX_DESC_UDP (1 << 6)
struct fw_rx_desc_hl {
u8 info0;
u8 version;
u8 len;
u8 flags;
} __packed;
#endif /* _RX_DESC_H_ */


@ -1941,7 +1941,8 @@ static int ath10k_sdio_probe(struct sdio_func *func,
struct ath10k_sdio *ar_sdio;
struct ath10k *ar;
enum ath10k_hw_rev hw_rev;
u32 chip_id, dev_id_base;
u32 dev_id_base;
struct ath10k_bus_params bus_params;
int ret, i;
/* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
@ -2035,9 +2036,10 @@ static int ath10k_sdio_probe(struct sdio_func *func,
goto err_free_wq;
}
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with SDIO */
chip_id = 0;
ret = ath10k_core_register(ar, chip_id);
bus_params.chip_id = 0;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_free_wq;


@ -62,6 +62,7 @@ static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ath10k_snoc_drv_priv drv_priv = {
.hw_rev = ATH10K_HW_WCN3990,
@ -171,7 +172,7 @@ static struct ce_attr host_ce_config_wlan[] = {
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 512,
.recv_cb = ath10k_snoc_htt_htc_rx_cb,
.recv_cb = ath10k_snoc_pktlog_rx_cb,
},
};
@ -436,6 +437,14 @@ static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
/* Called by lower (CE) layer when data is received from the Target.
* WCN3990 firmware uses a separate CE (CE11) to transfer pktlog data.
*/
static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
@ -616,7 +625,7 @@ static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
}
}
if (WARN_ON(!ul_set || !dl_set))
if (!ul_set || !dl_set)
return -ENOENT;
return 0;
@ -722,14 +731,15 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
ath10k_snoc_irq_disable(ar);
ath10k_snoc_buffer_cleanup(ar);
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
ath10k_snoc_buffer_cleanup(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
}
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
napi_enable(&ar->napi);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@ -792,7 +802,6 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar)
goto err_wlan_enable;
}
napi_enable(&ar->napi);
return 0;
err_wlan_enable:
@ -1274,6 +1283,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
struct ath10k *ar;
int ret;
u32 i;
struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
if (!of_id) {
@ -1341,7 +1351,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
ret = ath10k_core_register(ar, drv_data->hw_rev);
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = drv_data->hw_rev;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_hw_power_off;


@ -484,6 +484,10 @@ struct host_interest {
#define QCA99X0_BOARD_DATA_SZ 12288
#define QCA99X0_BOARD_EXT_DATA_SZ 0
/* Dual band extended board data */
#define QCA99X0_EXT_BOARD_DATA_SZ 2048
#define EXT_BOARD_ADDRESS_OFFSET 0x3000
#define QCA4019_BOARD_DATA_SZ 12064
#define QCA4019_BOARD_EXT_DATA_SZ 0


@ -95,7 +95,8 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (ar->dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
ath10k_report_offchan_tx(htt->ar, msdu);


@ -983,7 +983,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
struct usb_device *dev = interface_to_usbdev(interface);
int ret, vendor_id, product_id;
enum ath10k_hw_rev hw_rev;
u32 chip_id;
struct ath10k_bus_params bus_params;
/* Assumption: All USB based chipsets (so far) are QCA9377 based.
* If newer chipsets appear that do not use the hw reg
@ -1016,9 +1016,10 @@ static int ath10k_usb_probe(struct usb_interface *interface,
ar->id.vendor = vendor_id;
ar->id.device = product_id;
bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with USB */
chip_id = 0;
ret = ath10k_core_register(ar, chip_id);
bus_params.chip_id = 0;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret);
goto err;


@ -19,7 +19,6 @@
#include "debug.h"
#include "mac.h"
#include "hw.h"
#include "mac.h"
#include "wmi.h"
#include "wmi-ops.h"
#include "wmi-tlv.h"
@ -1569,7 +1568,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
if (ar->hw_params.num_peers)
cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
else
cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
@ -1582,7 +1584,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
}
cfg->num_peer_keys = __cpu_to_le32(2);
cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
if (ar->hw_params.num_peers)
cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
else
cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
cfg->tx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_chain_mask = __cpu_to_le32(0x7);
cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
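
The two hunks above implement one sizing rule: when hw_params provides a peer count, use it and allocate two TIDs per peer; otherwise fall back to the TARGET_TLV_* defaults. The rule in isolation, with made-up default values:

#include <stdio.h>

static void size_config(unsigned int hw_num_peers,
			unsigned int def_peers, unsigned int def_tids,
			unsigned int *num_peers, unsigned int *num_tids)
{
	*num_peers = hw_num_peers ? hw_num_peers : def_peers;
	*num_tids  = hw_num_peers ? hw_num_peers * 2 : def_tids;
}

int main(void)
{
	unsigned int peers, tids;

	size_config(0, 32, 64, &peers, &tids);	/* no override: defaults */
	printf("%u peers, %u tids\n", peers, tids);
	size_config(16, 32, 64, &peers, &tids);	/* 16 peers -> 32 tids */
	printf("%u peers, %u tids\n", peers, tids);
	return 0;
}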


@ -1307,7 +1307,8 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
.peer_sta_ps_statechg_enable =
WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
@ -2342,7 +2343,12 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
dma_unmap_single(ar->dev, pkt_addr->paddr,
msdu->len, DMA_FROM_DEVICE);
info = IEEE80211_SKB_CB(msdu);
info->flags |= status;
if (status)
info->flags &= ~IEEE80211_TX_STAT_ACK;
else
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(ar->hw, msdu);
ret = 0;
@ -2482,7 +2488,8 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
ieee80211_rx(ar->hw, skb);
ieee80211_rx_ni(ar->hw, skb);
return 0;
}
@ -3242,18 +3249,31 @@ void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_vdev_start_ev_arg arg = {};
int ret;
u32 status;
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
ar->last_wmi_vdev_start_status = 0;
ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
if (ret) {
ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
return;
ar->last_wmi_vdev_start_status = ret;
goto out;
}
if (WARN_ON(__le32_to_cpu(arg.status)))
return;
status = __le32_to_cpu(arg.status);
if (WARN_ON_ONCE(status)) {
ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
status, (status == WMI_VDEV_START_CHAN_INVALID) ?
"chan-invalid" : "unknown");
/* Setup is done one way or another though, so we should still
* do the completion; don't return here.
*/
ar->last_wmi_vdev_start_status = -EINVAL;
}
out:
complete(&ar->vdev_setup_done);
}
@ -4780,6 +4800,13 @@ ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
}
}
if (pream == -1) {
ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
pream_idx, __le32_to_cpu(ev->chan_freq));
tpc = 0;
goto out;
}
if (pream == 4)
tpc = min_t(u8, ev->rates_array[rate_idx],
ev->max_reg_allow_pow[ch]);
@ -5022,6 +5049,36 @@ ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
}
}
static void
ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_peer_sta_ps_state_chg_event *ev;
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
u8 peer_addr[ETH_ALEN];
lockdep_assert_held(&ar->data_lock);
ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
if (!sta) {
ath10k_warn(ar, "failed to find station entry %pM\n",
peer_addr);
goto exit;
}
arsta = (struct ath10k_sta *)sta->drv_priv;
arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
exit:
rcu_read_unlock();
}
void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@ -5455,7 +5512,8 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
arg.mac_addr,
__le32_to_cpu(arg.status));
ether_addr_copy(ar->mac_addr, arg.mac_addr);
if (is_zero_ether_addr(ar->mac_addr))
ether_addr_copy(ar->mac_addr, arg.mac_addr);
complete(&ar->wmi.unified_ready);
return 0;
}
@ -5951,6 +6009,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@ -6068,6 +6129,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
ath10k_wmi_event_dfs_status_check(ar, skb);
break;
case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;


@ -203,6 +203,8 @@ enum wmi_service {
WMI_SERVICE_TPC_STATS_FINAL,
WMI_SERVICE_RESET_CHIP,
WMI_SERVICE_SPOOF_MAC_SUPPORT,
WMI_SERVICE_TX_DATA_ACK_RSSI,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
/* keep last */
WMI_SERVICE_MAX,
@ -350,6 +352,13 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
WMI_10_4_SERVICE_TPC_STATS_FINAL,
WMI_10_4_SERVICE_CFR_CAPTURE_SUPPORT,
WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
WMI_10_4_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_LEGACY,
WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT,
WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL,
WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
};
static inline char *wmi_service_name(int service_id)
@ -463,6 +472,8 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
SVCSTR(WMI_SERVICE_RESET_CHIP);
SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI);
SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT);
default:
return NULL;
}
@ -771,6 +782,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL,
WMI_SERVICE_TPC_STATS_FINAL, len);
SVCMAP(WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
WMI_SERVICE_TX_DATA_ACK_RSSI, len);
SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len);
}
#undef SVCMAP
@ -2924,6 +2939,7 @@ enum wmi_coex_version {
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
* @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
* @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
@ -2939,6 +2955,7 @@ enum wmi_10_4_feature_mask {
WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10),
WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12),
WMI_10_4_TX_DATA_ACK_RSSI = BIT(16),
};
@ -4153,6 +4170,13 @@ enum wmi_tpc_pream_5ghz {
WMI_TPC_PREAM_5GHZ_HTCUP,
};
#define WMI_PEER_PS_STATE_DISABLED 2
struct wmi_peer_sta_ps_state_chg_event {
struct wmi_mac_addr peer_macaddr;
__le32 peer_ps_state;
} __packed;
struct wmi_pdev_chanlist_update_event {
/* number of channels */
__le32 num_chan;
@ -4958,10 +4982,15 @@ enum wmi_rate_preamble {
#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1)
#define ATH10K_HW_RATECODE(rate, nss, preamble) \
(((preamble) << 6) | ((nss) << 4) | (rate))
#define ATH10K_HW_AMPDU(flags) ((flags) & 0x1)
#define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3)
#define VHT_MCS_NUM 10
#define VHT_BW_NUM 4
#define VHT_NSS_NUM 4
#define ATH10K_VHT_MCS_NUM 10
#define ATH10K_BW_NUM 4
#define ATH10K_NSS_NUM 4
#define ATH10K_LEGACY_NUM 12
#define ATH10K_GI_NUM 2
#define ATH10K_HT_MCS_NUM 32
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
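
The new ATH10K_HW_AMPDU()/ATH10K_HW_BA_FAIL() helpers unpack the per-ppdu tx-stats flags byte: bit 0 marks A-MPDU transmission and bits 1-2 carry a block-ack failure code. A quick decode check using the macros exactly as defined above:

#include <stdio.h>

#define ATH10K_HW_AMPDU(flags)		((flags) & 0x1)
#define ATH10K_HW_BA_FAIL(flags)	(((flags) >> 1) & 0x3)

int main(void)
{
	unsigned int flags = 0x5;	/* 0b101: A-MPDU, BA-fail code 2 */

	printf("ampdu=%u ba_fail=%u\n",
	       ATH10K_HW_AMPDU(flags), ATH10K_HW_BA_FAIL(flags));
	return 0;
}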
@ -6642,11 +6671,17 @@ struct wmi_ch_info_ev_arg {
__le32 rx_frame_count;
};
/* From 10.4 firmware, not sure all have the same values. */
enum wmi_vdev_start_status {
WMI_VDEV_START_OK = 0,
WMI_VDEV_START_CHAN_INVALID,
};
struct wmi_vdev_start_ev_arg {
__le32 vdev_id;
__le32 req_id;
__le32 resp_type; /* %WMI_VDEV_RESP_ */
__le32 status;
__le32 status; /* See wmi_vdev_start_status enum above */
};
struct wmi_peer_kick_ev_arg {


@ -374,6 +374,8 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto cleanup;
}
ath10k_mac_wait_tx_complete(ar);
ret = ath10k_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to start wow: %d\n", ret);


@ -710,8 +710,8 @@ static bool check_device_tree(struct ath6kl *ar)
for_each_compatible_node(node, NULL, "atheros,ath6kl") {
board_id = of_get_property(node, board_id_prop, NULL);
if (board_id == NULL) {
ath6kl_warn("No \"%s\" property on %s node.\n",
board_id_prop, node->name);
ath6kl_warn("No \"%s\" property on %pOFn node.\n",
board_id_prop, node);
continue;
}
snprintf(board_filename, sizeof(board_filename),


@ -1074,6 +1074,7 @@ struct ath_softc {
struct ath_spec_scan_priv spec_priv;
struct ieee80211_vif *tx99_vif;
struct sk_buff *tx99_skb;
bool tx99_state;
s16 tx99_power;


@ -144,6 +144,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
RXS_ERR("BEACONS", rx_beacons);
RXS_ERR("FRAGS", rx_frags);
RXS_ERR("SPECTRAL", rx_spectral);
RXS_ERR("SPECTRAL SMPL GOOD", rx_spectral_sample_good);
RXS_ERR("SPECTRAL SMPL ERR", rx_spectral_sample_err);
RXS_ERR("CRC ERR", crc_err);
RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);


@ -39,6 +39,8 @@
* @rx_beacons: No. of beacons received.
* @rx_frags: No. of rx-fragments received.
* @rx_spectral: No of spectral packets received.
* @rx_spectral_sample_good: No. of good spectral samples
* @rx_spectral_sample_err: No. of spectral samples with errors
*/
struct ath_rx_stats {
u32 rx_pkts_all;
@ -58,6 +60,8 @@ struct ath_rx_stats {
u32 rx_beacons;
u32 rx_frags;
u32 rx_spectral;
u32 rx_spectral_sample_good;
u32 rx_spectral_sample_err;
};
#ifdef CONFIG_ATH9K_COMMON_DEBUG


@ -59,8 +59,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
max_index = spectral_max_index(mag_info->all_bins,
SPECTRAL_HT20_NUM_BINS);
max_index = spectral_max_index_ht20(mag_info->all_bins);
max_magnitude = spectral_max_magnitude(mag_info->all_bins);
max_exp = mag_info->max_exp & 0xf;
@ -72,7 +71,7 @@ ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
return -1;
if (sample[max_index] != (max_magnitude >> max_exp))
if ((sample[max_index] & 0xf8) != ((max_magnitude >> max_exp) & 0xf8))
return -1;
else
return 0;
@ -100,12 +99,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
lower_mag = spectral_max_magnitude(mag_info->lower_bins);
lower_max_index = spectral_max_index(mag_info->lower_bins,
SPECTRAL_HT20_40_NUM_BINS);
lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
upper_max_index = spectral_max_index(mag_info->upper_bins,
SPECTRAL_HT20_40_NUM_BINS);
upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
max_exp = mag_info->max_exp & 0xf;
@ -117,19 +114,10 @@ ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
((upper_max_index < 1) || (lower_max_index < 1)))
return -1;
/* Some time hardware messes up the index and adds
* the index of the middle point (dc_pos). Try to fix it.
*/
if ((upper_max_index - dc_pos > 0) &&
(sample[upper_max_index] == (upper_mag >> max_exp)))
upper_max_index -= dc_pos;
if ((lower_max_index - dc_pos > 0) &&
(sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
lower_max_index -= dc_pos;
if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
(sample[lower_max_index] != (lower_mag >> max_exp)))
if (((sample[upper_max_index + dc_pos] & 0xf8) !=
((upper_mag >> max_exp) & 0xf8)) ||
((sample[lower_max_index] & 0xf8) !=
((lower_mag >> max_exp) & 0xf8)))
return -1;
else
return 0;
@ -169,8 +157,7 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
magnitude = spectral_max_magnitude(mag_info->all_bins);
fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
max_index = spectral_max_index(mag_info->all_bins,
SPECTRAL_HT20_NUM_BINS);
max_index = spectral_max_index_ht20(mag_info->all_bins);
fft_sample_20.max_index = max_index;
bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
@ -188,7 +175,8 @@ ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
magnitude >> max_exp,
max_index);
if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
if ((fft_sample_20.data[max_index] & 0xf8) !=
((magnitude >> max_exp) & 0xf8)) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
@ -302,12 +290,10 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
upper_mag = spectral_max_magnitude(mag_info->upper_bins);
fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
lower_max_index = spectral_max_index(mag_info->lower_bins,
SPECTRAL_HT20_40_NUM_BINS);
lower_max_index = spectral_max_index_ht40(mag_info->lower_bins);
fft_sample_40.lower_max_index = lower_max_index;
upper_max_index = spectral_max_index(mag_info->upper_bins,
SPECTRAL_HT20_40_NUM_BINS);
upper_max_index = spectral_max_index_ht40(mag_info->upper_bins);
fft_sample_40.upper_max_index = upper_max_index;
lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
@ -331,29 +317,13 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
upper_mag >> max_exp,
upper_max_index);
/* Some time hardware messes up the index and adds
* the index of the middle point (dc_pos). Try to fix it.
*/
if ((upper_max_index - dc_pos > 0) &&
(fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
upper_max_index -= dc_pos;
fft_sample_40.upper_max_index = upper_max_index;
}
if ((lower_max_index - dc_pos > 0) &&
(fft_sample_40.data[lower_max_index - dc_pos] ==
(lower_mag >> max_exp))) {
lower_max_index -= dc_pos;
fft_sample_40.lower_max_index = lower_max_index;
}
/* Check if we got the expected magnitude values at
* the expected bins
*/
if ((fft_sample_40.data[upper_max_index + dc_pos]
!= (upper_mag >> max_exp)) ||
(fft_sample_40.data[lower_max_index]
!= (lower_mag >> max_exp))) {
if (((fft_sample_40.data[upper_max_index + dc_pos] & 0xf8)
!= ((upper_mag >> max_exp) & 0xf8)) ||
((fft_sample_40.data[lower_max_index] & 0xf8)
!= ((lower_mag >> max_exp) & 0xf8))) {
ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
ret = -1;
}
@ -411,7 +381,7 @@ ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
ath_dbg(common, SPECTRAL_SCAN,
"Calculated new upper max 0x%X at %i\n",
tmp_mag, i);
tmp_mag, fft_sample_40.upper_max_index);
} else
for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
if (fft_sample_40.data[i] == (upper_mag >> max_exp))
@ -501,6 +471,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
struct ath_hw *ah = spec_priv->ah;
struct ath_common *common = ath9k_hw_common(spec_priv->ah);
struct ath_softc *sc = (struct ath_softc *)common->priv;
u8 num_bins, *vdata = (u8 *)hdr;
struct ath_radar_info *radar_info;
int len = rs->rs_datalen;
@ -649,8 +620,13 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
sample_buf, sample_len,
sample_bytes);
fft_handler(rs, spec_priv, sample_buf,
tsf, freq, chan_type);
ret = fft_handler(rs, spec_priv, sample_buf,
tsf, freq, chan_type);
if (ret == 0)
RX_STAT_INC(rx_spectral_sample_good);
else
RX_STAT_INC(rx_spectral_sample_err);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
@ -665,6 +641,11 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
ret = fft_handler(rs, spec_priv, sample_start,
tsf, freq, chan_type);
if (ret == 0)
RX_STAT_INC(rx_spectral_sample_good);
else
RX_STAT_INC(rx_spectral_sample_err);
/* Mix the received bins to the /dev/random
* pool
*/
@ -675,7 +656,7 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
* loop.
*/
if (len <= fft_len + 2)
break;
return 1;
sample_start = &vdata[i + 1];


@ -145,6 +145,23 @@ static inline u8 spectral_max_index(u8 *bins, int num_bins)
return m;
}
static inline u8 spectral_max_index_ht40(u8 *bins)
{
u8 idx;
idx = spectral_max_index(bins, SPECTRAL_HT20_40_NUM_BINS);
/* positive values and zero start at the beginning
* of the data field.
*/
return idx % (SPECTRAL_HT20_40_NUM_BINS / 2);
}
static inline u8 spectral_max_index_ht20(u8 *bins)
{
return spectral_max_index(bins, SPECTRAL_HT20_NUM_BINS);
}
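
The HT40 wrapper folds the raw index back into one half of the combined lower+upper bin array, which is what the deleted dc_pos fix-up code used to approximate. A standalone check of the fold (the bin count is an assumption here, mirroring ath9k's HT20/40 sample layout):

#include <stdio.h>

#define SPECTRAL_HT20_40_NUM_BINS 135	/* assumed value for the demo */

static unsigned int max_index_ht40(unsigned int idx)
{
	/* positive values and zero start at the beginning of the field */
	return idx % (SPECTRAL_HT20_40_NUM_BINS / 2);
}

int main(void)
{
	printf("70 -> %u\n", max_index_ht40(70));	/* 70 % 67 folds to 3 */
	printf("12 -> %u\n", max_index_ht40(12));	/* unchanged */
	return 0;
}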
/* return the bitmap weight from the all/upper/lower bins */
static inline u8 spectral_bitmap_weight(u8 *bins)
{


@ -990,19 +990,6 @@ static int read_file_dump_nfcal(struct seq_file *file, void *data)
return 0;
}
static int open_file_dump_nfcal(struct inode *inode, struct file *f)
{
return single_open(f, read_file_dump_nfcal, inode->i_private);
}
static const struct file_operations fops_dump_nfcal = {
.read = seq_read,
.open = open_file_dump_nfcal,
.owner = THIS_MODULE,
.llseek = seq_lseek,
.release = single_release,
};
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)


@ -286,9 +286,25 @@ static ssize_t read_airtime(struct file *file, char __user *user_buf,
return retval;
}
static ssize_t
write_airtime_reset_stub(struct file *file, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath_node *an = file->private_data;
struct ath_airtime_stats *astats;
int i;
astats = &an->airtime_stats;
astats->rx_airtime = 0;
astats->tx_airtime = 0;
for (i = 0; i < 4; i++)
an->airtime_deficit[i] = ATH_AIRTIME_QUANTUM;
return count;
}
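
With the airtime file made writable (mode 0644 below), any write resets the accumulated airtime stats and re-arms the deficits. A minimal userspace trigger; the debugfs path components (phy, netdev, station MAC) are placeholders:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
		"stations/00:11:22:33:44:55/airtime";	/* hypothetical node */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) < 0)	/* content is ignored; any write resets */
		perror("write");
	close(fd);
	return 0;
}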
static const struct file_operations fops_airtime = {
.read = read_airtime,
.write = write_airtime_reset_stub,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@ -304,5 +320,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr);
debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv);
debugfs_create_file("airtime", 0444, dir, an, &fops_airtime);
debugfs_create_file("airtime", 0644, dir, an, &fops_airtime);
}


@ -1251,8 +1251,13 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_node *an = &avp->mcast_node;
if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
}
sc->tx99_vif = vif;
}
mutex_lock(&sc->mutex);
@ -1337,6 +1342,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath9k_p2p_remove_vif(sc, vif);
sc->cur_chan->nvifs--;
sc->tx99_vif = NULL;
if (!ath9k_is_chanctx_enabled())
list_del(&avp->list);


@ -54,6 +54,7 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct sk_buff *skb;
struct ath_vif *avp;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
@ -71,11 +72,17 @@ static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
if (sc->tx99_vif) {
avp = (struct ath_vif *) sc->tx99_vif->drv_priv;
hdr->seq_ctrl |= cpu_to_le16(avp->seq_no);
}
tx_info = IEEE80211_SKB_CB(skb);
memset(tx_info, 0, sizeof(*tx_info));
rate = &tx_info->control.rates[0];
tx_info->band = sc->cur_chan->chandef.chan->band;
tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
tx_info->control.vif = sc->tx99_vif;
rate->count = 1;
if (ah->curchan && IS_CHAN_HT(ah->curchan)) {
rate->flags |= IEEE80211_TX_RC_MCS;


@ -2973,7 +2973,7 @@ int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
return -EINVAL;
}
ath_set_rates(NULL, NULL, bf);
ath_set_rates(sc->tx99_vif, NULL, bf);
ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);


@ -190,7 +190,7 @@ out:
static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
{
int rc = 0;
unsigned long start, data_comp_to;
unsigned long data_comp_to;
wil_dbg_pm(wil, "suspend keep radio on\n");
@ -232,7 +232,6 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
}
/* Wait for completion of the pending RX packets */
start = jiffies;
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
if (test_bit(wil_status_napi_en, wil->status)) {
while (!wil->txrx_ops.is_rx_idle(wil)) {


@ -455,7 +455,7 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
*/
static inline bool wil_cid_valid(u8 cid)
{
return (cid >= 0 && cid < WIL6210_MAX_CID);
return cid < WIL6210_MAX_CID;
}
struct wil6210_mbox_ring {


@ -1177,7 +1177,7 @@ static void wmi_evt_ring_en(struct wil6210_vif *vif, int id, void *d, int len)
u8 vri = evt->ring_index;
struct wireless_dev *wdev = vif_to_wdev(vif);
struct wil_sta_info *sta;
int cid;
u8 cid;
struct key_params params;
wil_dbg_wmi(wil, "Enable vring %d MID %d\n", vri, vif->mid);


@ -5493,13 +5493,11 @@ err_powerdown:
static void b43_one_core_detach(struct b43_bus_dev *dev)
{
struct b43_wldev *wldev;
struct b43_wl *wl;
/* Do not cancel ieee80211-workqueue based work here.
* See comment in b43_remove(). */
wldev = b43_bus_get_wldev(dev);
wl = wldev->wl;
b43_debugfs_remove_device(wldev);
b43_wireless_core_detach(wldev);
list_del(&wldev->list);


@ -74,7 +74,7 @@
#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000)
#define P2P_INVALID_CHANNEL -1
#define P2P_CHANNEL_SYNC_RETRY 5
#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(1500)
#define P2P_AF_FRM_SCAN_MAX_WAIT msecs_to_jiffies(450)
#define P2P_DEFAULT_SLEEP_TIME_VSDB 200
/* WiFi P2P Public Action Frame OUI Subtypes */
@ -1134,7 +1134,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
{
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
struct brcmf_cfg80211_vif *pri_vif;
unsigned long duration;
s32 retry;
brcmf_dbg(TRACE, "Enter\n");
@ -1150,7 +1149,6 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
* pending action frame tx is cancelled.
*/
retry = 0;
duration = msecs_to_jiffies(P2P_AF_FRM_SCAN_MAX_WAIT);
while ((retry < P2P_CHANNEL_SYNC_RETRY) &&
(afx_hdl->peer_chan == P2P_INVALID_CHANNEL)) {
afx_hdl->is_listen = false;
@ -1158,7 +1156,8 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
retry);
/* search peer on peer's listen channel */
schedule_work(&afx_hdl->afx_work);
wait_for_completion_timeout(&afx_hdl->act_frm_scan, duration);
wait_for_completion_timeout(&afx_hdl->act_frm_scan,
P2P_AF_FRM_SCAN_MAX_WAIT);
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
&p2p->status)))
@ -1171,7 +1170,7 @@ static s32 brcmf_p2p_af_searching_channel(struct brcmf_p2p_info *p2p)
afx_hdl->is_listen = true;
schedule_work(&afx_hdl->afx_work);
wait_for_completion_timeout(&afx_hdl->act_frm_scan,
duration);
P2P_AF_FRM_SCAN_MAX_WAIT);
}
if ((afx_hdl->peer_chan != P2P_INVALID_CHANNEL) ||
(!test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL,
@ -1458,10 +1457,12 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
return 0;
if (e->event_code == BRCMF_E_ACTION_FRAME_COMPLETE) {
if (e->status == BRCMF_E_STATUS_SUCCESS)
if (e->status == BRCMF_E_STATUS_SUCCESS) {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED,
&p2p->status);
else {
if (!p2p->wait_for_offchan_complete)
complete(&p2p->send_af_done);
} else {
set_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);
/* If there is no ack, we don't need to wait for
* WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event
@ -1512,6 +1513,17 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
p2p->af_sent_channel = le32_to_cpu(af_params->channel);
p2p->af_tx_sent_jiffies = jiffies;
if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) &&
p2p->af_sent_channel ==
ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq))
p2p->wait_for_offchan_complete = false;
else
p2p->wait_for_offchan_complete = true;
brcmf_dbg(TRACE, "Waiting for %s tx completion event\n",
(p2p->wait_for_offchan_complete) ?
"off-channel" : "on-channel");
timeout = wait_for_completion_timeout(&p2p->send_af_done,
P2P_AF_MAX_WAIT_TIME);
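
The new wait_for_offchan_complete flag encodes one predicate: the tx counts as on-channel only while discovery-listen is active and the frame went out on the listen channel; otherwise the code keeps waiting for the off-channel completion event. The predicate in isolation (channel numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool wait_for_offchan(bool discover_listen,
			     int af_sent_channel, int listen_channel)
{
	return !(discover_listen && af_sent_channel == listen_channel);
}

int main(void)
{
	printf("%d\n", wait_for_offchan(true, 6, 6));	/* 0: on-channel tx */
	printf("%d\n", wait_for_offchan(true, 6, 11));	/* 1: off-channel tx */
	return 0;
}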


@ -124,6 +124,7 @@ struct afx_hdl {
* @gon_req_action: about to send go negotiation request frame.
* @block_gon_req_tx: drop tx go negotiation request frame.
* @p2pdev_dynamically: is p2p device if created by module param or supplicant.
* @wait_for_offchan_complete: wait for off-channel tx completion event.
*/
struct brcmf_p2p_info {
struct brcmf_cfg80211_info *cfg;
@ -144,6 +145,7 @@ struct brcmf_p2p_info {
bool gon_req_action;
bool block_gon_req_tx;
bool p2pdev_dynamically;
bool wait_for_offchan_complete;
};
s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);


@ -56,7 +56,7 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 38
#define IWL_22000_UCODE_API_MAX 41
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39


@ -57,7 +57,7 @@
#include "fw/file.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 38
#define IWL9000_UCODE_API_MAX 41
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30


@ -165,7 +165,7 @@ struct iwl_nvm_access_resp {
*/
struct iwl_nvm_get_info {
__le32 reserved;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
/**
* enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags {
* @flags: bit 0: 1 - empty, 0 - non-empty
* @nvm_version: nvm version
* @board_type: board type
* @reserved: reserved
* @n_hw_addrs: number of reserved MAC addresses
*/
struct iwl_nvm_get_info_general {
__le32 flags;
__le16 nvm_version;
u8 board_type;
u8 reserved;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
u8 n_hw_addrs;
} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
/**
* enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
@ -231,7 +231,7 @@ struct iwl_nvm_get_info_sku {
struct iwl_nvm_get_info_phy {
__le32 tx_chains;
__le32 rx_chains;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
#define IWL_NUM_CHANNELS (51)
@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory {
__le32 lar_enabled;
__le16 channel_profile[IWL_NUM_CHANNELS];
__le16 reserved;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
/**
* struct iwl_nvm_get_info_rsp - response to get NVM data
@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
@ -269,22 +269,6 @@ struct iwl_nvm_access_complete_cmd {
__le32 reserved;
} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
/**
* struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
* The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
* 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
* MCC in the cmd response will be the relevant MCC in the NVM.
* @mcc: given mobile country code
* @source_id: the source from where we got the MCC, see iwl_mcc_source
* @reserved: reserved for alignment
*/
struct iwl_mcc_update_cmd_v1 {
__le16 mcc;
u8 source_id;
u8 reserved;
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
/**
* struct iwl_mcc_update_cmd - Request the device to update geographic
* regulatory profile according to the given MCC (Mobile Country Code).
@ -305,29 +289,6 @@ struct iwl_mcc_update_cmd {
u8 reserved2[20];
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
/**
* struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
* @status: see &enum iwl_mcc_update_status
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
* @source_id: the MCC source, see iwl_mcc_source
* @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
* channels, depending on platform)
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
struct iwl_mcc_update_resp_v1 {
__le32 status;
__le16 mcc;
u8 cap;
u8 source_id;
__le32 n_channels;
__le32 channels[0];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
/**
* enum iwl_geo_information - geographic information.
* @GEO_NO_INFO: no special info for this geo profile.
@ -340,7 +301,7 @@ enum iwl_geo_information {
};
/**
* struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
* struct iwl_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
@ -348,15 +309,14 @@ enum iwl_geo_information {
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
* @source_id: the MCC source, see iwl_mcc_source
* @time: time elapsed from the MCC test start (in 30 seconds TU)
* @time: time elapsed from the MCC test start (in units of 30 seconds)
* @geo_info: geographic specific profile information
* see &enum iwl_geo_information.
* @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
* channels, depending on platform)
* @n_channels: number of channels in @channels_data.
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
struct iwl_mcc_update_resp {
struct iwl_mcc_update_resp_v3 {
__le32 status;
__le16 mcc;
u8 cap;
@ -367,6 +327,35 @@ struct iwl_mcc_update_resp {
__le32 channels[0];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
/**
* struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
* Contains the new channel control profile map, if changed, and the new MCC
* (mobile country code).
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
* @status: see &enum iwl_mcc_update_status
* @mcc: the new applied MCC
* @cap: capabilities for all channels which matches the MCC
* @time: time elapsed from the MCC test start (in units of 30 seconds)
* @geo_info: geographic specific profile information
* see &enum iwl_geo_information.
* @source_id: the MCC source, see iwl_mcc_source
* @reserved: for four bytes alignment.
* @n_channels: number of channels in @channels_data.
* @channels: channel control data map, DWORD for each channel. Only the first
* 16bits are used.
*/
struct iwl_mcc_update_resp {
__le32 status;
__le16 mcc;
__le16 cap;
__le16 time;
__le16 geo_info;
u8 source_id;
u8 reserved[3];
__le32 n_channels;
__le32 channels[0];
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
/**
* struct iwl_mcc_chub_notif - chub notifies of mcc change
* (MCC_CHUB_UPDATE_CMD = 0xc9)


@ -368,10 +368,10 @@ enum iwl_rx_he_phy {
/* trigger encoded */
IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
IWL_RX_HE_PHY_INFO_TYPE_MASK = 0xf000000000000000ULL,
IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0,
IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1,
IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2,
IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO = 0x3,
IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, /* TSF low valid (first DW) */
IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, /* TSF low/high valid (both DWs) */
IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, /* same + SIGB-common0/1/2 valid */
IWL_RX_HE_PHY_INFO_TYPE_TB = 0x3, /* TSF low/high valid (both DWs) */
/* second dword - MU data */
IWL_RX_HE_PHY_MU_SIGB_COMPRESSION = BIT_ULL(32 + 0),


@ -596,9 +596,12 @@ enum iwl_umac_scan_general_flags {
* enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2
* @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
* notification per channel or not.
* @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
* reorder optimization or not.
*/
enum iwl_umac_scan_general_flags2 {
IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
};
/**


@ -240,7 +240,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
/* Pull RXF1 */
iwl_fwrt_dump_rxf(fwrt, dump_data,
cfg->lmac[0].rxfifo1_size, 0, 0);
@ -254,7 +254,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
LMAC2_PRPH_OFFSET, 2);
}
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
/* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
@ -279,7 +279,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
}
}
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
@ -573,103 +573,95 @@ static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
struct iwl_fw_error_dump_data **dump_data,
u32 sram_len, u32 sram_ofs, u32 smem_len,
u32 sram2_len)
u32 len, u32 ofs, u32 type)
{
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
struct iwl_fw_error_dump_mem *dump_mem;
int i;
if (!fwrt->fw->n_dbg_mem_tlv) {
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
(*dump_data)->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
dump_mem = (void *)(*dump_data)->data;
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
dump_mem->offset = cpu_to_le32(sram_ofs);
iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
sram_len);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
if (!len)
return;
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
u32 len = le32_to_cpu(fw_dbg_mem[i].len);
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
(*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
dump_mem = (void *)(*dump_data)->data;
dump_mem->type = cpu_to_le32(type);
dump_mem->offset = cpu_to_le32(ofs);
iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
*dump_data = iwl_fw_error_next_data(*dump_data);
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
(*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
dump_mem = (void *)(*dump_data)->data;
dump_mem->type = fw_dbg_mem[i].data_type;
dump_mem->offset = cpu_to_le32(ofs);
IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
dump_mem->type);
iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
if (smem_len) {
IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
(*dump_data)->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
dump_mem = (void *)(*dump_data)->data;
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
iwl_trans_read_mem_bytes(fwrt->trans,
fwrt->trans->cfg->smem_offset,
dump_mem->data, smem_len);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
if (sram2_len) {
IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
(*dump_data)->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
dump_mem = (void *)(*dump_data)->data;
dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
iwl_trans_read_mem_bytes(fwrt->trans,
fwrt->trans->cfg->dccm2_offset,
dump_mem->data, sram2_len);
*dump_data = iwl_fw_error_next_data(*dump_data);
}
IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
#define ADD_LEN(len, item_len, const_len) \
do {size_t item = item_len; len += (!!item) * const_len + item; } \
while (0)
static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_shared_mem_cfg *mem_cfg)
{
size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
u32 fifo_len = 0;
int i;
if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
goto dump_txf;
/* Count RXF2 size */
ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
/* Count RXF1 sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++)
ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
dump_txf:
if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
goto dump_internal_txf;
/* Count TXF sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;
for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
hdr_len);
}
dump_internal_txf:
if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
goto out;
for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
out:
return fifo_len;
}
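
ADD_LEN() charges the per-chunk header only when the chunk itself is non-empty, which is what lets iwl_fw_fifo_len() above collapse the old per-fifo if/continue blocks. The macro exercised standalone:

#include <stdio.h>
#include <stddef.h>

#define ADD_LEN(len, item_len, const_len) \
	do { size_t item = item_len; len += (!!item) * const_len + item; } \
	while (0)

int main(void)
{
	size_t len = 0, hdr_len = 16;

	ADD_LEN(len, 512, hdr_len);	/* present fifo: 512 + 16 */
	ADD_LEN(len, 0, hdr_len);	/* absent fifo: adds nothing */
	printf("len = %zu\n", len);	/* 528 */
	return 0;
}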
static struct iwl_fw_error_dump_file *
_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dump_ptrs *fw_error_dump)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_info *dump_info;
struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_fw_dump_ptrs *fw_error_dump;
struct scatterlist *sg_dump_data;
u32 sram_len, sram_ofs;
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
0 : fwrt->trans->cfg->dccm2_len;
bool monitor_dump_only = false;
int i;
IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
/* there's no point in fw dump if the bus is dead */
if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
goto out;
}
if (fwrt->dump.trig &&
fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
monitor_dump_only = true;
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
goto out;
/* SRAM - include stack CCM if driver knows the values for it */
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
@ -684,112 +676,43 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
fifo_data_len = 0;
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
/* Count RXF2 size */
if (mem_cfg->rxfifo2_size) {
/* Add header info */
fifo_data_len +=
mem_cfg->rxfifo2_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
/* Count RXF1 sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++) {
if (!mem_cfg->lmac[i].rxfifo1_size)
continue;
/* Add header info */
fifo_data_len +=
mem_cfg->lmac[i].rxfifo1_size +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
size_t fifo_const_len = sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
/* Count TXF sizes */
for (i = 0; i < mem_cfg->num_lmacs; i++) {
int j;
for (j = 0; j < mem_cfg->num_txfifo_entries;
j++) {
if (!mem_cfg->lmac[i].txfifo_size[j])
continue;
/* Add header info */
fifo_data_len +=
fifo_const_len +
mem_cfg->lmac[i].txfifo_size[j];
}
}
}
if ((fwrt->fw->dbg_dump_mask &
BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
for (i = 0;
i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
i++) {
if (!mem_cfg->internal_txfifo_size[i])
continue;
/* Add header info */
fifo_data_len +=
mem_cfg->internal_txfifo_size[i] +
sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
}
}
fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg);
/* Make room for PRPH registers */
if (!fwrt->trans->cfg->gen2 &&
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
prph_len += iwl_fw_get_prph_len(fwrt);
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) +
fifo_data_len +
prph_len +
radio_len;
file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
file_len += sizeof(*dump_data) + sizeof(*dump_info);
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
/* Make room for the SMEM, if it exists */
if (smem_len)
file_len += sizeof(*dump_data) + smem_len +
sizeof(struct iwl_fw_error_dump_mem);
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
size_t hdr_len = sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_mem);
/* Make room for the secondary SRAM, if it exists */
if (sram2_len)
file_len += sizeof(*dump_data) + sram2_len +
sizeof(struct iwl_fw_error_dump_mem);
/* Dump SRAM only if no mem_tlvs */
if (!fwrt->fw->dbg.n_mem_tlv)
ADD_LEN(file_len, sram_len, hdr_len);
/* Make room for MEM segments */
for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
file_len += sizeof(*dump_data) +
le32_to_cpu(fw_dbg_mem[i].len) +
sizeof(struct iwl_fw_error_dump_mem);
}
/* Make room for all mem types that exist */
ADD_LEN(file_len, smem_len, hdr_len);
ADD_LEN(file_len, sram2_len, hdr_len);
for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
}
/* Make room for fw's virtual image pages, if it exists */
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block)
@ -809,28 +732,21 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
!fwrt->fw->n_dbg_mem_tlv)
file_len += sizeof(*dump_data) + sram_len +
sizeof(struct iwl_fw_error_dump_mem);
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
goto out;
}
if (!dump_file)
return NULL;
fw_error_dump->fwrt_ptr = dump_file;
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
@ -851,7 +767,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
dump_data = iwl_fw_error_next_data(dump_data);
}
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
/* Dump shared memory configuration */
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
@ -882,13 +798,13 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* We only dump the FIFOs if the FW is in error state */
if (fifo_data_len) {
if (fifo_len) {
iwl_fw_dump_fifos(fwrt, &dump_data);
if (radio_len)
iwl_read_radio_regs(fwrt, &dump_data);
}
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
@ -902,12 +818,32 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
/* In case we only want monitor dump, skip to dump transport data */
if (monitor_dump_only)
goto dump_trans_data;
goto out;
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM))
iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, smem_len,
sram2_len);
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
fwrt->fw->dbg.mem_tlv;
if (!fwrt->fw->dbg.n_mem_tlv)
iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
IWL_FW_ERROR_DUMP_MEM_SRAM);
for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
u32 len = le32_to_cpu(fw_dbg_mem[i].len);
u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
le32_to_cpu(fw_dbg_mem[i].data_type));
}
iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
fwrt->trans->cfg->smem_offset,
IWL_FW_ERROR_DUMP_MEM_SMEM);
iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
fwrt->trans->cfg->dccm2_offset,
IWL_FW_ERROR_DUMP_MEM_SRAM);
}
if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
@ -929,7 +865,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
}
/* Dump fw's virtual image */
if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block) {
@ -965,13 +901,44 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
ARRAY_SIZE(iwl_prph_dump_addr_9000));
}
dump_trans_data:
out:
dump_file->file_len = cpu_to_le32(file_len);
return dump_file;
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
{
struct iwl_fw_dump_ptrs *fw_error_dump;
struct iwl_fw_error_dump_file *dump_file;
struct scatterlist *sg_dump_data;
u32 file_len;
IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
/* there's no point in fw dump if the bus is dead */
if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
goto out;
}
fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
if (!fw_error_dump)
goto out;
dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
if (!dump_file) {
kfree(fw_error_dump);
goto out;
}
fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
fwrt->dump.trig);
file_len = le32_to_cpu(dump_file->file_len);
fw_error_dump->fwrt_len = file_len;
if (fw_error_dump->trans_ptr)
if (fw_error_dump->trans_ptr) {
file_len += fw_error_dump->trans_ptr->len;
dump_file->file_len = cpu_to_le32(file_len);
dump_file->file_len = cpu_to_le32(file_len);
}
sg_dump_data = alloc_sgtable(file_len);
if (sg_dump_data) {
@ -1006,15 +973,34 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
const struct iwl_fw_dbg_trigger_tlv *trigger)
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
{
unsigned int delay = 0;
struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
if (trigger)
delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
if (!iwl_dump_desc_no_alive)
return;
iwl_dump_desc_no_alive->trig_desc.type =
cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
iwl_dump_desc_no_alive->len = 0;
if (WARN_ON(fwrt->dump.desc))
iwl_fw_free_dump_desc(fwrt);
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
FW_DBG_TRIGGER_NO_ALIVE);
fwrt->dump.desc = iwl_dump_desc_no_alive;
iwl_fw_error_dump(fwrt);
clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
}
IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc, void *trigger,
unsigned int delay)
{
/*
* If the loading of the FW completed successfully, the next step is to
* get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
@ -1031,7 +1017,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
fwrt->smem_cfg.num_lmacs)
return -EIO;
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
return -EBUSY;
if (WARN_ON(fwrt->dump.desc))
@ -1052,25 +1039,38 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
const struct iwl_fw_dbg_trigger_tlv *trigger)
struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_fw_dump_desc *desc;
unsigned int delay = 0;
if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
iwl_force_nmi(fwrt->trans);
return 0;
if (trigger) {
u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
if (!le16_to_cpu(trigger->occurrences))
return 0;
if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
trig);
iwl_force_nmi(fwrt->trans);
return 0;
}
trigger->occurrences = cpu_to_le16(occurrences);
delay = le16_to_cpu(trigger->trig_dis_ms);
}
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
if (!desc)
return -ENOMEM;
desc->len = len;
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
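The hunk above adds the occurrences bookkeeping to iwl_fw_dbg_collect()
(and the next hunk removes it from iwl_fw_dbg_collect_trig()): a trigger
whose occurrences count is already zero is ignored, and every accepted
collection decrements the count. A standalone model of that flow (the
starting budget is invented for illustration):

#include <stdio.h>

static unsigned short occurrences = 2;	/* invented starting budget */

static int collect(void)
{
	if (!occurrences)
		return 0;	/* budget exhausted: silently skip */
	occurrences--;
	printf("collected, %u occurrences left\n", occurrences);
	return 1;
}

int main(void)
{
	collect();	/* collected, 1 occurrences left */
	collect();	/* collected, 0 occurrences left */
	collect();	/* skipped */
	return 0;
}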
@ -1078,13 +1078,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
{
u16 occurrences = le16_to_cpu(trigger->occurrences);
int ret, len = 0;
char buf[64];
if (!occurrences)
return 0;
if (fmt) {
va_list ap;
@ -1107,7 +1103,6 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
if (ret)
return ret;
trigger->occurrences = cpu_to_le16(occurrences - 1);
return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
@ -1118,17 +1113,17 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
int ret;
int i;
if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
"Invalid configuration %d\n", conf_id))
return -EINVAL;
/* EARLY START - firmware's configuration is hard coded */
if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
!fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
!fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
conf_id == FW_DBG_START_FROM_ALIVE)
return 0;
if (!fwrt->fw->dbg_conf_tlv[conf_id])
if (!fwrt->fw->dbg.conf_tlv[conf_id])
return -EINVAL;
if (fwrt->dump.conf != FW_DBG_INVALID)
@ -1136,8 +1131,8 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
fwrt->dump.conf);
/* Send all HCMDs for configuring the FW debug */
ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
struct iwl_host_cmd hcmd = {
.id = cmd->id,
@ -1183,7 +1178,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
fwrt->fw->dbg_dest_tlv) {
fwrt->fw->dbg.dest_tlv) {
/* wait before we collect the data till the DBGC stop */
udelay(500);
iwl_fw_dbg_restart_recording(fwrt, &params);

View File

@ -107,25 +107,25 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
const struct iwl_fw_dbg_trigger_tlv *trigger);
void *trigger, unsigned int delay);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,
const struct iwl_fw_dbg_trigger_tlv *trigger);
struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);
#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \
void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \
void *__dbg_trigger = (fw)->dbg.trigger_tlv[(id)]; \
unlikely(__dbg_trigger); \
})
static inline struct iwl_fw_dbg_trigger_tlv*
_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
{
return fw->dbg_trigger_tlv[id];
return fw->dbg.trigger_tlv[id];
}
#define iwl_fw_dbg_get_trigger(fw, id) ({ \
@ -154,12 +154,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
}
static inline bool
iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trig)
iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms)
{
unsigned long wind_jiff =
msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms));
u32 id = le32_to_cpu(trig->id);
unsigned long wind_jiff = msecs_to_jiffies(dis_ms);
/* If this is the first event checked, jump to update start ts */
if (fwrt->dump.non_collect_ts_start[id] &&
@ -179,7 +176,8 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
return false;
if (iwl_fw_dbg_no_trig_window(fwrt, trig)) {
if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id),
le16_to_cpu(trig->trig_dis_ms))) {
IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n",
trig->id);
return false;
@ -188,6 +186,30 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
}
static inline struct iwl_fw_dbg_trigger_tlv*
_iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
const enum iwl_fw_dbg_trigger id)
{
struct iwl_fw_dbg_trigger_tlv *trig;
if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
return NULL;
trig = _iwl_fw_dbg_get_trigger(fwrt->fw, id);
if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trig))
return NULL;
return trig;
}
#define iwl_fw_dbg_trigger_on(fwrt, wdev, id) ({ \
BUILD_BUG_ON(!__builtin_constant_p(id)); \
BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \
_iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
})
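The new _iwl_fw_dbg_trigger_on()/iwl_fw_dbg_trigger_on() pair folds the
three-step pattern callers previously open-coded (enabled check, TLV fetch,
stop-condition check) into one call; the mvm hunks later in this diff
convert their call sites to this shape (sketch of a caller, not a complete
function):

	struct iwl_fw_dbg_trigger_tlv *trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_MLME);
	if (!trig)
		return;
	/* trig->data can now be parsed; the no-collect window and the stop
	 * configuration were already checked by the helper */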
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@ -293,7 +315,7 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
return fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
fwrt->trans->cfg->d3_debug_data_length &&
fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
}
void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
@ -344,4 +366,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */

View File

@ -258,11 +258,75 @@ static ssize_t iwl_dbgfs_timestamp_marker_read(struct iwl_fw_runtime *fwrt,
FWRT_DEBUGFS_READ_WRITE_FILE_OPS(timestamp_marker, 16);
struct hcmd_write_data {
__be32 cmd_id;
__be32 flags;
__be16 length;
u8 data[0];
} __packed;
static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
size_t count)
{
size_t header_size = (sizeof(u32) * 2 + sizeof(u16)) * 2;
size_t data_size = (count - 1) / 2;
int ret;
struct hcmd_write_data *data;
struct iwl_host_cmd hcmd = {
.len = { 0, },
.data = { NULL, },
};
if (fwrt->ops && fwrt->ops->fw_running &&
!fwrt->ops->fw_running(fwrt->ops_ctx))
return -EIO;
if (count < header_size + 1 || count > 1024 * 4)
return -EINVAL;
data = kmalloc(data_size, GFP_KERNEL);
if (!data)
return -ENOMEM;
ret = hex2bin((u8 *)data, buf, data_size);
if (ret)
goto out;
hcmd.id = be32_to_cpu(data->cmd_id);
hcmd.flags = be32_to_cpu(data->flags);
hcmd.len[0] = be16_to_cpu(data->length);
hcmd.data[0] = data->data;
if (count != header_size + hcmd.len[0] * 2 + 1) {
IWL_ERR(fwrt,
"host command data size does not match header length\n");
ret = -EINVAL;
goto out;
}
if (fwrt->ops && fwrt->ops->send_hcmd)
ret = fwrt->ops->send_hcmd(fwrt->ops_ctx, &hcmd);
else
ret = -EPERM;
if (ret < 0)
goto out;
if (hcmd.flags & CMD_WANT_SKB)
iwl_free_resp(&hcmd);
out:
kfree(data);
return ret ?: count;
}
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
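The parser above expects a single hex string: a 20-character header
(big-endian cmd_id, flags, and payload length, which is where the
(sizeof(u32) * 2 + sizeof(u16)) * 2 header_size comes from) followed by the
hex-encoded payload and a trailing newline. A userspace sketch of a
well-formed write; the debugfs path and the all-zero command are
placeholders, not values taken from this diff:

#include <stdio.h>

int main(void)
{
	/* hypothetical location; the real path depends on the device */
	const char *path =
		"/sys/kernel/debug/iwlwifi/0000:00:14.3/iwlmvm/send_hcmd";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	/* be32 cmd_id, be32 flags, be16 length, no payload */
	fprintf(f, "%08x%08x%04x\n", 0x0u, 0x0u, 0x0u);
	fclose(f);
	return 0;
}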
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
return 0;
err:
IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");

View File

@ -328,6 +328,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
* @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
@ -345,6 +346,7 @@ enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_TX_LATENCY,
FW_DBG_TRIGGER_TDLS,
FW_DBG_TRIGGER_TX_STATUS,
FW_DBG_TRIGGER_NO_ALIVE,
/* must be last */
FW_DBG_TRIGGER_MAX,

View File

@ -337,7 +337,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* antenna the beacon should be transmitted
* @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
* from AP and will send it upon d0i3 exit.
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3: support LAR API V3
* @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
* @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
* thresholds reporting
@ -352,6 +352,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* power reduction.
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
* @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3
* @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response supports 11ax
* capability.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@ -392,7 +394,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V3 = (__force iwl_ucode_tlv_capa_t)73,
IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
@ -402,6 +404,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87,
IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88,
IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89,
IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
NUM_IWL_UCODE_TLV_CAPA

View File

@ -197,6 +197,29 @@ enum iwl_fw_type {
IWL_FW_MVM,
};
/**
* struct iwl_fw_dbg - debug data
*
* @dest_tlv: points to debug destination TLV (typically SRAM or DRAM)
* @n_dest_reg: num of reg_ops in dest_tlv
* @conf_tlv: array of pointers to configuration HCMDs
* @trigger_tlv: array of pointers to trigger TLVs
* @trigger_tlv_len: lengths of the @trigger_tlv entries
* @mem_tlv: Runtime addresses to dump
* @n_mem_tlv: number of runtime addresses
* @dump_mask: bitmask of dump regions
*/
struct iwl_fw_dbg {
struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
u8 n_dest_reg;
struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *mem_tlv;
size_t n_mem_tlv;
u32 dump_mask;
};
/**
* struct iwl_fw - variables associated with the firmware
*
@ -217,12 +240,6 @@ enum iwl_fw_type {
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
* @dbg_trigger_tlv: array of pointers to triggers TLVs
* @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
*/
struct iwl_fw {
u32 ucode_ver;
@ -250,15 +267,7 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_dbg_mem_tlv;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
u32 dbg_dump_mask;
struct iwl_fw_dbg dbg;
};
static inline const char *get_fw_dbg_mode_string(int mode)
@ -280,7 +289,7 @@ static inline const char *get_fw_dbg_mode_string(int mode)
static inline bool
iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
{
const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg.conf_tlv[id];
if (!conf_tlv)
return false;

View File

@ -71,6 +71,7 @@ struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
};
#define MAX_NUM_LMAC 2
@ -88,6 +89,7 @@ struct iwl_fwrt_shared_mem_cfg {
enum iwl_fw_runtime_status {
IWL_FWRT_STATUS_DUMPING = 0,
IWL_FWRT_STATUS_WAIT_ALIVE,
};
/**

View File

@ -168,12 +168,12 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
{
int i;
kfree(drv->fw.dbg_dest_tlv);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
kfree(drv->fw.dbg_conf_tlv[i]);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
kfree(drv->fw.dbg_trigger_tlv[i]);
kfree(drv->fw.dbg_mem_tlv);
kfree(drv->fw.dbg.dest_tlv);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++)
kfree(drv->fw.dbg.conf_tlv[i]);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++)
kfree(drv->fw.dbg.trigger_tlv[i]);
kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
@ -303,7 +303,7 @@ struct iwl_firmware_pieces {
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
size_t n_dbg_mem_tlv;
size_t n_mem_tlv;
};
/*
@ -936,7 +936,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
IWL_INFO(drv, "Found debug destination: %s\n",
get_fw_dbg_mode_string(mon_mode));
drv->fw.dbg_dest_reg_num = (dest_v1) ?
drv->fw.dbg.n_dest_reg = (dest_v1) ?
tlv_len -
offsetof(struct iwl_fw_dbg_dest_tlv_v1,
reg_ops) :
@ -944,8 +944,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
offsetof(struct iwl_fw_dbg_dest_tlv,
reg_ops);
drv->fw.dbg_dest_reg_num /=
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
drv->fw.dbg.n_dest_reg /=
sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]);
break;
}
@ -959,7 +959,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
if (conf->id >= ARRAY_SIZE(drv->fw.dbg.conf_tlv)) {
IWL_ERR(drv,
"Skip unknown configuration: %d\n",
conf->id);
@ -988,7 +988,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
(void *)tlv_data;
u32 trigger_id = le32_to_cpu(trigger->id);
if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
if (trigger_id >= ARRAY_SIZE(drv->fw.dbg.trigger_tlv)) {
IWL_ERR(drv,
"Skip unknown trigger: %u\n",
trigger->id);
@ -1015,7 +1015,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
drv->fw.dbg_dump_mask =
drv->fw.dbg.dump_mask =
le32_to_cpup((__le32 *)tlv_data);
break;
}
@ -1070,13 +1070,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
dbg_mem->data_type);
size = sizeof(*pieces->dbg_mem_tlv) *
(pieces->n_dbg_mem_tlv + 1);
(pieces->n_mem_tlv + 1);
n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
if (!n)
return -ENOMEM;
pieces->dbg_mem_tlv = n;
pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
pieces->n_dbg_mem_tlv++;
pieces->dbg_mem_tlv[pieces->n_mem_tlv] = *dbg_mem;
pieces->n_mem_tlv++;
break;
}
case IWL_UCODE_TLV_IML: {
@ -1256,7 +1256,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
/* dump all fw memory areas by default except d3 debug data */
fw->dbg_dump_mask = 0xfffdffff;
fw->dbg.dump_mask = 0xfffdffff;
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
@ -1323,21 +1323,21 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto out_free_fw;
if (pieces->dbg_dest_tlv_init) {
size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
drv->fw.dbg_dest_reg_num;
size_t dbg_dest_size = sizeof(*drv->fw.dbg.dest_tlv) +
sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
drv->fw.dbg.n_dest_reg;
drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
drv->fw.dbg.dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
if (!drv->fw.dbg_dest_tlv)
if (!drv->fw.dbg.dest_tlv)
goto out_free_fw;
if (*pieces->dbg_dest_ver == 0) {
memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
memcpy(drv->fw.dbg.dest_tlv, pieces->dbg_dest_tlv_v1,
dbg_dest_size);
} else {
struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
drv->fw.dbg_dest_tlv;
drv->fw.dbg.dest_tlv;
dest_tlv->version = pieces->dbg_dest_tlv->version;
dest_tlv->monitor_mode =
@ -1352,8 +1352,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
pieces->dbg_dest_tlv->base_shift;
memcpy(dest_tlv->reg_ops,
pieces->dbg_dest_tlv->reg_ops,
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
drv->fw.dbg_dest_reg_num);
sizeof(drv->fw.dbg.dest_tlv->reg_ops[0]) *
drv->fw.dbg.n_dest_reg);
/* In version 1 of the destination tlv, which is
* relevant for internal buffer exclusively,
@ -1369,15 +1369,13 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
}
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.conf_tlv); i++) {
if (pieces->dbg_conf_tlv[i]) {
drv->fw.dbg_conf_tlv_len[i] =
pieces->dbg_conf_tlv_len[i];
drv->fw.dbg_conf_tlv[i] =
drv->fw.dbg.conf_tlv[i] =
kmemdup(pieces->dbg_conf_tlv[i],
drv->fw.dbg_conf_tlv_len[i],
pieces->dbg_conf_tlv_len[i],
GFP_KERNEL);
if (!drv->fw.dbg_conf_tlv[i])
if (!pieces->dbg_conf_tlv_len[i])
goto out_free_fw;
}
}
@ -1404,7 +1402,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
sizeof(struct iwl_fw_dbg_trigger_tdls);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg.trigger_tlv); i++) {
if (pieces->dbg_trigger_tlv[i]) {
/*
* If the trigger isn't long enough, WARN and exit.
@ -1417,22 +1415,22 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
(trigger_tlv_sz[i] +
sizeof(struct iwl_fw_dbg_trigger_tlv))))
goto out_free_fw;
drv->fw.dbg_trigger_tlv_len[i] =
drv->fw.dbg.trigger_tlv_len[i] =
pieces->dbg_trigger_tlv_len[i];
drv->fw.dbg_trigger_tlv[i] =
drv->fw.dbg.trigger_tlv[i] =
kmemdup(pieces->dbg_trigger_tlv[i],
drv->fw.dbg_trigger_tlv_len[i],
drv->fw.dbg.trigger_tlv_len[i],
GFP_KERNEL);
if (!drv->fw.dbg_trigger_tlv[i])
if (!drv->fw.dbg.trigger_tlv[i])
goto out_free_fw;
}
}
/* Now that we can no longer fail, copy information */
drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
drv->fw.dbg.mem_tlv = pieces->dbg_mem_tlv;
pieces->dbg_mem_tlv = NULL;
drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
drv->fw.dbg.n_mem_tlv = pieces->n_mem_tlv;
/*
* The (size - 16) / 12 formula is based on the information recorded
@ -1473,6 +1471,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
break;
default:
WARN(1, "Invalid fw type %d\n", fw->type);
/* fall through */
case IWL_FW_MVM:
op = &iwlwifi_opmode_table[MVM_OP_MODE];
break;

View File

@ -1335,6 +1335,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
fw_has_capa(&fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
@ -1350,7 +1351,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
}
rsp = (void *)hcmd.resp_pkt->data;
if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
empty_otp = !!(le32_to_cpu(rsp->general.flags) &
NVM_GENERAL_FLAGS_EMPTY_OTP);
if (empty_otp)
IWL_INFO(trans, "OTP is empty\n");
nvm = kzalloc(sizeof(*nvm) +
@ -1374,6 +1377,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
/* Initialize general data */
nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
nvm->n_hw_addrs = rsp->general.n_hw_addrs;
if (nvm->n_hw_addrs == 0)
IWL_WARN(trans,
"Firmware declares no reserved mac addresses. OTP is empty: %d\n",
empty_otp);
/* Initialize MAC sku data */
mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);

View File

@ -725,7 +725,7 @@ struct iwl_dram_data {
* @dbg_dest_tlv: points to the destination TLV for debug
* @dbg_conf_tlv: array of pointers to configuration TLVs for debug
* @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
* @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
* @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv
* @num_blocks: number of blocks in fw_mon
* @fw_mon: address of the buffers for firmware monitor
* @system_pm_mode: the system-wide power management mode in use.
@ -778,7 +778,7 @@ struct iwl_trans {
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u32 dbg_dump_mask;
u8 dbg_dest_reg_num;
u8 dbg_n_dest_reg;
int num_blocks;
struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];

View File

@ -666,16 +666,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
};
int ret, bt_force_ant_mode;
for (bt_force_ant_mode = 0;
bt_force_ant_mode < ARRAY_SIZE(modes_str);
bt_force_ant_mode++) {
if (!strcmp(buf, modes_str[bt_force_ant_mode]))
break;
}
if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
return -EINVAL;
ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
if (ret < 0)
return ret;
bt_force_ant_mode = ret;
ret = 0;
mutex_lock(&mvm->mutex);
if (mvm->bt_force_ant_mode == bt_force_ant_mode)
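This hunk replaces an open-coded lookup loop with the kernel's
match_string() helper, which returns the index of the matching array entry
or -EINVAL when nothing matches. A userspace model of those semantics (the
array contents are invented; the real modes_str values are outside this
hunk):

#include <stdio.h>
#include <string.h>

#define EINVAL 22

static int match_string(const char * const *arr, size_t n, const char *s)
{
	for (size_t i = 0; i < n; i++)
		if (arr[i] && !strcmp(arr[i], s))
			return (int)i;
	return -EINVAL;
}

int main(void)
{
	static const char * const modes_str[] = { "bt", "wifi", "any" };

	printf("%d\n", match_string(modes_str, 3, "wifi")); /* 1 */
	printf("%d\n", match_string(modes_str, 3, "lte"));  /* -22 */
	return 0;
}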

View File

@ -299,6 +299,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
static const u16 alive_cmd[] = { MVM_ALIVE };
set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
if (ucode_type == IWL_UCODE_REGULAR &&
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
@ -369,6 +370,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
return 0;
}
@ -699,8 +701,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
enabled = !!(wifi_pkg->package.elements[1].integer.value);
n_profiles = wifi_pkg->package.elements[2].integer.value;
/* in case of BIOS bug */
if (n_profiles <= 0) {
/*
* Check the validity of n_profiles. The EWRD profiles start
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILE_NUM - 1.
*/
if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@ -1022,7 +1028,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
mvm->fwrt.dump.conf = FW_DBG_INVALID;
/* if we have a destination, assume EARLY START */
if (mvm->fw->dbg_dest_tlv)
if (mvm->fw->dbg.dest_tlv)
mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);

View File

@ -1487,12 +1487,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
IWL_MVM_MISSED_BEACONS_THRESHOLD)
ieee80211_beacon_loss(vif);
if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
FW_DBG_TRIGGER_MISSED_BEACONS))
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
if (!trigger)
return;
trigger = iwl_fw_dbg_get_trigger(mvm->fw,
FW_DBG_TRIGGER_MISSED_BEACONS);
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
stop_trig_missed_bcon_since_rx =
@ -1500,11 +1499,6 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
/* TODO: implement start trigger */
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif),
trigger))
return;
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);

View File

@ -857,16 +857,13 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_BA);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
switch (action) {
case IEEE80211_AMPDU_TX_OPERATIONAL: {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
@ -1231,12 +1228,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
iwl_mvm_del_aux_sta(mvm);
/*
* Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
* won't be called in this case).
* Clear the IN_HW_RESTART and HW_RESTART_REQUESTED flags when stopping
* the hw (as restart_complete() won't be called in this case and
* mac80211 won't execute the restart).
* But make sure to clean up interfaces that have gone down before/while
* the HW restart was requested.
*/
if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status))
ieee80211_iterate_interfaces(mvm->hw, 0,
iwl_mvm_cleanup_iterator, mvm);
@ -2802,14 +2802,12 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_tdls *tdls_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_TDLS);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
tdls_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (!(tdls_trig->action_bitmap & BIT(action)))
return;
@ -4491,14 +4489,12 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MLME);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (event->u.mlme.data == ASSOC_EVENT) {
if (event->u.mlme.status == MLME_DENIED)
@ -4533,14 +4529,12 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_BA);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
return;

View File

@ -477,15 +477,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
u32 status;
int resp_len, n_channels;
u16 mcc;
bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
return ERR_PTR(-EOPNOTSUPP);
cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
if (!resp_v2)
cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);
IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
alpha2[0], alpha2[1], src_id);
@ -497,7 +493,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
pkt = cmd.resp_pkt;
/* Extract MCC response */
if (resp_v2) {
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp->n_channels);
@ -509,9 +506,9 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
goto exit;
}
} else {
struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
n_channels = __le32_to_cpu(mcc_resp_v1->n_channels);
n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
resp_len = sizeof(struct iwl_mcc_update_resp) +
n_channels * sizeof(__le32);
resp_cp = kzalloc(resp_len, GFP_KERNEL);
@ -520,12 +517,14 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
goto exit;
}
resp_cp->status = mcc_resp_v1->status;
resp_cp->mcc = mcc_resp_v1->mcc;
resp_cp->cap = mcc_resp_v1->cap;
resp_cp->source_id = mcc_resp_v1->source_id;
resp_cp->n_channels = mcc_resp_v1->n_channels;
memcpy(resp_cp->channels, mcc_resp_v1->channels,
resp_cp->status = mcc_resp_v3->status;
resp_cp->mcc = mcc_resp_v3->mcc;
resp_cp->cap = cpu_to_le16(mcc_resp_v3->cap);
resp_cp->source_id = mcc_resp_v3->source_id;
resp_cp->time = mcc_resp_v3->time;
resp_cp->geo_info = mcc_resp_v3->geo_info;
resp_cp->n_channels = mcc_resp_v3->n_channels;
memcpy(resp_cp->channels, mcc_resp_v3->channels,
n_channels * sizeof(__le32));
}

View File

@ -565,10 +565,23 @@ static bool iwl_mvm_fwrt_fw_running(void *ctx)
return iwl_mvm_firmware_running(ctx);
}
static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
int ret;
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, host_cmd);
mutex_unlock(&mvm->mutex);
return ret;
}
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
.fw_running = iwl_mvm_fwrt_fw_running,
.send_hcmd = iwl_mvm_fwrt_send_hcmd,
};
static struct iwl_op_mode *
@ -604,9 +617,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (cfg->max_rx_agg_size)
hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
else
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
if (cfg->max_tx_agg_size)
hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
else
hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
op_mode = hw->priv;
@ -748,12 +765,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_trans_configure(mvm->trans, &trans_cfg);
trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
trans->dbg_dest_tlv = mvm->fw->dbg.dest_tlv;
trans->dbg_n_dest_reg = mvm->fw->dbg.n_dest_reg;
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@ -784,6 +801,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mutex_lock(&mvm->mutex);
iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
err = iwl_run_init_mvm_ucode(mvm, true);
if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status))
iwl_fw_alive_error_dump(&mvm->fwrt);
if (!iwlmvm_mod_params.init_dbg || !err)
iwl_mvm_stop_device(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@ -953,15 +972,13 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_cmd *cmds_trig;
int i;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
FW_DBG_TRIGGER_FW_NOTIF);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
cmds_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
/* don't collect on CMD 0 */
if (!cmds_trig->cmds[i].cmd_id)
@ -1223,7 +1240,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
NULL);
NULL, 0);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;

View File

@ -433,13 +433,14 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *tx_blocked_vif =
rcu_dereference(mvm->csa_tx_blocked_vif);
struct iwl_fw_dbg_trigger_tlv *trig;
struct ieee80211_vif *vif = mvmsta->vif;
/* We have tx blocked stations (with CS bit). If we heard
* frames from a blocked station on a new channel we can
* TX to it again.
*/
if (unlikely(tx_blocked_vif) &&
mvmsta->vif == tx_blocked_vif) {
if (unlikely(tx_blocked_vif) && vif == tx_blocked_vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@ -450,23 +451,18 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rs_update_last_rssi(mvm, mvmsta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_tlv *trig;
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_RSSI);
if (trig && ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
bool trig_check;
s32 rssi;
trig = iwl_fw_dbg_get_trigger(mvm->fw,
FW_DBG_TRIGGER_RSSI);
rssi_trig = (void *)trig->data;
rssi = le32_to_cpu(rssi_trig->rssi);
trig_check =
iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(mvmsta->vif),
trig);
if (trig_check && rx_status->signal < rssi)
if (rx_status->signal < rssi)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}
@ -693,15 +689,12 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
struct iwl_fw_dbg_trigger_stats *trig_stats;
u32 trig_offset, trig_thold;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_STATS);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
trig_stats = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
trig_offset = le32_to_cpu(trig_stats->stop_offset);
trig_thold = le32_to_cpu(trig_stats->stop_threshold);

View File

@ -923,6 +923,185 @@ static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm,
}
}
static void
iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status)
{
/*
* Unfortunately, we have to leave the mac80211 data
* incorrect for the case that we receive an HE-MU
* transmission and *don't* have the HE phy data (due
* to the bits being used for TSF). This shouldn't
* happen though, as management frames where we need
* the TSF/timers are not transmitted in HE-MU.
*/
u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
switch (ru) {
case 0 ... 36:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
offs = ru;
break;
case 37 ... 52:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
offs = ru - 37;
break;
case 53 ... 60:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
offs = ru - 53;
break;
case 61 ... 64:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
offs = ru - 61;
break;
case 65 ... 66:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
offs = ru - 65;
break;
case 67:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
case 68:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
break;
}
he->data2 |= le16_encode_bits(offs,
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
if (he_mu) {
#define CHECK_BW(bw) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
CHECK_BW(20);
CHECK_BW(40);
CHECK_BW(80);
CHECK_BW(160);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
}
}
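CHECK_BW() above uses BUILD_BUG_ON() to prove at compile time that the
radiotap bandwidth encoding lines up with the rate_n_flags channel-width
field, so the FIELD_GET() result can be copied across without a lookup
table. A userspace model of the same idea with _Static_assert (all constant
values here are invented for illustration):

#define RADIOTAP_BW_FROM_SIG_A_BW_20MHZ	0
#define RADIOTAP_BW_FROM_SIG_A_BW_40MHZ	1
#define RATE_MCS_CHAN_WIDTH_POS		11
#define RATE_MCS_CHAN_WIDTH_20		(0 << RATE_MCS_CHAN_WIDTH_POS)
#define RATE_MCS_CHAN_WIDTH_40		(1 << RATE_MCS_CHAN_WIDTH_POS)

#define CHECK_BW(bw) \
	_Static_assert(RADIOTAP_BW_FROM_SIG_A_BW_##bw##MHZ == \
		       (RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS), \
		       "bandwidth constants diverged")

CHECK_BW(20);
CHECK_BW(40);

int main(void) { return 0; }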
static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status,
u64 he_phy_data, u32 rate_n_flags,
int queue)
{
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
bool sigb_data;
u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN;
u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN;
he->data1 |= cpu_to_le16(d1known);
he->data2 |= cpu_to_le16(d2known);
he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA6_TXOP);
he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
switch (he_type) {
case RATE_MCS_HE_TYPE_MU:
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
he_phy_data) ==
IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
if (sigb_data)
iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
/* fall through */
case RATE_MCS_HE_TYPE_TRIG:
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
he->data5 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
break;
case RATE_MCS_HE_TYPE_SU:
case RATE_MCS_HE_TYPE_EXT_SU:
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
he->data3 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG,
he_phy_data),
IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
break;
}
switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) {
case IWL_RX_HE_PHY_INFO_TYPE_MU:
case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO:
case IWL_RX_HE_PHY_INFO_TYPE_TB:
iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags,
he, he_mu, rx_status);
break;
default:
/* nothing */
break;
}
}
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_rx_mpdu_desc *desc,
u32 rate_n_flags, u16 phy_info, int queue)
@ -933,9 +1112,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
u64 he_phy_data = HE_PHY_DATA_INVAL;
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_radiotap_he_mu *he_mu = NULL;
u32 he_type = 0xffffffff;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
u8 stbc, ltf;
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
@ -953,25 +1131,19 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
};
unsigned int radiotap_len = 0;
bool overload = phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD;
bool sigb_data = false;
he = skb_put_data(skb, &known, sizeof(known));
radiotap_len += sizeof(known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
if (mvm->trans->cfg->device_family >=
IWL_DEVICE_FAMILY_22560)
if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
else
he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
if (he_type == RATE_MCS_HE_TYPE_MU) {
he_mu = skb_put_data(skb, &mu_known,
sizeof(mu_known));
he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
radiotap_len += sizeof(mu_known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
}
@ -980,59 +1152,20 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
/* temporarily hide the radiotap data */
__skb_pull(skb, radiotap_len);
if (overload && he_type == RATE_MCS_HE_TYPE_SU) {
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, he_phy_data))
he->data3 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
if (he_phy_data != HE_PHY_DATA_INVAL &&
he_type == RATE_MCS_HE_TYPE_SU) {
/* report the AMPDU-EOF bit on single frames */
if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
} else if (overload && he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
he_phy_data) ==
IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
if (sigb_data)
iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
}
if (he_phy_data != HE_PHY_DATA_INVAL &&
(he_type == RATE_MCS_HE_TYPE_SU ||
he_type == RATE_MCS_HE_TYPE_MU)) {
u8 bss_color = FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
he_phy_data);
if (bss_color) {
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
he->data3 |= cpu_to_le16(bss_color);
}
}
if (he_phy_data != HE_PHY_DATA_INVAL)
iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status,
he_phy_data, rate_n_flags, queue);
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
@ -1056,84 +1189,12 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
}
if (he_phy_data != HE_PHY_DATA_INVAL &&
(FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) ==
IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO ||
FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data) ==
IWL_RX_HE_PHY_INFO_TYPE_TB_EXT_INFO)) {
/*
* Unfortunately, we have to leave the mac80211 data
* incorrect for the case that we receive an HE-MU
* transmission and *don't* have the HE phy data (due
* to the bits being used for TSF). This shouldn't
* happen though, as management frames where we need
* the TSF/timers are not transmitted in HE-MU.
*/
u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
/* actually data is filled in mac80211 */
if (he_type == RATE_MCS_HE_TYPE_SU ||
he_type == RATE_MCS_HE_TYPE_EXT_SU)
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
switch (ru) {
case 0 ... 36:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
offs = ru;
break;
case 37 ... 52:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
offs = ru - 37;
break;
case 53 ... 60:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
offs = ru - 53;
break;
case 61 ... 64:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
offs = ru - 61;
break;
case 65 ... 66:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
offs = ru - 65;
break;
case 67:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
case 68:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
break;
}
he->data2 |=
le16_encode_bits(offs,
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
if (he_mu) {
#define CHECK_BW(bw) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
CHECK_BW(20);
CHECK_BW(40);
CHECK_BW(80);
CHECK_BW(160);
he->data2 |=
le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
}
} else if (he_type == RATE_MCS_HE_TYPE_SU ||
he_type == RATE_MCS_HE_TYPE_EXT_SU) {
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
}
stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
@ -1202,9 +1263,8 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
switch (he_type) {
case RATE_MCS_HE_TYPE_SU:
case RATE_MCS_HE_TYPE_EXT_SU: {
if (he_type == RATE_MCS_HE_TYPE_SU ||
he_type == RATE_MCS_HE_TYPE_EXT_SU) {
u16 val;
/* LTF syms correspond to streams */
@ -1234,31 +1294,10 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
rx_status->nss);
val = 0;
}
he->data5 |=
le16_encode_bits(val,
IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
}
break;
case RATE_MCS_HE_TYPE_MU: {
u16 val;
if (he_phy_data == HE_PHY_DATA_INVAL)
break;
val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
he_phy_data);
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
he->data5 |=
cpu_to_le16(FIELD_PREP(
IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
val));
}
break;
case RATE_MCS_HE_TYPE_TRIG:
/* not supported */
break;
}
}
@ -1424,6 +1463,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
IWL_RX_MPDU_REORDER_BAID_MASK) >>
IWL_RX_MPDU_REORDER_BAID_SHIFT);
struct iwl_fw_dbg_trigger_tlv *trig;
struct ieee80211_vif *vif = mvmsta->vif;
if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
!is_multicast_ether_addr(hdr->addr1) &&
@ -1436,8 +1477,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
* frames from a blocked station on a new channel we can
* TX to it again.
*/
if (unlikely(tx_blocked_vif) &&
tx_blocked_vif == mvmsta->vif) {
if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(tx_blocked_vif);
@ -1448,23 +1488,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rs_update_last_rssi(mvm, mvmsta, rx_status);
if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_tlv *trig;
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_RSSI);
if (trig && ieee80211_is_beacon(hdr->frame_control)) {
struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
bool trig_check;
s32 rssi;
trig = iwl_fw_dbg_get_trigger(mvm->fw,
FW_DBG_TRIGGER_RSSI);
rssi_trig = (void *)trig->data;
rssi = le32_to_cpu(rssi_trig->rssi);
trig_check =
iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(mvmsta->vif),
trig);
if (trig_check && rx_status->signal < rssi)
if (rx_status->signal < rssi)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
NULL);
}

View File

@ -1448,6 +1448,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
IWL_SCAN_NUM_OF_FRAGS;
cmd->v8.general_flags2 =
IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
}
cmd->scan_start_mac_id = scan_vif->id;

View File

@ -254,17 +254,14 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_time_event *te_trig;
int i;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt,
ieee80211_vif_to_wdev(te_data->vif),
FW_DBG_TRIGGER_TIME_EVENT);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
te_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(te_data->vif),
trig))
return;
for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
u32 trig_action_bitmap =

View File

@ -79,15 +79,12 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
return;
@ -1414,15 +1411,13 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tx_status *status_trig;
int i;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
FW_DBG_TRIGGER_TX_STATUS);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
status_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
return;
for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
/* don't collect on status 0 */
if (!status_trig->statuses[i].status)


@ -1238,14 +1238,12 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_mlme *trig_mlme;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MLME);
if (!trig)
goto out;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
trig_mlme = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
goto out;
if (trig_mlme->stop_connection_loss &&
--trig_mlme->stop_connection_loss)
@ -1430,14 +1428,12 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct iwl_fw_dbg_trigger_tlv *trig;
struct iwl_fw_dbg_trigger_ba *ba_trig;
if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_BA);
if (!trig)
return;
trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
ba_trig = (void *)trig->data;
if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
ieee80211_vif_to_wdev(vif), trig))
return;
if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
return;


@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
kfree(trans_pcie->rxq);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
struct iwl_rb_allocator *rba)
{
spin_lock(&rba->lock);
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
spin_unlock(&rba->lock);
}
/*
* iwl_pcie_rx_reuse_rbd - Recycle used RBDs
*
@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
/* Move the 2 RBDs to the allocator ownership. The allocator has
another 6 from the pool for the request completion. */
spin_lock(&rba->lock);
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
spin_unlock(&rba->lock);
iwl_pcie_rx_move_to_allocator(rxq, rba);
atomic_inc(&rba->req_pending);
queue_work(rba->alloc_wq, &rba->rx_alloc);
@ -1400,10 +1406,18 @@ restart:
IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
while (i != r) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct iwl_rx_mem_buffer *rxb;
/* number of RBDs still waiting for page allocation */
u32 rb_pending_alloc =
atomic_read(&trans_pcie->rba.req_pending) *
RX_CLAIM_REQ_ALLOC;
if (unlikely(rxq->used_count == rxq->queue_size / 2))
if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
!emergency)) {
iwl_pcie_rx_move_to_allocator(rxq, rba);
emergency = true;
}
rxb = iwl_pcie_get_rxb(trans, rxq, i);
if (!rxb)
@ -1425,17 +1439,13 @@ restart:
iwl_pcie_rx_allocator_get(trans, rxq);
if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
struct iwl_rb_allocator *rba = &trans_pcie->rba;
/* Add the remaining empty RBDs for allocator use */
spin_lock(&rba->lock);
list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
spin_unlock(&rba->lock);
iwl_pcie_rx_move_to_allocator(rxq, rba);
} else if (emergency) {
count++;
if (count == 8) {
count = 0;
if (rxq->used_count < rxq->queue_size / 3)
if (rb_pending_alloc < rxq->queue_size / 3)
emergency = false;
rxq->read = i;
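
The emergency heuristic above now keys off the allocator backlog rather than the queue's used_count: req_pending requests of RX_CLAIM_REQ_ALLOC RBDs each are still waiting for pages. A hedged sketch of the resulting hysteresis (the constant follows the allocator comment above, 2 moved + 6 from the pool; the standalone function is illustrative only):

#define RX_CLAIM_REQ_ALLOC 8	/* RBDs granted per allocation request */

static bool rx_update_emergency(bool emergency, u32 req_pending, u32 queue_size)
{
	/* RBDs still waiting for page allocation, as computed in the hunk */
	u32 rb_pending_alloc = req_pending * RX_CLAIM_REQ_ALLOC;

	if (!emergency && rb_pending_alloc >= queue_size / 2)
		return true;	/* allocator has fallen behind: borrow pages */
	if (emergency && rb_pending_alloc < queue_size / 3)
		return false;	/* backlog drained: back to normal operation */
	return emergency;
}

With a 64-entry queue (a plausible size, not taken from the diff), emergency mode starts once four requests (32 RBDs) are outstanding and ends when the backlog drops to two (16 RBDs).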


@ -931,7 +931,7 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
for (i = 0; i < trans->dbg_dest_reg_num; i++) {
for (i = 0; i < trans->dbg_n_dest_reg; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);


@ -438,6 +438,8 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
return -ENOMEM;
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
skb_frag_size(frag));
if (tb_idx < 0)
return tb_idx;
out_meta->tbs |= BIT(tb_idx);
}


@ -2013,6 +2013,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
return -EINVAL;
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
if (tb_idx < 0)
return tb_idx;
out_meta->tbs |= BIT(tb_idx);
}
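
Both fragment-mapping hunks above add the same guard: the helpers return the buffer-table index on success and, inferred from the check itself, a negative errno when the TFD is full or the DMA mapping fails, and BIT() on a negative index would be an undefined shift. The shared shape, schematically (build_tb is a hypothetical stand-in for either helper):

	tb_idx = build_tb(trans, tfd, tb_phys, skb_frag_size(frag));
	if (tb_idx < 0)
		return tb_idx;		/* propagate, never BIT(negative) */
	out_meta->tbs |= BIT(tb_idx);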


@ -13,44 +13,5 @@ config MT76x02_USB
tristate
select MT76_USB
config MT76x0_COMMON
tristate
select MT76x02_LIB
config MT76x2_COMMON
tristate
select MT76x02_LIB
config MT76x0U
tristate "MediaTek MT76x0U (USB) support"
select MT76x0_COMMON
select MT76x02_USB
depends on MAC80211
depends on USB
help
This adds support for MT7610U-based wireless USB dongles.
config MT76x0E
tristate "MediaTek MT76x0E (PCIe) support"
select MT76x0_COMMON
depends on MAC80211
depends on PCI
help
This adds support for MT7610/MT7630-based wireless PCIe devices.
config MT76x2E
tristate "MediaTek MT76x2E (PCIe) support"
select MT76x2_COMMON
depends on MAC80211
depends on PCI
---help---
This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
config MT76x2U
tristate "MediaTek MT76x2U (USB) support"
select MT76x2_COMMON
select MT76x02_USB
depends on MAC80211
depends on USB
help
This adds support for MT7612U-based wireless USB dongles.
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"


@ -1,11 +1,7 @@
obj-$(CONFIG_MT76_CORE) += mt76.o
obj-$(CONFIG_MT76_USB) += mt76-usb.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
obj-$(CONFIG_MT76x2_COMMON) += mt76x2-common.o
obj-$(CONFIG_MT76x2E) += mt76x2e.o
obj-$(CONFIG_MT76x2U) += mt76x2u.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o
@ -14,24 +10,13 @@ mt76-usb-y := usb.o usb_trace.o usb_mcu.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
CFLAGS_mt76x02_trace.o := -I$(src)
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
mt76x02_txrx.o mt76x02_trace.o
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
mt76x2-common-y := \
mt76x2_eeprom.o mt76x2_tx_common.o mt76x2_mac_common.o \
mt76x2_init_common.o mt76x2_common.o mt76x2_phy_common.o \
mt76x2_debugfs.o mt76x2_mcu_common.o
mt76x2e-y := \
mt76x2_pci.o mt76x2_dma.o \
mt76x2_main.o mt76x2_init.o mt76x2_tx.o \
mt76x2_core.o mt76x2_mac.o mt76x2_mcu.o mt76x2_phy.o \
mt76x2_dfs.o mt76x2_trace.o
mt76x2u-y := \
mt76x2_usb.o mt76x2u_init.o mt76x2u_main.o mt76x2u_mac.o \
mt76x2u_mcu.o mt76x2u_phy.o mt76x2u_core.o
CFLAGS_mt76x2_trace.o := -I$(src)
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/


@ -56,6 +56,35 @@ mt76_queues_read(struct seq_file *s, void *data)
return 0;
}
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len)
{
int i;
seq_printf(file, "%10s:", str);
for (i = 0; i < len; i++)
seq_printf(file, " %2d", val[i]);
seq_puts(file, "\n");
}
EXPORT_SYMBOL_GPL(mt76_seq_puts_array);
static int mt76_read_rate_txpower(struct seq_file *s, void *data)
{
struct mt76_dev *dev = dev_get_drvdata(s->private);
mt76_seq_puts_array(s, "CCK", dev->rate_power.cck,
ARRAY_SIZE(dev->rate_power.cck));
mt76_seq_puts_array(s, "OFDM", dev->rate_power.ofdm,
ARRAY_SIZE(dev->rate_power.ofdm));
mt76_seq_puts_array(s, "STBC", dev->rate_power.stbc,
ARRAY_SIZE(dev->rate_power.stbc));
mt76_seq_puts_array(s, "HT", dev->rate_power.ht,
ARRAY_SIZE(dev->rate_power.ht));
mt76_seq_puts_array(s, "VHT", dev->rate_power.vht,
ARRAY_SIZE(dev->rate_power.vht));
return 0;
}
struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
struct dentry *dir;
@ -72,6 +101,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);
debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
debugfs_create_devm_seqfile(dev->dev, "rate_txpower", dir,
mt76_read_rate_txpower);
return dir;
}
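
Given the "%10s:" / " %2d" format strings above, reading the new rate_txpower file yields one right-aligned row per modulation, along these lines (the power values are illustrative, not from real hardware):

       CCK:  8  8  6  6
      OFDM:  8  8  8  8  6  6  4  4
      STBC:  8  8  6  6  4  4  2  0  0  0
        HT:  8  8  8  8  6  6  4  4  8  8  8  8  6  6  4  4
       VHT:  8  8  8  8  6  6  4  4  2  0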


@ -550,6 +550,12 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
struct mt76_wcid *wcid = status->wcid;
bool ps;
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
if (sta)
wcid = status->wcid = (struct mt76_wcid *) sta->drv_priv;
}
if (!wcid || !wcid->sta)
return;


@ -46,6 +46,30 @@ static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
__iowrite32_copy(dev->mmio.regs + offset, data, len >> 2);
}
static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *data, int len)
{
while (len > 0) {
mt76_mmio_wr(dev, data->reg, data->value);
data++;
len--;
}
return 0;
}
static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *data, int len)
{
while (len > 0) {
data->value = mt76_mmio_rr(dev, data->reg);
data++;
len--;
}
return 0;
}
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {
@ -53,6 +77,8 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
.rmw = mt76_mmio_rmw,
.wr = mt76_mmio_wr,
.copy = mt76_mmio_copy,
.wr_rp = mt76_mmio_wr_rp,
.rd_rp = mt76_mmio_rd_rp,
};
dev->bus = &mt76_mmio_ops;
@ -60,6 +86,7 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
skb_queue_head_init(&dev->mmio.mcu.res_q);
init_waitqueue_head(&dev->mmio.mcu.wait);
spin_lock_init(&dev->mmio.irq_lock);
mutex_init(&dev->mmio.mcu.mutex);
}
EXPORT_SYMBOL_GPL(mt76_mmio_init);
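
The new wr_rp/rd_rp bus ops give MMIO devices the same batched register-pair interface the USB MCU path uses, so shared init tables can be replayed through dev->bus regardless of transport. A usage sketch (the register values are copied from the common_mac_reg_table further down; the wrapper function is illustrative):

static void program_common_regs(struct mt76_dev *dev)
{
	static const struct mt76_reg_pair tab[] = {
		{ MT_MAC_SYS_CTRL, 0x00000000 },
		{ MT_RX_FILTR_CFG, 0x00017f97 },
	};

	/* resolves to mt76_mmio_wr_rp() here, which ignores the base
	 * argument; USB devices route the same call through their MCU */
	dev->bus->wr_rp(dev, 0, tab, ARRAY_SIZE(tab));
}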


@ -122,6 +122,7 @@ struct mt76_queue {
dma_addr_t desc_dma;
struct sk_buff *rx_head;
struct page_frag_cache rx_page;
spinlock_t rx_page_lock;
};
struct mt76_mcu_ops {
@ -261,8 +262,6 @@ struct mt76_driver_ops {
void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool ps);
s8 (*get_max_txpwr_adj)(struct mt76_dev *dev,
const struct ieee80211_tx_rate *rate);
};
struct mt76_channel_state {
@ -275,6 +274,19 @@ struct mt76_sband {
struct mt76_channel_state *chan;
};
struct mt76_rate_power {
union {
struct {
s8 cck[4];
s8 ofdm[8];
s8 stbc[10];
s8 ht[16];
s8 vht[10];
};
s8 all[48];
};
};
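
The relocated mt76_rate_power now carries the 10-entry stbc[] block, which is why the flat view grows from all[38] (removed below) to all[48]: 4 + 8 + 10 + 16 + 10 = 48. An illustrative compile-time check of that invariant (not in the driver):

static inline void mt76_rate_power_check(void)
{
	/* the flat all[] view must cover the per-modulation arrays exactly */
	BUILD_BUG_ON(sizeof(((struct mt76_rate_power *)0)->all) !=
		     sizeof(struct mt76_rate_power));
}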
/* addr req mask */
#define MT_VEND_TYPE_EEPROM BIT(31)
#define MT_VEND_TYPE_CFG BIT(30)
@ -349,6 +361,8 @@ struct mt76_mmio {
u32 msg_seq;
} mcu;
void __iomem *regs;
spinlock_t irq_lock;
u32 irqmask;
};
struct mt76_dev {
@ -388,6 +402,7 @@ struct mt76_dev {
unsigned long state;
u8 antenna_mask;
u16 chainmask;
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
@ -395,6 +410,10 @@ struct mt76_dev {
struct debugfs_blob_wrapper otp;
struct mt76_hw_cap cap;
struct mt76_rate_power rate_power;
int txpower_conf;
int txpower_cur;
u32 debugfs_reg;
struct led_classdev led_cdev;
@ -418,18 +437,6 @@ enum mt76_phy_type {
MT_PHY_TYPE_VHT,
};
struct mt76_rate_power {
union {
struct {
s8 cck[4];
s8 ofdm[8];
s8 ht[16];
s8 vht[10];
};
s8 all[38];
};
};
struct mt76_rx_status {
struct mt76_wcid *wcid;
@ -539,6 +546,8 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
void mt76_unregister_device(struct mt76_dev *dev);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);


@ -0,0 +1,20 @@
config MT76x0_COMMON
tristate
select MT76x02_LIB
config MT76x0U
tristate "MediaTek MT76x0U (USB) support"
select MT76x0_COMMON
select MT76x02_USB
depends on MAC80211
depends on USB
help
This adds support for MT7610U-based wireless USB dongles.
config MT76x0E
tristate "MediaTek MT76x0E (PCIe) support"
select MT76x0_COMMON
depends on MAC80211
depends on PCI
help
This adds support for MT7610/MT7630-based wireless PCIe devices.


@ -4,9 +4,9 @@ obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
mt76x0-common-y := \
init.o main.o trace.o eeprom.o phy.o \
mac.o debugfs.o tx.o
mt76x0u-y := usb.o
mt76x0e-y := pci.o
mac.o debugfs.o
mt76x0u-y := usb.o usb_mcu.o
mt76x0e-y := pci.o pci_mcu.o
# ccflags-y := -DDEBUG
CFLAGS_trace.o := -I$(src)


@ -18,30 +18,10 @@
#include "mt76x0.h"
#include "eeprom.h"
static int
mt76_reg_set(void *data, u64 val)
{
struct mt76x0_dev *dev = data;
mt76_wr(dev, dev->debugfs_reg, val);
return 0;
}
static int
mt76_reg_get(void *data, u64 *val)
{
struct mt76x0_dev *dev = data;
*val = mt76_rr(dev, dev->debugfs_reg);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
static int
mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt76x0_dev *dev = file->private;
struct mt76x02_dev *dev = file->private;
int i, j;
#define stat_printf(grp, off, name) \
@ -95,72 +75,13 @@ static const struct file_operations fops_ampdu_stat = {
.release = single_release,
};
static int
mt76x0_eeprom_param_read(struct seq_file *file, void *data)
{
struct mt76x0_dev *dev = file->private;
int i;
seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
seq_printf(file, "RSSI offset 2GHz: %hhx %hhx\n",
dev->ee->rssi_offset_2ghz[0], dev->ee->rssi_offset_2ghz[1]);
seq_printf(file, "RSSI offset 5GHz: %hhx %hhx %hhx\n",
dev->ee->rssi_offset_5ghz[0], dev->ee->rssi_offset_5ghz[1],
dev->ee->rssi_offset_5ghz[2]);
seq_printf(file, "Temperature offset: %hhx\n", dev->ee->temp_off);
seq_printf(file, "LNA gain 2Ghz: %hhx\n", dev->ee->lna_gain_2ghz);
seq_printf(file, "LNA gain 5Ghz: %hhx %hhx %hhx\n",
dev->ee->lna_gain_5ghz[0], dev->ee->lna_gain_5ghz[1],
dev->ee->lna_gain_5ghz[2]);
seq_printf(file, "Power Amplifier type %hhx\n", dev->ee->pa_type);
seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
dev->ee->reg.start + dev->ee->reg.num - 1);
seq_puts(file, "Per channel power:\n");
for (i = 0; i < 58; i++)
seq_printf(file, "\t%d chan:%d pwr:%d\n", i, i,
dev->ee->tx_pwr_per_chan[i]);
seq_puts(file, "Per rate power 2GHz:\n");
for (i = 0; i < 5; i++)
seq_printf(file, "\t %d bw20:%d bw40:%d\n",
i, dev->ee->tx_pwr_cfg_2g[i][0],
dev->ee->tx_pwr_cfg_5g[i][1]);
seq_puts(file, "Per rate power 5GHz:\n");
for (i = 0; i < 5; i++)
seq_printf(file, "\t %d bw20:%d bw40:%d\n",
i, dev->ee->tx_pwr_cfg_5g[i][0],
dev->ee->tx_pwr_cfg_5g[i][1]);
return 0;
}
static int
mt76x0_eeprom_param_open(struct inode *inode, struct file *f)
{
return single_open(f, mt76x0_eeprom_param_read, inode->i_private);
}
static const struct file_operations fops_eeprom_param = {
.open = mt76x0_eeprom_param_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void mt76x0_init_debugfs(struct mt76x0_dev *dev)
void mt76x0_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
dir = debugfs_create_dir("mt76x0", dev->mt76.hw->wiphy->debugfsdir);
dir = mt76_register_debugfs(&dev->mt76);
if (!dir)
return;
debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
&fops_regval);
debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
&fops_eeprom_param);
}


@ -13,6 +13,7 @@
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@ -20,81 +21,20 @@
#include <asm/unaligned.h>
#include "mt76x0.h"
#include "eeprom.h"
static bool
field_valid(u8 val)
{
return val != 0xff;
}
static s8
field_validate(u8 val)
{
if (!field_valid(val))
return 0;
return val;
}
static inline int
sign_extend(u32 val, unsigned int size)
{
bool sign = val & BIT(size - 1);
val &= BIT(size - 1) - 1;
return sign ? val : -val;
}
static int
mt76x0_efuse_read(struct mt76x0_dev *dev, u16 addr, u8 *data,
enum mt76x0_eeprom_access_modes mode)
{
u32 val;
int i;
val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
MT_EFUSE_CTRL_KICK;
mt76_wr(dev, MT_EFUSE_CTRL, val);
if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
return -ETIMEDOUT;
val = mt76_rr(dev, MT_EFUSE_CTRL);
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
/* Parts of eeprom not in the usage map (0x80-0xc0,0xf0)
* will not return valid data but it's ok.
*/
memset(data, 0xff, 16);
return 0;
}
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, MT_EFUSE_DATA(i));
put_unaligned_le32(val, data + 4 * i);
}
return 0;
}
#include "../mt76x02_phy.h"
#define MT_MAP_READS DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16)
static int
mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
{
u8 data[MT_MAP_READS * 16];
int ret, i;
u32 start = 0, end = 0, cnt_free;
for (i = 0; i < MT_MAP_READS; i++) {
ret = mt76x0_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
data + i * 16, MT_EE_PHYSICAL_READ);
if (ret)
return ret;
}
ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START,
data, sizeof(data), MT_EE_PHYSICAL_READ);
if (ret)
return ret;
for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
if (!data[i]) {
@ -105,345 +45,307 @@ mt76x0_efuse_physical_size_check(struct mt76x0_dev *dev)
cnt_free = end - start + 1;
if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
dev_err(dev->mt76.dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
dev_err(dev->mt76.dev,
"driver does not support default EEPROM\n");
return -EINVAL;
}
return 0;
}
static void
mt76x0_set_chip_cap(struct mt76x0_dev *dev, u8 *eeprom)
static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
{
enum mt76x2_board_type { BOARD_TYPE_2GHZ = 1, BOARD_TYPE_5GHZ = 2 };
u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
dev_dbg(dev->mt76.dev, "NIC_CONF0: %04x NIC_CONF1: %04x\n", nic_conf0, nic_conf1);
switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, nic_conf0)) {
case BOARD_TYPE_5GHZ:
dev->mt76.cap.has_5ghz = true;
break;
case BOARD_TYPE_2GHZ:
dev->mt76.cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
break;
}
dev_dbg(dev->mt76.dev, "Has 2GHZ %d 5GHZ %d\n",
mt76x02_eeprom_parse_hw_cap(&dev->mt76);
dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
if (!field_valid(nic_conf1 & 0xff))
if (dev->no_2ghz) {
dev->mt76.cap.has_2ghz = false;
dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
}
if (!mt76x02_field_valid(nic_conf1 & 0xff))
nic_conf1 &= 0xff00;
if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
dev_err(dev->mt76.dev,
"Error: this driver does not support HW RF ctrl\n");
"driver does not support HW RF ctrl\n");
if (!field_valid(nic_conf0 >> 8))
if (!mt76x02_field_valid(nic_conf0 >> 8))
return;
if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
dev_err(dev->mt76.dev,
"Error: device has more than 1 RX/TX stream!\n");
dev->ee->pa_type = FIELD_GET(MT_EE_NIC_CONF_0_PA_TYPE, nic_conf0);
dev_dbg(dev->mt76.dev, "PA Type %d\n", dev->ee->pa_type);
dev_err(dev->mt76.dev, "invalid tx-rx stream\n");
}
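
The open-coded board-type switch above moves into the shared helper mt76x02_eeprom_parse_hw_cap(). A hedged reconstruction of what that helper does, based on the removed lines (the enum constants come from the old code; the body is illustrative, not the actual shared implementation):

void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev)
{
	u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);

	switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
	case BOARD_TYPE_5GHZ:
		dev->cap.has_5ghz = true;
		break;
	case BOARD_TYPE_2GHZ:
		dev->cap.has_2ghz = true;
		break;
	default:		/* dual-band boards report neither */
		dev->cap.has_2ghz = true;
		dev->cap.has_5ghz = true;
		break;
	}
}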
static int
mt76x0_set_macaddr(struct mt76x0_dev *dev, const u8 *eeprom)
static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
{
const void *src = eeprom + MT_EE_MAC_ADDR;
u8 *dst = dev->mt76.macaddr;
u8 val;
ether_addr_copy(dev->mt76.macaddr, src);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8;
if (mt76x02_field_valid(val))
dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
else
dev->cal.rx.temp_offset = -10;
}
if (!is_valid_ether_addr(dst)) {
eth_random_addr(dst);
dev_info(dev->mt76.dev,
"Invalid MAC address, using random address %pM\n",
dst);
static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
{
struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
u8 val;
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET);
if (!mt76x02_field_valid(val))
val = 0;
caldata->freq_offset = val;
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8;
if (!mt76x02_field_valid(val))
val = 0;
caldata->freq_offset -= mt76x02_sign_extend(val, 8);
}
void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
s8 val, lna_5g[3], lna_2g;
u16 rssi_offset;
int i;
mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset,
&lna_2g, lna_5g);
caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g,
lna_5g, chan);
for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
val = rssi_offset >> (8 * i);
if (val < -10 || val > 10)
val = 0;
caldata->rssi_offset[i] = val;
}
}
static s8 mt76x0_get_delta(struct mt76_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->chandef;
u8 val;
if (mt76x02_tssi_enabled(dev))
return 0;
if (chandef->width == NL80211_CHAN_WIDTH_80) {
val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8;
} else if (chandef->width == NL80211_CHAN_WIDTH_40) {
u16 data;
data = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
if (chandef->chan->band == NL80211_BAND_5GHZ)
val = data >> 8;
else
val = data;
} else {
return 0;
}
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dst));
mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dst + 4) |
FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
return mt76x02_rate_power_val(val);
}
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
struct mt76_rate_power *t = &dev->mt76.rate_power;
s8 delta = mt76x0_get_delta(&dev->mt76);
u16 val, addr;
memset(t, 0, sizeof(*t));
/* cck 1M, 2M, 5.5M, 11M */
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE);
t->cck[0] = t->cck[1] = s6_to_s8(val);
t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
/* ofdm 6M, 9M, 12M, 18M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
val = mt76x02_eeprom_get(&dev->mt76, addr);
t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
/* ofdm 24M, 36M, 48M, 54M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
val = mt76x02_eeprom_get(&dev->mt76, addr);
t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
val = mt76x02_eeprom_get(&dev->mt76, addr);
t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
val = mt76x02_eeprom_get(&dev->mt76, addr);
t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
val = mt76x02_eeprom_get(&dev->mt76, addr);
t->stbc[0] = t->stbc[1] = s6_to_s8(val);
t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
val = mt76x02_eeprom_get(&dev->mt76, addr);
t->stbc[4] = t->stbc[5] = s6_to_s8(val);
t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
/* vht mcs 8, 9 5GHz */
val = mt76x02_eeprom_get(&dev->mt76, 0x132);
t->vht[7] = s6_to_s8(val);
t->vht[8] = s6_to_s8(val >> 8);
mt76x02_add_rate_power_offset(t, delta);
}
void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
{
struct mt76x0_chan_map {
u8 chan;
u8 offset;
} chan_map[] = {
{ 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 },
{ 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 },
{ 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 },
{ 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 },
{ 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 },
{ 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 },
{ 167, 17 }, { 171, 18 }, { 173, 19 },
};
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u8 offset, addr;
u16 data;
int i;
for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
if (chan_map[i].chan <= chan->hw_value) {
offset = chan_map[i].offset;
break;
}
}
if (i == ARRAY_SIZE(chan_map))
offset = chan_map[0].offset;
if (chan->band == NL80211_BAND_2GHZ) {
addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
} else {
switch (chan->hw_value) {
case 58:
offset = 8;
break;
case 106:
offset = 14;
break;
case 112:
offset = 20;
break;
case 155:
offset = 30;
break;
default:
break;
}
addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
}
data = mt76x02_eeprom_get(&dev->mt76, addr);
info[0] = data;
if (!info[0] || info[0] > 0x3f)
info[0] = 5;
info[1] = data >> 8;
if (!info[1] || info[1] > 0x3f)
info[1] = 5;
}
static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
{
u16 val;
val = get_unaligned_le16(dev->mt76.eeprom.data);
if (!val)
val = get_unaligned_le16(dev->mt76.eeprom.data +
MT_EE_PCI_ID);
switch (val) {
case 0x7650:
case 0x7610:
return 0;
default:
dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n",
val);
return -EINVAL;
}
}
static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
{
int found;
found = mt76_eeprom_init(&dev->mt76, MT76X0_EEPROM_SIZE);
if (found < 0)
return found;
if (found && !mt76x0_check_eeprom(dev))
return 0;
found = mt76x0_efuse_physical_size_check(dev);
if (found < 0)
return found;
return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data,
MT76X0_EEPROM_SIZE, MT_EE_READ);
}
int mt76x0_eeprom_init(struct mt76x02_dev *dev)
{
u8 version, fae;
u16 data;
int err;
err = mt76x0_load_eeprom(dev);
if (err < 0)
return err;
data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION);
version = data >> 8;
fae = data;
if (version > MT76X0U_EE_MAX_VER)
dev_warn(dev->mt76.dev,
"Warning: unsupported EEPROM version %02hhx\n",
version);
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
version, fae);
mt76x02_mac_setaddr(&dev->mt76,
dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);
dev->mt76.chainmask = 0x0101;
return 0;
}
static void
mt76x0_set_temp_offset(struct mt76x0_dev *dev, u8 *eeprom)
{
u8 temp = eeprom[MT_EE_TEMP_OFFSET];
if (field_valid(temp))
dev->ee->temp_off = sign_extend(temp, 8);
else
dev->ee->temp_off = -10;
}
static void
mt76x0_set_country_reg(struct mt76x0_dev *dev, u8 *eeprom)
{
/* Note: - region 31 is not valid for mt76x0 (see rtmp_init.c)
* - comments in rtmp_def.h are incorrect (see rt_channel.c)
*/
static const struct reg_channel_bounds chan_bounds[] = {
/* EEPROM country regions 0 - 7 */
{ 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 },
{ 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 },
/* EEPROM country regions 32 - 33 */
{ 1, 11 }, { 1, 14 }
};
u8 val = eeprom[MT_EE_COUNTRY_REGION_2GHZ];
int idx = -1;
dev_dbg(dev->mt76.dev, "REG 2GHZ %u REG 5GHZ %u\n", val, eeprom[MT_EE_COUNTRY_REGION_5GHZ]);
if (val < 8)
idx = val;
if (val > 31 && val < 33)
idx = val - 32 + 8;
if (idx != -1)
dev_info(dev->mt76.dev,
"EEPROM country region %02hhx (channels %hhd-%hhd)\n",
val, chan_bounds[idx].start,
chan_bounds[idx].start + chan_bounds[idx].num - 1);
else
idx = 5; /* channels 1 - 14 */
dev->ee->reg = chan_bounds[idx];
/* TODO: country region 33 is special - phy should be set to B-mode
* before entering channel 14 (see sta/connect.c)
*/
}
static void
mt76x0_set_rf_freq_off(struct mt76x0_dev *dev, u8 *eeprom)
{
u8 comp;
dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
if (comp & BIT(7))
dev->ee->rf_freq_off -= comp & 0x7f;
else
dev->ee->rf_freq_off += comp;
}
static void
mt76x0_set_lna_gain(struct mt76x0_dev *dev, u8 *eeprom)
{
u8 gain;
dev->ee->lna_gain_2ghz = eeprom[MT_EE_LNA_GAIN_2GHZ];
dev->ee->lna_gain_5ghz[0] = eeprom[MT_EE_LNA_GAIN_5GHZ_0];
gain = eeprom[MT_EE_LNA_GAIN_5GHZ_1];
if (gain == 0xff || gain == 0)
dev->ee->lna_gain_5ghz[1] = dev->ee->lna_gain_5ghz[0];
else
dev->ee->lna_gain_5ghz[1] = gain;
gain = eeprom[MT_EE_LNA_GAIN_5GHZ_2];
if (gain == 0xff || gain == 0)
dev->ee->lna_gain_5ghz[2] = dev->ee->lna_gain_5ghz[0];
else
dev->ee->lna_gain_5ghz[2] = gain;
}
static void
mt76x0_set_rssi_offset(struct mt76x0_dev *dev, u8 *eeprom)
{
int i;
s8 *rssi_offset = dev->ee->rssi_offset_2ghz;
for (i = 0; i < 2; i++) {
rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
dev_warn(dev->mt76.dev,
"Warning: EEPROM RSSI is invalid %02hhx\n",
rssi_offset[i]);
rssi_offset[i] = 0;
}
}
rssi_offset = dev->ee->rssi_offset_5ghz;
for (i = 0; i < 3; i++) {
rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET_5GHZ + i];
if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
dev_warn(dev->mt76.dev,
"Warning: EEPROM RSSI is invalid %02hhx\n",
rssi_offset[i]);
rssi_offset[i] = 0;
}
}
}
static u32
calc_bw40_power_rate(u32 value, int delta)
{
u32 ret = 0;
int i, tmp;
for (i = 0; i < 4; i++) {
tmp = s6_to_int((value >> i*8) & 0xff) + delta;
ret |= (u32)(int_to_s6(tmp)) << i*8;
}
return ret;
}
static s8
get_delta(u8 val)
{
s8 ret;
if (!field_valid(val) || !(val & BIT(7)))
return 0;
ret = val & 0x1f;
if (ret > 8)
ret = 8;
if (val & BIT(6))
ret = -ret;
return ret;
}
static void
mt76x0_set_tx_power_per_rate(struct mt76x0_dev *dev, u8 *eeprom)
{
s8 bw40_delta_2g, bw40_delta_5g;
u32 val;
int i;
bw40_delta_2g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
bw40_delta_5g = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40 + 1]);
for (i = 0; i < 5; i++) {
val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
/* Skip last 16 bits. */
if (i == 4)
val &= 0x0000ffff;
dev->ee->tx_pwr_cfg_2g[i][0] = val;
dev->ee->tx_pwr_cfg_2g[i][1] = calc_bw40_power_rate(val, bw40_delta_2g);
}
/* Reading per rate tx power for 5 GHz band is a bit more complex. Note
* we mix 16 bit and 32 bit reads and sometimes do shifts.
*/
val = get_unaligned_le16(eeprom + 0x120);
val <<= 16;
dev->ee->tx_pwr_cfg_5g[0][0] = val;
dev->ee->tx_pwr_cfg_5g[0][1] = calc_bw40_power_rate(val, bw40_delta_5g);
val = get_unaligned_le32(eeprom + 0x122);
dev->ee->tx_pwr_cfg_5g[1][0] = val;
dev->ee->tx_pwr_cfg_5g[1][1] = calc_bw40_power_rate(val, bw40_delta_5g);
val = get_unaligned_le16(eeprom + 0x126);
dev->ee->tx_pwr_cfg_5g[2][0] = val;
dev->ee->tx_pwr_cfg_5g[2][1] = calc_bw40_power_rate(val, bw40_delta_5g);
val = get_unaligned_le16(eeprom + 0xec);
val <<= 16;
dev->ee->tx_pwr_cfg_5g[3][0] = val;
dev->ee->tx_pwr_cfg_5g[3][1] = calc_bw40_power_rate(val, bw40_delta_5g);
val = get_unaligned_le16(eeprom + 0xee);
dev->ee->tx_pwr_cfg_5g[4][0] = val;
dev->ee->tx_pwr_cfg_5g[4][1] = calc_bw40_power_rate(val, bw40_delta_5g);
}
static void
mt76x0_set_tx_power_per_chan(struct mt76x0_dev *dev, u8 *eeprom)
{
int i;
u8 tx_pwr;
for (i = 0; i < 14; i++) {
tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_2GHZ + i];
if (tx_pwr <= 0x3f && tx_pwr > 0)
dev->ee->tx_pwr_per_chan[i] = tx_pwr;
else
dev->ee->tx_pwr_per_chan[i] = 5;
}
for (i = 0; i < 40; i++) {
tx_pwr = eeprom[MT_EE_TX_POWER_OFFSET_5GHZ + i];
if (tx_pwr <= 0x3f && tx_pwr > 0)
dev->ee->tx_pwr_per_chan[14 + i] = tx_pwr;
else
dev->ee->tx_pwr_per_chan[14 + i] = 5;
}
dev->ee->tx_pwr_per_chan[54] = dev->ee->tx_pwr_per_chan[22];
dev->ee->tx_pwr_per_chan[55] = dev->ee->tx_pwr_per_chan[28];
dev->ee->tx_pwr_per_chan[56] = dev->ee->tx_pwr_per_chan[34];
dev->ee->tx_pwr_per_chan[57] = dev->ee->tx_pwr_per_chan[44];
}
int
mt76x0_eeprom_init(struct mt76x0_dev *dev)
{
u8 *eeprom;
int i, ret;
ret = mt76x0_efuse_physical_size_check(dev);
if (ret)
return ret;
dev->ee = devm_kzalloc(dev->mt76.dev, sizeof(*dev->ee), GFP_KERNEL);
if (!dev->ee)
return -ENOMEM;
eeprom = kmalloc(MT76X0_EEPROM_SIZE, GFP_KERNEL);
if (!eeprom)
return -ENOMEM;
for (i = 0; i + 16 <= MT76X0_EEPROM_SIZE; i += 16) {
ret = mt76x0_efuse_read(dev, i, eeprom + i, MT_EE_READ);
if (ret)
goto out;
}
if (eeprom[MT_EE_VERSION_EE] > MT76X0U_EE_MAX_VER)
dev_warn(dev->mt76.dev,
"Warning: unsupported EEPROM version %02hhx\n",
eeprom[MT_EE_VERSION_EE]);
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
mt76x0_set_macaddr(dev, eeprom);
mt76x0_set_chip_cap(dev, eeprom);
mt76x0_set_country_reg(dev, eeprom);
mt76x0_set_rf_freq_off(dev, eeprom);
mt76x0_set_temp_offset(dev, eeprom);
mt76x0_set_lna_gain(dev, eeprom);
mt76x0_set_rssi_offset(dev, eeprom);
dev->chainmask = 0x0101;
mt76x0_set_tx_power_per_rate(dev, eeprom);
mt76x0_set_tx_power_per_chan(dev, eeprom);
out:
kfree(eeprom);
return ret;
}
MODULE_LICENSE("Dual BSD/GPL");


@ -16,131 +16,25 @@
#ifndef __MT76X0U_EEPROM_H
#define __MT76X0U_EEPROM_H
struct mt76x0_dev;
#include "../mt76x02_eeprom.h"
#define MT76X0U_EE_MAX_VER 0x0c
#define MT76X0_EEPROM_SIZE 512
struct mt76x02_dev;
#define MT76X0U_DEFAULT_TX_POWER 6
#define MT76X0U_EE_MAX_VER 0x0c
#define MT76X0_EEPROM_SIZE 512
enum mt76_eeprom_field {
MT_EE_CHIP_ID = 0x00,
MT_EE_VERSION_FAE = 0x02,
MT_EE_VERSION_EE = 0x03,
MT_EE_MAC_ADDR = 0x04,
MT_EE_NIC_CONF_0 = 0x34,
MT_EE_NIC_CONF_1 = 0x36,
MT_EE_COUNTRY_REGION_5GHZ = 0x38,
MT_EE_COUNTRY_REGION_2GHZ = 0x39,
MT_EE_FREQ_OFFSET = 0x3a,
MT_EE_NIC_CONF_2 = 0x42,
int mt76x0_eeprom_init(struct mt76x02_dev *dev);
void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info);
MT_EE_LNA_GAIN_2GHZ = 0x44,
MT_EE_LNA_GAIN_5GHZ_0 = 0x45,
MT_EE_RSSI_OFFSET = 0x46,
MT_EE_RSSI_OFFSET_5GHZ = 0x4a,
MT_EE_LNA_GAIN_5GHZ_1 = 0x49,
MT_EE_LNA_GAIN_5GHZ_2 = 0x4d,
MT_EE_TX_POWER_DELTA_BW40 = 0x50,
MT_EE_TX_POWER_OFFSET_2GHZ = 0x52,
MT_EE_TX_TSSI_SLOPE = 0x6e,
MT_EE_TX_TSSI_OFFSET_GROUP = 0x6f,
MT_EE_TX_TSSI_OFFSET = 0x76,
MT_EE_TX_POWER_OFFSET_5GHZ = 0x78,
MT_EE_TEMP_OFFSET = 0xd1,
MT_EE_FREQ_OFFSET_COMPENSATION = 0xdb,
MT_EE_TX_POWER_BYRATE_BASE = 0xde,
MT_EE_TX_POWER_BYRATE_BASE_5GHZ = 0x120,
MT_EE_USAGE_MAP_START = 0x1e0,
MT_EE_USAGE_MAP_END = 0x1fc,
};
#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
#define MT_EE_NIC_CONF_1_HW_RF_CTRL BIT(0)
#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
#define MT_EE_TX_POWER_BYRATE(i) (MT_EE_TX_POWER_BYRATE_BASE + \
(i) * 4)
#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
MT_EE_USAGE_MAP_START + 1)
enum mt76x0_eeprom_access_modes {
MT_EE_READ = 0,
MT_EE_PHYSICAL_READ = 1,
};
struct reg_channel_bounds {
u8 start;
u8 num;
};
struct mt76x0_eeprom_params {
u8 rf_freq_off;
s16 temp_off;
s8 rssi_offset_2ghz[2];
s8 rssi_offset_5ghz[3];
s8 lna_gain_2ghz;
s8 lna_gain_5ghz[3];
u8 pa_type;
/* TX_PWR_CFG_* values from EEPROM for 20 and 40 Mhz bandwidths. */
u32 tx_pwr_cfg_2g[5][2];
u32 tx_pwr_cfg_5g[5][2];
u8 tx_pwr_per_chan[58];
struct reg_channel_bounds reg;
};
int mt76x0_eeprom_init(struct mt76x0_dev *dev);
static inline u32 s6_validate(u32 reg)
static inline s8 s6_to_s8(u32 val)
{
WARN_ON(reg & ~GENMASK(5, 0));
return reg & GENMASK(5, 0);
}
s8 ret = val & GENMASK(5, 0);
static inline int s6_to_int(u32 reg)
{
int s6;
s6 = s6_validate(reg);
if (s6 & BIT(5))
s6 -= BIT(6);
return s6;
}
static inline u32 int_to_s6(int val)
{
if (val < -0x20)
return 0x20;
if (val > 0x1f)
return 0x1f;
return val & 0x3f;
if (ret & BIT(5))
ret -= BIT(6);
return ret;
}
#endif
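
A couple of worked values for the s6_to_s8() helper above, which is plain 6-bit two's-complement sign extension (the self-checks are illustrative, not driver code):

static void s6_to_s8_examples(void)
{
	WARN_ON(s6_to_s8(0x25) != -27);	/* 0b100101: bit 5 set, 0x25 - 0x40 */
	WARN_ON(s6_to_s8(0x1f) != 31);	/* 0b011111: bit 5 clear, stays +31 */
	WARN_ON(s6_to_s8(0x3f) != -1);	/* 0b111111: 0x3f - 0x40 = -1 */
}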


@ -18,8 +18,6 @@
#include "eeprom.h"
#include "trace.h"
#include "mcu.h"
#include "../mt76x02_util.h"
#include "initvals.h"
static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
@ -41,9 +39,9 @@ static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband)
}
static void
mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
mt76x0_set_wlan_state(struct mt76x02_dev *dev, u32 val, bool enable)
{
int i;
u32 mask = MT_CMB_CTRL_XTAL_RDY | MT_CMB_CTRL_PLL_LD;
/* Note: we don't turn off WLAN_CLK because that makes the device
* not respond properly on the probe path.
@ -60,32 +58,18 @@ mt76x0_set_wlan_state(struct mt76x0_dev *dev, u32 val, bool enable)
mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
udelay(20);
if (!enable)
return;
for (i = 200; i; i--) {
val = mt76_rr(dev, MT_CMB_CTRL);
if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
break;
udelay(20);
}
/* Note: the vendor driver tries to disable/enable wlan here and retry,
* but the code which does it is so buggy it must never have
* triggered, so don't bother.
*/
if (!i)
dev_err(dev->mt76.dev, "Error: PLL and XTAL check failed!\n");
if (enable && !mt76_poll(dev, MT_CMB_CTRL, mask, mask, 2000))
dev_err(dev->mt76.dev, "PLL and XTAL check failed\n");
}
void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset)
{
u32 val;
mutex_lock(&dev->hw_atomic_mutex);
val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
if (reset) {
@ -107,57 +91,25 @@ void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset)
udelay(20);
mt76x0_set_wlan_state(dev, val, enable);
mutex_unlock(&dev->hw_atomic_mutex);
}
EXPORT_SYMBOL_GPL(mt76x0_chip_onoff);
static void mt76x0_reset_csr_bbp(struct mt76x0_dev *dev)
static void mt76x0_reset_csr_bbp(struct mt76x02_dev *dev)
{
u32 val;
val = mt76_rr(dev, MT_PBF_SYS_CTRL);
val &= ~0x2000;
mt76_wr(dev, MT_PBF_SYS_CTRL, val);
mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR |
MT_MAC_SYS_CTRL_RESET_BBP);
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_RESET_CSR |
MT_MAC_SYS_CTRL_RESET_BBP);
msleep(200);
}
static void mt76x0_init_usb_dma(struct mt76x0_dev *dev)
{
u32 val;
val = mt76_rr(dev, MT_USB_DMA_CFG);
val |= MT_USB_DMA_CFG_RX_BULK_EN |
MT_USB_DMA_CFG_TX_BULK_EN;
/* disable AGGR_BULK_RX in order to receive one
* frame in each rx urb and avoid copies
*/
val &= ~MT_USB_DMA_CFG_RX_BULK_AGG_EN;
mt76_wr(dev, MT_USB_DMA_CFG, val);
val = mt76_rr(dev, MT_COM_REG0);
if (val & 1)
dev_dbg(dev->mt76.dev, "MCU not ready\n");
val = mt76_rr(dev, MT_USB_DMA_CFG);
val |= MT_USB_DMA_CFG_RX_DROP_OR_PAD;
mt76_wr(dev, MT_USB_DMA_CFG, val);
val &= ~MT_USB_DMA_CFG_RX_DROP_OR_PAD;
mt76_wr(dev, MT_USB_DMA_CFG, val);
mt76_clear(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_RESET_CSR |
MT_MAC_SYS_CTRL_RESET_BBP);
}
#define RANDOM_WRITE(dev, tab) \
mt76_wr_rp(dev, MT_MCU_MEMMAP_WLAN, \
tab, ARRAY_SIZE(tab))
static int mt76x0_init_bbp(struct mt76x0_dev *dev)
static int mt76x0_init_bbp(struct mt76x02_dev *dev)
{
int ret, i;
@ -180,30 +132,13 @@ static int mt76x0_init_bbp(struct mt76x0_dev *dev)
return 0;
}
static void
mt76_init_beacon_offsets(struct mt76x0_dev *dev)
{
u16 base = MT_BEACON_BASE;
u32 regs[4] = {};
int i;
for (i = 0; i < 16; i++) {
u16 addr = dev->beacon_offsets[i];
regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
}
for (i = 0; i < 4; i++)
mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}
static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
{
u32 reg;
RANDOM_WRITE(dev, common_mac_reg_table);
mt76_init_beacon_offsets(dev);
mt76x02_set_beacon_offsets(&dev->mt76);
/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
RANDOM_WRITE(dev, mt76x0_mac_reg_table);
@ -213,13 +148,6 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
reg &= ~0x3;
mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
if (is_mt7610e(dev)) {
/* Disable COEX_EN */
reg = mt76_rr(dev, MT_COEXCFG0);
reg &= 0xFFFFFFFE;
mt76_wr(dev, MT_COEXCFG0, reg);
}
/* Set 0x141C[15:12]=0xF */
reg = mt76_rr(dev, MT_EXT_CCA_CFG);
reg |= 0x0000F000;
@ -237,15 +165,9 @@ static void mt76x0_init_mac_registers(struct mt76x0_dev *dev)
reg &= ~0x000003FF;
reg |= 0x00000201;
mt76_wr(dev, MT_WMM_CTRL, reg);
/* TODO: Probably not needed */
mt76_wr(dev, 0x7028, 0);
mt76_wr(dev, 0x7010, 0);
mt76_wr(dev, 0x7024, 0);
msleep(10);
}
static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev)
{
u32 *vals;
int i;
@ -264,14 +186,14 @@ static int mt76x0_init_wcid_mem(struct mt76x0_dev *dev)
return 0;
}
static void mt76x0_init_key_mem(struct mt76x0_dev *dev)
static void mt76x0_init_key_mem(struct mt76x02_dev *dev)
{
u32 vals[4] = {};
mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals));
}
static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev)
{
u32 *vals;
int i;
@ -288,7 +210,7 @@ static int mt76x0_init_wcid_attr_mem(struct mt76x0_dev *dev)
return 0;
}
static void mt76x0_reset_counters(struct mt76x0_dev *dev)
static void mt76x0_reset_counters(struct mt76x02_dev *dev)
{
mt76_rr(dev, MT_RX_STAT_0);
mt76_rr(dev, MT_RX_STAT_1);
@ -298,49 +220,26 @@ static void mt76x0_reset_counters(struct mt76x0_dev *dev)
mt76_rr(dev, MT_TX_STA_2);
}
int mt76x0_mac_start(struct mt76x0_dev *dev)
int mt76x0_mac_start(struct mt76x02_dev *dev)
{
mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
if (!mt76x02_wait_for_wpdma(&dev->mt76, 200000))
return -ETIMEDOUT;
dev->mt76.rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
mt76_wr(dev, MT_MAC_SYS_CTRL,
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
return -ETIMEDOUT;
return 0;
return !mt76x02_wait_for_wpdma(&dev->mt76, 50) ? -ETIMEDOUT : 0;
}
EXPORT_SYMBOL_GPL(mt76x0_mac_start);
static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
void mt76x0_mac_stop(struct mt76x02_dev *dev)
{
int i, ok;
if (test_bit(MT76_REMOVED, &dev->mt76.state))
return;
mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_BEACON_TX);
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
dev_warn(dev->mt76.dev, "Warning: TX DMA did not stop!\n");
int i = 200, ok = 0;
/* Page count on TxQ */
i = 200;
while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
(mt76_rr(dev, 0x0a30) & 0x000000ff) ||
(mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
@ -353,9 +252,7 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
MT_MAC_SYS_CTRL_ENABLE_TX);
/* Page count on RxQ */
ok = 0;
i = 200;
while (i--) {
for (i = 0; i < 200; i++) {
if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) &&
!mt76_rr(dev, 0x0a30) &&
!mt76_rr(dev, 0x0a34)) {
@ -368,36 +265,14 @@ static void mt76x0_mac_stop_hw(struct mt76x0_dev *dev)
if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
dev_warn(dev->mt76.dev, "Warning: MAC RX did not stop!\n");
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
dev_warn(dev->mt76.dev, "Warning: RX DMA did not stop!\n");
}
void mt76x0_mac_stop(struct mt76x0_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
mt76u_stop_stat_wk(&dev->mt76);
mt76x0_mac_stop_hw(dev);
}
EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
int mt76x0_init_hardware(struct mt76x0_dev *dev)
int mt76x0_init_hardware(struct mt76x02_dev *dev)
{
static const u16 beacon_offsets[16] = {
/* 512 byte per beacon */
0xc000, 0xc200, 0xc400, 0xc600,
0xc800, 0xca00, 0xcc00, 0xce00,
0xd000, 0xd200, 0xd400, 0xd600,
0xd800, 0xda00, 0xdc00, 0xde00
};
int ret;
dev->beacon_offsets = beacon_offsets;
if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100))
if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000))
return -EIO;
/* Wait for ASIC ready after FW load. */
@ -405,25 +280,21 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
return -ETIMEDOUT;
mt76x0_reset_csr_bbp(dev);
mt76x0_init_usb_dma(dev);
mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0x0);
mt76_wr(dev, MT_TSO_CTRL, 0x0);
ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
if (ret)
return ret;
mt76x0_init_mac_registers(dev);
if (!mt76_poll_msec(dev, MT_MAC_STATUS,
MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 1000))
if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
return -EIO;
ret = mt76x0_init_bbp(dev);
if (ret)
return ret;
dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
ret = mt76x0_init_wcid_mem(dev);
if (ret)
return ret;
@ -441,12 +312,6 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
mt76x0_reset_counters(dev);
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG,
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
ret = mt76x0_eeprom_init(dev);
if (ret)
return ret;
@ -457,50 +322,36 @@ int mt76x0_init_hardware(struct mt76x0_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x0_init_hardware);
void mt76x0_cleanup(struct mt76x0_dev *dev)
struct mt76x02_dev *
mt76x0_alloc_device(struct device *pdev,
const struct mt76_driver_ops *drv_ops,
const struct ieee80211_ops *ops)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x0_cleanup);
struct mt76x0_dev *
mt76x0_alloc_device(struct device *pdev, const struct mt76_driver_ops *drv_ops)
{
struct mt76x0_dev *dev;
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
mdev = mt76_alloc_device(sizeof(*dev), &mt76x0_ops);
mdev = mt76_alloc_device(sizeof(*dev), ops);
if (!mdev)
return NULL;
mdev->dev = pdev;
mdev->drv = drv_ops;
dev = container_of(mdev, struct mt76x0_dev, mt76);
mutex_init(&dev->reg_atomic_mutex);
mutex_init(&dev->hw_atomic_mutex);
spin_lock_init(&dev->mac_lock);
spin_lock_init(&dev->con_mon_lock);
dev = container_of(mdev, struct mt76x02_dev, mt76);
mutex_init(&dev->phy_mutex);
atomic_set(&dev->avg_ampdu_len, 1);
return dev;
}
EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
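
mt76x0_alloc_device() relies on the usual embedded-struct idiom: mt76_alloc_device() sizes the allocation for the outer chip structure but returns a pointer to the embedded struct mt76_dev, and container_of() walks back out to the wrapper. Schematically (field layout follows the diff; the remaining fields are elided):

struct mt76x02_dev {
	struct mt76_dev mt76;	/* embedded common state */
	struct mutex phy_mutex;
	/* chip-specific fields follow */
};

/* recover the outer structure from the embedded mt76_dev pointer */
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);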
int mt76x0_register_device(struct mt76x0_dev *dev)
int mt76x0_register_device(struct mt76x02_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
struct ieee80211_hw *hw = mdev->hw;
struct wiphy *wiphy = hw->wiphy;
int ret;
ret = mt76x0_init_hardware(dev);
if (ret)
return ret;
/* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go to
* entry no. 1 like it does in the vendor driver.
*/
@ -535,12 +386,6 @@ int mt76x0_register_device(struct mt76x0_dev *dev)
if (mdev->cap.has_5ghz)
mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
/* check hw sg support in order to enable AMSDU */
if (mt76u_check_sg(mdev))
hw->max_tx_fragments = MT_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;
mt76x0_init_debugfs(dev);
return 0;


@ -2,6 +2,7 @@
* (c) Copyright 2002-2010, Ralink Technology, Inc.
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@ -19,264 +20,215 @@
#include "phy.h"
static const struct mt76_reg_pair common_mac_reg_table[] = {
#if 1
{MT_BCN_OFFSET(0), 0xf8f0e8e0}, /* 0x3800(e0), 0x3A00(e8), 0x3C00(f0), 0x3E00(f8), 512B for each beacon */
{MT_BCN_OFFSET(1), 0x6f77d0c8}, /* 0x3200(c8), 0x3400(d0), 0x1DC0(77), 0x1BC0(6f), 512B for each beacon */
#endif
{MT_LEGACY_BASIC_RATE, 0x0000013f}, /* Basic rate set bitmap*/
{MT_HT_BASIC_RATE, 0x00008003}, /* Basic HT rate set , 20M, MCS=3, MM. Format is the same as in TXWI.*/
{MT_MAC_SYS_CTRL, 0x00}, /* 0x1004, , default Disable RX*/
{MT_RX_FILTR_CFG, 0x17f97}, /*0x1400 , RX filter control, */
{MT_BKOFF_SLOT_CFG, 0x209}, /* default set short slot time, CC_DELAY_TIME should be 2 */
/*{TX_SW_CFG0, 0x40a06}, Gary,2006-08-23 */
{MT_TX_SW_CFG0, 0x0}, /* Gary,2008-05-21 for CWC test */
{MT_TX_SW_CFG1, 0x80606}, /* Gary,2006-08-23 */
{MT_TX_LINK_CFG, 0x1020}, /* Gary,2006-08-23 */
/*{TX_TIMEOUT_CFG, 0x00182090}, CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT*/
{MT_TX_TIMEOUT_CFG, 0x000a2090}, /* CCK has some problem. So increase timeout value. 2006-10-09 MArvek RT , Modify for 2860E ,2007-08-01*/
{MT_MAX_LEN_CFG, 0xa0fff | 0x00001000}, /* 0x3018, MAX frame length. Max PSDU = 16kbytes.*/
{MT_LED_CFG, 0x7f031e46}, /* Gary, 2006-08-23*/
{MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f /*0xbfbf3f1f*/},
{MT_PBF_RX_MAX_PCNT, 0x9f},
/*{TX_RTY_CFG, 0x6bb80408}, Jan, 2006/11/16*/
/* WMM_ACM_SUPPORT */
/* {TX_RTY_CFG, 0x6bb80101}, sample*/
{MT_TX_RETRY_CFG, 0x47d01f0f}, /* Jan, 2006/11/16, Set TxWI->ACK =0 in Probe Rsp Modify for 2860E ,2007-08-03*/
{MT_AUTO_RSP_CFG, 0x00000013}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
{MT_CCK_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
{MT_OFDM_PROT_CFG, 0x05740003 /*0x01740003*/}, /* Initial Auto_Responder, because QA will turn off Auto-Responder. And RTS threshold is enabled. */
{MT_PBF_CFG, 0xf40006}, /* Only enable Queue 2*/
{MT_MM40_PROT_CFG, 0x3F44084}, /* Initial Auto_Responder, because QA will turn off Auto-Responder*/
{MT_WPDMA_GLO_CFG, 0x00000030},
{MT_GF20_PROT_CFG, 0x01744004}, /* set 19:18 --> Short NAV for MIMO PS*/
{MT_GF40_PROT_CFG, 0x03F44084},
{MT_MM20_PROT_CFG, 0x01744004},
{MT_TXOP_CTRL_CFG, 0x0000583f, /*0x0000243f*/ /*0x000024bf*/}, /*Extension channel backoff.*/
{MT_TX_RTS_CFG, 0x00092b20},
{MT_EXP_ACK_TIME, 0x002400ca}, /* default value */
{MT_TXOP_HLDR_ET, 0x00000002},
/* Jerry comments 2008/01/16: we use SIFS = 10us for CCK by default, but 10us
seems too small for the INTEL 2200bg card. In MBSS mode the delta time between
beacon0 and beacon1 is SIFS (10us), so if an INTEL 2200bg card connects to
BSS0 the ping is always lost. So we change the CCK SIFS from 10us to 16us. */
{MT_XIFS_TIME_CFG, 0x33a41010},
{MT_PWR_PIN_CFG, 0x00000000},
{ MT_BCN_OFFSET(0), 0xf8f0e8e0 },
{ MT_BCN_OFFSET(1), 0x6f77d0c8 },
{ MT_LEGACY_BASIC_RATE, 0x0000013f },
{ MT_HT_BASIC_RATE, 0x00008003 },
{ MT_MAC_SYS_CTRL, 0x00000000 },
{ MT_RX_FILTR_CFG, 0x00017f97 },
{ MT_BKOFF_SLOT_CFG, 0x00000209 },
{ MT_TX_SW_CFG0, 0x00000000 },
{ MT_TX_SW_CFG1, 0x00080606 },
{ MT_TX_LINK_CFG, 0x00001020 },
{ MT_TX_TIMEOUT_CFG, 0x000a2090 },
{ MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 },
{ MT_LED_CFG, 0x7f031e46 },
{ MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
{ MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
{ MT_TX_RETRY_CFG, 0x47d01f0f },
{ MT_AUTO_RSP_CFG, 0x00000013 },
{ MT_CCK_PROT_CFG, 0x05740003 },
{ MT_OFDM_PROT_CFG, 0x05740003 },
{ MT_PBF_CFG, 0x00f40006 },
{ MT_WPDMA_GLO_CFG, 0x00000030 },
{ MT_GF20_PROT_CFG, 0x01744004 },
{ MT_GF40_PROT_CFG, 0x03f44084 },
{ MT_MM20_PROT_CFG, 0x01744004 },
{ MT_MM40_PROT_CFG, 0x03f54084 },
{ MT_TXOP_CTRL_CFG, 0x0000583f },
{ MT_TX_RTS_CFG, 0x00092b20 },
{ MT_EXP_ACK_TIME, 0x002400ca },
{ MT_TXOP_HLDR_ET, 0x00000002 },
{ MT_XIFS_TIME_CFG, 0x33a41010 },
{ MT_PWR_PIN_CFG, 0x00000000 },
};
static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
/* {MT_IOCFG_6, 0xA0040080 }, */
{MT_PBF_SYS_CTRL, 0x00080c00 },
{MT_PBF_CFG, 0x77723c1f },
{MT_FCE_PSE_CTRL, 0x00000001 },
{MT_AMPDU_MAX_LEN_20M1S, 0xBAA99887 },
/* Delay bb_tx_pe for proper tx_mcs_pwr update */
{MT_TX_SW_CFG0, 0x00000601 },
/* Set rf_tx_pe deassert time to 1us by Chee's comment @MT7650_CR_setting_1018.xlsx */
{MT_TX_SW_CFG1, 0x00040000 },
{MT_TX_SW_CFG2, 0x00000000 },
/* disable Tx info report */
{0xa44, 0x0000000 },
{MT_HEADER_TRANS_CTRL_REG, 0x0},
{MT_TSO_CTRL, 0x0},
/* BB_PA_MODE_CFG0(0x1214) Keep default value @20120903 */
{MT_BB_PA_MODE_CFG1, 0x00500055},
/* RF_PA_MODE_CFG0(0x121C) Keep default value @20120903 */
{MT_RF_PA_MODE_CFG1, 0x00500055},
{MT_TX_ALC_CFG_0, 0x2F2F000C},
{MT_TX0_BB_GAIN_ATTEN, 0x00000000}, /* set BBP atten gain = 0 */
{MT_TX_PWR_CFG_0, 0x3A3A3A3A},
{MT_TX_PWR_CFG_1, 0x3A3A3A3A},
{MT_TX_PWR_CFG_2, 0x3A3A3A3A},
{MT_TX_PWR_CFG_3, 0x3A3A3A3A},
{MT_TX_PWR_CFG_4, 0x3A3A3A3A},
{MT_TX_PWR_CFG_7, 0x3A3A3A3A},
{MT_TX_PWR_CFG_8, 0x3A},
{MT_TX_PWR_CFG_9, 0x3A},
/* Enable Tx length > 4095 byte */
{0x150C, 0x00000002},
/* Disable bt_abort_tx_en(0x1238[21] = 0) which is not used at MT7650 */
{0x1238, 0x001700C8},
/* PMU_OCLEVEL<5:1> from default <5'b10010> to <5'b11011> for normal driver */
/* {MT_LDO_CTRL_0, 0x00A647B6}, */
/* Default LDO_DIG supply 1.26V, change to 1.2V */
{MT_LDO_CTRL_1, 0x6B006464 },
/*
{MT_HT_BASIC_RATE, 0x00004003 },
{MT_HT_CTRL_CFG, 0x000001FF },
*/
{ MT_IOCFG_6, 0xa0040080 },
{ MT_PBF_SYS_CTRL, 0x00080c00 },
{ MT_PBF_CFG, 0x77723c1f },
{ MT_FCE_PSE_CTRL, 0x00000001 },
{ MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 },
{ MT_TX_SW_CFG0, 0x00000601 },
{ MT_TX_SW_CFG1, 0x00040000 },
{ MT_TX_SW_CFG2, 0x00000000 },
{ 0xa44, 0x00000000 },
{ MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
{ MT_TSO_CTRL, 0x00000000 },
{ MT_BB_PA_MODE_CFG1, 0x00500055 },
{ MT_RF_PA_MODE_CFG1, 0x00500055 },
{ MT_TX_ALC_CFG_0, 0x2F2F000C },
{ MT_TX0_BB_GAIN_ATTEN, 0x00000000 },
{ MT_TX_PWR_CFG_0, 0x3A3A3A3A },
{ MT_TX_PWR_CFG_1, 0x3A3A3A3A },
{ MT_TX_PWR_CFG_2, 0x3A3A3A3A },
{ MT_TX_PWR_CFG_3, 0x3A3A3A3A },
{ MT_TX_PWR_CFG_4, 0x3A3A3A3A },
{ MT_TX_PWR_CFG_7, 0x3A3A3A3A },
{ MT_TX_PWR_CFG_8, 0x0000003A },
{ MT_TX_PWR_CFG_9, 0x0000003A },
{ 0x150C, 0x00000002 },
{ 0x1238, 0x001700C8 },
{ MT_LDO_CTRL_0, 0x00A647B6 },
{ MT_LDO_CTRL_1, 0x6B006464 },
{ MT_HT_BASIC_RATE, 0x00004003 },
{ MT_HT_CTRL_CFG, 0x000001FF },
{ MT_TXOP_HLDR_ET, 0x00000000 },
{ MT_PN_PAD_MODE, 0x00000003 },
};
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
{MT_BBP(CORE, 1), 0x00000002},
{MT_BBP(CORE, 4), 0x00000000},
{MT_BBP(CORE, 24), 0x00000000},
{MT_BBP(CORE, 32), 0x4003000a},
{MT_BBP(CORE, 42), 0x00000000},
{MT_BBP(CORE, 44), 0x00000000},
{MT_BBP(IBI, 11), 0x00000080},
/*
0x2300[5] Default Antenna:
0 for WIFI main antenna
1 for WIFI aux antenna
*/
{MT_BBP(AGC, 0), 0x00021400},
{MT_BBP(AGC, 1), 0x00000003},
{MT_BBP(AGC, 2), 0x003A6464},
{MT_BBP(AGC, 15), 0x88A28CB8},
{MT_BBP(AGC, 22), 0x00001E21},
{MT_BBP(AGC, 23), 0x0000272C},
{MT_BBP(AGC, 24), 0x00002F3A},
{MT_BBP(AGC, 25), 0x8000005A},
{MT_BBP(AGC, 26), 0x007C2005},
{MT_BBP(AGC, 34), 0x000A0C0C},
{MT_BBP(AGC, 37), 0x2121262C},
{MT_BBP(AGC, 41), 0x38383E45},
{MT_BBP(AGC, 57), 0x00001010},
{MT_BBP(AGC, 59), 0xBAA20E96},
{MT_BBP(AGC, 63), 0x00000001},
{MT_BBP(TXC, 0), 0x00280403},
{MT_BBP(TXC, 1), 0x00000000},
{MT_BBP(RXC, 1), 0x00000012},
{MT_BBP(RXC, 2), 0x00000011},
{MT_BBP(RXC, 3), 0x00000005},
{MT_BBP(RXC, 4), 0x00000000},
{MT_BBP(RXC, 5), 0xF977C4EC},
{MT_BBP(RXC, 7), 0x00000090},
{MT_BBP(TXO, 8), 0x00000000},
{MT_BBP(TXBE, 0), 0x00000000},
{MT_BBP(TXBE, 4), 0x00000004},
{MT_BBP(TXBE, 6), 0x00000000},
{MT_BBP(TXBE, 8), 0x00000014},
{MT_BBP(TXBE, 9), 0x20000000},
{MT_BBP(TXBE, 10), 0x00000000},
{MT_BBP(TXBE, 12), 0x00000000},
{MT_BBP(TXBE, 13), 0x00000000},
{MT_BBP(TXBE, 14), 0x00000000},
{MT_BBP(TXBE, 15), 0x00000000},
{MT_BBP(TXBE, 16), 0x00000000},
{MT_BBP(TXBE, 17), 0x00000000},
{MT_BBP(RXFE, 1), 0x00008800}, /* Add for E3 */
{MT_BBP(RXFE, 3), 0x00000000},
{MT_BBP(RXFE, 4), 0x00000000},
{MT_BBP(RXO, 13), 0x00000092},
{MT_BBP(RXO, 14), 0x00060612},
{MT_BBP(RXO, 15), 0xC8321B18},
{MT_BBP(RXO, 16), 0x0000001E},
{MT_BBP(RXO, 17), 0x00000000},
{MT_BBP(RXO, 18), 0xCC00A993},
{MT_BBP(RXO, 19), 0xB9CB9CB9},
{MT_BBP(RXO, 20), 0x26c00057},
{MT_BBP(RXO, 21), 0x00000001},
{MT_BBP(RXO, 24), 0x00000006},
{ MT_BBP(CORE, 1), 0x00000002 },
{ MT_BBP(CORE, 4), 0x00000000 },
{ MT_BBP(CORE, 24), 0x00000000 },
{ MT_BBP(CORE, 32), 0x4003000a },
{ MT_BBP(CORE, 42), 0x00000000 },
{ MT_BBP(CORE, 44), 0x00000000 },
{ MT_BBP(IBI, 11), 0x0FDE8081 },
{ MT_BBP(AGC, 0), 0x00021400 },
{ MT_BBP(AGC, 1), 0x00000003 },
{ MT_BBP(AGC, 2), 0x003A6464 },
{ MT_BBP(AGC, 15), 0x88A28CB8 },
{ MT_BBP(AGC, 22), 0x00001E21 },
{ MT_BBP(AGC, 23), 0x0000272C },
{ MT_BBP(AGC, 24), 0x00002F3A },
{ MT_BBP(AGC, 25), 0x8000005A },
{ MT_BBP(AGC, 26), 0x007C2005 },
{ MT_BBP(AGC, 33), 0x00003238 },
{ MT_BBP(AGC, 34), 0x000A0C0C },
{ MT_BBP(AGC, 37), 0x2121262C },
{ MT_BBP(AGC, 41), 0x38383E45 },
{ MT_BBP(AGC, 57), 0x00001010 },
{ MT_BBP(AGC, 59), 0xBAA20E96 },
{ MT_BBP(AGC, 63), 0x00000001 },
{ MT_BBP(TXC, 0), 0x00280403 },
{ MT_BBP(TXC, 1), 0x00000000 },
{ MT_BBP(RXC, 1), 0x00000012 },
{ MT_BBP(RXC, 2), 0x00000011 },
{ MT_BBP(RXC, 3), 0x00000005 },
{ MT_BBP(RXC, 4), 0x00000000 },
{ MT_BBP(RXC, 5), 0xF977C4EC },
{ MT_BBP(RXC, 7), 0x00000090 },
{ MT_BBP(TXO, 8), 0x00000000 },
{ MT_BBP(TXBE, 0), 0x00000000 },
{ MT_BBP(TXBE, 4), 0x00000004 },
{ MT_BBP(TXBE, 6), 0x00000000 },
{ MT_BBP(TXBE, 8), 0x00000014 },
{ MT_BBP(TXBE, 9), 0x20000000 },
{ MT_BBP(TXBE, 10), 0x00000000 },
{ MT_BBP(TXBE, 12), 0x00000000 },
{ MT_BBP(TXBE, 13), 0x00000000 },
{ MT_BBP(TXBE, 14), 0x00000000 },
{ MT_BBP(TXBE, 15), 0x00000000 },
{ MT_BBP(TXBE, 16), 0x00000000 },
{ MT_BBP(TXBE, 17), 0x00000000 },
{ MT_BBP(RXFE, 1), 0x00008800 },
{ MT_BBP(RXFE, 3), 0x00000000 },
{ MT_BBP(RXFE, 4), 0x00000000 },
{ MT_BBP(RXO, 13), 0x00000192 },
{ MT_BBP(RXO, 14), 0x00060612 },
{ MT_BBP(RXO, 15), 0xC8321B18 },
{ MT_BBP(RXO, 16), 0x0000001E },
{ MT_BBP(RXO, 17), 0x00000000 },
{ MT_BBP(RXO, 18), 0xCC00A993 },
{ MT_BBP(RXO, 19), 0xB9CB9CB9 },
{ MT_BBP(RXO, 20), 0x26c00057 },
{ MT_BBP(RXO, 21), 0x00000001 },
{ MT_BBP(RXO, 24), 0x00000006 },
{ MT_BBP(RXO, 28), 0x0000003F },
};
static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 8), 0x0E344EF0}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 8), 0x122C54F2}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 4), 0x1FEDA049 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 4), 0x1FECA054 } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 14), 0x310F2E39}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 14), 0x310F2A3F}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 6), 0x00000045 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 6), 0x0000000A } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 32), 0x00003230}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 32), 0x0000181C}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 8), 0x16344EF0 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 8), 0x122C54F2 } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 33), 0x00003240}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 33), 0x00003218}},
{ RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 12), 0x05052879 } },
{ RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 12), 0x050528F9 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 12), 0x050528F9 } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 35), 0x11112016}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 35), 0x11112016}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 13), 0x35050004 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 13), 0x2C3A0406 } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXO, 28), 0x0000008A}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXO, 28), 0x0000008A}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 14), 0x310F2E3C } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 14), 0x310F2A3F } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 4), 0x1FEDA049}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 4), 0x1FECA054}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 26), 0x007C2005 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 26), 0x007C2005 } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 6), 0x00000045}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 6), 0x0000000A}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 27), 0x000000E1 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 27), 0x000000EC } },
{RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 12), 0x05052879}},
{RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 12), 0x050528F9}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 12), 0x050528F9}},
{ RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 28), 0x00060806 } },
{ RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00050806 } },
{ RF_A_BAND | RF_BW_40, { MT_BBP(AGC, 28), 0x00060801 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_80, { MT_BBP(AGC, 28), 0x00060806 } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 13), 0x35050004}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 13), 0x2C3A0406}},
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXO, 28), 0x0000008A } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 27), 0x000000E1}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 27), 0x000000EC}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 31), 0x00000E23 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 31), 0x00000E13 } },
{RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 28), 0x00060806}},
{RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00050806}},
{RF_A_BAND | RF_BW_40, {MT_BBP(AGC, 28), 0x00060801}},
{RF_A_BAND | RF_BW_20 | RF_BW_80, {MT_BBP(AGC, 28), 0x00060806}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 32), 0x00003218 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 32), 0x0000181C } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 31), 0x00000F23}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 31), 0x00000F13}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 33), 0x00003240 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 33), 0x00003218 } },
{RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 39), 0x2A2A3036}},
{RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A2C36}},
{RF_A_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 39), 0x2A2A3036}},
{RF_A_BAND | RF_BW_80, {MT_BBP(AGC, 39), 0x2A2A2A36}},
{ RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 35), 0x11111616 } },
{ RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 35), 0x11111516 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 35), 0x11111111 } },
{RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 43), 0x27273438}},
{RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 43), 0x27272D38}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 43), 0x27272B30}},
{ RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 39), 0x2A2A3036 } },
{ RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 39), 0x2A2A2C36 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 39), 0x2A2A2A2A } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 51), 0x17171C1C}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 51), 0xFFFFFFFF}},
{ RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 43), 0x27273438 } },
{ RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 43), 0x27272D38 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 43), 0x27271A1A } },
{RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 53), 0x26262A2F}},
{RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 53), 0x2626322F}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 53), 0xFFFFFFFF}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 51), 0x17171C1C } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 51), 0xFFFFFFFF } },
{RF_G_BAND | RF_BW_20, {MT_BBP(AGC, 55), 0x40404E58}},
{RF_G_BAND | RF_BW_40, {MT_BBP(AGC, 55), 0x40405858}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 55), 0xFFFFFFFF}},
{ RF_G_BAND | RF_BW_20, { MT_BBP(AGC, 53), 0x26262A2F } },
{ RF_G_BAND | RF_BW_40, { MT_BBP(AGC, 53), 0x2626322F } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 53), 0xFFFFFFFF } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(AGC, 58), 0x00001010}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(AGC, 58), 0x00000000}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 55), 0x40404040 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 55), 0xFFFFFFFF } },
{RF_G_BAND | RF_BW_20 | RF_BW_40, {MT_BBP(RXFE, 0), 0x3D5000E0}},
{RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, {MT_BBP(RXFE, 0), 0x895000E0}},
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 58), 0x00001010 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 58), 0x00000000 } },
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(RXFE, 0), 0x3D5000E0 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXFE, 0), 0x895000E0 } },
};
static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
{MT_BBP(CAL, 47), 0x000010F0 },
{MT_BBP(CAL, 48), 0x00008080 },
{MT_BBP(CAL, 49), 0x00000F07 },
{MT_BBP(CAL, 50), 0x00000040 },
{MT_BBP(CAL, 51), 0x00000404 },
{MT_BBP(CAL, 52), 0x00080803 },
{MT_BBP(CAL, 53), 0x00000704 },
{MT_BBP(CAL, 54), 0x00002828 },
{MT_BBP(CAL, 55), 0x00005050 },
{ MT_BBP(CAL, 47), 0x000010F0 },
{ MT_BBP(CAL, 48), 0x00008080 },
{ MT_BBP(CAL, 49), 0x00000F07 },
{ MT_BBP(CAL, 50), 0x00000040 },
{ MT_BBP(CAL, 51), 0x00000404 },
{ MT_BBP(CAL, 52), 0x00080803 },
{ MT_BBP(CAL, 53), 0x00000704 },
{ MT_BBP(CAL, 54), 0x00002828 },
{ MT_BBP(CAL, 55), 0x00005050 },
};
#endif
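
The tables above are flat { register, value } pairs; the driver replays them either one mt76_wr() at a time or in bulk through mt76_wr_rp(), as seen later in this diff. A minimal standalone sketch of that pattern, with a hypothetical write_reg() standing in for the real MMIO accessor:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_pair {
	uint32_t reg;
	uint32_t value;
};

/* Hypothetical stand-in for the driver's mt76_wr() MMIO write. */
static void write_reg(uint32_t reg, uint32_t value)
{
	printf("wr 0x%04lx <- 0x%08lx\n",
	       (unsigned long)reg, (unsigned long)value);
}

/* Replay a whole init table, as mt76_wr_rp() does for the tables above. */
static void apply_reg_table(const struct reg_pair *tab, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		write_reg(tab[i].reg, tab[i].value);
}

int main(void)
{
	/* two entries borrowed from mt76x0_mac_reg_table above */
	static const struct reg_pair tab[] = {
		{ 0x150C, 0x00000002 },
		{ 0x1238, 0x001700C8 },
	};

	apply_reg_table(tab, sizeof(tab) / sizeof(tab[0]));
	return 0;
}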

View File

@ -13,13 +13,13 @@
* GNU General Public License for more details.
*/
#include "mt76x0.h"
#include "trace.h"
#include "../mt76x02_util.h"
#include <linux/etherdevice.h>
void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
int ht_mode)
#include "mt76x0.h"
#include "trace.h"
void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
int ht_mode)
{
int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
@ -77,7 +77,7 @@ void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
}
void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb)
{
if (short_preamb)
mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
@ -85,7 +85,7 @@ void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb)
mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
}
void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval)
{
u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
@ -105,7 +105,7 @@ void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval)
MT_BEACON_TIME_CFG_TBTT_EN;
}
static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
static void mt76x0_check_mac_err(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, 0x10f4);
@ -120,7 +120,7 @@ static void mt76x0_check_mac_err(struct mt76x0_dev *dev)
}
void mt76x0_mac_work(struct work_struct *work)
{
struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
mac_work.work);
struct {
u32 addr_base;
@ -171,7 +171,7 @@ void mt76x0_mac_work(struct work_struct *work)
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
}
void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev)
{
struct ieee80211_sta *sta;
struct mt76_wcid *wcid;
@ -195,67 +195,3 @@ void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev)
mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
}
static void
mt76x0_rx_monitor_beacon(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi,
u16 rate, int rssi)
{
dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
dev->avg_rssi = ((dev->avg_rssi * 15) / 16 + (rssi << 8)) / 256;
}
static int
mt76x0_rx_is_our_beacon(struct mt76x0_dev *dev, u8 *data)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
return ieee80211_is_beacon(hdr->frame_control) &&
ether_addr_equal(hdr->addr2, dev->ap_bssid);
}
u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
void *rxi)
{
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
struct mt76x02_rxwi *rxwi = rxi;
u32 len, ctl = le32_to_cpu(rxwi->ctl);
u16 rate = le16_to_cpu(rxwi->rate);
int rssi, pad_len = 0;
len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
if (WARN_ON(len < 10))
return 0;
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
status->flag |= RX_FLAG_DECRYPTED;
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
}
if (rxwi->rxinfo & MT_RXINFO_L2PAD)
pad_len += 2;
mt76x02_remove_hdr_pad(skb, pad_len);
pskb_trim(skb, len);
status->chains = BIT(0);
rssi = mt76x0_phy_get_rssi(dev, rxwi);
status->chain_signal[0] = status->signal = rssi;
status->freq = dev->mt76.chandef.chan->center_freq;
status->band = dev->mt76.chandef.chan->band;
mt76x02_mac_process_rate(status, rate);
spin_lock_bh(&dev->con_mon_lock);
if (mt76x0_rx_is_our_beacon(dev, skb->data)) {
mt76x0_rx_monitor_beacon(dev, rxwi, rate, rssi);
} else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST)) {
if (dev->avg_rssi == 0)
dev->avg_rssi = rssi;
else
dev->avg_rssi = (dev->avg_rssi * 15) / 16 + rssi / 16;
}
spin_unlock_bh(&dev->con_mon_lock);
return len;
}
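
The connection-monitor code being removed here tracks signal strength with a 1/16-weight integer moving average, avg <- (15 * avg + sample) / 16; the beacon branch additionally scales the sample by 256 to keep 8 fractional bits. A standalone sketch of the unicast variant, with made-up dBm values:

#include <stdio.h>

/* Integer EMA with alpha = 1/16, mirroring the unicast branch above.
 * Truncating integer division makes it slightly biased, but it is
 * cheap and converges toward the new signal level step by step.
 */
static int ema_update(int avg, int sample)
{
	return (avg * 15) / 16 + sample / 16;
}

int main(void)
{
	int avg = -80;
	int i;

	/* a run of stronger frames pulls the average up a little at a time */
	for (i = 0; i < 8; i++) {
		avg = ema_update(avg, -50);
		printf("avg = %d dBm\n", avg);
	}
	return 0;
}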

View File

@ -1,20 +0,0 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT76_MAC_H
#define __MT76_MAC_H
u32 mt76x0_mac_process_rx(struct mt76x0_dev *dev, struct sk_buff *skb,
void *rxi);
#endif

View File

@ -13,49 +13,12 @@
* GNU General Public License for more details.
*/
#include "mt76x0.h"
#include "mac.h"
#include "../mt76x02_util.h"
#include <linux/etherdevice.h>
#include "mt76x0.h"
static int mt76x0_start(struct ieee80211_hw *hw)
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x0_dev *dev = hw->priv;
int ret;
mutex_lock(&dev->mt76.mutex);
ret = mt76x0_mac_start(dev);
if (ret)
goto out;
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
mutex_unlock(&dev->mt76.mutex);
return ret;
}
static void mt76x0_stop(struct ieee80211_hw *hw)
{
struct mt76x0_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x0_mac_stop(dev);
mutex_unlock(&dev->mt76.mutex);
}
static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x0_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
int ret = 0;
mutex_lock(&dev->mt76.mutex);
@ -66,29 +29,43 @@ static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
ieee80211_wake_queues(hw);
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
dev->mt76.txpower_conf = hw->conf.power_level * 2;
if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
mt76x0_phy_set_txpower(dev);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
dev->mt76.rxfilter |= MT_RX_FILTR_CFG_PROMISC;
else
dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;
mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
}
mutex_unlock(&dev->mt76.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x0_config);
static void
mt76x0_addr_wr(struct mt76x0_dev *dev, const u32 offset, const u8 *addr)
mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr)
{
mt76_wr(dev, offset, get_unaligned_le32(addr));
mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
}
static void
mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mt76x0_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
if (changed & BSS_CHANGED_ASSOC)
mt76x0_phy_con_cal_onoff(dev, info);
if (changed & BSS_CHANGED_BSSID) {
mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
@ -130,24 +107,23 @@ mt76x0_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
static void
mt76x0_sw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac_addr)
{
struct mt76x0_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
cancel_delayed_work_sync(&dev->cal_work);
mt76x0_agc_save(dev);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
static void
mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x0_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
mt76x0_agc_restore(dev);
clear_bit(MT76_SCANNING, &dev->mt76.state);
@ -155,33 +131,14 @@ mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
static int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct mt76x0_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
return 0;
}
const struct ieee80211_ops mt76x0_ops = {
.tx = mt76x0_tx,
.start = mt76x0_start,
.stop = mt76x0_stop,
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.config = mt76x0_config,
.configure_filter = mt76x02_configure_filter,
.bss_info_changed = mt76x0_bss_info_changed,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76x0_sw_scan,
.sw_scan_complete = mt76x0_sw_scan_complete,
.ampdu_action = mt76x02_ampdu_action,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.set_rts_threshold = mt76x0_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
};
EXPORT_SYMBOL_GPL(mt76x0_set_rts_threshold);

View File

@ -17,7 +17,7 @@
#include "../mt76x02_mcu.h"
struct mt76x0_dev;
struct mt76x02_dev;
#define MT_MCU_IVB_SIZE 0x40
#define MT_MCU_DLM_OFFSET 0x80000
@ -41,4 +41,11 @@ enum mcu_calibrate {
MCU_CAL_TX_GROUP_DELAY,
};
int mt76x0e_mcu_init(struct mt76x02_dev *dev);
int mt76x0u_mcu_init(struct mt76x02_dev *dev);
static inline int mt76x0_firmware_running(struct mt76x02_dev *dev)
{
return mt76_rr(dev, MT_MCU_COM_REG0) == 1;
}
#endif
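
mt76x0_firmware_running() is the degenerate case of the poll idiom used throughout this series (mt76_poll(), mt76_poll_msec()): read a register until a masked field matches an expected value or a timeout expires. A standalone sketch of that idiom, with a simulated read_reg() in place of mt76_rr():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated MMIO read: pretend the MCU ready flag appears after a few
 * polls; the real driver reads hardware through mt76_rr().
 */
static uint32_t read_reg(uint32_t reg)
{
	static int calls;

	(void)reg;
	return ++calls >= 3 ? 1 : 0;
}

/* Poll until (reg & mask) == val or the timeout expires; same contract
 * as the driver's mt76_poll_msec(): true on success, false on timeout.
 * The per-iteration 1 ms sleep is elided in this sketch.
 */
static bool poll_reg(uint32_t reg, uint32_t mask, uint32_t val,
		     unsigned int timeout_ms)
{
	while (timeout_ms--) {
		if ((read_reg(reg) & mask) == val)
			return true;
	}
	return false;
}

int main(void)
{
	/* 0 stands in for MT_MCU_COM_REG0; the address is not shown here */
	printf("fw running: %d\n", poll_reg(0, 0x1, 0x1, 1000));
	return 0;
}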

View File

@ -25,147 +25,60 @@
#include <net/mac80211.h>
#include <linux/debugfs.h>
#include "../mt76.h"
#include "../mt76x02_regs.h"
#include "../mt76x02_mac.h"
#include "../mt76x02.h"
#include "eeprom.h"
#define MT_CALIBRATE_INTERVAL (4 * HZ)
#define MT_FREQ_CAL_INIT_DELAY (30 * HZ)
#define MT_FREQ_CAL_CHECK_INTERVAL (10 * HZ)
#define MT_FREQ_CAL_ADJ_INTERVAL (HZ / 2)
#define MT_BBP_REG_VERSION 0x00
#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */
#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
struct mac_stats {
u64 rx_stat[6];
u64 tx_stat[6];
u64 aggr_stat[2];
u64 aggr_n[32];
u64 zero_len_del[2];
};
struct mt76x0_eeprom_params;
#define MT_EE_TEMPERATURE_SLOPE 39
#define MT_FREQ_OFFSET_INVALID -128
/* addr req mask */
#define MT_VEND_TYPE_EEPROM BIT(31)
#define MT_VEND_TYPE_CFG BIT(30)
#define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
#define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n))
enum mt_bw {
MT_BW_20,
MT_BW_40,
};
/**
* struct mt76x0_dev - adapter structure
* @lock: protects @wcid->tx_rate.
* @mac_lock: locks out mac80211's tx status and rx paths.
* @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi.
* @mutex: ensures exclusive access from mac80211 callbacks.
* @reg_atomic_mutex: ensures atomicity of indirect register accesses
* (accesses to RF and BBP).
* @hw_atomic_mutex: ensures exclusive access to HW during critical
* operations (power management, channel switch).
*/
struct mt76x0_dev {
struct mt76_dev mt76; /* must be first */
u8 data[32];
struct delayed_work cal_work;
struct delayed_work mac_work;
spinlock_t mac_lock;
const u16 *beacon_offsets;
struct mt76x0_eeprom_params *ee;
struct mutex reg_atomic_mutex;
struct mutex hw_atomic_mutex;
u32 debugfs_reg;
atomic_t avg_ampdu_len;
/* Connection monitoring things */
spinlock_t con_mon_lock;
u8 ap_bssid[ETH_ALEN];
s8 bcn_freq_off;
u8 bcn_phy_mode;
int avg_rssi; /* starts at 0 and converges */
u8 agc_save;
u16 chainmask;
struct mac_stats stats;
};
extern const struct ieee80211_ops mt76x0_ops;
static inline bool is_mt7610e(struct mt76x0_dev *dev)
static inline bool is_mt7610e(struct mt76x02_dev *dev)
{
/* TODO */
return false;
}
void mt76x0_init_debugfs(struct mt76x0_dev *dev);
/* Compatibility with mt76 */
#define mt76_rmw_field(_dev, _reg, _field, _val) \
mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
void mt76x0_init_debugfs(struct mt76x02_dev *dev);
/* Init */
struct mt76x0_dev *
mt76x0_alloc_device(struct device *pdev, const struct mt76_driver_ops *drv_ops);
int mt76x0_init_hardware(struct mt76x0_dev *dev);
int mt76x0_register_device(struct mt76x0_dev *dev);
void mt76x0_cleanup(struct mt76x0_dev *dev);
void mt76x0_chip_onoff(struct mt76x0_dev *dev, bool enable, bool reset);
struct mt76x02_dev *
mt76x0_alloc_device(struct device *pdev,
const struct mt76_driver_ops *drv_ops,
const struct ieee80211_ops *ops);
int mt76x0_init_hardware(struct mt76x02_dev *dev);
int mt76x0_register_device(struct mt76x02_dev *dev);
void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset);
int mt76x0_mac_start(struct mt76x0_dev *dev);
void mt76x0_mac_stop(struct mt76x0_dev *dev);
int mt76x0_mac_start(struct mt76x02_dev *dev);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed);
void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac_addr);
void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
/* PHY */
void mt76x0_phy_init(struct mt76x0_dev *dev);
int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev);
void mt76x0_agc_save(struct mt76x0_dev *dev);
void mt76x0_agc_restore(struct mt76x0_dev *dev);
int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
void mt76x0_phy_init(struct mt76x02_dev *dev);
int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
void mt76x0_agc_save(struct mt76x02_dev *dev);
void mt76x0_agc_restore(struct mt76x02_dev *dev);
int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev);
int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi);
void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
struct ieee80211_bss_conf *info);
void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
/* MAC */
void mt76x0_mac_work(struct work_struct *work);
void mt76x0_mac_set_protection(struct mt76x0_dev *dev, bool legacy_prot,
void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
int ht_mode);
void mt76x0_mac_set_short_preamble(struct mt76x0_dev *dev, bool short_preamb);
void mt76x0_mac_config_tsf(struct mt76x0_dev *dev, bool enable, int interval);
void mt76x0_mac_set_ampdu_factor(struct mt76x0_dev *dev);
void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb);
void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval);
void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev);
/* TX */
void mt76x0_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
void mt76x0_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
int mt76x0_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);
#endif

View File

@ -19,12 +19,113 @@
#include <linux/pci.h>
#include "mt76x0.h"
#include "mcu.h"
static int mt76x0e_start(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
mt76x02_mac_start(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
0, 1000))
dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
mt76x0_mac_stop(dev);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
0, 1000))
dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
}
static void mt76x0e_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x0e_stop_hw(dev);
mutex_unlock(&dev->mt76.mutex);
}
static const struct ieee80211_ops mt76x0e_ops = {
.tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
.config = mt76x0_config,
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.configure_filter = mt76x02_configure_filter,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
{
int err;
mt76x0_chip_onoff(dev, true, false);
if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
mt76x02_dma_disable(dev);
err = mt76x0e_mcu_init(dev);
if (err < 0)
return err;
err = mt76x02_dma_init(dev);
if (err < 0)
return err;
err = mt76x0_init_hardware(dev);
if (err < 0)
return err;
if (mt76_chip(&dev->mt76) == 0x7610) {
u16 val;
mt76_clear(dev, MT_COEXCFG0, BIT(0));
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) {
u32 data;
/* set external PA I/O
* current to 16mA
*/
data = mt76_rr(dev, 0x11c);
data |= 0xc03;
mt76_wr(dev, 0x11c, data);
}
}
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
return 0;
}
static int
mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mt76x0_dev *dev;
int ret = -ENODEV;
struct mt76x02_dev *dev;
int ret;
ret = pcim_enable_device(pdev);
if (ret)
@ -40,7 +141,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
dev = mt76x0_alloc_device(&pdev->dev, NULL);
dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops);
if (!dev)
return -ENOMEM;
@ -49,22 +150,40 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
/* error: */
ret = mt76x0e_register_device(dev);
if (ret < 0)
goto error;
return 0;
error:
ieee80211_free_hw(mt76_hw(dev));
return ret;
}
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
mt76x02_dma_cleanup(dev);
mt76x02_mcu_cleanup(&dev->mt76);
}
static void
mt76x0e_remove(struct pci_dev *pdev)
{
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
mt76_unregister_device(mdev);
mt76x0e_cleanup(dev);
ieee80211_free_hw(mdev->hw);
}
static const struct pci_device_id mt76x0e_device_table[] = {
{ PCI_DEVICE(0x14c3, 0x7630) },
{ PCI_DEVICE(0x14c3, 0x7650) },
{ },
};

View File

@ -0,0 +1,146 @@
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include "mt76x0.h"
#include "mcu.h"
#define MT7610E_FIRMWARE "mediatek/mt7610e.bin"
#define MT7650E_FIRMWARE "mediatek/mt7650e.bin"
#define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
{
bool is_combo_chip = mt76_chip(&dev->mt76) != 0x7610;
u32 val, ilm_len, dlm_len, offset = 0;
const struct mt76x02_fw_header *hdr;
const struct firmware *fw;
const char *firmware;
const u8 *fw_payload;
int len, err;
if (is_combo_chip)
firmware = MT7650E_FIRMWARE;
else
firmware = MT7610E_FIRMWARE;
err = request_firmware(&fw, firmware, dev->mt76.dev);
if (err)
return err;
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
err = -EIO;
goto out;
}
hdr = (const struct mt76x02_fw_header *)fw->data;
len = sizeof(*hdr);
len += le32_to_cpu(hdr->ilm_len);
len += le32_to_cpu(hdr->dlm_len);
if (fw->size != len) {
err = -EIO;
goto out;
}
fw_payload = fw->data + sizeof(*hdr);
val = le16_to_cpu(hdr->fw_ver);
dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
val = le16_to_cpu(hdr->fw_ver);
dev_dbg(dev->mt76.dev,
"Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
le16_to_cpu(hdr->build_ver), hdr->build_time);
if (is_combo_chip && !mt76_poll(dev, MT_MCU_SEMAPHORE_00, 1, 1, 600)) {
dev_err(dev->mt76.dev,
"Could not get hardware semaphore for loading fw\n");
err = -ETIMEDOUT;
goto out;
}
/* upload ILM. */
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
ilm_len = le32_to_cpu(hdr->ilm_len);
if (is_combo_chip) {
ilm_len -= MT_MCU_IVB_SIZE;
offset = MT_MCU_IVB_SIZE;
}
dev_dbg(dev->mt76.dev, "loading FW - ILM %u\n", ilm_len);
mt76_wr_copy(dev, MT_MCU_ILM_ADDR + offset, fw_payload + offset,
ilm_len);
/* upload IVB. */
if (is_combo_chip) {
dev_dbg(dev->mt76.dev, "loading FW - IVB %u\n",
MT_MCU_IVB_SIZE);
mt76_wr_copy(dev, MT_MCU_IVB_ADDR, fw_payload, MT_MCU_IVB_SIZE);
}
/* upload DLM. */
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
dlm_len = le32_to_cpu(hdr->dlm_len);
dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
mt76_wr_copy(dev, MT_MCU_ILM_ADDR,
fw_payload + le32_to_cpu(hdr->ilm_len), dlm_len);
/* trigger firmware */
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
if (is_combo_chip)
mt76_wr(dev, MT_MCU_INT_LEVEL, 0x3);
else
mt76_wr(dev, MT_MCU_RESET_CTL, 0x300);
if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 1000)) {
dev_err(dev->mt76.dev, "Firmware failed to start\n");
err = -ETIMEDOUT;
goto out;
}
dev_dbg(dev->mt76.dev, "Firmware running!\n");
out:
if (is_combo_chip)
mt76_wr(dev, MT_MCU_SEMAPHORE_00, 0x1);
release_firmware(fw);
return err;
}
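
The loader above expects the blob to be exactly header + ILM + DLM and prints the nibble-packed fw_ver word as major.minor.patch. A standalone sketch of both checks, assuming the mt76x02_fw_header layout (ilm_len, dlm_len, build_ver, fw_ver, pad, build_time) on a little-endian host:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed header layout; the driver's mt76x02_fw_header uses
 * little-endian fields read through le32_to_cpu()/le16_to_cpu().
 */
struct fw_header {
	uint32_t ilm_len;
	uint32_t dlm_len;
	uint16_t build_ver;
	uint16_t fw_ver;
	uint8_t pad[4];
	char build_time[16];
};

/* The blob must be exactly header + ILM + DLM, as checked above. */
static int fw_size_ok(const struct fw_header *hdr, size_t fw_size)
{
	return fw_size == sizeof(*hdr) + hdr->ilm_len + hdr->dlm_len;
}

int main(void)
{
	/* made-up lengths and version word */
	struct fw_header hdr = {
		.ilm_len = 0xc000,
		.dlm_len = 0x2000,
		.fw_ver = 0x1204,	/* -> 1.2.04 */
	};
	uint16_t v = hdr.fw_ver;

	printf("size ok: %d\n",
	       fw_size_ok(&hdr, sizeof(hdr) + 0xc000 + 0x2000));
	printf("Firmware Version: %d.%d.%02d\n",
	       (v >> 12) & 0xf, (v >> 8) & 0xf, v & 0xf);
	return 0;
}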
int mt76x0e_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
.mcu_msg_alloc = mt76x02_mcu_msg_alloc,
.mcu_send_msg = mt76x02_mcu_msg_send,
};
int err;
dev->mt76.mcu_ops = &mt76x0e_mcu_ops;
err = mt76x0e_load_firmware(dev);
if (err < 0)
return err;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
return 0;
}

View File

@ -21,11 +21,12 @@
#include "phy.h"
#include "initvals.h"
#include "initvals_phy.h"
#include "../mt76x02_phy.h"
#include <linux/etherdevice.h>
static int
mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
{
int ret = 0;
u8 bank, reg;
@ -39,7 +40,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
mutex_lock(&dev->reg_atomic_mutex);
mutex_lock(&dev->phy_mutex);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
ret = -ETIMEDOUT;
@ -54,7 +55,7 @@ mt76x0_rf_csr_wr(struct mt76x0_dev *dev, u32 offset, u8 value)
MT_RF_CSR_CFG_KICK);
trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
out:
mutex_unlock(&dev->reg_atomic_mutex);
mutex_unlock(&dev->phy_mutex);
if (ret < 0)
dev_err(dev->mt76.dev, "Error: RF write %d:%d failed:%d!!\n",
@ -63,8 +64,7 @@ out:
return ret;
}
static int
mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
{
int ret = -ETIMEDOUT;
u32 val;
@ -79,7 +79,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
mutex_lock(&dev->reg_atomic_mutex);
mutex_lock(&dev->phy_mutex);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
@ -99,7 +99,7 @@ mt76x0_rf_csr_rr(struct mt76x0_dev *dev, u32 offset)
trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
}
out:
mutex_unlock(&dev->reg_atomic_mutex);
mutex_unlock(&dev->phy_mutex);
if (ret < 0)
dev_err(dev->mt76.dev, "Error: RF read %d:%d failed:%d!!\n",
@ -109,7 +109,7 @@ out:
}
static int
rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
struct mt76_reg_pair pair = {
@ -125,7 +125,7 @@ rf_wr(struct mt76x0_dev *dev, u32 offset, u8 val)
}
static int
rf_rr(struct mt76x0_dev *dev, u32 offset)
rf_rr(struct mt76x02_dev *dev, u32 offset)
{
int ret;
u32 val;
@ -146,7 +146,7 @@ rf_rr(struct mt76x0_dev *dev, u32 offset)
}
static int
rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
{
int ret;
@ -162,14 +162,14 @@ rf_rmw(struct mt76x0_dev *dev, u32 offset, u8 mask, u8 val)
}
static int
rf_set(struct mt76x0_dev *dev, u32 offset, u8 val)
rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
{
return rf_rmw(dev, offset, 0, val);
}
#if 0
static int
rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
{
return rf_rmw(dev, offset, mask, 0);
}
@ -179,7 +179,7 @@ rf_clear(struct mt76x0_dev *dev, u32 offset, u8 mask)
mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \
tab, ARRAY_SIZE(tab))
int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
{
int i = 20;
u32 val;
@ -200,7 +200,7 @@ int mt76x0_wait_bbp_ready(struct mt76x0_dev *dev)
}
static void
mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
mt76x0_bbp_set_ctrlch(struct mt76x02_dev *dev, enum nl80211_chan_width width,
u8 ctrl)
{
int core_val, agc_val;
@ -226,25 +226,7 @@ mt76x0_bbp_set_ctrlch(struct mt76x0_dev *dev, enum nl80211_chan_width width,
mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
}
int mt76x0_phy_get_rssi(struct mt76x0_dev *dev, struct mt76x02_rxwi *rxwi)
{
s8 lna_gain, rssi_offset;
int val;
if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ) {
lna_gain = dev->ee->lna_gain_2ghz;
rssi_offset = dev->ee->rssi_offset_2ghz[0];
} else {
lna_gain = dev->ee->lna_gain_5ghz[0];
rssi_offset = dev->ee->rssi_offset_5ghz[0];
}
val = rxwi->rssi[0] + rssi_offset - lna_gain;
return val;
}
static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
{
u8 val;
@ -301,14 +283,14 @@ static void mt76x0_vco_cal(struct mt76x0_dev *dev, u8 channel)
}
static void
mt76x0_mac_set_ctrlch(struct mt76x0_dev *dev, bool primary_upper)
mt76x0_mac_set_ctrlch(struct mt76x02_dev *dev, bool primary_upper)
{
mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
primary_upper);
}
static void
mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
{
switch (band) {
case NL80211_BAND_2GHZ:
@ -340,16 +322,12 @@ mt76x0_phy_set_band(struct mt76x0_dev *dev, enum nl80211_band band)
}
}
#define EXT_PA_2G_5G 0x0
#define EXT_PA_5G_ONLY 0x1
#define EXT_PA_2G_ONLY 0x2
#define INT_PA_2G_5G 0x3
static void
mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
{
u16 rf_band = rf_bw_band & 0xff00;
u16 rf_bw = rf_bw_band & 0x00ff;
enum nl80211_band band;
u32 mac_reg;
u8 rf_val;
int i;
@ -496,11 +474,8 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
mac_reg &= ~0xC; /* Clear 0x518[3:2] */
mt76_wr(dev, MT_RF_MISC, mac_reg);
if (dev->ee->pa_type == INT_PA_2G_5G ||
(dev->ee->pa_type == EXT_PA_5G_ONLY && (rf_band & RF_G_BAND)) ||
(dev->ee->pa_type == EXT_PA_2G_ONLY && (rf_band & RF_A_BAND))) {
; /* Internal PA - nothing to do. */
} else {
band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
/*
MT_RF_MISC (offset: 0x0518)
[2]1'b1: enable external A band PA, 1'b0: disable external A band PA
@ -539,7 +514,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band
}
static void
mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_band)
mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
{
int i;
@ -552,20 +527,10 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
if (pair->reg == MT_BBP(AGC, 8)) {
u32 val = pair->value;
u8 gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
if (channel > 14) {
if (channel < 100)
gain -= dev->ee->lna_gain_5ghz[0]*2;
else if (channel < 137)
gain -= dev->ee->lna_gain_5ghz[1]*2;
else
gain -= dev->ee->lna_gain_5ghz[2]*2;
} else {
gain -= dev->ee->lna_gain_2ghz*2;
}
u8 gain;
gain = FIELD_GET(MT_BBP_AGC_GAIN, val);
gain -= dev->cal.rx.lna_gain * 2;
val &= ~MT_BBP_AGC_GAIN;
val |= FIELD_PREP(MT_BBP_AGC_GAIN, gain);
mt76_wr(dev, pair->reg, val);
@ -575,46 +540,27 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x0_dev *dev, u8 channel, u16 rf_bw_ban
}
}
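
The MT_BBP(AGC, 8) hunk above is a standard bitfield read-modify-write: FIELD_GET() pulls the gain out of the register value, the calibrated LNA gain is subtracted, and FIELD_PREP() splices the result back. A standalone sketch of those helpers for one contiguous mask; the field placement below is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Minimal FIELD_GET()/FIELD_PREP() for a contiguous mask; the kernel
 * macro versions live in <linux/bitfield.h>. __builtin_ctz() gives
 * the shift of the mask's lowest set bit (GCC/Clang builtin).
 */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

static uint32_t field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

#define AGC_GAIN 0x00007f00	/* hypothetical gain field placement */

int main(void)
{
	uint32_t val = 0x0E344EF0;	/* sample initval from the tables */
	uint32_t gain = field_get(AGC_GAIN, val);

	gain -= 8 * 2;			/* lna_gain * 2, e.g. lna_gain = 8 */
	val &= ~AGC_GAIN;
	val |= field_prep(AGC_GAIN, gain);
	printf("adjusted: 0x%08lx\n", (unsigned long)val);
	return 0;
}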
#if 0
static void
mt76x0_extra_power_over_mac(struct mt76x0_dev *dev)
static void mt76x0_ant_select(struct mt76x02_dev *dev)
{
u32 val;
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
val = ((mt76_rr(dev, MT_TX_PWR_CFG_1) & 0x00003f00) >> 8);
val |= ((mt76_rr(dev, MT_TX_PWR_CFG_2) & 0x00003f00) << 8);
mt76_wr(dev, MT_TX_PWR_CFG_7, val);
/* TODO: fix VHT */
val = ((mt76_rr(dev, MT_TX_PWR_CFG_3) & 0x0000ff00) >> 8);
mt76_wr(dev, MT_TX_PWR_CFG_8, val);
val = ((mt76_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
mt76_wr(dev, MT_TX_PWR_CFG_9, val);
}
static void
mt76x0_phy_set_tx_power(struct mt76x0_dev *dev, u8 channel, u8 rf_bw_band)
{
u32 val;
int i;
int bw = (rf_bw_band & RF_BW_20) ? 0 : 1;
for (i = 0; i < 4; i++) {
if (channel <= 14)
val = dev->ee->tx_pwr_cfg_2g[i][bw];
else
val = dev->ee->tx_pwr_cfg_5g[i][bw];
mt76_wr(dev, MT_TX_PWR_CFG_0 + 4*i, val);
/* single antenna mode */
if (chan->band == NL80211_BAND_2GHZ) {
mt76_rmw(dev, MT_COEXCFG3,
BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
} else {
mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
BIT(4) | BIT(3));
mt76_clear(dev, MT_WLAN_FUN_CTRL,
BIT(6) | BIT(5));
}
mt76x0_extra_power_over_mac(dev);
mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
mt76_clear(dev, MT_COEXCFG0, BIT(2));
}
#endif
static void
mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
{
enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4};
int bw;
@ -644,36 +590,24 @@ mt76x0_bbp_set_bw(struct mt76x0_dev *dev, enum nl80211_chan_width width)
mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false);
}
static void
mt76x0_phy_set_chan_pwr(struct mt76x0_dev *dev, u8 channel)
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
{
static const int mt76x0_tx_pwr_ch_list[] = {
1,2,3,4,5,6,7,8,9,10,11,12,13,14,
36,38,40,44,46,48,52,54,56,60,62,64,
100,102,104,108,110,112,116,118,120,124,126,128,132,134,136,140,
149,151,153,157,159,161,165,167,169,171,173,
42,58,106,122,155
};
int i;
u32 val;
struct mt76_rate_power *t = &dev->mt76.rate_power;
u8 info[2];
for (i = 0; i < ARRAY_SIZE(mt76x0_tx_pwr_ch_list); i++)
if (mt76x0_tx_pwr_ch_list[i] == channel)
break;
mt76x0_get_power_info(dev, info);
mt76x0_get_tx_power_per_rate(dev);
if (WARN_ON(i == ARRAY_SIZE(mt76x0_tx_pwr_ch_list)))
return;
mt76x02_add_rate_power_offset(t, info[0]);
mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info[0]);
val = mt76_rr(dev, MT_TX_ALC_CFG_0);
val &= ~0x3f3f;
val |= dev->ee->tx_pwr_per_chan[i];
val |= 0x2f2f << 16;
mt76_wr(dev, MT_TX_ALC_CFG_0, val);
mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]);
}
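
mt76x0_phy_set_txpower() works in half-dB steps: it biases the per-rate table by the EEPROM offset, clamps it against the configured limit (txpower_conf is power_level * 2, set in mt76x0_config() earlier in this diff), records the resulting maximum, then removes the bias before programming the hardware offset. A worked sketch of that clamp, with made-up numbers:

#include <stdio.h>

/* All values in 0.5 dB units, like mt76_rate_power entries. */
static void clamp_rates(signed char *rates, int n, int offset, int limit)
{
	int i;

	for (i = 0; i < n; i++) {
		int p = rates[i] + offset;	/* add EEPROM offset */

		if (p > limit)			/* limit to txpower_conf */
			p = limit;
		rates[i] = p - offset;		/* store offset-free again */
	}
}

int main(void)
{
	signed char rates[4] = { 36, 34, 32, 28 };	/* hypothetical table */
	int offset = 4;		/* +2 dB from EEPROM */
	int limit = 36;		/* 18 dBm requested -> 36 half-dB steps */
	int i;

	clamp_rates(rates, 4, offset, limit);
	for (i = 0; i < 4; i++)
		printf("rate[%d] = %d (%.1f dBm)\n",
		       i, rates[i], rates[i] / 2.0);
	return 0;
}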
static int
__mt76x0_phy_set_channel(struct mt76x0_dev *dev,
struct cfg80211_chan_def *chandef)
int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
u32 ext_cca_chan[4] = {
[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
@ -707,6 +641,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
freq1 = chandef->center_freq1;
channel = chandef->chan->hw_value;
rf_bw_band = (channel <= 14) ? RF_G_BAND : RF_A_BAND;
dev->mt76.chandef = *chandef;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
@ -733,6 +668,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
mt76x0_bbp_set_bw(dev, chandef->width);
mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
mt76x0_ant_select(dev);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@ -744,6 +680,7 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
mt76x0_phy_set_band(dev, chandef->chan->band);
mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
mt76x0_read_rx_gain(dev);
/* set Japan Tx filter at channel 14 */
val = mt76_rr(dev, MT_BBP(CORE, 1));
@ -762,25 +699,12 @@ __mt76x0_phy_set_channel(struct mt76x0_dev *dev,
if (scan)
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
mt76x0_phy_set_chan_pwr(dev, channel);
mt76x0_phy_set_txpower(dev);
dev->mt76.chandef = *chandef;
return 0;
}
int mt76x0_phy_set_channel(struct mt76x0_dev *dev,
struct cfg80211_chan_def *chandef)
{
int ret;
mutex_lock(&dev->hw_atomic_mutex);
ret = __mt76x0_phy_set_channel(dev, chandef);
mutex_unlock(&dev->hw_atomic_mutex);
return ret;
}
void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
{
u32 tx_alc, reg_val;
u8 channel = dev->mt76.chandef.chan->hw_value;
@ -816,18 +740,18 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x0_dev *dev)
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
}
void mt76x0_agc_save(struct mt76x0_dev *dev)
void mt76x0_agc_save(struct mt76x02_dev *dev)
{
/* Only one RX path */
dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
}
void mt76x0_agc_restore(struct mt76x0_dev *dev)
void mt76x0_agc_restore(struct mt76x02_dev *dev)
{
mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
}
static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
{
u8 rf_b7_73, rf_b0_66, rf_b0_67;
int cycle, temp;
@ -863,7 +787,7 @@ static void mt76x0_temp_sensor(struct mt76x0_dev *dev)
else
sval |= 0xffffff00; /* Negative */
temp = (35 * (sval - dev->ee->temp_off))/ 10 + 25;
temp = (35 * (sval - dev->cal.rx.temp_offset)) / 10 + 25;
done:
rf_wr(dev, MT_RF(7, 73), rf_b7_73);
@ -871,14 +795,17 @@ done:
rf_wr(dev, MT_RF(0, 73), rf_b0_67);
}
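
The sensor readout above is an 8-bit two's-complement sample that gets sign-extended by hand (the |= 0xffffff00) before the linear conversion temp = 35 * (sval - offset) / 10 + 25 degrees C. A standalone sketch of both steps, with a made-up sample and calibration offset:

#include <stdint.h>
#include <stdio.h>

/* Sign-extend an 8-bit raw sample, as the |= 0xffffff00 above does. */
static int sign_extend8(uint8_t raw)
{
	return (raw & 0x80) ? (int)raw - 256 : raw;
}

static int raw_to_celsius(uint8_t raw, int temp_offset)
{
	int sval = sign_extend8(raw);

	return 35 * (sval - temp_offset) / 10 + 25;
}

int main(void)
{
	/* raw 0xF8 is -8; with offset -10 this yields 32 C */
	printf("%d C\n", raw_to_celsius(0xF8, -10));
	return 0;
}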
static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
static void mt76x0_dynamic_vga_tuning(struct mt76x02_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
u32 val, init_vga;
int avg_rssi;
init_vga = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 0x54 : 0x4E;
if (dev->avg_rssi > -60)
init_vga = chandef->chan->band == NL80211_BAND_5GHZ ? 0x54 : 0x4E;
avg_rssi = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
if (avg_rssi > -60)
init_vga -= 0x20;
else if (dev->avg_rssi > -70)
else if (avg_rssi > -70)
init_vga -= 0x10;
val = mt76_rr(dev, MT_BBP(AGC, 8));
@ -889,8 +816,8 @@ static void mt76x0_dynamic_vga_tuning(struct mt76x0_dev *dev)
static void mt76x0_phy_calibrate(struct work_struct *work)
{
struct mt76x0_dev *dev = container_of(work, struct mt76x0_dev,
cal_work.work);
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
cal_work.work);
mt76x0_dynamic_vga_tuning(dev);
mt76x0_temp_sensor(dev);
@ -899,45 +826,7 @@ static void mt76x0_phy_calibrate(struct work_struct *work)
MT_CALIBRATE_INTERVAL);
}
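
mt76x0_dynamic_vga_tuning() above picks the initial VGA gain from the band and then backs it off in two steps as the averaged RSSI rises. The decision logic, isolated as a standalone sketch:

#include <stdio.h>

/* Band-dependent base gain, reduced for strong signals; mirrors the
 * -60/-70 dBm thresholds in mt76x0_dynamic_vga_tuning().
 */
static unsigned int pick_init_vga(int is_5ghz, int avg_rssi)
{
	unsigned int init_vga = is_5ghz ? 0x54 : 0x4E;

	if (avg_rssi > -60)
		init_vga -= 0x20;
	else if (avg_rssi > -70)
		init_vga -= 0x10;
	return init_vga;
}

int main(void)
{
	printf("5 GHz, -55 dBm -> 0x%02x\n", pick_init_vga(1, -55));
	printf("2 GHz, -65 dBm -> 0x%02x\n", pick_init_vga(0, -65));
	printf("2 GHz, -80 dBm -> 0x%02x\n", pick_init_vga(0, -80));
	return 0;
}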
void mt76x0_phy_con_cal_onoff(struct mt76x0_dev *dev,
struct ieee80211_bss_conf *info)
{
/* Start/stop collecting beacon data */
spin_lock_bh(&dev->con_mon_lock);
ether_addr_copy(dev->ap_bssid, info->bssid);
dev->avg_rssi = 0;
dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
spin_unlock_bh(&dev->con_mon_lock);
}
static void
mt76x0_set_rx_chains(struct mt76x0_dev *dev)
{
u32 val;
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~(BIT(3) | BIT(4));
if (dev->chainmask & BIT(1))
val |= BIT(3);
mt76_wr(dev, MT_BBP(AGC, 0), val);
mb();
val = mt76_rr(dev, MT_BBP(AGC, 0));
}
static void
mt76x0_set_tx_dac(struct mt76x0_dev *dev)
{
if (dev->chainmask & BIT(1))
mt76_set(dev, MT_BBP(TXBE, 5), 3);
else
mt76_clear(dev, MT_BBP(TXBE, 5), 3);
}
static void
mt76x0_rf_init(struct mt76x0_dev *dev)
static void mt76x0_rf_init(struct mt76x02_dev *dev)
{
int i;
u8 val;
@ -969,7 +858,8 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
E1: B0.R22<6:0>: xo_cxo<6:0>
E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
*/
rf_wr(dev, MT_RF(0, 22), min_t(u8, dev->ee->rf_freq_off, 0xBF));
rf_wr(dev, MT_RF(0, 22),
min_t(u8, dev->cal.rx.freq_offset, 0xbf));
val = rf_rr(dev, MT_RF(0, 22));
/*
@ -989,23 +879,11 @@ mt76x0_rf_init(struct mt76x0_dev *dev)
rf_set(dev, MT_RF(0, 4), 0x80);
}
static void mt76x0_ant_select(struct mt76x0_dev *dev)
{
/* Single antenna mode. */
mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
mt76_clear(dev, MT_COEXCFG0, BIT(2));
mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
}
void mt76x0_phy_init(struct mt76x0_dev *dev)
void mt76x0_phy_init(struct mt76x02_dev *dev)
{
INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
mt76x0_ant_select(dev);
mt76x0_rf_init(dev);
mt76x0_set_rx_chains(dev);
mt76x0_set_tx_dac(dev);
mt76x02_phy_set_rxpath(&dev->mt76);
mt76x02_phy_set_txdac(&dev->mt76);
}

Some files were not shown because too many files have changed in this diff.